diff --git a/.github/workflows/dependabot-automerge.yml b/.github/workflows/dependabot-automerge.yml
index 0732d87..1342b42 100644
--- a/.github/workflows/dependabot-automerge.yml
+++ b/.github/workflows/dependabot-automerge.yml
@@ -13,42 +13,38 @@ concurrency:
   cancel-in-progress: false
 
 jobs:
-  check-dependabot:
-    timeout-minutes: 5
-    if: >
-      github.event.pull_request.user.login == 'dependabot[bot]' &&
-      github.event.pull_request.base.ref == 'main'
+  get_info:
+    if: github.actor == 'dependabot[bot]'
     runs-on: ubuntu-latest
     outputs:
-      should_process: ${{ steps.check.outputs.should_process }}
+      commit_hash: ${{ steps.get_hash.outputs.commit_hash }}
+      update-type: ${{ steps.metadata.outputs.update-type }}
     steps:
-      - name: Fetch Dependabot metadata
-        id: meta
-        uses: dependabot/fetch-metadata@v2
+      - name: Checkout repository
+        uses: actions/checkout@v5
         with:
-          github-token: ${{ secrets.GH_AUTOMERGE_PAT }}
-
-      - name: Check if upstream submodule
-        id: check
-        run: |
-          if [[ "${{ steps.meta.outputs.package-ecosystem }}" == "submodules" ]] && \
-             [[ "${{ steps.meta.outputs.dependency-names }}" == *"upstream"* ]]; then
-            echo "should_process=true" >> $GITHUB_OUTPUT
-          else
-            echo "should_process=false" >> $GITHUB_OUTPUT
-          fi
-
-  # Generate docs after dependabot updates the submodule
+          submodules: true
+
+      - name: Get PR metadata
+        id: metadata
+        uses: dependabot/fetch-metadata@v2.4.0
+
+      - name: Get submodule commit hash
+        id: get_hash
+        working-directory: ./upstream
+        run: echo "commit_hash=$(git rev-parse HEAD)" >> $GITHUB_OUTPUT
+
   generate-docs:
-    needs: check-dependabot
-    if: needs.check-dependabot.outputs.should_process == 'true'
+    needs: get_info
+    if: github.actor == 'dependabot[bot]'
     uses: ./.github/workflows/pull-from-bazel-build.yml
+    with:
+      bazelCommitHash: ${{ needs.get_info.outputs.commit_hash }}
     secrets: inherit
 
-  # Auto-merge after docs are generated
-  enable-automerge:
-    needs: [check-dependabot, generate-docs]
-    if: needs.check-dependabot.outputs.should_process == 'true'
+  automerge:
+    if: github.actor == 'dependabot[bot]'
+    needs: generate-docs
     runs-on: ubuntu-latest
     steps:
       - name: Enable auto-merge (squash)
diff --git a/.github/workflows/nightly-release.yaml b/.github/workflows/nightly-release.yaml
new file mode 100644
index 0000000..1e892f4
--- /dev/null
+++ b/.github/workflows/nightly-release.yaml
@@ -0,0 +1,181 @@
+name: Nightly Release Documentation Build
+
+on:
+  schedule:
+    - cron: '0 0 * * *' # Run nightly at midnight
+  workflow_dispatch: # Allow manual runs
+
+jobs:
+  setup:
+    runs-on: ubuntu-latest
+    outputs:
+      versions: ${{ steps.get_versions.outputs.versions }}
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v5
+
+      - name: Get versions
+        id: get_versions
+        run: echo "versions=$(jq -c . docs-versions.json)" >> $GITHUB_OUTPUT
+
+  nightly-release:
+    needs: setup
+    strategy:
+      matrix:
+        version: ${{ fromJson(needs.setup.outputs.versions) }}
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+      pull-requests: write
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v5
+        with:
+          submodules: false
+
+      - name: Checkout submodules
+        run: git submodule update --init -- upstream
+
+      - name: Checkout correct submodule version
+        working-directory: upstream
+        run: |
+          git fetch origin
+          if [ "${{ matrix.version }}" == "HEAD" ]; then
+            git checkout master
+          else
+            git checkout "${{ matrix.version }}"
+          fi
+
+      - name: Setup Bazel
+        uses: bazel-contrib/setup-bazel@0.15.0
+        with:
+          bazelisk-cache: true
+          repository-cache: true
+
+      - name: Build reference documentation
+        id: build_ref_docs
+        working-directory: upstream
+        continue-on-error: true
+        run: >
+          bazel build
+          --config=docs
+          --build_metadata=ROLE=DOCS
+          --remote_header=x-buildbuddy-api-key=${{ secrets.BUILDBUDDY_ORG_API_KEY }}
+          --bes_results_url=https://app.buildbuddy.io/invocation/
+          --bes_backend=grpcs://remote.buildbuddy.io
+          --remote_cache=grpcs://remote.buildbuddy.io
+          --remote_timeout=10m
+          //src/main/java/com/google/devtools/build/lib:gen_reference_docs
+
+      - name: Set up Go
+        uses: actions/setup-go@v6
+        with:
+          go-version: '1.25.2'
+
+      - name: Initialize Go module for converter
+        run: |
+          cd html2md_converter
+          go mod init html-to-md-converter
+          go get github.com/JohannesKaufmann/html-to-markdown
+
+      - name: Build HTML to Markdown converter
+        run: |
+          cd html2md_converter
+          go build -o html-to-md main.go
+
+      - name: Convert reference documentation HTML to Markdown
+        if: steps.build_ref_docs.outcome == 'success'
+        run: |
+          ./html2md_converter/html-to-md \
+            -zip upstream/bazel-bin/src/main/java/com/google/devtools/build/lib/reference-docs.zip \
+            -output reference-docs-temp
+
+      - name: Handle failed reference doc build
+        if: steps.build_ref_docs.outcome != 'success'
+        run: |
+          echo "Warning: Could not build reference docs for ${{ matrix.version }}. Reference docs may be incomplete for this version."
+          rm -rf reference-docs-temp
+          mkdir -p reference-docs-temp
+
+      - name: Copy Docs
+        run: |
+          DEST_DIR="."
+          if [ "${{ matrix.version }}" != "HEAD" ]; then
+            DEST_DIR="versions/${{ matrix.version }}"
+          fi
+          echo "Copying docs to directory: $DEST_DIR"
+          ./copy-upstream-docs.sh "$DEST_DIR"
+
+      - name: Upload generated docs as artifact
+        uses: actions/upload-artifact@v5
+        with:
+          name: docs-${{ matrix.version }}
+          path: |
+            ./**/*.mdx
+            !./versions/
+            ./versions/${{ matrix.version }}
+          retention-days: 1
+
+  publish:
+    needs: nightly-release
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v5
+        with:
+          token: ${{ secrets.GH_AUTOMERGE_PAT }}
+
+      - name: Clean up old docs
+        run: |
+          # Remove all generated docs from the root, except for index.mdx and any other non-generated files.
+          find . -maxdepth 1 -name "*.mdx" -not -name "index.mdx" -delete
+          # Remove all old version directories
+          rm -rf versions
+
+      - name: Download all generated docs
+        uses: actions/download-artifact@v6
+
+      - name: Combine all docs
+        run: |
+          # The download action places each artifact in its own directory.
+          # We need to move them to the correct final locations.
+          find . -mindepth 2 -name "*.mdx" -exec mv {} . \;
+
+      - name: Create versioned navigation
+        run: ./docs.json.update.sh
+
+      - name: Configure Git
+        run: |
+          git config user.name "github-actions[bot]"
+          git config user.email "github-actions[bot]@users.noreply.github.com"
+
+      - name: Create Pull Request with updated docs
+        env:
+          GH_TOKEN: ${{ secrets.GH_AUTOMERGE_PAT }}
+        run: |
+          set -euo pipefail
+
+          # Only proceed if there are changes
+          if [[ -z "$(git status --porcelain)" ]]; then
+            echo "No changes detected, skipping PR creation."
+            exit 0
+          fi
+
+          BRANCH_NAME="nightly-doc-update-$(date +%Y-%m-%d)"
+          git checkout -b "$BRANCH_NAME"
+
+          echo "Changes detected, committing and pushing to new branch..."
+          git add -A
+          git commit -m $'chore: nightly documentation update from upstream'
+          git push origin "$BRANCH_NAME" --force
+
+          echo "Creating Pull Request..."
+          gh pr create \
+            --title "Nightly Documentation Update" \
+            --body "Automated nightly update of documentation from the upstream Bazel repository." \
+            --base main \
+            --head "$BRANCH_NAME"
+
+          echo "Enabling auto-merge for the PR..."
+          gh pr merge --auto --squash "$BRANCH_NAME"
diff --git a/.github/workflows/pull-from-bazel-build.yml b/.github/workflows/pull-from-bazel-build.yml
index 6674175..31a413f 100644
--- a/.github/workflows/pull-from-bazel-build.yml
+++ b/.github/workflows/pull-from-bazel-build.yml
@@ -30,6 +30,17 @@ jobs:
       - name: Checkout submodules
         run: git submodule update --init -- upstream
 
+      - name: Get submodule commit hash
+        working-directory: upstream
+        run: git rev-parse HEAD
+
+      - name: Update submodule to latest master (if no specific commit is given)
+        if: ${{ inputs.bazelCommitHash == '' }}
+        working-directory: upstream
+        run: |
+          git fetch origin master
+          git checkout master
+
       - name: Checkout commit of Bazel Build submodule
         if: ${{ inputs.bazelCommitHash != '' }}
         working-directory: upstream
@@ -88,11 +99,8 @@ jobs:
             -zip upstream/bazel-bin/src/main/java/com/google/devtools/build/lib/reference-docs.zip \
             -output reference-docs-temp
 
-      - name: Transform upstream docs to mdx
+      - name: Copy HEAD docs
         run: ./copy-upstream-docs.sh
-
-      - name: Create versioned navigation
-        run: ./docs.json.update.sh
 
       - name: Clean up temporary files
         run: rm -rf reference-docs-temp
diff --git a/6.5.0/about/faq.mdx b/6.5.0/about/faq.mdx
deleted file mode 100644
index 41bf23f..0000000
--- a/6.5.0/about/faq.mdx
+++ /dev/null
@@ -1,208 +0,0 @@
----
-title: 'FAQ'
----
-
-
-If you have questions or need support, see [Getting Help](/help).
-
-## What is Bazel?
-
-Bazel is a tool that automates software builds and tests. Supported build tasks include running compilers and linkers to produce executable programs and libraries, and assembling deployable packages for Android, iOS and other target environments. Bazel is similar to other tools like Make, Ant, Gradle, Buck, Pants and Maven.
-
-## What is special about Bazel?
-
-Bazel was designed to fit the way software is developed at Google. It has the following features:
-
-* Multi-language support: Bazel supports [many languages](/reference/be/overview), and can be extended to support arbitrary programming languages.
-* High-level build language: Projects are described in the `BUILD` language, a concise text format that describes a project as sets of small interconnected libraries, binaries and tests. In contrast, with tools like Make, you have to describe individual files and compiler invocations.
-* Multi-platform support: The same tool and the same `BUILD` files can be used to build software for different architectures, and even different platforms. At Google, we use Bazel to build everything from server applications running on systems in our data centers to client apps running on mobile phones. -* Reproducibility: In `BUILD` files, each library, test and binary must specify its direct dependencies completely. Bazel uses this dependency information to know what must be rebuilt when you make changes to a source file, and which tasks can run in parallel. This means that all builds are incremental and will always produce the same result. -* Scalable: Bazel can handle large builds; at Google, it is common for a server binary to have 100k source files, and builds where no files were changed take about ~200ms. - -## Why doesn’t Google use...? - -* Make, Ninja: These tools give very exact control over what commands get invoked to build files, but it’s up to the user to write rules that are correct. - * Users interact with Bazel on a higher level. For example, Bazel has built-in rules for “Java test”, “C++ binary”, and notions such as “target platform” and “host platform”. These rules have been battle tested to be foolproof. -* Ant and Maven: Ant and Maven are primarily geared toward Java, while Bazel handles multiple languages. Bazel encourages subdividing codebases in smaller reusable units, and can rebuild only ones that need rebuilding. This speeds up development when working with larger codebases. -* Gradle: Bazel configuration files are much more structured than Gradle’s, letting Bazel understand exactly what each action does. This allows for more parallelism and better reproducibility. -* Pants, Buck: Both tools were created and developed by ex-Googlers at Twitter and Foursquare, and Facebook respectively. They have been modeled after Bazel, but their feature sets are different, so they aren’t viable alternatives for us. - -## Where did Bazel come from? - -Bazel is a flavor of the tool that Google uses to build its server software internally. It has expanded to build other software as well, like mobile apps (iOS, Android) that connect to our servers. - -## Did you rewrite your internal tool as open-source? Is it a fork? - -Bazel shares most of its code with the internal tool and its rules are used for millions of builds every day. - -## Why did Google build Bazel? - -A long time ago, Google built its software using large, generated Makefiles. These led to slow and unreliable builds, which began to interfere with our developers’ productivity and the company’s agility. Bazel was a way to solve these problems. - -## Does Bazel require a build cluster? - -Bazel runs build operations locally by default. However, Bazel can also connect to a build cluster for even faster builds and tests. See our documentation on [remote execution and caching](/docs/remote-execution) and [remote caching](/docs/remote-caching) for further details. - -## How does the Google development process work? - -For our server code base, we use the following development workflow: - -* All our server code is in a single, gigantic version control system. -* Everybody builds their software with Bazel. -* Different teams own different parts of the source tree, and make their components available as `BUILD` targets. -* Branching is primarily used for managing releases, so everybody develops their software at the head revision. 
-
-Bazel is a cornerstone of this philosophy: since Bazel requires all dependencies to be fully specified, we can predict which programs and tests are affected by a change, and vet them before submission.
-
-More background on the development process at Google can be found on the [eng tools blog](http://google-engtools.blogspot.com/).
-
-## Why did you open up Bazel?
-
-Building software should be fun and easy. Slow and unpredictable builds take the fun out of programming.
-
-## Why would I want to use Bazel?
-
-* Bazel may give you faster build times because it can recompile only the files that need to be recompiled. Similarly, it can skip re-running tests that it knows haven’t changed.
-* Bazel produces deterministic results. This eliminates skew between incremental and clean builds, laptop and CI system, etc.
-* Bazel can build different client and server apps with the same tool from the same workspace. For example, you can change a client/server protocol in a single commit, and test that the updated mobile app works with the updated server, building both with the same tool, reaping all the aforementioned benefits of Bazel.
-
-## Can I see examples?
-
-Yes; see a [simple example](https://github.com/bazelbuild/bazel/blob/master/examples/cpp/BUILD)
-or read the [Bazel source code](https://github.com/bazelbuild/bazel/blob/master/src/BUILD) for a more complex example.
-
-
-## What is Bazel best at?
-
-Bazel shines at building and testing projects with the following properties:
-
-* Projects with a large codebase
-* Projects written in (multiple) compiled languages
-* Projects that deploy on multiple platforms
-* Projects that have extensive tests
-
-## Where can I run Bazel?
-
-Bazel runs on Linux, macOS (OS X), and Windows.
-
-Porting to other UNIX platforms should be relatively easy, as long as a JDK is available for the platform.
-
-## What should I not use Bazel for?
-
-* Bazel tries to be smart about caching. This means that it is not good for running build operations whose outputs should not be cached. For example, the following steps should not be run from Bazel:
-    * A compilation step that fetches data from the internet.
-    * A test step that connects to the QA instance of your site.
-    * A deployment step that changes your site’s cloud configuration.
-* If your build consists of a few long, sequential steps, Bazel may not be able to help much. You’ll get more speed by breaking long steps into smaller, discrete targets that Bazel can run in parallel.
-
-## How stable is Bazel’s feature set?
-
-The core features (C++, Java, and shell rules) have extensive use inside Google, so they are thoroughly tested and have very little churn. Similarly, we test new versions of Bazel across hundreds of thousands of targets every day to find regressions, and we release new versions multiple times every month.
-
-In short, except for features marked as experimental, Bazel should Just Work. Changes to non-experimental rules will be backward compatible. A more detailed list of feature support statuses can be found in our [support document](/contribute/support).
-
-## How stable is Bazel as a binary?
-
-Inside Google, we make sure that Bazel crashes are very rare. This should also hold for our open source codebase.
-
-## How can I start using Bazel?
-
-See [Getting Started](/start/).
-
-## Doesn’t Docker solve the reproducibility problems?
-
-With Docker you can easily create sandboxes with fixed OS releases, for example, Ubuntu 12.04, Fedora 21. This solves the problem of reproducibility for the system environment – that is, “which version of /usr/bin/c++ do I need?”
-
-Docker does not address reproducibility with regard to changes in the source code. Running Make with an imperfectly written Makefile inside a Docker container can still yield unpredictable results.
-
-Inside Google, we check tools into source control for reproducibility. In this way, we can vet changes to tools (“upgrade GCC to 4.6.1”) with the same mechanism as changes to base libraries (“fix bounds check in OpenSSL”).
-
-## Can I build binaries for deployment on Docker?
-
-With Bazel, you can build standalone, statically linked binaries in C/C++, and self-contained jar files for Java. These run with few dependencies on normal UNIX systems, and as such should be simple to install inside a Docker container.
-
-Bazel has conventions for structuring more complex programs, for example, a Java program that consumes a set of data files, or runs another program as subprocess. It is possible to package up such environments as standalone archives, so they can be deployed on different systems, including Docker images.
-
-## Can I build Docker images with Bazel?
-
-Yes, you can use our [Docker rules](https://github.com/bazelbuild/rules_docker) to build reproducible Docker images.
-
-## Will Bazel make my builds reproducible automatically?
-
-For Java and C++ binaries, yes, assuming you do not change the toolchain. If you have build steps that involve custom recipes (for example, executing binaries through a shell script inside a rule), you will need to take some extra care:
-
-* Do not use dependencies that were not declared. Sandboxed execution (–spawn\_strategy=sandboxed, only on Linux) can help find undeclared dependencies.
-* Avoid storing timestamps and user-IDs in generated files. ZIP files and other archives are especially prone to this.
-* Avoid connecting to the network. Sandboxed execution can help here too.
-* Avoid processes that use random numbers, in particular, dictionary traversal is randomized in many programming languages.
-
-## Do you have binary releases?
-
-Yes, you can find the latest [release binaries](https://github.com/bazelbuild/bazel/releases/latest) and review our [release policy](/release/)
-
-## I use Eclipse/IntelliJ/XCode. How does Bazel interoperate with IDEs?
-
-For IntelliJ, check out the [IntelliJ with Bazel plugin](https://ij.bazel.build/).
-
-For XCode, check out [Tulsi](http://tulsi.bazel.build/).
-
-For Eclipse, check out [E4B plugin](https://github.com/bazelbuild/e4b).
-
-For other IDEs, check out the [blog post](https://blog.bazel.build/2016/06/10/ide-support.html) on how these plugins work.
-
-## I use Jenkins/CircleCI/TravisCI. How does Bazel interoperate with CI systems?
-
-Bazel returns a non-zero exit code if the build or test invocation fails, and this should be enough for basic CI integration. Since Bazel does not need clean builds for correctness, the CI system should not be configured to clean before starting a build/test run.
-
-Further details on exit codes are in the [User Manual](/docs/user-manual).
-
-## What future features can we expect in Bazel?
-
-See our [Roadmaps](/community/roadmaps).
-
-## Can I use Bazel for my INSERT LANGUAGE HERE project?
-
-Bazel is extensible. Anyone can add support for new languages. Many languages are supported: see the [build encyclopedia](/reference/be/overview) for a list of recommendations and [awesomebazel.com](https://awesomebazel.com/) for a more comprehensive list.
- -If you would like to develop extensions or learn how they work, see the documentation for [extending Bazel](/rules/concepts). - -## Can I contribute to the Bazel code base? - -See our [contribution guidelines](/contribute/guide). - -## Why isn’t all development done in the open? - -We still have to refactor the interfaces between the public code in Bazel and our internal extensions frequently. This makes it hard to do much development in the open. - -## Are you done open sourcing Bazel? - -Open sourcing Bazel is a work-in-progress. In particular, we’re still working on open sourcing: - -* Many of our unit and integration tests (which should make contributing patches easier). -* Full IDE integration. - -Beyond code, we’d like to eventually have all code reviews, bug tracking, and design decisions happen publicly, with the Bazel community involved. We are not there yet, so some changes will simply appear in the Bazel repository without clear explanation. Despite this lack of transparency, we want to support external developers and collaborate. Thus, we are opening up the code, even though some of the development is still happening internal to Google. Please let us know if anything seems unclear or unjustified as we transition to an open model. - -## Are there parts of Bazel that will never be open sourced? - -Yes, some of the code base either integrates with Google-specific technology or we have been looking for an excuse to get rid of (or is some combination of the two). These parts of the code base are not available on GitHub and probably never will be. - -## How do I contact the team? - -We are reachable at bazel-discuss@googlegroups.com. - -## Where do I report bugs? - -Open an issue [on GitHub](https://github.com/bazelbuild/bazel/issues). - -## What’s up with the word “Blaze” in the codebase? - -This is an internal name for the tool. Please refer to Bazel as Bazel. - -## Why do other Google projects (Android, Chrome) use other build tools? - -Until the first (Alpha) release, Bazel was not available externally, so open source projects such as Chromium and Android could not use it. In addition, the original lack of Windows support was a problem for building Windows applications, such as Chrome. Since the project has matured and become more stable, the [Android Open Source Project](https://source.android.com/) is in the process of migrating to Bazel. - -## How do you pronounce “Bazel”? - -The same way as “basil” (the herb) in US English: “BAY-zel”. It rhymes with “hazel”. IPA: /ˈbeɪzˌəl/ diff --git a/6.5.0/about/intro.mdx b/6.5.0/about/intro.mdx deleted file mode 100644 index 278f945..0000000 --- a/6.5.0/about/intro.mdx +++ /dev/null @@ -1,110 +0,0 @@ ---- -title: 'Intro to Bazel' ---- - - -Bazel is an open-source build and test tool similar to Make, Maven, and Gradle. -It uses a human-readable, high-level build language. Bazel supports projects in -multiple languages and builds outputs for multiple platforms. Bazel supports -large codebases across multiple repositories, and large numbers of users. - -## Benefits - -Bazel offers the following advantages: - -* **High-level build language.** Bazel uses an abstract, human-readable - language to describe the build properties of your project at a high - semantical level. Unlike other tools, Bazel operates on the *concepts* - of libraries, binaries, scripts, and data sets, shielding you from the - complexity of writing individual calls to tools such as compilers and - linkers. 
- -* **Bazel is fast and reliable.** Bazel caches all previously done work and - tracks changes to both file content and build commands. This way, Bazel - knows when something needs to be rebuilt, and rebuilds only that. To further - speed up your builds, you can set up your project to build in a highly - parallel and incremental fashion. - -* **Bazel is multi-platform.** Bazel runs on Linux, macOS, and Windows. Bazel - can build binaries and deployable packages for multiple platforms, including - desktop, server, and mobile, from the same project. - -* **Bazel scales.** Bazel maintains agility while handling builds with 100k+ - source files. It works with multiple repositories and user bases in the tens - of thousands. - -* **Bazel is extensible.** Many [languages](/rules) are - supported, and you can extend Bazel to support any other language or - framework. - -## Using Bazel - -To build or test a project with Bazel, you typically do the following: - -1. **Set up Bazel.** Download and [install Bazel](/install). - -2. **Set up a project [workspace](/concepts/build-ref#workspaces)**, which is a - directory where Bazel looks for build inputs and `BUILD` files, and where it - stores build outputs. - -3. **Write a `BUILD` file**, which tells Bazel what to build and how to - build it. - - You write your `BUILD` file by declaring build targets using - [Starlark](/rules/language), a domain-specific language. (See example - [here](https://github.com/bazelbuild/bazel/blob/master/examples/cpp/BUILD).) - - A build target specifies a set of input artifacts that Bazel will build plus - their dependencies, the build rule Bazel will use to build it, and options - that configure the build rule. - - A build rule specifies the build tools Bazel will use, such as compilers and - linkers, and their configurations. Bazel ships with a number of build rules - covering the most common artifact types in the supported languages on - supported platforms. - -4. **Run Bazel** from the [command line](/reference/command-line-reference). Bazel - places your outputs within the workspace. - -In addition to building, you can also use Bazel to run -[tests](/reference/test-encyclopedia) and [query](/docs/query-how-to) the build -to trace dependencies in your code. - -## Bazel build process - -When running a build or a test, Bazel does the following: - -1. **Loads** the `BUILD` files relevant to the target. - -2. **Analyzes** the inputs and their - [dependencies](/concepts/dependencies), applies the specified build - rules, and produces an [action](/rules/concepts#evaluation-model) - graph. - -3. **Executes** the build actions on the inputs until the final build outputs - are produced. - -Since all previous build work is cached, Bazel can identify and reuse cached -artifacts and only rebuild or retest what's changed. To further enforce -correctness, you can set up Bazel to run builds and tests -[hermetically](/concepts/hermeticity) through sandboxing, minimizing skew -and maximizing [reproducibility](/docs/build#correct-incremental-rebuilds). - -### Action graph - -The action graph represents the build artifacts, the relationships between them, -and the build actions that Bazel will perform. Thanks to this graph, Bazel can -[track](/docs/build#build-consistency) changes to -file content as well as changes to actions, such as build or test commands, and -know what build work has previously been done. The graph also enables you to -easily [trace dependencies](/docs/query-how-to) in your code. 
- -## Getting started tutorials - -To get started with Bazel, see [Getting Started](/start/getting-started) or jump -directly to the Bazel tutorials: - -* [Tutorial: Build a C++ Project](/tutorials/cpp) -* [Tutorial: Build a Java Project](/tutorials/java) -* [Tutorial: Build an Android Application](/tutorials/android-app) -* [Tutorial: Build an iOS Application](/tutorials/ios-app) diff --git a/6.5.0/about/roadmap.mdx b/6.5.0/about/roadmap.mdx deleted file mode 100644 index 3ea14e3..0000000 --- a/6.5.0/about/roadmap.mdx +++ /dev/null @@ -1,106 +0,0 @@ ---- -title: 'Bazel roadmap' ---- - -## Overview -The Bazel project constantly evolves in response to your needs — developing features and providing support while maintaining, refactoring, and improving the performance of the core product. - -With these changes, we’re looking to keep our open-source community informed and included. This roadmap describes current initiatives and predictions for the future of Bazel development, giving you visibility into current priorities and ongoing projects. - -This roadmap snapshots targets, and should not be taken as guarantees. Priorities are subject to change in response to developer and customer feedback, or new market opportunities. - -To be notified of new features — including updates to this roadmap — join the [Google Group](https://groups.google.com/g/bazel-discuss) community. - -## Q4 — Bazel 6.0 Release - -Q4 brings Bazel 6.0 — the new [long term support (LTS)](https://bazel.build/release/versioning) version. Bazel 6.0 plans to include new powerful and community-requested features for managing dependencies, developing with Android, and more. - -### Bzlmod: external dependency management system - -[Bzlmod](https://bazel.build/external/overview#bzlmod) automatically resolves transitive dependencies, allowing projects to scale while staying fast and resource-efficient. Introduced experimentally in Bazel 5.0, Bzlmod will be generally available and provide a solution for the [diamond dependency problem](https://docs.google.com/document/d/1moQfNcEIttsk6vYanNKIy3ZuK53hQUFq1b1r0rmsYVg/edit#heading=h.lgyp7ubwxmjc). - -* Bzlmod goes from ‘experimental’ to ‘generally available’ -* Includes support for `rules\_jvm\_external`, allowing users to download Maven dependencies for Java projects -* [Bzlmod Migration Guide](https://docs.google.com/document/d/1JtXIVnXyFZ4bmbiBCr5gsTH4-opZAFf5DMMb-54kES0/edit?usp=gmail) provides tools, scripts, and documentation to teams looking to adopt Bzlmod -* The [Bazel central repository](https://github.com/bazelbuild/bazel-central-registry) hosts core Bazel `BUILD` rules (`rules\_jvm\_external`, `rules\_go`, `rules\_python`, `rules\_nodejs`) and key dependencies required for Bzlmod - -For more on this development, watch the [Bzlmod community update](https://www.youtube.com/watch?v=MuW5XNcFukE) or read the [original design doc](https://docs.google.com/document/d/1moQfNcEIttsk6vYanNKIy3ZuK53hQUFq1b1r0rmsYVg/edit#heading=h.lgyp7ubwxmjc). - -### Android app build with Bazel - -Bazel 6.0 will include improved tooling and merged-in community feature contributions. Anticipating further adoption and a growing codebase, the Bazel team will prioritize integration of Android build tools with Bazel Android rules. - -* Updates D8 to v. 3.3.28 and sets it as the [default dexer](https://github.com/bazelbuild/bazel/issues/10240). 
-* Merges to main community feature contributions added in 5.X including support for: - * Persistent workers with D8 - * Desugaring using D8 - * Merging "uses-permissions" tags in Android manifests - * Multiplex workers in Android resource processing - -### Optional toolchains - -Our Developer Satisfaction survey showed that rule authors want support for further toolchain development. Bazel 6.0 will allow authors to write rules using an [optional, high performance toolchain](https://bazel.build/docs/toolchains#optional-toolchains) when available with a fallback implementation for other platforms. - -### Bazel-JetBrains\* IntelliJ IDEA support - -JetBrains has partnered with Bazel to co-maintain the [Bazel IntelliJ IDEA plugin](https://plugins.jetbrains.com/plugin/8609-bazel), supporting the goal of increasing community stewardship and opening up capacity for feature requests and development. - -* IntelliJ plugin v. 2022.2 provides support for the latest JetBrains plugin release -* Increases compatibility with remote development -* Furthers community-driven development for in-flight features such as Scala support - -For more on this development, read the Bazel-JetBrains [blog announcement](https://blog.bazel.build/2022/07/11/Bazel-IntelliJ-Update.html). - -## Future development - -Looking ahead, the Bazel team has begun development or anticipates prioritizing the following features in 2023 and beyond. - -### Improving Bazel's Android build rules - -Continue to invest in the Android app development experience, focusing on the workflow through build, test, and deployment. - -* Migration to and support for R8 -* Updates to the Android rules, including translation to the Starlark language -* Support for App Bundle -* Support for recent NDK versions -* Test code coverage - -### OSS license compliance tools - -Developers requested a robust license compliance checker to ensure the availability and security of included packages. This project provides a set of rules and tools to help identify and mitigate compliance and license risks associated with a given software component. Target features include: - -* The ability to audit the packages used by a given target -* The ability to build organization specific license compliance checks. - -See the in-progress [rules\_license implementation](https://github.com/bazelbuild/rules_license) on Github. - -### Bzlmod: external dependency management system - -At launch, Bzlmod improves the scalability and reliability of transitive dependencies. Over the next three years, Bzlmod aims to replace `WORKSPACE` as the default Bazel workspace dependency management subsystem. Targeted features include: - -* Support for hermetic builds -* Vendor/offline mode pinning versioned references rules to a local copy -* Bazel Central Registry includes regular community contribution and adoption of key Bazel rules & projects -* Bzlmod becomes the default tool for building Bazel projects - -### Signed builds - -Bazel will provide trusted binaries for Windows and Mac signed with Google keys. This feature enables multi-platform developers/dev-ops to identify the source of Bazel binaries and protect their systems from malicious, unverified binaries. - -### Standardized Platforms API - -The new Platforms API will standardize the architecture configuration for multi-language, multi-platform builds. With this feature, developers can reduce costly development-time errors and complexity in their large builds. 
-
-### Build analysis metrics
-
-Bazel telemetry will provide analysis-phase time metrics, letting developers optimize their own build performance.
-
-### Remote execution with “Builds without the Bytes”
-
-[Builds without the Bytes](https://github.com/bazelbuild/bazel/issues/6862) will optimize performance by only allowing Bazel to download needed artifacts, preventing builds from bottlenecking on network bandwidth. Features added for remote builds include:
-
-* Use asynchronous download to let local and remote actions kick off as soon as they’ve downloaded their dependent outputs
-* Add Symlinks support
-* Retrieve intermediate outputs from remote actions once a build completes
-
-_\*Copyright © 2022 JetBrains s.r.o. JetBrains and IntelliJ are registered trademarks of JetBrains s.r.o._
diff --git a/6.5.0/about/vision.mdx b/6.5.0/about/vision.mdx
deleted file mode 100644
index 85c57e6..0000000
--- a/6.5.0/about/vision.mdx
+++ /dev/null
@@ -1,96 +0,0 @@
----
-title: 'Bazel Vision'
----
-
-
-Any software developer can efficiently build, test, and package
-any project, of any size or complexity, with tooling that's easy to adopt and
-extend.
-
-* **Engineers can take build fundamentals for granted.** Software developers
-  focus on the creative process of authoring code because the mechanical
-  process of build and test is solved. When customizing the build system to
-  support new languages or unique organizational needs, users focus on the
-  aspects of extensibility that are unique to their use case, without having
-  to reinvent the basic plumbing.
-
-* **Engineers can easily contribute to any project.** A developer who wants to
-  start working on a new project can simply clone the project and run the
-  build. There's no need for local configuration - it just works. With
-  cross-platform remote execution, they can work on any machine anywhere and
-  fully test their changes against all platforms the project targets.
-  Engineers can quickly configure the build for a new project or incrementally
-  migrate an existing build.
-
-* **Projects can scale to any size codebase, any size team.** Fast,
-  incremental testing allows teams to fully validate every change before it is
-  committed. This remains true even as repos grow, projects span multiple
-  repos, and multiple languages are introduced. Infrastructure does not force
-  developers to trade test coverage for build speed.
-
-**We believe Bazel has the potential to fulfill this vision.**
-
-Bazel was built from the ground up to enable builds that are reproducible (a
-given set of inputs will always produce the same outputs) and portable (a build
-can be run on any machine without affecting the output).
-
-These characteristics support safe incrementality (rebuilding only changed
-inputs doesn't introduce the risk of corruption) and distributability (build
-actions are isolated and can be offloaded). By minimizing the work needed to do
-a correct build and parallelizing that work across multiple cores and remote
-systems, Bazel can make any build fast.
-
-Bazel's abstraction layer — instructions specific to languages, platforms, and
-toolchains implemented in a simple extensibility language — allows it to be
-easily applied to any context.
-
-## Bazel core competencies
-
-1. Bazel supports **multi-language, multi-platform** builds and tests. You can
-   run a single command to build and test your entire source tree, no matter
-   which combination of languages and platforms you target.
-1. Bazel builds are **fast and correct**. Every build and test run is
-   incremental, on your developers' machines and on CI.
-1. Bazel provides a **uniform, extensible language** to define builds for any
-   language or platform.
-1. Bazel allows your builds **to scale** by connecting to remote execution and
-   caching services.
-1. Bazel works across **all major development platforms** (Linux, MacOS, and
-   Windows).
-1. We accept that adopting Bazel requires effort, but **gradual adoption** is
-   possible. Bazel interfaces with de-facto standard tools for a given
-   language/platform.
-
-## Serving language communities
-
-Software engineering evolves in the context of language communities — typically,
-self-organizing groups of people who use common tools and practices.
-
-To be of use to members of a language community, high-quality Bazel rules must be
-available that integrate with the workflows and conventions of that community.
-
-Bazel is committed to be extensible and open, and to support good rulesets for
-any language.
-
-### Requirements of a good ruleset
-
-1. The rules need to support efficient **building and testing** for the
-   language, including code coverage.
-1. The rules need to **interface with a widely-used "package manager"** for the
-   language (such as Maven for Java), and support incremental migration paths
-   from other widely-used build systems.
-1. The rules need to be **extensible and interoperable**, following
-   ["Bazel sandwich"](https://github.com/bazelbuild/bazel-website/blob/master/designs/_posts/2016-08-04-extensibility-for-native-rules.md)
-   principles.
-1. The rules need to be **remote-execution ready**. In practice, this means
-   **configurable using the [toolchains](/docs/toolchains) mechanism**.
-1. The rules (and Bazel) need to interface with a **widely-used IDE** for the
-   language, if there is one.
-1. The rules need to have **thorough, usable documentation,** with introductory
-   material for new users, comprehensive docs for expert users.
-
-Each of these items is essential and only together do they deliver on Bazel's
-competencies for their particular ecosystem.
-
-They are also, by and large, sufficient - once all are fulfilled, Bazel fully
-delivers its value to members of that language community.
diff --git a/6.5.0/basics/artifact-based-builds.mdx b/6.5.0/basics/artifact-based-builds.mdx
deleted file mode 100644
index 85d9565..0000000
--- a/6.5.0/basics/artifact-based-builds.mdx
+++ /dev/null
@@ -1,278 +0,0 @@
----
-title: 'Artifact-Based Build Systems'
----
-
-
-This page covers artifact-based build systems and the philosophy behind their
-creation. Bazel is an artifact-based build system. While task-based build
-systems are good step above build scripts, they give too much power to
-individual engineers by letting them define their own tasks.
-
-Artifact-based build systems have a small number of tasks defined by the system
-that engineers can configure in a limited way. Engineers still tell the system
-**what** to build, but the build system determines **how** to build it. As with
-task-based build systems, artifact-based build systems, such as Bazel, still
-have buildfiles, but the contents of those buildfiles are very different. Rather
-than being an imperative set of commands in a Turing-complete scripting language
-describing how to produce an output, buildfiles in Bazel are a declarative
-manifest describing a set of artifacts to build, their dependencies, and a
-limited set of options that affect how they’re built. When engineers run `bazel`
-on the command line, they specify a set of targets to build (the **what**), and
-Bazel is responsible for configuring, running, and scheduling the compilation
-steps (the **how**). Because the build system now has full control over what
-tools to run when, it can make much stronger guarantees that allow it to be far
-more efficient while still guaranteeing correctness.
-
-## A functional perspective
-
-It’s easy to make an analogy between artifact-based build systems and functional
-programming. Traditional imperative programming languages (such as, Java, C, and
-Python) specify lists of statements to be executed one after another, in the
-same way that task-based build systems let programmers define a series of steps
-to execute. Functional programming languages (such as, Haskell and ML), in
-contrast, are structured more like a series of mathematical equations. In
-functional languages, the programmer describes a computation to perform, but
-leaves the details of when and exactly how that computation is executed to the
-compiler.
-
-This maps to the idea of declaring a manifest in an artifact-based build system
-and letting the system figure out how to execute the build. Many problems can't
-be easily expressed using functional programming, but the ones that do benefit
-greatly from it: the language is often able to trivially parallelize such
-programs and make strong guarantees about their correctness that would be
-impossible in an imperative language. The easiest problems to express using
-functional programming are the ones that simply involve transforming one piece
-of data into another using a series of rules or functions. And that’s exactly
-what a build system is: the whole system is effectively a mathematical function
-that takes source files (and tools like the compiler) as inputs and produces
-binaries as outputs. So, it’s not surprising that it works well to base a build
-system around the tenets of functional programming.
-
-## Understanding artifact-based build systems
-
-Google's build system, Blaze, was the first artifact-based build system. Bazel
-is the open-sourced version of Blaze.
-
-Here’s what a buildfile (normally named `BUILD`) looks like in Bazel:
-
-```python
-java_binary(
-    name = "MyBinary",
-    srcs = ["MyBinary.java"],
-    deps = [
-        ":mylib",
-    ],
-)
-java_library(
-    name = "mylib",
-    srcs = ["MyLibrary.java", "MyHelper.java"],
-    visibility = ["//java/com/example/myproduct:__subpackages__"],
-    deps = [
-        "//java/com/example/common",
-        "//java/com/example/myproduct/otherlib",
-    ],
-)
-```
-
-In Bazel, `BUILD` files define targets—the two types of targets here are
-`java_binary` and `java_library`. Every target corresponds to an artifact that
-can be created by the system: binary targets produce binaries that can be
-executed directly, and library targets produce libraries that can be used by
-binaries or other libraries. Every target has:
-
-* `name`: how the target is referenced on the command line and by other
-  targets
-* `srcs`: the source files to compiled to create the artifact for the target
-* `deps`: other targets that must be built before this target and linked into
-  it
-
-Dependencies can either be within the same package (such as `MyBinary`’s
-dependency on `:mylib`) or on a different package in the same source hierarchy
-(such as `mylib`’s dependency on `//java/com/example/common`).
-
-As with task-based build systems, you perform builds using Bazel’s command-line
-tool. To build the `MyBinary` target, you run `bazel build :MyBinary`. After
-entering that command for the first time in a clean repository, Bazel:
-
-1. Parses every `BUILD` file in the workspace to create a graph of dependencies
-   among artifacts.
-1. Uses the graph to determine the transitive dependencies of `MyBinary`; that
-   is, every target that `MyBinary` depends on and every target that those
-   targets depend on, recursively.
-1. Builds each of those dependencies, in order. Bazel starts by building each
-   target that has no other dependencies and keeps track of which dependencies
-   still need to be built for each target. As soon as all of a target’s
-   dependencies are built, Bazel starts building that target. This process
-   continues until every one of `MyBinary`’s transitive dependencies have been
-   built.
-1. Builds `MyBinary` to produce a final executable binary that links in all of
-   the dependencies that were built in step 3.
-
-Fundamentally, it might not seem like what’s happening here is that much
-different than what happened when using a task-based build system. Indeed, the
-end result is the same binary, and the process for producing it involved
-analyzing a bunch of steps to find dependencies among them, and then running
-those steps in order. But there are critical differences. The first one appears
-in step 3: because Bazel knows that each target only produces a Java library, it
-knows that all it has to do is run the Java compiler rather than an arbitrary
-user-defined script, so it knows that it’s safe to run these steps in parallel.
-This can produce an order of magnitude performance improvement over building
-targets one at a time on a multicore machine, and is only possible because the
-artifact-based approach leaves the build system in charge of its own execution
-strategy so that it can make stronger guarantees about parallelism.
-
-The benefits extend beyond parallelism, though. The next thing that this
-approach gives us becomes apparent when the developer types `bazel
-build :MyBinary` a second time without making any changes: Bazel exits in less
-than a second with a message saying that the target is up to date. This is
-possible due to the functional programming paradigm we talked about
-earlier—Bazel knows that each target is the result only of running a Java
-compiler, and it knows that the output from the Java compiler depends only on
-its inputs, so as long as the inputs haven’t changed, the output can be reused.
-And this analysis works at every level; if `MyBinary.java` changes, Bazel knows
-to rebuild `MyBinary` but reuse `mylib`. If a source file for
-`//java/com/example/common` changes, Bazel knows to rebuild that library,
-`mylib`, and `MyBinary`, but reuse `//java/com/example/myproduct/otherlib`.
-Because Bazel knows about the properties of the tools it runs at every step,
-it’s able to rebuild only the minimum set of artifacts each time while
-guaranteeing that it won’t produce stale builds.
-
-Reframing the build process in terms of artifacts rather than tasks is subtle
-but powerful. By reducing the flexibility exposed to the programmer, the build
-system can know more about what is being done at every step of the build. It can
-use this knowledge to make the build far more efficient by parallelizing build
-processes and reusing their outputs. But this is really just the first step, and
-these building blocks of parallelism and reuse form the basis for a distributed
-and highly scalable build system.
-
-## Other nifty Bazel tricks
-
-Artifact-based build systems fundamentally solve the problems with parallelism
-and reuse that are inherent in task-based build systems. But there are still a
-few problems that came up earlier that we haven’t addressed. Bazel has clever
-ways of solving each of these, and we should discuss them before moving on.
-
-### Tools as dependencies
-
-One problem we ran into earlier was that builds depended on the tools installed
-on our machine, and reproducing builds across systems could be difficult due to
-different tool versions or locations. The problem becomes even more difficult
-when your project uses languages that require different tools based on which
-platform they’re being built on or compiled for (such as, Windows versus Linux),
-and each of those platforms requires a slightly different set of tools to do the
-same job.
-
-Bazel solves the first part of this problem by treating tools as dependencies to
-each target. Every `java_library` in the workspace implicitly depends on a Java
-compiler, which defaults to a well-known compiler. Whenever Bazel builds a
-`java_library`, it checks to make sure that the specified compiler is available
-at a known location. Just like any other dependency, if the Java compiler
-changes, every artifact that depends on it is rebuilt.
-
-Bazel solves the second part of the problem, platform independence, by setting
-[build configurations](/docs/build#build-config-cross-compilation). Rather than
-targets depending directly on their tools, they depend on types of configurations:
-
-* **Host configuration**: building tools that run during the build
-* **Target configuration**: building the binary you ultimately requested
-
-### Extending the build system
-
-Bazel comes with targets for several popular programming languages out of the
-box, but engineers will always want to do more—part of the benefit of task-based
-systems is their flexibility in supporting any kind of build process, and it
-would be better not to give that up in an artifact-based build system.
-Fortunately, Bazel allows its supported target types to be extended by
-[adding custom rules](/rules/rules).
-
-To define a rule in Bazel, the rule author declares the inputs that the rule
-requires (in the form of attributes passed in the `BUILD` file) and the fixed
-set of outputs that the rule produces. The author also defines the actions that
-will be generated by that rule. Each action declares its inputs and outputs,
-runs a particular executable or writes a particular string to a file, and can be
-connected to other actions via its inputs and outputs. This means that actions
-are the lowest-level composable unit in the build system—an action can do
-whatever it wants so long as it uses only its declared inputs and outputs, and
-Bazel takes care of scheduling actions and caching their results as appropriate.
-
-The system isn’t foolproof given that there’s no way to stop an action developer
-from doing something like introducing a nondeterministic process as part of
-their action. But this doesn’t happen very often in practice, and pushing the
-possibilities for abuse all the way down to the action level greatly decreases
-opportunities for errors. Rules supporting many common languages and tools are
-widely available online, and most projects will never need to define their own
-rules. Even for those that do, rule definitions only need to be defined in one
-central place in the repository, meaning most engineers will be able to use
-those rules without ever having to worry about their implementation.
-
-### Isolating the environment
-
-Actions sound like they might run into the same problems as tasks in other
-systems—isn’t it still possible to write actions that both write to the same
-file and end up conflicting with one another? Actually, Bazel makes these
-conflicts impossible by using _[sandboxing](/docs/sandboxing)_. On supported
-systems, every action is isolated from every other action via a filesystem
-sandbox. Effectively, each action can see only a restricted view of the
-filesystem that includes the inputs it has declared and any outputs it has
-produced. This is enforced by systems such as LXC on Linux, the same technology
-behind Docker. This means that it’s impossible for actions to conflict with one
-another because they are unable to read any files they don’t declare, and any
-files that they write but don’t declare will be thrown away when the action
-finishes. Bazel also uses sandboxes to restrict actions from communicating via
-the network.
-
-### Making external dependencies deterministic
-
-There’s still one problem remaining: build systems often need to download
-dependencies (whether tools or libraries) from external sources rather than
-directly building them. This can be seen in the example via the
-`@com_google_common_guava_guava//jar` dependency, which downloads a `JAR` file
-from Maven.
-
-Depending on files outside of the current workspace is risky. Those files could
-change at any time, potentially requiring the build system to constantly check
-whether they’re fresh. If a remote file changes without a corresponding change
-in the workspace source code, it can also lead to unreproducible builds—a build
-might work one day and fail the next for no obvious reason due to an unnoticed
-dependency change. Finally, an external dependency can introduce a huge security
-risk when it is owned by a third party: if an attacker is able to infiltrate
-that third-party server, they can replace the dependency file with something of
-their own design, potentially giving them full control over your build
-environment and its output.
-
-The fundamental problem is that we want the build system to be aware of these
-files without having to check them into source control. Updating a dependency
-should be a conscious choice, but that choice should be made once in a central
-place rather than managed by individual engineers or automatically by the
-system. This is because even with a “Live at Head” model, we still want builds
-to be deterministic, which implies that if you check out a commit from last
-week, you should see your dependencies as they were then rather than as they are
-now.
-
-Bazel and some other build systems address this problem by requiring a
-workspacewide manifest file that lists a _cryptographic hash_ for every external
-dependency in the workspace. The hash is a concise way to uniquely represent the
-file without checking the entire file into source control. Whenever a new
-external dependency is referenced from a workspace, that dependency’s hash is
-added to the manifest, either manually or automatically. When Bazel runs a
-build, it checks the actual hash of its cached dependency against the expected
-hash defined in the manifest and redownloads the file only if the hash differs.
- -If the artifact we download has a different hash than the one declared in the -manifest, the build will fail unless the hash in the manifest is updated. This -can be done automatically, but that change must be approved and checked into -source control before the build will accept the new dependency. This means that -there’s always a record of when a dependency was updated, and an external -dependency can’t change without a corresponding change in the workspace source. -It also means that, when checking out an older version of the source code, the -build is guaranteed to use the same dependencies that it was using at the point -when that version was checked in (or else it will fail if those dependencies are -no longer available). - -Of course, it can still be a problem if a remote server becomes unavailable or -starts serving corrupt data—this can cause all of your builds to begin failing -if you don’t have another copy of that dependency available. To avoid this -problem, we recommend that, for any nontrivial project, you mirror all of its -dependencies onto servers or services that you trust and control. Otherwise you -will always be at the mercy of a third party for your build system’s -availability, even if the checked-in hashes guarantee its security. diff --git a/6.5.0/basics/build-systems.mdx b/6.5.0/basics/build-systems.mdx deleted file mode 100644 index b3c6338..0000000 --- a/6.5.0/basics/build-systems.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: 'Why a Build System?' ---- - - - -This page discusses what build systems are, what they do, why you should use a -build system, and why compilers and build scripts aren't the best choice as your -organization starts to scale. It's intended for developers who don't have much -experience with a build system. - -## What is a build system? - -Fundamentally, all build systems have a straightforward purpose: they transform -the source code written by engineers into executable binaries that can be read -by machines. Build systems aren't just for human-authored code; they also allow -machines to create builds automatically, whether for testing or for releases to -production. In an organization with thousands of engineers, it's common that -most builds are triggered automatically rather than directly by engineers. - -### Can't I just use a compiler? - -The need for a build system might not be immediately obvious. Most engineers -don't use a build system while learning to code: most start by invoking tools -like `gcc` or `javac` directly from the command line, or the equivalent in an -integrated development environment (IDE). As long as all the source code is in -the same directory, a command like this works fine: - -```posix-terminal -javac *.java -``` - -This instructs the Java compiler to take every Java source file in the current -directory and turn it into a binary class file. In the simplest case, this is -all you need. - -However, as soon as code expands, the complications begin. `javac` is smart -enough to look in subdirectories of the current directory to find code to -import. But it has no way of finding code stored in _other parts_ of the -filesystem (perhaps a library shared by several projects). It also only knows -how to build Java code. Large systems often involve different pieces written in -a variety of programming languages with webs of dependencies among those pieces, -meaning no compiler for a single language can possibly build the entire system. 
- -Once you're dealing with code from multiple languages or multiple compilation -units, building code is no longer a one-step process. Now you must evaluate what -your code depends on and build those pieces in the proper order, possibly using -a different set of tools for each piece. If any dependencies change, you must -repeat this process to avoid depending on stale binaries. For a codebase of even -moderate size, this process quickly becomes tedious and error-prone. - -The compiler also doesn’t know anything about how to handle external -dependencies, such as third-party `JAR` files in Java. Without a build system, -you could manage this by downloading the dependency from the internet, sticking -it in a `lib` folder on the hard drive, and configuring the compiler to read -libraries from that directory. Over time, it's difficult to maintain the -updates, versions, and source of these external dependencies. - -### What about shell scripts? - -Suppose that your hobby project starts out simple enough that you can build it -using just a compiler, but you begin running into some of the problems described -previously. Maybe you still don’t think you need a build system and can automate -away the tedious parts using some simple shell scripts that take care of -building things in the correct order. This helps out for a while, but pretty -soon you start running into even more problems: - -* It becomes tedious. As your system grows more complex, you begin spending - almost as much time working on your build scripts as on real code. Debugging - shell scripts is painful, with more and more hacks being layered on top of - one another. - -* It’s slow. To make sure you weren’t accidentally relying on stale libraries, - you have your build script build every dependency in order every time you - run it. You think about adding some logic to detect which parts need to be - rebuilt, but that sounds awfully complex and error prone for a script. Or - you think about specifying which parts need to be rebuilt each time, but - then you’re back to square one. - -* Good news: it’s time for a release! Better go figure out all the arguments - you need to pass to the jar command to make your final build. And remember - how to upload it and push it out to the central repository. And build and - push the documentation updates, and send out a notification to users. Hmm, - maybe this calls for another script... - -* Disaster! Your hard drive crashes, and now you need to recreate your entire - system. You were smart enough to keep all of your source files in version - control, but what about those libraries you downloaded? Can you find them - all again and make sure they were the same version as when you first - downloaded them? Your scripts probably depended on particular tools being - installed in particular places—can you restore that same environment so that - the scripts work again? What about all those environment variables you set a - long time ago to get the compiler working just right and then forgot about? - -* Despite the problems, your project is successful enough that you’re able to - begin hiring more engineers. Now you realize that it doesn’t take a disaster - for the previous problems to arise—you need to go through the same painful - bootstrapping process every time a new developer joins your team. And - despite your best efforts, there are still small differences in each - person’s system. 
Frequently, what works on one person’s machine doesn’t work - on another’s, and each time it takes a few hours of debugging tool paths or - library versions to figure out where the difference is. - -* You decide that you need to automate your build system. In theory, this is - as simple as getting a new computer and setting it up to run your build - script every night using cron. You still need to go through the painful - setup process, but now you don’t have the benefit of a human brain being - able to detect and resolve minor problems. Now, every morning when you get - in, you see that last night’s build failed because yesterday a developer - made a change that worked on their system but didn’t work on the automated - build system. Each time it’s a simple fix, but it happens so often that you - end up spending a lot of time each day discovering and applying these simple - fixes. - -* Builds become slower and slower as the project grows. One day, while waiting - for a build to complete, you gaze mournfully at the idle desktop of your - coworker, who is on vacation, and wish there were a way to take advantage of - all that wasted computational power. - -You’ve run into a classic problem of scale. For a single developer working on at -most a couple hundred lines of code for at most a week or two (which might have -been the entire experience thus far of a junior developer who just graduated -university), a compiler is all you need. Scripts can maybe take you a little bit -farther. But as soon as you need to coordinate across multiple developers and -their machines, even a perfect build script isn’t enough because it becomes very -difficult to account for the minor differences in those machines. At this point, -this simple approach breaks down and it’s time to invest in a real build system. diff --git a/6.5.0/basics/dependencies.mdx b/6.5.0/basics/dependencies.mdx deleted file mode 100644 index 362b9e0..0000000 --- a/6.5.0/basics/dependencies.mdx +++ /dev/null @@ -1,301 +0,0 @@ ---- -title: 'Dependency Management' ---- - - -In looking through the previous pages, one theme repeats over and over: managing -your own code is fairly straightforward, but managing its dependencies is much -more difficult. There are all sorts of dependencies: sometimes there’s a -dependency on a task (such as “push the documentation before I mark a release as -complete”), and sometimes there’s a dependency on an artifact (such as “I need to -have the latest version of the computer vision library to build my code”). -Sometimes, you have internal dependencies on another part of your codebase, and -sometimes you have external dependencies on code or data owned by another team -(either in your organization or a third party). But in any case, the idea of “I -need that before I can have this” is something that recurs repeatedly in the -design of build systems, and managing dependencies is perhaps the most -fundamental job of a build system. - - -## Dealing with Modules and Dependencies - -Projects that use artifact-based build systems like Bazel are broken into a set -of modules, with modules expressing dependencies on one another via `BUILD` -files. Proper organization of these modules and dependencies can have a huge -effect on both the performance of the build system and how much work it takes to -maintain. - -## Using Fine-Grained Modules and the 1:1:1 Rule - -The first question that comes up when structuring an artifact-based build is -deciding how much functionality an individual module should encompass. 
In Bazel, -a _module_ is represented by a target specifying a buildable unit like a -`java_library` or a `go_binary`. At one extreme, the entire project could be -contained in a single module by putting one `BUILD` file at the root and -recursively globbing together all of that project’s source files. At the other -extreme, nearly every source file could be made into its own module, effectively -requiring each file to list in a `BUILD` file every other file it depends on. - -Most projects fall somewhere between these extremes, and the choice involves a -trade-off between performance and maintainability. Using a single module for the -entire project might mean that you never need to touch the `BUILD` file except -when adding an external dependency, but it means that the build system must -always build the entire project all at once. This means that it won’t be able to -parallelize or distribute parts of the build, nor will it be able to cache parts -that it’s already built. One-module-per-file is the opposite: the build system -has the maximum flexibility in caching and scheduling steps of the build, but -engineers need to expend more effort maintaining lists of dependencies whenever -they change which files reference which. - -Though the exact granularity varies by language (and often even within -language), Google tends to favor significantly smaller modules than one might -typically write in a task-based build system. A typical production binary at -Google often depends on tens of thousands of targets, and even a moderate-sized -team can own several hundred targets within its codebase. For languages like -Java that have a strong built-in notion of packaging, each directory usually -contains a single package, target, and `BUILD` file (Pants, another build system -based on Bazel, calls this the 1:1:1 rule). Languages with weaker packaging -conventions frequently define multiple targets per `BUILD` file. - -The benefits of smaller build targets really begin to show at scale because they -lead to faster distributed builds and a less frequent need to rebuild targets. -The advantages become even more compelling after testing enters the picture, as -finer-grained targets mean that the build system can be much smarter about -running only a limited subset of tests that could be affected by any given -change. Because Google believes in the systemic benefits of using smaller -targets, we’ve made some strides in mitigating the downside by investing in -tooling to automatically manage `BUILD` files to avoid burdening developers. - -Some of these tools, such as `buildifier` and `buildozer`, are available with -Bazel in the -[`buildtools` directory](https://github.com/bazelbuild/buildtools). - - -## Minimizing Module Visibility - -Bazel and other build systems allow each target to specify a visibility — a -property that determines which other targets may depend on it. A private target -can only be referenced within its own `BUILD` file. A target may grant broader -visibility to the targets of an explicitly defined list of `BUILD` files, or, in -the case of public visibility, to every target in the workspace. - -As with most programming languages, it is usually best to minimize visibility as -much as possible. Generally, teams at Google will make targets public only if -those targets represent widely used libraries available to any team at Google. -Teams that require others to coordinate with them before using their code will -maintain an allowlist of customer targets as their target’s visibility. 
Each
-team’s internal implementation targets will be restricted to only directories
-owned by the team, and most `BUILD` files will have only one target that isn’t
-private.
-
-## Managing Dependencies
-
-Modules need to be able to refer to one another. The downside of breaking a
-codebase into fine-grained modules is that you need to manage the dependencies
-among those modules (though tools can help automate this). Expressing these
-dependencies usually ends up being the bulk of the content in a `BUILD` file.
-
-### Internal dependencies
-
-In a large project broken into fine-grained modules, most dependencies are
-likely to be internal; that is, on another target defined and built in the same
-source repository. Internal dependencies differ from external dependencies in
-that they are built from source rather than downloaded as a prebuilt artifact
-while running the build. This also means that there’s no notion of “version” for
-internal dependencies—a target and all of its internal dependencies are always
-built at the same commit/revision in the repository. One issue that should be
-handled carefully with regard to internal dependencies is how to treat
-transitive dependencies (Figure 1). Suppose target A depends on target B, which
-depends on a common library target C. Should target A be able to use classes
-defined in target C?
-
-[![Transitive dependencies](/images/transitive-dependencies.png)](/images/transitive-dependencies.png)
-
-**Figure 1**. Transitive dependencies
-
-As far as the underlying tools are concerned, there’s no problem with this; both
-B and C will be linked into target A when it is built, so any symbols defined in
-C are known to A. Bazel allowed this for many years, but as Google grew, we
-began to see problems. Suppose that B was refactored such that it no longer
-needed to depend on C. If B’s dependency on C was then removed, A and any other
-target that used C via a dependency on B would break. Effectively, a target’s
-dependencies became part of its public contract and could never be safely
-changed. This meant that dependencies accumulated over time and builds at Google
-started to slow down.
-
-Google eventually solved this issue by introducing a “strict transitive
-dependency mode” in Bazel. In this mode, Bazel detects whether a target tries to
-reference a symbol without depending on it directly and, if so, fails with an
-error and a shell command that can be used to automatically insert the
-dependency. Rolling this change out across Google’s entire codebase and
-refactoring every one of our millions of build targets to explicitly list their
-dependencies was a multiyear effort, but it was well worth it. Our builds are
-now much faster given that targets have fewer unnecessary dependencies, and
-engineers are empowered to remove dependencies they don’t need without worrying
-about breaking targets that depend on them.
-
-As usual, enforcing strict transitive dependencies involved a trade-off. It made
-build files more verbose, as frequently used libraries now need to be listed
-explicitly in many places rather than pulled in incidentally, and engineers
-needed to spend more effort adding dependencies to `BUILD` files. We’ve since
-developed tools that reduce this toil by automatically detecting many missing
-dependencies and adding them to `BUILD` files without any developer
-intervention. But even without such tools, we’ve found the trade-off to be well
-worth it as the codebase scales: explicitly adding a dependency to a `BUILD`
-file is a one-time cost, but dealing with implicit transitive dependencies can
-cause ongoing problems as long as the build target exists. Bazel
-[enforces strict transitive dependencies](https://blog.bazel.build/2017/06/28/sjd-unused_deps.html)
-on Java code by default.
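-
-To make the rule concrete, here is a hypothetical sketch of the situation in
-Figure 1 expressed as `BUILD` targets (all names are illustrative):
-
-```python
-java_library(
-    name = "c",
-    srcs = ["C.java"],
-)
-
-java_library(
-    name = "b",
-    srcs = ["B.java"],
-    deps = [":c"],
-)
-
-java_library(
-    name = "a",
-    srcs = ["A.java"],
-    deps = [
-        ":b",
-        # Under strict transitive dependencies, if A.java uses classes from C,
-        # target A must list :c itself rather than relying on :b to pull it in.
-        ":c",
-    ],
-)
-```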
-
-### External dependencies
-
-If a dependency isn’t internal, it must be external. External dependencies are
-those on artifacts that are built and stored outside of the build system. The
-dependency is imported directly from an artifact repository (typically accessed
-over the internet) and used as-is rather than being built from source. One of
-the biggest differences between external and internal dependencies is that
-external dependencies have versions, and those versions exist independently of
-the project’s source code.
-
-### Automatic versus manual dependency management
-
-Build systems can allow the versions of external dependencies to be managed
-either manually or automatically. When managed manually, the buildfile
-explicitly lists the version it wants to download from the artifact repository,
-often using a [semantic version string](https://semver.org/) such
-as `1.1.4`. When managed automatically, the source file specifies a range of
-acceptable versions, and the build system always downloads the latest one. For
-example, Gradle allows a dependency version to be declared as “1.+” to specify
-that any minor or patch version of a dependency is acceptable so long as the
-major version is 1.
-
-Automatically managed dependencies can be convenient for small projects, but
-they’re usually a recipe for disaster on projects of nontrivial size or that are
-being worked on by more than one engineer. The problem with automatically
-managed dependencies is that you have no control over when the version is
-updated. There’s no way to guarantee that external parties won’t make breaking
-updates (even when they claim to use semantic versioning), so a build that
-worked one day might be broken the next with no easy way to detect what changed
-or to roll it back to a working state. Even if the build doesn’t break, there
-can be subtle behavior or performance changes that are impossible to track down.
-
-In contrast, because manually managed dependencies require a change in source
-control, they can be easily discovered and rolled back, and it’s possible to
-check out an older version of the repository to build with older dependencies.
-Bazel requires that versions of all dependencies be specified manually. At even
-moderate scales, the overhead of manual version management is well worth it for
-the stability it provides.
-
-### The One-Version Rule
-
-Different versions of a library are usually represented by different artifacts,
-so in theory there’s no reason that different versions of the same external
-dependency couldn’t both be declared in the build system under different names.
-That way, each target could choose which version of the dependency it wanted to
-use. This causes a lot of problems in practice, so Google enforces a strict
-[One-Version Rule](https://opensource.google/docs/thirdparty/oneversion/)
-for all third-party dependencies in our codebase.
-
-The biggest problem with allowing multiple versions is the diamond dependency
-issue. Suppose that target A depends on target B and on v1 of an external
-library. If target B is later refactored to add a dependency on v2 of the same
-external library, target A will break because it now depends implicitly on two
-different versions of the same library. Effectively, it’s never safe to add a
-new dependency from a target to any third-party library with multiple versions,
-because any of that target’s users could already be depending on a different
-version. Following the One-Version Rule makes this conflict impossible—if a
-target adds a dependency on a third-party library, any existing dependencies
-will already be on that same version, so they can happily coexist.
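-
-As a concrete sketch, a workspace following this approach might declare its
-Maven dependencies once, centrally, with exact pinned versions. This example
-assumes the community-maintained `rules_jvm_external` ruleset (one of several
-ways to manage Maven dependencies in Bazel); the artifacts shown are
-illustrative:
-
-```python
-load("@rules_jvm_external//:defs.bzl", "maven_install")
-
-maven_install(
-    # Exact versions, chosen once for the entire repository; there is only
-    # one version of each artifact anywhere in the build.
-    artifacts = [
-        "com.google.guava:guava:31.1-jre",
-        "junit:junit:4.13.2",
-    ],
-    repositories = ["https://repo1.maven.org/maven2"],
-)
-```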
-
-### Transitive external dependencies
-
-Dealing with the transitive dependencies of an external dependency can be
-particularly difficult. Many artifact repositories, such as Maven Central,
-allow artifacts to specify dependencies on particular versions of other
-artifacts in the repository. Build tools like Maven or Gradle often recursively
-download each transitive dependency by default, meaning that adding a single
-dependency in your project could potentially cause dozens of artifacts to be
-downloaded in total.
-
-This is very convenient: when adding a dependency on a new library, it would be
-a big pain to have to track down each of that library’s transitive dependencies
-and add them all manually. But there’s also a huge downside: because different
-libraries can depend on different versions of the same third-party library, this
-strategy necessarily violates the One-Version Rule and leads to the diamond
-dependency problem. If your target depends on two external libraries that use
-different versions of the same dependency, there’s no telling which one you’ll
-get. This also means that updating an external dependency could cause seemingly
-unrelated failures throughout the codebase if the new version begins pulling in
-conflicting versions of some of its dependencies.
-
-For this reason, Bazel does not automatically download transitive dependencies.
-And, unfortunately, there’s no silver bullet—Bazel’s alternative is to require a
-global file that lists every single one of the repository’s external
-dependencies and an explicit version used for that dependency throughout the
-repository. Fortunately, Bazel provides a tool that can automatically generate
-such a file containing the transitive dependencies of a set of Maven artifacts.
-The tool can be run once to generate the initial `WORKSPACE` file for a
-project, and that file can then be manually updated to adjust the versions of
-each dependency.
-
-Yet again, the choice here is one between convenience and scalability. Small
-projects might prefer not having to worry about managing transitive dependencies
-themselves and might be able to get away with using automatic transitive
-dependencies. This strategy becomes less and less appealing as the organization
-and codebase grows, and conflicts and unexpected results become more and more
-frequent. At larger scales, the cost of manually managing dependencies is much
-less than the cost of dealing with issues caused by automatic dependency
-management.
-
-### Caching build results using external dependencies
-
-External dependencies are most often provided by third parties that release
-stable versions of libraries, perhaps without providing source code. Some
-organizations might also choose to make some of their own code available as
-artifacts, allowing other pieces of code to depend on them as third-party rather
-than internal dependencies.
This can theoretically speed up builds if artifacts -are slow to build but quick to download. - -However, this also introduces a lot of overhead and complexity: someone needs to -be responsible for building each of those artifacts and uploading them to the -artifact repository, and clients need to ensure that they stay up to date with -the latest version. Debugging also becomes much more difficult because different -parts of the system will have been built from different points in the -repository, and there is no longer a consistent view of the source tree. - -A better way to solve the problem of artifacts taking a long time to build is to -use a build system that supports remote caching, as described earlier. Such a -build system saves the resulting artifacts from every build to a location -that is shared across engineers, so if a developer depends on an artifact that -was recently built by someone else, the build system automatically downloads -it instead of building it. This provides all of the performance benefits of -depending directly on artifacts while still ensuring that builds are as -consistent as if they were always built from the same source. This is the -strategy used internally by Google, and Bazel can be configured to use a remote -cache. - -### Security and reliability of external dependencies - -Depending on artifacts from third-party sources is inherently risky. There’s an -availability risk if the third-party source (such as an artifact repository) goes -down, because your entire build might grind to a halt if it’s unable to download -an external dependency. There’s also a security risk: if the third-party system -is compromised by an attacker, the attacker could replace the referenced -artifact with one of their own design, allowing them to inject arbitrary code -into your build. Both problems can be mitigated by mirroring any artifacts you -depend on onto servers you control and blocking your build system from accessing -third-party artifact repositories like Maven Central. The trade-off is that -these mirrors take effort and resources to maintain, so the choice of whether to -use them often depends on the scale of the project. The security issue can also -be completely prevented with little overhead by requiring the hash of each -third-party artifact to be specified in the source repository, causing the build -to fail if the artifact is tampered with. Another alternative that completely -sidesteps the issue is to vendor your project’s dependencies. When a project -vendors its dependencies, it checks them into source control alongside the -project’s source code, either as source or as binaries. This effectively means -that all of the project’s external dependencies are converted to internal -dependencies. Google uses this approach internally, checking every third-party -library referenced throughout Google into a `third_party` directory at the root -of Google’s source tree. However, this works at Google only because Google’s -source control system is custom built to handle an extremely large monorepo, so -vendoring might not be an option for all organizations. diff --git a/6.5.0/basics/distributed-builds.mdx b/6.5.0/basics/distributed-builds.mdx deleted file mode 100644 index 8977c18..0000000 --- a/6.5.0/basics/distributed-builds.mdx +++ /dev/null @@ -1,136 +0,0 @@ ---- -title: 'Distributed Builds' ---- - - -When you have a large codebase, chains of dependencies can become very deep. -Even simple binaries can often depend on tens of thousands of build targets. 
At -this scale, it’s simply impossible to complete a build in a reasonable amount -of time on a single machine: no build system can get around the fundamental -laws of physics imposed on a machine’s hardware. The only way to make this work -is with a build system that supports distributed builds wherein the units of -work being done by the system are spread across an arbitrary and scalable -number of machines. Assuming we’ve broken the system’s work into small enough -units (more on this later), this would allow us to complete any build of any -size as quickly as we’re willing to pay for. This scalability is the holy grail -we’ve been working toward by defining an artifact-based build system. - -## Remote caching - -The simplest type of distributed build is one that only leverages _remote -caching_, which is shown in Figure 1. - -[![Distributed build with remote caching](/images/distributed-build-remote-cache.png)](/images/distributed-build-remote-cache.png) - -**Figure 1**. A distributed build showing remote caching - -Every system that performs builds, including both developer workstations and -continuous integration systems, shares a reference to a common remote cache -service. This service might be a fast and local short-term storage system like -Redis or a cloud service like Google Cloud Storage. Whenever a user needs to -build an artifact, whether directly or as a dependency, the system first checks -with the remote cache to see if that artifact already exists there. If so, it -can download the artifact instead of building it. If not, the system builds the -artifact itself and uploads the result back to the cache. This means that -low-level dependencies that don’t change very often can be built once and shared -across users rather than having to be rebuilt by each user. At Google, many -artifacts are served from a cache rather than built from scratch, vastly -reducing the cost of running our build system. - -For a remote caching system to work, the build system must guarantee that builds -are completely reproducible. That is, for any build target, it must be possible -to determine the set of inputs to that target such that the same set of inputs -will produce exactly the same output on any machine. This is the only way to -ensure that the results of downloading an artifact are the same as the results -of building it oneself. Note that this requires that each artifact in the cache -be keyed on both its target and a hash of its inputs—that way, different -engineers could make different modifications to the same target at the same -time, and the remote cache would store all of the resulting artifacts and serve -them appropriately without conflict. - -Of course, for there to be any benefit from a remote cache, downloading an -artifact needs to be faster than building it. This is not always the case, -especially if the cache server is far from the machine doing the build. Google’s -network and build system is carefully tuned to be able to quickly share build -results. - -## Remote execution - -Remote caching isn’t a true distributed build. If the cache is lost or if you -make a low-level change that requires everything to be rebuilt, you still need -to perform the entire build locally on your machine. The true goal is to support -remote execution, in which the actual work of doing the build can be spread -across any number of workers. Figure 2 depicts a remote execution system. 
- -[![Remote execution system](/images/remote-execution-system.png)](/images/remote-execution-system.png) - -**Figure 2**. A remote execution system - -The build tool running on each user’s machine (where users are either human -engineers or automated build systems) sends requests to a central build master. -The build master breaks the requests into their component actions and schedules -the execution of those actions over a scalable pool of workers. Each worker -performs the actions asked of it with the inputs specified by the user and -writes out the resulting artifacts. These artifacts are shared across the other -machines executing actions that require them until the final output can be -produced and sent to the user. - -The trickiest part of implementing such a system is managing the communication -between the workers, the master, and the user’s local machine. Workers might -depend on intermediate artifacts produced by other workers, and the final output -needs to be sent back to the user’s local machine. To do this, we can build on -top of the distributed cache described previously by having each worker write -its results to and read its dependencies from the cache. The master blocks -workers from proceeding until everything they depend on has finished, in which -case they’ll be able to read their inputs from the cache. The final product is -also cached, allowing the local machine to download it. Note that we also need a -separate means of exporting the local changes in the user’s source tree so that -workers can apply those changes before building. - -For this to work, all of the parts of the artifact-based build systems described -earlier need to come together. Build environments must be completely -self-describing so that we can spin up workers without human intervention. Build -processes themselves must be completely self-contained because each step might -be executed on a different machine. Outputs must be completely deterministic so -that each worker can trust the results it receives from other workers. Such -guarantees are extremely difficult for a task-based system to provide, which -makes it nigh-impossible to build a reliable remote execution system on top of -one. - -## Distributed builds at Google - -Since 2008, Google has been using a distributed build system that employs both -remote caching and remote execution, which is illustrated in Figure 3. - -[![High-level build system](/images/high-level-build-system.png)](/images/high-level-build-system.png) - -**Figure 3**. Google’s distributed build system - -Google’s remote cache is called ObjFS. It consists of a backend that stores -build outputs in Bigtables distributed throughout our fleet of production -machines and a frontend FUSE daemon named objfsd that runs on each developer’s -machine. The FUSE daemon allows engineers to browse build outputs as if they -were normal files stored on the workstation, but with the file content -downloaded on-demand only for the few files that are directly requested by the -user. Serving file contents on-demand greatly reduces both network and disk -usage, and the system is able to build twice as fast compared to when we stored -all build output on the developer’s local disk. - -Google’s remote execution system is called Forge. A Forge client in Blaze -(Bazel's internal equivalent) called -the Distributor sends requests for each action to a job running in our -datacenters called the Scheduler. 
The Scheduler maintains a cache of action -results, allowing it to return a response immediately if the action has already -been created by any other user of the system. If not, it places the action into -a queue. A large pool of Executor jobs continually read actions from this queue, -execute them, and store the results directly in the ObjFS Bigtables. These -results are available to the executors for future actions, or to be downloaded -by the end user via objfsd. - -The end result is a system that scales to efficiently support all builds -performed at Google. And the scale of Google’s builds is truly massive: Google -runs millions of builds executing millions of test cases and producing petabytes -of build outputs from billions of lines of source code every day. Not only does -such a system let our engineers build complex codebases quickly, it also allows -us to implement a huge number of automated tools and systems that rely on our -build. diff --git a/6.5.0/basics/hermeticity.mdx b/6.5.0/basics/hermeticity.mdx deleted file mode 100644 index 93d79ff..0000000 --- a/6.5.0/basics/hermeticity.mdx +++ /dev/null @@ -1,108 +0,0 @@ ---- -title: 'Hermeticity' ---- - - -This page covers hermeticity, the benefits of using hermetic builds, and -strategies for identifying non-hermetic behavior in your builds. - -## Overview - -When given the same input source code and product configuration, a hermetic -build system always returns the same output by isolating the build from changes -to the host system. - -In order to isolate the build, hermetic builds are insensitive to libraries and -other software installed on the local or remote host machine. They depend on -specific versions of build tools, such as compilers, and dependencies, such as -libraries. This makes the build process self-contained as it doesn't rely on -services external to the build environment. - -The two important aspects of hermeticity are: - -* **Isolation**: Hermetic build systems treat tools as source code. They - download copies of tools and manage their storage and use inside managed file - trees. This creates isolation between the host machine and local user, - including installed versions of languages. -* **Source identity**: Hermetic build systems try to ensure the sameness of - inputs. Code repositories, such as Git, identify sets of code mutations with a - unique hash code. Hermetic build systems use this hash to identify changes to - the build's input. - -## Benefits - -The major benefits of hermetic builds are: - -* **Speed**: The output of an action can be cached, and the action need not be - run again unless inputs change. -* **Parallel execution**: For given input and output, the build system can - construct a graph of all actions to calculate efficient and parallel - execution. The build system loads the rules and calculates an action graph - and hash inputs to look up in the cache. -* **Multiple builds**: You can build multiple hermetic builds on the same - machine, each build using different tools and versions. -* **Reproducibility**: Hermetic builds are good for troubleshooting because you - know the exact conditions that produced the build. - -## Identifying non-hermeticity - -If you are preparing to switch to Bazel, migration is easier if you improve -your existing builds' hermeticity in advance. 
Some common sources of
-non-hermeticity in builds are:
-
-* Arbitrary processing in `.mk` files
-* Actions or tooling that create files non-deterministically, usually involving
-  build IDs or timestamps (a sketch of such an action appears at the end of
-  this section)
-* System binaries that differ across hosts (such as `/usr/bin` binaries, absolute
-  paths, system C++ compilers for native C++ rules autoconfiguration)
-* Writing to the source tree during the build. This prevents the same source
-  tree from being used for another target. The first build writes to the source
-  tree, fixing the source tree for target A. Then trying to build target B may
-  fail.
-
-## Troubleshooting non-hermetic builds
-
-Starting with local execution, issues that affect local cache hits reveal
-non-hermetic actions.
-
-* Ensure null sequential builds: If you run `make` and get a successful build,
-  running the build again should not rebuild any targets. If you run each build
-  step twice or on different systems and the hashes of the file contents
-  differ, the build is not reproducible.
-* Run steps to
-  [debug local cache hits](/docs/remote-execution-caching-debug#troubleshooting-cache-hits)
-  from a variety of potential client machines to ensure that you catch any
-  cases of the client environment leaking into the actions.
-* Execute a build within a Docker container that contains nothing but the
-  checked-out source tree and an explicit list of host tools. Build breakages
-  and error messages will catch implicit system dependencies.
-* Discover and fix hermeticity problems using
-  [remote execution rules](/docs/remote-execution-rules#overview).
-* Enable strict [sandboxing](/docs/sandboxing)
-  at the per-action level, since actions in a build can be stateful and affect
-  the build or the output.
-* [Workspace rules](/docs/workspace-log)
-  allow developers to add dependencies to external workspaces, but they are
-  rich enough to allow arbitrary processing to happen in the process. You can
-  get a log of some potentially non-hermetic actions in Bazel workspace rules by
-  adding the flag `--experimental_workspace_rules_log_file=PATH` to your Bazel
-  command.
-
-Note: Make your build fully hermetic when mixing remote and local execution,
-using Bazel’s “dynamic strategy” functionality. Running Bazel inside the remote
-Docker container will enable the build to execute the same in both environments.
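-
-For example, the difference between a non-hermetic action and a hermetic one
-can be as small as a single stamp. The following hypothetical `genrule` sketch
-shows both; the target and file names are illustrative:
-
-```python
-# Non-hermetic: the output embeds the current time, so it differs on every
-# run, defeats caching, and is not reproducible.
-genrule(
-    name = "version_info_nonhermetic",
-    outs = ["version_nonhermetic.txt"],
-    cmd = "date > $@",
-)
-
-# Hermetic alternative: the output is derived only from a declared input,
-# so the same source tree always produces the same bytes.
-genrule(
-    name = "version_info",
-    srcs = ["VERSION"],
-    outs = ["version.txt"],
-    cmd = "cp $(location VERSION) $@",
-)
-```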
- -## Hermeticity with Bazel - -For more information about how other projects have had success using hermetic -builds with Bazel, see these BazelCon talks: - -* [Building Real-time Systems with Bazel](https://www.youtube.com/watch?v=t_3bckhV_YI) (SpaceX) -* [Bazel Remote Execution and Remote Caching](https://www.youtube.com/watch?v=_bPyEbAyC0s) (Uber and TwoSigma) -* [Faster Builds With Remote Execution and Caching](https://www.youtube.com/watch?v=MyuJRUwT5LI) -* [Fusing Bazel: Faster Incremental Builds](https://www.youtube.com/watch?v=rQd9Zd1ONOw) -* [Remote Execution vs Local Execution](https://www.youtube.com/watch?v=C8wHmIln--g) -* [Improving the Usability of Remote Caching](https://www.youtube.com/watch?v=u5m7V3ZRHLA) (IBM) -* [Building Self Driving Cars with Bazel](https://www.youtube.com/watch?v=Gh4SJuYUoQI&list=PLxNYxgaZ8Rsf-7g43Z8LyXct9ax6egdSj&index=4&t=0s) (BMW) -* [Building Self Driving Cars with Bazel + Q&A](https://www.youtube.com/watch?v=fjfFe98LTm8&list=PLxNYxgaZ8Rsf-7g43Z8LyXct9ax6egdSj&index=29) (GM Cruise) diff --git a/6.5.0/basics/index.mdx b/6.5.0/basics/index.mdx deleted file mode 100644 index cb3c89b..0000000 --- a/6.5.0/basics/index.mdx +++ /dev/null @@ -1,56 +0,0 @@ ---- -title: 'Build Basics' ---- - - -A build system is one of the most important parts of an engineering organization -because each developer interacts with it potentially dozens or hundreds of times -per day. A fully featured build system is necessary to enable developer -productivity as an organization scales. For individual developers, it's -straightforward to just compile your code and so a build system might seem -excessive. But at a larger scale, having a build system helps with managing -shared dependencies, such as relying on another part of the code base, or an -external resource, such as a library. Build systems help to make sure that you -have everything you need to build your code before it starts building. Build -systems also increase velocity when they're set up to help engineers share -resources and results. - -This section covers some history and basics of building and build systems, -including design decisions that went into making Bazel. If you're -familiar with artifact-based build systems, such as Bazel, Buck, and Pants, you -can skip this section, but it's a helpful overview to understand why -artifact-based build systems are excellent at enabling scale. - -Note: Much of this section's content comes from the _Build Systems and -Build Philosophy_ chapter of the -[_Software Engineering at Google_ book](https://abseil.io/resources/swe_at_google.2.pdf#page=399). -Thank you to the original author, Erik Kuefler, for allowing its reuse and -modification here! - -* **[Why a Build System?](/basics/build-systems)** - - If you haven't used a build system before, start here. This page covers why - you should use a build system, and why compilers and build scripts aren't - the best choice once your organization starts to scale beyond a few - developers. - -* **[Task-Based Build Systems](/basics/task-based-builds)** - - This page discusses task-based build systems (such as Make, Maven, and - Gradle) and some of their challenges. - -* **[Artifact-Based Build Systems](/basics/artifact-based-builds)** - - This page discusses artifact-based build systems in response to the pain - points of task-based build systems. - -* **[Distributed Builds](/basics/distributed-builds)** - - This page covers distributed builds, or builds that are executed outside of - your local machine. 
This requires more robust infrastructure to share
-  resources and build results (and is where the true wizardry happens!)
-
-* **[Dependency Management](/basics/dependencies)**
-
-  This page covers some complications of dependencies at a large scale and
-  strategies to counteract those complications.
diff --git a/6.5.0/basics/task-based-builds.mdx b/6.5.0/basics/task-based-builds.mdx
deleted file mode 100644
index 35379ef..0000000
--- a/6.5.0/basics/task-based-builds.mdx
+++ /dev/null
@@ -1,215 +0,0 @@
----
-title: 'Task-Based Build Systems'
----
-
-
-This page covers task-based build systems, how they work, and some of the
-complications that can occur with task-based systems. After shell scripts,
-task-based build systems are the next logical evolution of building.
-
-
-## Understanding task-based build systems
-
-In a task-based build system, the fundamental unit of work is the task. Each
-task is a script that can execute any sort of logic, and tasks specify other
-tasks as dependencies that must run before them. Most major build systems in use
-today, such as Ant, Maven, Gradle, Grunt, and Rake, are task based. Instead of
-shell scripts, most modern build systems require engineers to create build files
-that describe how to perform the build.
-
-Take this example from the
-[Ant manual](https://ant.apache.org/manual/using.html):
-
-```xml
-<project name="MyProject" default="dist" basedir=".">
-   <description>
-     simple example build file
-   </description>
-   <!-- set global properties for this build -->
-   <property name="src" location="src"/>
-   <property name="build" location="build"/>
-   <property name="dist" location="dist"/>
-
-   <target name="init">
-     <!-- Create the time stamp -->
-     <tstamp/>
-     <!-- Create the build directory structure used by compile -->
-     <mkdir dir="${build}"/>
-   </target>
-   <target name="compile" depends="init"
-       description="compile the source">
-     <!-- Compile the Java code from ${src} into ${build} -->
-     <javac srcdir="${src}" destdir="${build}"/>
-   </target>
-   <target name="dist" depends="compile"
-       description="generate the distribution">
-     <!-- Create the distribution directory -->
-     <mkdir dir="${dist}/lib"/>
-     <!-- Put everything in ${build} into the MyProject-${DSTAMP}.jar file -->
-     <jar jarfile="${dist}/lib/MyProject-${DSTAMP}.jar" basedir="${build}"/>
-   </target>
-   <target name="clean"
-       description="clean up">
-     <!-- Delete the ${build} and ${dist} directory trees -->
-     <delete dir="${build}"/>
-     <delete dir="${dist}"/>
-   </target>
-</project>
-```
-
-The buildfile is written in XML and defines some simple metadata about the build
-along with a list of tasks (the `<target>` tags in the XML). (Ant uses the word
-_target_ to represent a _task_, and it uses the word _task_ to refer to
-_commands_.) Each task executes a list of possible commands defined by Ant,
-which here include creating and deleting directories, running `javac`, and
-creating a JAR file. This set of commands can be extended by user-provided
-plug-ins to cover any sort of logic. Each task can also define the tasks it
-depends on via the depends attribute. These dependencies form an acyclic graph,
-as seen in Figure 1.
-
-[![Acyclic graph showing dependencies](/images/task-dependencies.png)](/images/task-dependencies.png)
-
-Figure 1. An acyclic graph showing dependencies
-
-Users perform builds by providing tasks to Ant’s command-line tool. For example,
-when a user types `ant dist`, Ant takes the following steps:
-
-1. Loads a file named `build.xml` in the current directory and parses it to
-   create the graph structure shown in Figure 1.
-1. Looks for the task named `dist` that was provided on the command line and
-   discovers that it has a dependency on the task named `compile`.
-1. Looks for the task named `compile` and discovers that it has a dependency on
-   the task named `init`.
-1. Looks for the task named `init` and discovers that it has no dependencies.
-1. Executes the commands defined in the `init` task.
-1. Executes the commands defined in the `compile` task given that all of that
-   task’s dependencies have been run.
-1. Executes the commands defined in the `dist` task given that all of that
-   task’s dependencies have been run.
-
-In the end, the code executed by Ant when running the `dist` task is equivalent
-to the following shell script:
-
-```posix-terminal
-./createTimestamp.sh
-
-mkdir build/
-
-javac src/* -d build/
-
-mkdir -p dist/lib/
-
-jar cf dist/lib/MyProject-$(date --iso-8601).jar build/*
-```
-
-When the syntax is stripped away, the buildfile and the build script actually
-aren’t too different.
But we’ve already gained a lot by doing this. We can -create new buildfiles in other directories and link them together. We can easily -add new tasks that depend on existing tasks in arbitrary and complex ways. We -need only pass the name of a single task to the `ant` command-line tool, and it -determines everything that needs to be run. - -Ant is an old piece of software, originally released in 2000. Other tools like -Maven and Gradle have improved on Ant in the intervening years and essentially -replaced it by adding features like automatic management of external -dependencies and a cleaner syntax without any XML. But the nature of these newer -systems remains the same: they allow engineers to write build scripts in a -principled and modular way as tasks and provide tools for executing those tasks -and managing dependencies among them. - -## The dark side of task-based build systems - -Because these tools essentially let engineers define any script as a task, they -are extremely powerful, allowing you to do pretty much anything you can imagine -with them. But that power comes with drawbacks, and task-based build systems can -become difficult to work with as their build scripts grow more complex. The -problem with such systems is that they actually end up giving _too much power to -engineers and not enough power to the system_. Because the system has no idea -what the scripts are doing, performance suffers, as it must be very conservative -in how it schedules and executes build steps. And there’s no way for the system -to confirm that each script is doing what it should, so scripts tend to grow in -complexity and end up being another thing that needs debugging. - -### Difficulty of parallelizing build steps - -Modern development workstations are quite powerful, with multiple cores that are -capable of executing several build steps in parallel. But task-based systems are -often unable to parallelize task execution even when it seems like they should -be able to. Suppose that task A depends on tasks B and C. Because tasks B and C -have no dependency on each other, is it safe to run them at the same time so -that the system can more quickly get to task A? Maybe, if they don’t touch any -of the same resources. But maybe not—perhaps both use the same file to track -their statuses and running them at the same time causes a conflict. There’s no -way in general for the system to know, so either it has to risk these conflicts -(leading to rare but very difficult-to-debug build problems), or it has to -restrict the entire build to running on a single thread in a single process. -This can be a huge waste of a powerful developer machine, and it completely -rules out the possibility of distributing the build across multiple machines. - -### Difficulty performing incremental builds - -A good build system allows engineers to perform reliable incremental builds such -that a small change doesn’t require the entire codebase to be rebuilt from -scratch. This is especially important if the build system is slow and unable to -parallelize build steps for the aforementioned reasons. But unfortunately, -task-based build systems struggle here, too. Because tasks can do anything, -there’s no way in general to check whether they’ve already been done. Many tasks -simply take a set of source files and run a compiler to create a set of -binaries; thus, they don’t need to be rerun if the underlying source files -haven’t changed. 
But without additional information, the system can’t say this -for sure—maybe the task downloads a file that could have changed, or maybe it -writes a timestamp that could be different on each run. To guarantee -correctness, the system typically must rerun every task during each build. Some -build systems try to enable incremental builds by letting engineers specify the -conditions under which a task needs to be rerun. Sometimes this is feasible, but -often it’s a much trickier problem than it appears. For example, in languages -like C++ that allow files to be included directly by other files, it’s -impossible to determine the entire set of files that must be watched for changes -without parsing the input sources. Engineers often end up taking shortcuts, and -these shortcuts can lead to rare and frustrating problems where a task result is -reused even when it shouldn’t be. When this happens frequently, engineers get -into the habit of running clean before every build to get a fresh state, -completely defeating the purpose of having an incremental build in the first -place. Figuring out when a task needs to be rerun is surprisingly subtle, and is -a job better handled by machines than humans. - -### Difficulty maintaining and debugging scripts - -Finally, the build scripts imposed by task-based build systems are often just -difficult to work with. Though they often receive less scrutiny, build scripts -are code just like the system being built, and are easy places for bugs to hide. -Here are some examples of bugs that are very common when working with a -task-based build system: - -* Task A depends on task B to produce a particular file as output. The owner - of task B doesn’t realize that other tasks rely on it, so they change it to - produce output in a different location. This can’t be detected until someone - tries to run task A and finds that it fails. -* Task A depends on task B, which depends on task C, which is producing a - particular file as output that’s needed by task A. The owner of task B - decides that it doesn’t need to depend on task C any more, which causes task - A to fail even though task B doesn’t care about task C at all! -* The developer of a new task accidentally makes an assumption about the - machine running the task, such as the location of a tool or the value of - particular environment variables. The task works on their machine, but fails - whenever another developer tries it. -* A task contains a nondeterministic component, such as downloading a file - from the internet or adding a timestamp to a build. Now, people get - potentially different results each time they run the build, meaning that - engineers won’t always be able to reproduce and fix one another’s failures - or failures that occur on an automated build system. -* Tasks with multiple dependencies can create race conditions. If task A - depends on both task B and task C, and task B and C both modify the same - file, task A gets a different result depending on which one of tasks B and C - finishes first. - -There’s no general-purpose way to solve these performance, correctness, or -maintainability problems within the task-based framework laid out here. So long -as engineers can write arbitrary code that runs during the build, the system -can’t have enough information to always be able to run builds quickly and -correctly. 
To solve the problem, we need to take some power out of the hands of
-engineers and put it back in the hands of the system, reconceptualizing the
-role of the system not as running tasks, but as producing artifacts.
-
-This approach led to the creation of artifact-based build systems, like Blaze
-and Bazel.
diff --git a/6.5.0/build/share-variables.mdx b/6.5.0/build/share-variables.mdx
deleted file mode 100644
index 0067983..0000000
--- a/6.5.0/build/share-variables.mdx
+++ /dev/null
@@ -1,84 +0,0 @@
----
-title: 'Sharing Variables'
----
-
-
-`BUILD` files are intended to be simple and declarative. They will typically
-consist of a series of target declarations. As your code base and your `BUILD`
-files get larger, you will probably notice some duplication, such as:
-
-``` python
-cc_library(
-    name = "foo",
-    copts = ["-DVERSION=5"],
-    srcs = ["foo.cc"],
-)
-
-cc_library(
-    name = "bar",
-    copts = ["-DVERSION=5"],
-    srcs = ["bar.cc"],
-    deps = [":foo"],
-)
-```
-
-Code duplication in `BUILD` files is usually fine. This can make the file more
-readable: each declaration can be read and understood without any context. This
-is important, not only for humans, but also for external tools. For example, a
-tool might be able to read and update `BUILD` files to add missing dependencies.
-Code refactoring and code reuse might prevent this kind of automated
-modification.
-
-If it is useful to share values (for example, if values must be kept in sync),
-you can introduce a variable:
-
-``` python
-COPTS = ["-DVERSION=5"]
-
-cc_library(
-    name = "foo",
-    copts = COPTS,
-    srcs = ["foo.cc"],
-)
-
-cc_library(
-    name = "bar",
-    copts = COPTS,
-    srcs = ["bar.cc"],
-    deps = [":foo"],
-)
-```
-
-Multiple declarations now use the value `COPTS`. By convention, use uppercase
-letters to name global constants.
-
-## Sharing variables across multiple BUILD files
-
-If you need to share a value across multiple `BUILD` files, you have to put it
-in a `.bzl` file. `.bzl` files contain definitions (variables and functions)
-that can be used in `BUILD` files.
-
-In `path/to/variables.bzl`, write:
-
-``` python
-COPTS = ["-DVERSION=5"]
-```
-
-Then, you can update your `BUILD` files to access the variable:
-
-``` python
-load("//path/to:variables.bzl", "COPTS")
-
-cc_library(
-    name = "foo",
-    copts = COPTS,
-    srcs = ["foo.cc"],
-)
-
-cc_library(
-    name = "bar",
-    copts = COPTS,
-    srcs = ["bar.cc"],
-    deps = [":foo"],
-)
-```
diff --git a/6.5.0/build/style-guide.mdx b/6.5.0/build/style-guide.mdx
deleted file mode 100644
index 9bb0bfa..0000000
--- a/6.5.0/build/style-guide.mdx
+++ /dev/null
@@ -1,231 +0,0 @@
----
-title: 'BUILD Style Guide'
----
-
-
-`BUILD` file formatting follows the same approach as Go, where a standardized
-tool takes care of most formatting issues.
-[Buildifier](https://github.com/bazelbuild/buildifier) is a tool that parses and
-emits the source code in a standard style. Every `BUILD` file is therefore
-formatted in the same automated way, which makes formatting a non-issue during
-code reviews. It also makes it easier for tools to understand, edit, and
-generate `BUILD` files.
-
-`BUILD` file formatting must match the output of `buildifier`.
-
-## Formatting example
-
-```python
-# Test code implementing the Foo controller.
-package(default_testonly = True)
-
-py_test(
-    name = "foo_test",
-    srcs = glob(["*.py"]),
-    data = [
-        "//data/production/foo:startfoo",
-        "//foo",
-        "//third_party/java/jdk:jdk-k8",
-    ],
-    flaky = True,
-    deps = [
-        ":check_bar_lib",
-        ":foo_data_check",
-        ":pick_foo_port",
-        "//pyglib",
-        "//testing/pybase",
-    ],
-)
-```
-
-## File structure
-
-**Recommendation**: Use the following order (every element is optional):
-
-* Package description (a comment)
-
-* All `load()` statements
-
-* The `package()` function
-
-* Calls to rules and macros
-
-Buildifier makes a distinction between a standalone comment and a comment
-attached to an element. If a comment is not attached to a specific element, use
-an empty line after it. The distinction is important when doing automated
-changes (for example, to keep or remove a comment when deleting a rule).
-
-```python
-# Standalone comment (such as to make a section in a file)
-
-# Comment for the cc_library below
-cc_library(name = "cc")
-```
-
-## References to targets in the current package
-
-Files should be referred to by their paths relative to the package directory
-(without ever using up-references, such as `..`). Generated files should be
-prefixed with "`:`" to indicate that they are not sources. Source files
-should not be prefixed with `:`. Rules should be prefixed with `:`. For
-example, assuming `x.cc` is a source file:
-
-```python
-cc_library(
-    name = "lib",
-    srcs = ["x.cc"],
-    hdrs = [":gen_header"],
-)
-
-genrule(
-    name = "gen_header",
-    srcs = [],
-    outs = ["x.h"],
-    cmd = "echo 'int x();' > $@",
-)
-```
-
-## Target naming
-
-Target names should be descriptive. If a target contains one source file,
-the target should generally have a name derived from that source (for example, a
-`cc_library` for `chat.cc` could be named `chat`, or a `java_library` for
-`DirectMessage.java` could be named `direct_message`).
-
-The eponymous target for a package (the target with the same name as the
-containing directory) should provide the functionality described by the
-directory name. If there is no such target, do not create an eponymous
-target.
-
-Prefer using the short name when referring to an eponymous target (`//x`
-instead of `//x:x`). If you are in the same package, prefer the local
-reference (`:x` instead of `//x`).
-
-Avoid using "reserved" target names which have special meaning. This includes
-`all`, `__pkg__`, and `__subpackages__`; these names have special
-semantics and can cause confusion and unexpected behaviors when they are used.
-
-In the absence of a prevailing team convention, these are some non-binding
-recommendations that are broadly used at Google (a sketch combining several of
-them follows this list):
-
-* In general, use ["snake_case"](https://en.wikipedia.org/wiki/Snake_case)
-    * For a `java_library` with one `src`, this means using a name that is not
-      the same as the filename without the extension
-    * For Java `*_binary` and `*_test` rules, use
-      ["Upper CamelCase"](https://en.wikipedia.org/wiki/Camel_case).
-      This allows for the target name to match one of the `src`s. For
-      `java_test`, this makes it possible for the `test_class` attribute to be
-      inferred from the name of the target.
-* If there are multiple variants of a particular target, then add a suffix to
-  disambiguate (such as `:foo_dev` and `:foo_prod`, or `:bar_x86` and
-  `:bar_x64`)
-* Suffix test targets with `_test`, `_unittest`, `Test`, or `Tests`
-* Avoid meaningless suffixes like `_lib` or `_library` (unless necessary to
-  avoid conflicts between a `_library` target and its corresponding `_binary`)
-* For proto-related targets:
-    * `proto_library` targets should have names ending in `_proto`
-    * Language-specific `*_proto_library` rules should match the underlying
-      proto but replace `_proto` with a language-specific suffix such as:
-        * **`cc_proto_library`**: `_cc_proto`
-        * **`java_proto_library`**: `_java_proto`
-        * **`java_lite_proto_library`**: `_java_proto_lite`
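-
-As an illustrative sketch only (reusing the `chat` and `DirectMessage` examples
-above; the files and targets are invented), a package following these
-recommendations might look like:
-
-```python
-proto_library(
-    name = "chat_proto",
-    srcs = ["chat.proto"],
-)
-
-java_proto_library(
-    name = "chat_java_proto",
-    deps = [":chat_proto"],
-)
-
-# snake_case name derived from the single source file.
-java_library(
-    name = "direct_message",
-    srcs = ["DirectMessage.java"],
-    deps = [":chat_java_proto"],
-)
-
-# Upper CamelCase lets the name match the src and lets java_test infer
-# the test_class attribute.
-java_test(
-    name = "DirectMessageTest",
-    srcs = ["DirectMessageTest.java"],
-    deps = [":direct_message"],
-)
-```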
`:foo_dev`, `:foo_prod` or `:bar_x86`, `:bar_x64`)
-* Suffix `_test` targets with `_test`, `_unittest`, `Test`, or `Tests`
-* Avoid meaningless suffixes like `_lib` or `_library` (unless necessary to
-  avoid conflicts between a `_library` target and its corresponding `_binary`)
-* For proto-related targets:
-    * `proto_library` targets should have names ending in `_proto`
-    * Language-specific `*_proto_library` rules should match the underlying
-      proto but replace `_proto` with a language-specific suffix such as:
-        * **`cc_proto_library`**: `_cc_proto`
-        * **`java_proto_library`**: `_java_proto`
-        * **`java_lite_proto_library`**: `_java_proto_lite`
-
-## Visibility
-
-Visibility should be scoped as tightly as possible, while still allowing access
-by tests and reverse dependencies. Use `__pkg__` and `__subpackages__` as
-appropriate.
-
-Avoid setting package `default_visibility` to `//visibility:public`.
-`//visibility:public` should be individually set only for targets in the
-project's public API. These could be libraries that are designed to be depended
-on by external projects or binaries that could be used by an external project's
-build process.
-
-## Dependencies
-
-Dependencies should be restricted to direct dependencies (dependencies
-needed by the sources listed in the rule). Do not list transitive dependencies.
-
-Package-local dependencies should be listed first and referred to in a way
-compatible with the
-[References to targets in the current package](#targets-current-package)
-section above (not by their absolute package name).
-
-Prefer to list dependencies directly, as a single list. Putting the "common"
-dependencies of several targets into a variable reduces maintainability, makes
-it impossible for tools to change the dependencies of a target, and can lead to
-unused dependencies.
-
-## Globs
-
-Indicate "no targets" with `[]`. Do not use a glob that matches nothing: it
-is more error-prone and less obvious than an empty list.
-
-### Recursive
-
-Do not use recursive globs to match source files (for example,
-`glob(["**/*.java"])`).
-
-Recursive globs make `BUILD` files difficult to reason about because they skip
-subdirectories containing `BUILD` files.
-
-Recursive globs are generally less efficient than having a `BUILD` file per
-directory with a dependency graph defined between them, as this enables better
-remote caching and parallelism.
-
-It is good practice to author a `BUILD` file in each directory and define a
-dependency graph between them.
-
-### Non-recursive
-
-Non-recursive globs are generally acceptable.
-
-## Other conventions
-
- * Use uppercase and underscores to declare constants (such as `GLOBAL_CONSTANT`),
-   and use lowercase and underscores to declare variables (such as `my_variable`).
-
- * Labels should never be split, even if they are longer than 79 characters.
-   Labels should be string literals whenever possible. *Rationale*: It makes
-   find and replace easy. It also improves readability.
-
- * The value of the name attribute should be a literal constant string (except
-   in macros). *Rationale*: External tools use the name attribute to refer to a
-   rule. They need to find rules without having to interpret code.
-
- * When setting boolean-type attributes, use boolean values, not integer values.
-   For legacy reasons, rules still convert integers to booleans as needed,
-   but this is discouraged. *Rationale*: `flaky = 1` could be misread as saying
-   "deflake this target by rerunning it once". `flaky = True` unambiguously says
-   "this test is flaky".
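-
-As a minimal sketch, the following hypothetical target pulls several of these
-conventions together (the target and file names are illustrative, not from any
-real project):
-
-```python
-py_test(
-    # The name is a literal string, so external tools can find this rule.
-    name = "chat_test",
-    srcs = ["chat_test.py"],
-    # Boolean attributes take boolean values, not integers.
-    flaky = True,
-    # Labels are single, unsplit string literals.
-    deps = ["//testing/pybase"],
-)
-```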
-
-## Differences with Python style guide
-
-Although compatibility with the
-[Python style guide](https://www.python.org/dev/peps/pep-0008/)
-is a goal, there are a few differences:
-
- * No strict line length limit. Long comments and long strings are often split
-   to 79 columns, but it is not required. It should not be enforced in code
-   reviews or presubmit scripts. *Rationale*: Labels can be long and exceed this
-   limit. It is common for `BUILD` files to be generated or edited by tools,
-   which does not go well with a line length limit.
-
- * Implicit string concatenation is not supported. Use the `+` operator.
-   *Rationale*: `BUILD` files contain many string lists. It is easy to forget a
-   comma, which leads to a completely different result. This has created many bugs
-   in the past. [See also this discussion.](https://lwn.net/Articles/551438/)
-
- * Use spaces around the `=` sign for keyword arguments in rules. *Rationale*:
-   Named arguments are much more frequent than in Python and are always on a
-   separate line. Spaces improve readability. This convention has been around
-   for a long time, and it is not worth modifying all existing `BUILD` files.
-
- * By default, use double quotation marks for strings. *Rationale*: This is not
-   specified in the Python style guide, but it recommends consistency. So we
-   decided to use only double-quoted strings. Many languages use double-quotes
-   for string literals.
-
- * Use a single blank line between two top-level definitions. *Rationale*: The
-   structure of a `BUILD` file is not like a typical Python file. It has only
-   top-level statements. Using a single blank line makes `BUILD` files shorter.
diff --git a/6.5.0/community/recommended-rules.mdx b/6.5.0/community/recommended-rules.mdx
deleted file mode 100644
index a735f82..0000000
--- a/6.5.0/community/recommended-rules.mdx
+++ /dev/null
@@ -1,53 +0,0 @@
----
-title: 'Recommended Rules'
----
-
-
-In the documentation, we provide a list of
-[recommended rules](/rules).
-
-This is a set of high-quality rules, which will provide a good experience to our
-users. We make a distinction between the supported rules and the hundreds of
-rules you can find on the Internet.
-
-## Nomination
-
-If a ruleset meets the requirements below, a rule maintainer can nominate it
-to be part of the _recommended rules_ by filing a
-[GitHub issue](https://github.com/bazelbuild/bazel/).
-
-After a review by the [Bazel core team](/contribute/contribution-policy), it
-will be recommended on the Bazel website.
-
-## Requirements for the rule maintainers
-
-* The ruleset provides an important feature, useful to a large number of Bazel
-  users (for example, support for a widely popular language).
-* The ruleset is well maintained. There must be at least two active maintainers.
-* The ruleset is well documented, with examples, and easy to use.
-* The ruleset follows best practices and is performant (see
-  [the performance guide](/rules/performance)).
-* The ruleset has sufficient test coverage.
-* The ruleset is tested on
-  [BuildKite](https://github.com/bazelbuild/continuous-integration/blob/master/buildkite/README.md)
-  with the latest version of Bazel. Tests should always pass (when used as a
-  presubmit check).
-* The ruleset is also tested with the upcoming incompatible changes. Breakages
-  should be fixed within two weeks. Migration issues should be reported to the
-  Bazel team quickly.
-
-## Requirements for Bazel developers
-
-* Recommended rules are frequently tested with Bazel at head (at least once a
-  day).
-* No change in Bazel may break a recommended rule (with the default set of - flags). If it happens, the change should be fixed or rolled back. - -## Demotion - -If there is a concern that a particular ruleset is no longer meeting the -requirements, a [GitHub issue](https://github.com/bazelbuild/bazel/) should be -filed. - -Rule maintainers will be contacted and need to respond in 2 weeks. Based on the -outcome, Bazel core team might make a decision to demote the rule set. diff --git a/6.5.0/community/remote-execution-services.mdx b/6.5.0/community/remote-execution-services.mdx deleted file mode 100644 index eaa1796..0000000 --- a/6.5.0/community/remote-execution-services.mdx +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: 'Remote Execution Services' ---- - - -Use the following services to run Bazel with remote execution: - -* Manual - - * Use the [gRPC protocol](https://github.com/bazelbuild/remote-apis) - directly to create your own remote execution service. - -* Self-service - - * [Buildbarn](https://github.com/buildbarn) - * [Buildfarm](https://github.com/bazelbuild/bazel-buildfarm) - * [BuildGrid](https://gitlab.com/BuildGrid/buildgrid) - * [Scoot](https://github.com/twitter/scoot) - -* Commercial - - * [EngFlow Remote Execution](https://www.engflow.com) - Remote execution - and remote caching service. Can be self-hosted or hosted. - * [BuildBuddy](https://www.buildbuddy.io) - Remote build execution, - caching, and results UI. - * [Flare](https://www.flare.build) - Providing a cache + CDN for Bazel - artifacts and Apple-focused remote builds in addition to build & test - analytics. diff --git a/6.5.0/community/roadmaps-build-api.mdx b/6.5.0/community/roadmaps-build-api.mdx deleted file mode 100644 index dc05908..0000000 --- a/6.5.0/community/roadmaps-build-api.mdx +++ /dev/null @@ -1,136 +0,0 @@ - ---- -title: 'Bazel Build API 2021 Roadmap' ---- - - -*Last verified: 2021-04-09* -([update history](https://github.com/bazelbuild/bazel-website/commits/master/roadmaps/build-api.md)) - -*Point of contact:* [comius](https://github.com/comius) - -*Discuss:* -[Build API roadmap: discussion](https://github.com/bazelbuild/bazel/issues/13008) - -## Scope - -Build API team is covering native rule implementations and the native API -exposed to Starlark. - -## Goal - -Have all rules implemented in Starlark and handed over to teams specialised in -the particular language. Remove language specific logic from Bazel’s core. - -## Java rules - -The Java rules will first be rewritten to Starlark and tested internally on a -large code-base. After that they will be released to Bazel. - -
-Q1 2021 - -* Improve the **Java sandwich**, making it possible to rewrite existing Java - rules. DONE - -
-Q2 2021 - -* Improve Starlark support for **native libraries** in Java. - IN PROGRESS -* Improve java_common support for plugins and IDEs - proposal - [Java common refactoring](https://docs.google.com/document/d/10isTEK5W9iCPp4BIyGBrLY5iti3Waaam6EeGVSjq3r8/edit). - IN PROGRESS -* **java_library** rule is Starlarkified. IN - PROGRESS - -
-Mid 2021 - -* **java_binary and java_test** rules are Starlarkified. -* **java_import and java_plugin** rules are Starlarkified. - -
-Fall 2021 - -* Starlarkification of **java_\*_proto_library** -* Remaining java rules are Starlarkified: **java_package_configuration, - java_runtime, java_toolchain**. - -
-2022 - -* Starlarkification of **java_common module**. - -## C++ rules - -Before C++ rules can be rewritten in Starlark some internal cleanups are needed. -After that the C++ rules will be rewritten to Starlark piece by piece using -builtins functionality. The API for C++ rules will not be made accessible from -.bzl files until cc_module is rewritten in Starlark as well. - -
-Q1 2021
-
-* **Clang modules** support. DROPPED*
-* **Include scanning** support, with performance improvements expected from
-  both. DROPPED*
-* *We need more data to evaluate whether modules are really what is needed to
-  improve performance.*
-Q2 2021 - -* Internal **Go rules** are Starlarkified IN - PROGRESS -* Objective-C rules **objc_library and objc_import** and native code related - to them are Starlarkified IN PROGRESS - -
-Fall 2021 and beginning 2022 - -* **cc_binary, cc_test and cc_library** are Starlarkified - -
-2022 - -* Starlarkification of other C++ rules (**fdo_profile, cc_import, - cc_toolchain, cc_toolchain_suite, fdo_prefetch_hints, cc_toolchain_alias, - cc_libc_top_alias, cc_host_toolchain_alias,** +2) -* Starlarkification of **cc_common module** - -## Misc - -
-Mid 2021
-
-* Aspect can propagate other aspects - proposal
-  [Aspects Propagating Other Aspects](https://docs.google.com/document/d/1fVNyskIgMoiNeOOGt57LdDmEkAShkYUKYQTkf5yD1fA/edit).
-  IN PROGRESS
-* Improve Starlark testing framework
diff --git a/6.5.0/community/roadmaps-starlark.mdx b/6.5.0/community/roadmaps-starlark.mdx
deleted file mode 100644
index cb3ef4c..0000000
--- a/6.5.0/community/roadmaps-starlark.mdx
+++ /dev/null
@@ -1,90 +0,0 @@
----
-title: 'Starlark Roadmap'
----
-
-
-*Last verified: 2020-04-21*
-([update history](https://github.com/bazelbuild/bazel-website/commits/master/roadmaps/starlark.md))
-
-*Point of contact:* [laurentlb](https://github.com/laurentlb)
-
-## Goal
-
-Our goal is to make Bazel more extensible. Users should be able to easily
-implement their own rules, and support new languages and tools. We want to
-improve the experience of writing and maintaining those rules.
-
-We focus on two areas:
-
-* Make the language and API simple, yet powerful.
-* Provide better tooling for reading, writing, updating, debugging, and testing the code.
-
-
-## Q2 2020
-
-Build health and Best practices:
-
-* P0. Discourage macros without a name, and ensure the name is a unique
-  string literal. This work is focused on the Google codebase, but may impact
-  tooling available publicly.
-* P0. Make Buildozer commands reliable with regard to selects and variables.
-* P1. Make Buildifier remove duplicates in lists that we don’t sort because of
-  comments.
-* P1. Update Buildifier linter to recommend inlining trivial expressions.
-* P2. Study use cases for native.existing_rule[s]() and propose alternatives.
-* P2. Study use cases for the prelude file and propose alternatives.
-
-Performance:
-
-* P1. Optimize the Starlark interpreter using flat environments and bytecode
-  compilation.
-
-Technical debt reduction:
-
-* P0. Add ability to port native symbols to Starlark underneath @bazel_tools.
-* P1. Delete obsolete flags (some of them are still used at Google, so we need to
-  clean the codebase first): `incompatible_always_check_depset_elements`,
-  `incompatible_disable_deprecated_attr_params`,
-  `incompatible_no_support_tools_in_action_inputs`, `incompatible_new_actions_api`.
-* P1. Ensure the following flags can be flipped in Bazel 4.0:
-  `incompatible_disable_depset_items`, `incompatible_no_implicit_file_export`,
-  `incompatible_run_shell_command_string`,
-  `incompatible_restrict_string_escapes`.
-* P1. Finish lib.syntax work (API cleanup, separation from Bazel).
-* P2. Reduce by 50% the build+test latency of a trivial edit to Bazel’s Java packages.
-
-Community:
-
-* `rules_python` is active and well-maintained by the community.
-* Continuous support for rules_jvm_external (no outstanding pull requests, issue
-  triage, making releases).
-* Maintain Bazel documentation infrastructure: centralize and canonicalize CSS
-  styles across bazel-website, bazel-blog, docs
-* Bazel docs: add CI tests for e2e doc site build to prevent regressions.
-
-## Q1 2020
-
-Build health and Best practices:
-
-* Allow targets to track their macro call stack, for exporting via `bazel query`
-* Implement `--incompatible_no_implicit_file_export`
-* Remove the deprecated depset APIs (#5817, #10313, #9017).
-* Add a cross file analyzer in Buildifier, implement a check for deprecated
-  functions.
-
-Performance:
-
-* Make Bazel’s own Java-based tests 2x faster.
-* Implement a Starlark CPU profiler.
-
-Technical debt reduction:
-
-* Remove 8 incompatible flags (after flipping them).
-* Finish lib.syntax cleanup work (break dependencies). -* Starlark optimization: flat environment, bytecode compilation -* Delete all serialization from analysis phase, if possible -* Make a plan for simplifying/optimizing lib.packages - -Community: - -* Publish a Glossary containing definitions for all the Bazel-specific terms diff --git a/6.5.0/community/sig.mdx b/6.5.0/community/sig.mdx deleted file mode 100644 index 150fb41..0000000 --- a/6.5.0/community/sig.mdx +++ /dev/null @@ -1,157 +0,0 @@ ---- -title: 'Bazel Special Interest Groups' ---- - - -Bazel hosts Special Interest Groups (SIGs) to focus collaboration on particular -areas and to support communication and coordination between [Bazel owners, -maintainers, and contributors](/contribute/contribution-policy). This policy -applies to [`bazelbuild`](http://github.com/bazelbuild). - -SIGs do their work in public. The ideal scope for a SIG covers a well-defined -domain, where the majority of participation is from the community. SIGs may -focus on community maintained repositories in `bazelbuild` (such as language -rules) or focus on areas of code in the Bazel repository (such as Remote -Execution). - -While not all SIGs will have the same level of energy, breadth of scope, or -governance models, there should be sufficient evidence that there are community -members willing to engage and contribute should the interest group be -established. Before joining, review the group's work, and then get in touch -with the SIG leader. Membership policies vary on a per-SIG basis. - -See the complete list of -[Bazel SIGs](https://github.com/bazelbuild/community/tree/master/sigs). - -### Non-goals: What a SIG is not - -SIGs are intended to facilitate collaboration on shared work. A SIG is -therefore: - -- *Not a support forum:* a mailing list and a SIG is not the same thing -- *Not immediately required:* early on in a project's life, you may not know - if you have shared work or collaborators -- *Not free labor:* energy is required to grow and coordinate the work - collaboratively - -Bazel Owners take a conservative approach to SIG creation—thanks to the ease of -starting projects on GitHub, there are many avenues where collaboration can -happen without the need for a SIG. - -## SIG lifecycle - -This section covers how to create a SIG. - -### Research and consultation - -To propose a new SIG group, first gather evidence for approval, as specified -below. Some possible avenues to consider are: - -- A well-defined problem or set of problems the group would solve -- Consultation with community members who would benefit, assessing both the - benefit and their willingness to commit -- For existing projects, evidence from issues and PRs that contributors care - about the topic -- Potential goals for the group to achieve -- Resource requirements of running the group - -Even if the need for a SIG seems self-evident, the research and consultation is -still important to the success of the group. - -### Create the new group - -The new group should follow the below process for chartering. In particular, it -must demonstrate: - -- A clear purpose and benefit to Bazel (either around a sub-project or - application area) -- Two or more contributors willing to act as group leads, existence of other - contributors, and evidence of demand for the group -- Each group needs to use at least one publicly accessible mailing list. 
A SIG - may reuse one of the public lists, such as - [bazel-discuss](https://groups.google.com/g/bazel-discuss), ask for a list - for @bazel.build, or create their own list -- Resources the SIG initially requires (usually, mailing list and regular - video call.) -- SIGs can serve documents and files from their directory in - [`bazelbuild/community`](https://github.com/bazelbuild/community) - or from their own repository in the - [`bazelbuild`](https://github.com/bazelbuild) GitHub - organization. SIGs may link to external resources if they choose to organize - their work outside of the `bazelbuild` GitHub organization -- Bazel Owners approve or reject SIG applications and consult other - stakeholders as necessary - -Before entering the formal parts of the process, you should consult with -the Bazel product team, at product@bazel.build. Most SIGs require conversation -and iteration before approval. - -The formal request for the new group is done by submitting a charter as a PR to -[`bazelbuild/community`](https://github.com/bazelbuild/community), -and including the request in the comments on the PR following the template -below. On approval, the PR for the group is merged and the required resources -created. - -### Template Request for New SIG - -To request a new SIG, use the template in the community repo: -[SIG-request-template.md](https://github.com/bazelbuild/community/blob/main/governance/SIG-request-template.md). - -### Chartering - -To establish a group, you need a charter and must follow the Bazel -[code of conduct](https://github.com/bazelbuild/bazel/blob/HEAD/CODE_OF_CONDUCT.md). -Archives of the group will be public. Membership may either be open to all -without approval, or available on request, pending approval of the group -administrator. - -The charter must nominate an administrator. As well as an administrator, the -group must include at least one person as lead (these may be the same person), -who serves as point of contact for coordination as required with the Bazel -product team. - -Group creators must post their charter to the group mailing list. The community -repository in the Bazel GitHub organization archives such documents and -policies. As groups evolve their practices and conventions, they should update -their charters within the relevant part of the community repository. - -### Collaboration and inclusion - -While not mandated, the group should choose to make use of collaboration -via scheduled conference calls or chat channels to conduct meetings. Any such -meetings should be advertised on the mailing list, and notes posted to the -mailing list afterwards. Regular meetings help drive accountability and progress -in a SIG. - -Bazel product team members may proactively monitor and encourage the group to -discussion and action as appropriate. - -### Launch a SIG - -Required activities: - -- Notify Bazel general discussion groups - ([bazel-discuss](https://groups.google.com/g/bazel-discuss), - [bazel-dev](https://groups.google.com/g/bazel-dev)). - -Optional activities: - -- Create a blog post for the Bazel blog - -### Health and termination of SIGs - -The Bazel owners make a best effort to ensure the health of SIGs. Bazel owners -occasionally request the SIG lead to report on the SIG's work, to inform the -broader Bazel community of the group's activity. - -If a SIG no longer has a useful purpose or interested community, it may be -archived and cease operation. 
The Bazel product team reserves the right to
-archive such inactive SIGs to maintain the overall health of the project,
-though it is a less preferable outcome. A SIG may also opt to disband if
-it recognizes it has reached the end of its useful life.
-
-## Note
-
-*This content has been adapted from TensorFlow’s
-[SIG playbook](https://www.tensorflow.org/community/sig_playbook)
-with modifications.*
diff --git a/6.5.0/community/update.mdx b/6.5.0/community/update.mdx
deleted file mode 100644
index 2adbe86..0000000
--- a/6.5.0/community/update.mdx
+++ /dev/null
@@ -1,17 +0,0 @@
----
-title: 'Community updates'
----
-
-
-Join Bazel developer relations engineers for the monthly community update
-livestream, or catch up on past ones.
-
-Title | Date | Description | Speakers
--------- | -------- | -------- | --------
-[Roadmap Introduction](https://www.youtube.com/watch?v=gYrZDl7K9JM) | 5/19/2022 | The inaugural Bazel Community Update, introducing the community to some of Google's Bazel leadership to talk about the general state of the project and its upcoming roadmap | Sven Tiffe, Tony Aiuto, Radhika Advani
-[Hands-On with Bzlmod](https://www.youtube.com/watch?v=MuW5XNcFukE) | 6/23/2022 | This month, we're joined by Google engineers Yun Peng and Xudong Yang to talk about Bzlmod, the new dependency system that is expected to go GA later this year. We'll cover the motivation behind the change, the new capabilities it brings to the table, and walk through some examples of it in action. | Yun Peng, Xudong Yang
-[Extending Gazelle to generate BUILD files](https://www.youtube.com/watch?v=E1-U7EAfhXw) | 7/21/2022 | This month we're joined by Son Luong Ngoc who will be showing the Gazelle language extension system. We'll briefly touch on how it works under the covers, existing extensions, and how to go about writing your own extensions to ease the migration to Bazel. | Son Luong Ngoc
-[Using Bazel for JavaScript Projects](https://www.youtube.com/watch?v=RIfYqX0JJYk) | 8/18/2022 | In this update, Alex Eagle joins us to talk about running JavaScript build tooling under Bazel. We'll look at a couple of examples: a Vue.js frontend and Nest backend. We'll cover the migration to newer rules_js provided by Aspect, and study how the tooling allows for fetching third-party dependencies and resolving them in the Node.js runtime. | Alex Eagle
-[Like Peanut Butter & Jelly: Integrating Bazel with JetBrains IntelliJ](https://www.youtube.com/watch?v=wMrua-W-LC4) | 9/15/2022 | Bazel is awesome. IntelliJ is awesome. Naturally, they are more awesome together. Bazel IntelliJ plugin gurus Mai Hussien from Google and Justin Kaeser from JetBrains join us this month to give a live demo and walkthrough of the plugin's capabilities. Both new and experienced plugin users are welcome to come with questions. | Mai Hussien, Justin Kaeser
-[Bazel at scale for surgical robots](https://www.youtube.com/watch?v=kCs1xa45yjM) | 10/27/2022 | What do you do when CMake CI runs for four hours? Join Guillaume Maudoux of Tweag to learn about how they migrated large, embedded robotic applications to Bazel. Topics include configuring toolchains for cross compilation, improving CI performance, managing third-party dependencies, and creating a positive developer experience — everything needed to ensure that Bazel lives up to “{Fast, Correct} — Choose Two”. 
| Guillaume Maudoux
-TBD | 12/15/2022 | |
diff --git a/6.5.0/concepts/build-ref.mdx b/6.5.0/concepts/build-ref.mdx
deleted file mode 100644
index f26936a..0000000
--- a/6.5.0/concepts/build-ref.mdx
+++ /dev/null
@@ -1,115 +0,0 @@
----
-title: 'Workspaces, packages, and targets'
----
-
-
-Bazel builds software from source code organized in a directory tree called a
-workspace. Source files in the workspace are organized in a nested hierarchy of
-packages, where each package is a directory that contains a set of related
-source files and one `BUILD` file. The `BUILD` file specifies what software
-outputs can be built from the source.
-
-
-## Workspace
-
-A _workspace_ is a directory tree on your filesystem that contains the source
-files for the software you want to build. Each workspace has a text file named
-`WORKSPACE` which may be empty, or may contain references to [external
-dependencies](/docs/external) required to build the outputs.
-
-Directories containing a file called `WORKSPACE` are considered the root of a
-workspace. Therefore, Bazel ignores any directory trees in a workspace rooted at
-a subdirectory containing a `WORKSPACE` file, as they form another workspace.
-
-Bazel also supports a `WORKSPACE.bazel` file as an alias of the `WORKSPACE`
-file. If both files exist, `WORKSPACE.bazel` is used.
-
-### Repositories
-
-Code is organized in _repositories_. The directory containing the `WORKSPACE`
-file is the root of the main repository, also called `@`. Other (external)
-repositories are defined in the `WORKSPACE` file using workspace rules, or
-generated from modules and extensions in the Bzlmod system. See [external
-dependencies overview](/external/overview) for more information.
-
-The workspace rules bundled with Bazel are documented in the [Workspace
-Rules](/reference/be/workspace) section in the [Build
-Encyclopedia](/reference/be/overview) and the documentation on [embedded
-Starlark repository rules](/rules/lib/repo/index).
-
-As external repositories are repositories themselves, they often contain a
-`WORKSPACE` file as well. However, these additional `WORKSPACE` files are
-ignored by Bazel. In particular, repositories depended upon transitively are not
-added automatically.
-
-## Packages
-
-The primary unit of code organization in a repository is the _package_. A
-package is a collection of related files and a specification of how they can be
-used to produce output artifacts.
-
-A package is defined as a directory containing a file named `BUILD` (or
-`BUILD.bazel`). A package includes all files in its directory, plus all
-subdirectories beneath it, except those which themselves contain a `BUILD` file.
-From this definition, no file or directory may be a part of two different
-packages.
-
-For example, in the following directory tree there are two packages, `my/app`,
-and the subpackage `my/app/tests`. Note that `my/app/data` is not a package, but
-a directory belonging to package `my/app`.
-
-```
-src/my/app/BUILD
-src/my/app/app.cc
-src/my/app/data/input.txt
-src/my/app/tests/BUILD
-src/my/app/tests/test.cc
-```
-
-## Targets
-
-A package is a container of _targets_, which are defined in the package's
-`BUILD` file. Most targets are one of two principal kinds, _files_ and _rules_.
-
-Files are further divided into two kinds. _Source files_ are usually written by
-the efforts of people, and checked in to the repository. _Generated files_,
-sometimes called derived files or output files, are not checked in, but are
-generated from source files.
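-
-As a minimal sketch of this distinction (the target and file names here are
-hypothetical), the `genrule` below produces a generated file, `version.h`,
-which is consumed alongside the checked-in source file `app.cc`:
-
-```python
-# version.h is a generated file: it is not checked in, but derived at
-# build time by this rule.
-genrule(
-    name = "gen_version_header",
-    outs = ["version.h"],
-    cmd = "echo '#define APP_VERSION 5' > $@",
-)
-
-# app.cc is a source file written by a person and checked in. The library
-# consumes the source file and the generated file in the same way.
-cc_library(
-    name = "app",
-    srcs = ["app.cc"],
-    hdrs = [":version.h"],
-)
-```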
- -The second kind of target is declared with a _rule_. Each rule instance -specifies the relationship between a set of input and a set of output files. The -inputs to a rule may be source files, but they also may be the outputs of other -rules. - -Whether the input to a rule is a source file or a generated file is in most -cases immaterial; what matters is only the contents of that file. This fact -makes it easy to replace a complex source file with a generated file produced by -a rule, such as happens when the burden of manually maintaining a highly -structured file becomes too tiresome, and someone writes a program to derive it. -No change is required to the consumers of that file. Conversely, a generated -file may easily be replaced by a source file with only local changes. - -The inputs to a rule may also include _other rules_. The precise meaning of such -relationships is often quite complex and language- or rule-dependent, but -intuitively it is simple: a C++ library rule A might have another C++ library -rule B for an input. The effect of this dependency is that B's header files are -available to A during compilation, B's symbols are available to A during -linking, and B's runtime data is available to A during execution. - -An invariant of all rules is that the files generated by a rule always belong to -the same package as the rule itself; it is not possible to generate files into -another package. It is not uncommon for a rule's inputs to come from another -package, though. - -Package groups are sets of packages whose purpose is to limit accessibility of -certain rules. Package groups are defined by the `package_group` function. They -have three properties: the list of packages they contain, their name, and other -package groups they include. The only allowed ways to refer to them are from the -`visibility` attribute of rules or from the `default_visibility` attribute of -the `package` function; they do not generate or consume files. For more -information, refer to the [`package_group` -documentation](/reference/be/functions#package_group). - - - Labels - diff --git a/6.5.0/concepts/platforms.mdx b/6.5.0/concepts/platforms.mdx deleted file mode 100644 index b95d0a3..0000000 --- a/6.5.0/concepts/platforms.mdx +++ /dev/null @@ -1,459 +0,0 @@ ---- -title: 'Building with Platforms' ---- - - -Bazel has sophisticated support for modeling [platforms][Platforms] and -[toolchains][Toolchains]. Integrating this with real projects requires -careful cooperation between code owners, rule maintainers, and core Bazel devs. - -This page summarizes the purpose of platforms and shows how to build with them. - -**tl;dr:** Bazel's platform and toolchain APIs are available but won't work -everywhere until all language rules, `select()`s and other legacy references -are updated. This work is ongoing. Eventually all builds will be platform-based. -Read below to see where your builds fit. - -For more formal documentation, see: - -* [Platforms][Platforms] -* [Toolchains][Toolchains] - -## Background - -*Platforms* and *toolchains* were introduced to *standardize* how software -projects target different machines and build with the right language tools. - -This is a relatively recent addition to Bazel. It was -[inspired][Inspiration] -by the observation that language maintainers were *already* doing this in ad -hoc, incompatible ways. For example, C++ rules use `--cpu` and `--crosstool_top` -to set a build's target CPU and C++ toolchain. Neither of these correctly models -a "platform". 
Historic attempts to do so caused awkward and inaccurate builds.
-These flags also don't control Java compilation, which evolved its own
-independent interface with `--java_toolchain`.
-
-Bazel is intended for large, multi-language, multi-platform projects. This
-demands more principled support for these concepts, including clear APIs that
-encourage language and project interoperability. This is what these new APIs are
-for.
-
-### Migration
-
-The platform and toolchain APIs only work when projects actually use them. This
-isn't trivial because a project's rule logic, toolchains, dependencies, and
-`select()`s have to support them. This requires a careful migration sequence
-to keep all projects and their dependencies working correctly.
-
-For example, Bazel's
-[C++ Rules] support platforms. But the [Apple Rules] don't. *Your* C++ project
-may not care about Apple. But others may. So
-it's not yet safe to globally enable platforms for all C++ builds.
-
-The remainder of this page describes this migration sequence and how and when
-your projects can fit in.
-
-## Goal
-
-Bazel's platform migration is complete when all projects build with the form:
-
-```posix-terminal
-bazel build //:myproject --platforms=//:myplatform
-```
-
-This implies:
-
-1. The rules your project uses can infer correct toolchains from
-`//:myplatform`.
-1. The rules your project's dependencies use can infer correct toolchains
-from `//:myplatform`.
-1. *Either* the projects depending on yours support `//:myplatform` *or* your
-project supports the legacy APIs (like `--crosstool_top`).
-1. `//:myplatform` references
-[common declarations][Common Platform Declarations]
-of `CPU`, `OS`, and other generic concepts that support automatic cross-project
-compatibility.
-1. All relevant projects'
-[`select()`s][select()]
-understand the machine properties implied by `//:myplatform`.
-1. `//:myplatform` is defined in a clear, reusable place: in your project's
-repo if the platform is unique to your project, otherwise somewhere all projects
-that may use this platform can find it.
-
-The old APIs will be removed as soon as this goal is achieved. Then this will
-be the standard way projects select platforms and toolchains.
-
-## Should I use platforms?
-
-If you just want to build or cross-compile a project, you should follow the
-project’s official documentation.
-
-If you’re a project, language, or toolchain maintainer, you'll eventually want
-to support the new APIs. Whether you wait until the global migration is complete
-or opt in early depends on your specific value / cost needs:
-
-### Value
-
-* You can `select()` or choose toolchains on the exact properties you care
-  about instead of hard-coded flags like `--cpu`. For example, multiple CPUs
-  can support the [same instruction set](https://en.wikipedia.org/wiki/SSE4).
-* More correct builds. If you `select()` with `--cpu` in the above example, then
-  add a new CPU that supports the same instruction set, the `select()`
-  fails to recognize the new CPU. But a `select()` on platforms remains accurate.
-* Simpler user experience. All projects understand:
-  `--platforms=//:myplatform`. No need for multiple language-specific
-  flags on the command line.
-* Simpler language design. All languages share a common API for defining
-  toolchains, using toolchains, and selecting the right toolchain for a platform.
-* Targets can be [skipped](/docs/platforms#skipping-incompatible-targets) in the
-  build and test phase if they are incompatible with the target platform (see
-  the sketch below).
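-
-A minimal sketch of that skipping behavior, using a hypothetical target:
-
-```python
-# When a wildcard build like `bazel build //...` runs with a --platforms
-# value that is not Linux-based, this target can be skipped rather than
-# failing the build.
-cc_binary(
-    name = "linux_only_tool",
-    srcs = ["tool.cc"],
-    target_compatible_with = ["@platforms//os:linux"],
-)
-```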
- -### Costs - -* Dependent projects that don't yet support platforms might not automatically work - with yours. -* Making them work may require [additional temporary maintenance](#platform-mappings). -* Co-existence of new and legacy APIs requires more careful user guidance to - avoid confusion. -* Canonical definitions for [common properties](#common-platform-properties) like - `OS` and `CPU` are still evolving and may require extra initial contributions. -* Canonical definitions for language-specific toolchains are still evolving and - may require extra initial contributions. - -## API review - -A [`platform`][platform Rule] is a collection of -[`constraint_value` targets][constraint_value Rule]: - -```python -platform( - name = "myplatform", - constraint_values = [ - "@platforms//os:linux", - "@platforms//cpu:arm", - ], -) -``` - -A [`constraint_value`][constraint_value Rule] is a machine -property. Values of the same "kind" are grouped under a common -[`constraint_setting`][constraint_setting Rule]: - -```python -constraint_setting(name = "os") -constraint_value( - name = "linux", - constraint_setting = ":os", -) -constraint_value( - name = "mac", - constraint_setting = ":os", -) -``` - -A [`toolchain`][Toolchains] is a [Starlark rule][Starlark rule]. Its -attributes declare a language's tools (like `compiler = -"//mytoolchain:custom_gcc"`). Its [providers][Starlark Provider] pass -this information to rules that need to build with these tools. - -Toolchains declare the `constraint_value`s of machines they can -[target][target_compatible_with Attribute] -(`target_compatible_with = ["@platforms//os:linux"]`) and machines their tools can -[run on][exec_compatible_with Attribute] -(`exec_compatible_with = ["@platforms//os:mac"]`). - -When building `$ bazel build //:myproject --platforms=//:myplatform`, Bazel -automatically selects a toolchain that can run on the build machine and -build binaries for `//:myplatform`. This is known as *toolchain resolution*. - -The set of available toolchains can be registered in the `WORKSPACE` with -[`register_toolchains`][register_toolchains Function] or at the -command line with [`--extra_toolchains`][extra_toolchains Flag]. - -See [here][Toolchains] for a deeper dive. - -## Status - -Current platform support varies among languages. All of Bazel's major rules are -moving to platforms. But this process will take time. This is for three main reasons: - -1. Rule logic must be updated to get tool info from the new [toolchain -API][Toolchains] (`ctx.toolchains`) and stop reading legacy settings like -`--cpu` and `--crosstool_top`. This is relatively straightforward. - -1. Toolchain maintainers must define toolchains and make them accessible to - users (in GitHub repositories and `WORKSPACE` entries). - This is technically straightforward but must be intelligently organized to - maintain an easy user experience. - - Platform definitions are also necessary (unless you build for the same machine - Bazel runs on). Generally, projects should define their own platforms. - -1. Existing projects must be migrated. `select()`s and - [transitions][Starlark transitions] also have to be - migrated. This is the biggest challenge. It's particularly challenging for - multi-language projects (which may fail if *all* languages can't read - `--platforms`). - -If you're designing a new rule set, you must support platforms from the -beginning. 
This automatically makes your rules compatible with other
-rules and projects, with increasing value as the platform API becomes
-more ubiquitous.
-
-### Common platform properties
-
-Platform properties like `OS` and `CPU` that are common across projects should
-be declared in a standard, centralized place. This encourages cross-project
-and cross-language compatibility.
-
-For example, if *MyApp* has a `select()` on `constraint_value`
-`@myapp//cpus:arm` and *SomeCommonLib* has a `select()` on
-`@commonlib//constraints:arm`, these trigger their "arm" modes with incompatible
-criteria.
-
-Globally common properties are declared in the
-[`@platforms`](https://github.com/bazelbuild/platforms) repo
-(so the canonical label for the above example is `@platforms//cpu:arm`).
-Language-common properties should be declared in the repos of their respective
-languages.
-
-### Default platforms
-
-Generally, project owners should define explicit
-[platforms][Defining Constraints and Platforms] to describe the
-kinds of machines they want to build for. These are then triggered with
-`--platforms`.
-
-When `--platforms` isn't set, Bazel defaults to a `platform` representing the
-local build machine. This is auto-generated at `@local_config_platform//:host`
-so there's no need to explicitly define it. It maps the local machine's `OS`
-and `CPU` to `constraint_value`s declared in
-[`@platforms`](https://github.com/bazelbuild/platforms).
-
-### C++
-
-Bazel's C++ rules use platforms to select toolchains when you set
-`--incompatible_enable_cc_toolchain_resolution`
-([#7260](https://github.com/bazelbuild/bazel/issues/7260)).
-
-This means you can configure a C++ project with:
-
-```posix-terminal
-bazel build //:my_cpp_project --platforms=//:myplatform
-```
-
-instead of the legacy:
-
-```posix-terminal
-bazel build //:my_cpp_project --cpu=... --crosstool_top=... --compiler=...
-```
-
-If your project is pure C++ and not depended on by non-C++ projects, you can use
-platforms safely as long as your [`select`](#select)s and
-[transitions](#transitions) are compatible. See
-[#7260](https://github.com/bazelbuild/bazel/issues/7260) and
-[Configuring C++ toolchains] for more guidance.
-
-This mode is not enabled by default. This is because Apple projects
-still configure C++ dependencies with `--cpu` and `--crosstool_top`
-([example](https://github.com/bazelbuild/bazel/issues/8716#issuecomment-507230303)). So this depends on the Apple rules migrating to platforms.
-
-### Java
-
-Bazel's Java rules use platforms.
-
-This replaces legacy flags `--java_toolchain`, `--host_java_toolchain`,
-`--javabase`, and `--host_javabase`.
-
-To learn how to use the configuration flags, see the [Bazel and Java](/docs/bazel-and-java) manual.
-For additional information, see the [Design document](https://docs.google.com/document/d/1MVbBxbKVKRJJY7DnkptHpvz7ROhyAYy4a-TZ-n7Q0r4).
-
-If you are still using legacy flags, follow the migration process in [Issue #7849](https://github.com/bazelbuild/bazel/issues/7849).
-
-### Android
-
-Bazel's Android rules use platforms to select toolchains when you set
-`--incompatible_enable_android_toolchain_resolution`.
-
-This is not enabled by default. But migration is well on its way.
-
-### Apple
-
-Bazel's Apple rules do not yet support platforms to select Apple toolchains.
-
-They also don't support platform-enabled C++ dependencies because they use the
-legacy `--crosstool_top` to set the C++ toolchain. 
Until this is migrated, you
-can mix Apple projects with platform-enabled C++ with [platform
-mappings](#platform-mappings)
-([example](https://github.com/bazelbuild/bazel/issues/8716#issuecomment-516572378)).
-
-### Other languages
-
-* Bazel's [Rust rules](https://github.com/bazelbuild/rules_rust) fully support
-platforms.
-* Bazel's [Go rules](https://github.com/bazelbuild/rules_go) fully support
-platforms
-([details](https://github.com/bazelbuild/rules_go#how-do-i-cross-compile)).
-
-If you're designing rules for a new language, use platforms
-to select your language's toolchains. See the
-[toolchains documentation](/docs/toolchains) for a good walkthrough.
-
-### `select()`
-
-Projects can [`select()`][select()] on
-[`constraint_value` targets][constraint_value Rule] but not complete
-platforms. This is intentional so that `select()`s support as wide a variety
-of machines as possible. A library with `ARM`-specific sources should support
-*all* `ARM`-powered machines unless there's reason to be more specific.
-
-To select on one or more `constraint_value`s, use:
-
-```python
-config_setting(
-    name = "is_arm",
-    constraint_values = [
-        "@platforms//cpu:arm",
-    ],
-)
-```
-
-This is equivalent to traditionally selecting on `--cpu`:
-
-```python
-config_setting(
-    name = "is_arm",
-    values = {
-        "cpu": "arm",
-    },
-)
-```
-
-More details [here][select() Platforms].
-
-`select`s on `--cpu`, `--crosstool_top`, etc. don't understand `--platforms`. When
-migrating your project to platforms, you must either convert them to
-`constraint_values` or use [platform mappings](#platform-mappings) to support
-both styles through the migration window.
-
-### Transitions
-
-[Starlark transitions][Starlark transitions] change
-flags down parts of your build graph. If your project uses a transition that
-sets `--cpu`, `--crosstool_top`, or other legacy flags, rules that read
-`--platforms` won't see these changes.
-
-When migrating your project to platforms, you must either convert changes like
-`return { "//command_line_option:cpu": "arm" }` to `return {
-"//command_line_option:platforms": "//:my_arm_platform" }` (a sketch of this
-form appears at the end of this section) or use [platform
-mappings](#platform-mappings) to support both styles through the migration
-window.
-
-## How to use platforms today
-
-If you just want to build or cross-compile a project, you should follow the
-project's official documentation. It's up to language and project maintainers to
-determine how and when to integrate with platforms, and what value that offers.
-
-If you're a project, language, or toolchain maintainer and your build doesn't
-use platforms by default, you have three options (besides waiting for the global
-migration):
-
-1. Flip on the "use platforms" flag for your project's languages ([if they have
-   one](#status)) and do whatever testing you need to see if the projects you care
-   about work.
-
-1. If the projects you care about still depend on legacy flags like `--cpu` and
-   `--crosstool_top`, use these together with `--platforms`:
-
-   ```posix-terminal
-   bazel build //:my_mixed_project --platforms=//:myplatform --cpu=... --crosstool_top=...
-   ```
-
-   This has some maintenance cost (you have to manually make sure the settings
-   match). But this should work in the absence of renegade
-   [transitions](#transitions).
-
-1. Write [platform mappings](#platform-mappings) to support both styles by
-   mapping `--cpu`-style settings to corresponding platforms and vice versa.
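-
-Here is a minimal sketch of the platform-based form of such a transition,
-assuming a hypothetical `//:my_arm_platform` target (names are illustrative
-only):
-
-```python
-def _arm_transition_impl(settings, attr):
-    # Set --platforms instead of legacy flags like --cpu.
-    return {"//command_line_option:platforms": "//:my_arm_platform"}
-
-arm_transition = transition(
-    implementation = _arm_transition_impl,
-    inputs = [],
-    outputs = ["//command_line_option:platforms"],
-)
-
-# Attach via `cfg = arm_transition` on a rule or attribute definition.
-```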
-
-### Platform mappings
-
-*Platform mappings* is a temporary API that lets platform-powered and
-legacy-powered logic co-exist in the same build through the latter's deprecation
-window.
-
-A platform mapping is a map of either a `platform()` to a
-corresponding set of legacy flags or the reverse. For example:
-
-```python
-platforms:
-  # Maps "--platforms=//platforms:ios" to "--cpu=ios_x86_64 --apple_platform_type=ios".
-  //platforms:ios
-    --cpu=ios_x86_64
-    --apple_platform_type=ios
-
-flags:
-  # Maps "--cpu=ios_x86_64 --apple_platform_type=ios" to "--platforms=//platforms:ios".
-  --cpu=ios_x86_64
-  --apple_platform_type=ios
-  //platforms:ios
-
-  # Maps "--cpu=darwin --apple_platform_type=macos" to "//platforms:macos".
-  --cpu=darwin
-  --apple_platform_type=macos
-  //platforms:macos
-```
-
-Bazel uses this to guarantee all settings, both platform-based and
-legacy, are consistently applied throughout the build, including through
-[transitions](#transitions).
-
-By default Bazel reads mappings from the `platform_mappings` file in your
-workspace root. You can also set
-`--platform_mappings=//:my_custom_mapping`.
-
-See
-[here](https://docs.google.com/document/d/1Vg_tPgiZbSrvXcJ403vZVAGlsWhH9BUDrAxMOYnO0Ls/edit)
-for complete details.
-
-## Questions
-
-For general support and questions about the migration timeline, contact
-[bazel-discuss@googlegroups.com](https://groups.google.com/forum/#!forum/bazel-discuss)
-or the owners of the appropriate rules.
-
-For discussions on the design and evolution of the platform/toolchain APIs,
-contact
-[bazel-dev@googlegroups.com](https://groups.google.com/forum/#!forum/bazel-dev).
-
-## See also
-
-* [Configurable Builds - Part 1](https://blog.bazel.build/2019/02/11/configurable-builds-part-1.html)
-* [Platforms]
-* [Toolchains]
-* [Bazel Platforms Cookbook](https://docs.google.com/document/d/1UZaVcL08wePB41ATZHcxQV4Pu1YfA1RvvWm8FbZHuW8/)
-* [`hlopko/bazel_platforms_examples`](https://github.com/hlopko/bazel_platforms_examples)
-* [Example C++ custom toolchain](https://github.com/gregestren/snippets/tree/master/custom_cc_toolchain_with_platforms)
-
-[Platforms]: /docs/platforms
-[Toolchains]: /docs/toolchains
-[Inspiration]: https://blog.bazel.build/2019/02/11/configurable-builds-part-1.html
-[C++ Rules]: /docs/bazel-and-cpp
-[Android Rules]: /docs/bazel-and-android
-[Apple Rules]: https://github.com/bazelbuild/rules_apple
-[Common Platform Declarations]: https://github.com/bazelbuild/platforms#motivation
-[select()]: /docs/configurable-attributes
-[select() Platforms]: /docs/configurable-attributes#platforms
-[platform Rule]: /reference/be/platform#platform
-[constraint_value Rule]: /reference/be/platform#constraint_value
-[constraint_setting Rule]: /reference/be/platform#constraint_setting
-[Starlark rule]: /rules/rules
-[Starlark provider]: /rules/rules#providers
-[target_compatible_with Attribute]: /reference/be/platform#toolchain.target_compatible_with
-[exec_compatible_with Attribute]: /reference/be/platform#toolchain.exec_compatible_with
-[register_toolchains Function]: /rules/lib/globals#register_toolchains
-[extra_toolchains Flag]: /reference/command-line-reference#flag--extra_toolchains
-[Starlark transitions]: /rules/config#user-defined-transitions
-[Defining Constraints and Platforms]: /docs/platforms#constraints-platforms
-[Configuring C++ toolchains]: /tutorials/cc-toolchain-config
diff --git a/6.5.0/concepts/visibility.mdx b/6.5.0/concepts/visibility.mdx
deleted file mode 100644
index 082b855..0000000
--- a/6.5.0/concepts/visibility.mdx
+++ /dev/null @@ -1,461 +0,0 @@ ---- -title: 'Visibility' ---- - - -This page covers Bazel's two visibility systems: -[target visibility](#target-visibility) and [load visibility](#load-visibility). - -Both types of visibility help other developers distinguish between your -library's public API and its implementation details, and help enforce structure -as your workspace grows. You can also use visibility when deprecating a public -API to allow current users while denying new ones. - -## Target visibility - -**Target visibility** controls who may depend on your target — that is, who may -use your target's label inside an attribute such as `deps`. - -A target `A` is visible to a target `B` if they are in the same package, or if -`A` grants visibility to `B`'s package. Thus, packages are the unit of -granularity for deciding whether or not to allow access. If `B` depends on `A` -but `A` is not visible to `B`, then any attempt to build `B` fails during -[analysis](/reference/glossary#analysis-phase). - -Note that granting visibility to a package does not by itself grant visibility -to its subpackages. For more details on package and subpackages, see -[Concepts and terminology](/concepts/build-ref). - -For prototyping, you can disable target visibility enforcement by setting the -flag `--check_visibility=false`. This should not be done for production usage in -submitted code. - -The primary way to control visibility is with the -[`visibility`](/reference/be/common-definitions#common.visibility) attribute on -rule targets. This section describes the format of this attribute, and how to -determine a target's visibility. - -### Visibility specifications - -All rule targets have a `visibility` attribute that takes a list of labels. Each -label has one of the following forms. With the exception of the last form, these -are just syntactic placeholders that do not correspond to any actual target. - -* `"//visibility:public"`: Grants access to all packages. (May not be combined - with any other specification.) - -* `"//visibility:private"`: Does not grant any additional access; only targets - in this package can use this target. (May not be combined with any other - specification.) - -* `"//foo/bar:__pkg__"`: Grants access to `//foo/bar` (but not its - subpackages). - -* `"//foo/bar:__subpackages__"`: Grants access `//foo/bar` and all of its - direct and indirect subpackages. - -* `"//some_pkg:my_package_group"`: Grants access to all of the packages that - are part of the given [`package_group`](/reference/be/functions#package_group). - - * Package groups use a - [different syntax](/reference/be/functions#package_group.packages) for - specifying packages. Within a package group, the forms - `"//foo/bar:__pkg__"` and `"//foo/bar:__subpackages__"` are respectively - replaced by `"//foo/bar"` and `"//foo/bar/..."`. Likewise, - `"//visibility:public"` and `"//visibility:private"` are just `"public"` - and `"private"`. - -For example, if `//some/package:mytarget` has its `visibility` set to -`[":__subpackages__", "//tests:__pkg__"]`, then it could be used by any target -that is part of the `//some/package/...` source tree, as well as targets defined -in `//tests/BUILD`, but not by targets defined in `//tests/integration/BUILD`. - -**Best practice:** To make several targets visible to the same set -of packages, use a `package_group` instead of repeating the list in each -target's `visibility` attribute. This increases readability and prevents the -lists from getting out of sync. 
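-
-A minimal sketch of that best practice (the names here are hypothetical): both
-targets reference one `package_group`, so the allowlist is maintained in a
-single place.
-
-```python
-package_group(
-    name = "my_clients",
-    packages = [
-        "//some/package/...",
-        "//tests/...",
-    ],
-)
-
-cc_library(
-    name = "api",
-    srcs = ["api.cc"],
-    visibility = [":my_clients"],
-)
-
-cc_library(
-    name = "helpers",
-    srcs = ["helpers.cc"],
-    visibility = [":my_clients"],
-)
-```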
- -Note: The `visibility` attribute may not specify non-`package_group` targets. -Doing so triggers a "Label does not refer to a package group" or "Cycle in -dependency graph" error. - -### Rule target visibility - -A rule target's visibility is: - -1. The value of its `visibility` attribute, if set; or else - -2. The value of the -[`default_visibility`](/reference/be/functions#package.default_visibility) -argument of the [`package`](/reference/be/functions#package) statement in the -target's `BUILD` file, if such a declaration exists; or else - -3. `//visibility:private`. - -**Best practice:** Avoid setting `default_visibility` to public. It may be -convenient for prototyping or in small codebases, but the risk of inadvertently -creating public targets increases as the codebase grows. It's better to be -explicit about which targets are part of a package's public interface. - -#### Example - -File `//frobber/bin/BUILD`: - -```python -# This target is visible to everyone -cc_binary( - name = "executable", - visibility = ["//visibility:public"], - deps = [":library"], -) - -# This target is visible only to targets declared in the same package -cc_library( - name = "library", - # No visibility -- defaults to private since no - # package(default_visibility = ...) was used. -) - -# This target is visible to targets in package //object and //noun -cc_library( - name = "subject", - visibility = [ - "//noun:__pkg__", - "//object:__pkg__", - ], -) - -# See package group "//frobber:friends" (below) for who can -# access this target. -cc_library( - name = "thingy", - visibility = ["//frobber:friends"], -) -``` - -File `//frobber/BUILD`: - -```python -# This is the package group declaration to which target -# //frobber/bin:thingy refers. -# -# Our friends are packages //frobber, //fribber and any -# subpackage of //fribber. -package_group( - name = "friends", - packages = [ - "//fribber/...", - "//frobber", - ], -) -``` - -### Generated file target visibility - -A generated file target has the same visibility as the rule target that -generates it. - -### Source file target visibility - -You can explicitly set the visibility of a source file target by calling -[`exports_files`](/reference/be/functions#exports_files). When no `visibility` -argument is passed to `exports_files`, it makes the visibility public. -`exports_files` may not be used to override the visibility of a generated file. - -For source file targets that do not appear in a call to `exports_files`, the -visibility depends on the value of the flag -[`--incompatible_no_implicit_file_export`](https://github.com/bazelbuild/bazel/issues/10225): - -* If the flag is set, the visibility is private. - -* Else, the legacy behavior applies: The visibility is the same as the - `BUILD` file's `default_visibility`, or private if a default visibility is - not specified. - -Avoid relying on the legacy behavior. Always write an `exports_files` -declaration whenever a source file target needs non-private visibility. - -**Best practice:** When possible, prefer to expose a rule target rather than a -source file. For example, instead of calling `exports_files` on a `.java` file, -wrap the file in a non-private `java_library` target. Generally, rule targets -should only directly reference source files that live in the same package. 
- -#### Example - -File `//frobber/data/BUILD`: - -```python -exports_files(["readme.txt"]) -``` - -File `//frobber/bin/BUILD`: - -```python -cc_binary( - name = "my-program", - data = ["//frobber/data:readme.txt"], -) -``` - -### Config setting visibility - -Historically, Bazel has not enforced visibility for -[`config_setting`](/reference/be/general#config_setting) targets that are -referenced in the keys of a [`select()`](/reference/be/functions#select). There -are two flags to remove this legacy behavior: - -* [`--incompatible_enforce_config_setting_visibility`](https://github.com/bazelbuild/bazel/issues/12932) - enables visibility checking for these targets. To assist with migration, it - also causes any `config_setting` that does not specify a `visibility` to be - considered public (regardless of package-level `default_visibility`). - -* [`--incompatible_config_setting_private_default_visibility`](https://github.com/bazelbuild/bazel/issues/12933) - causes `config_setting`s that do not specify a `visibility` to respect the - package's `default_visibility` and to fallback on private visibility, just - like any other rule target. It is a no-op if - `--incompatible_enforce_config_setting_visibility` is not set. - -Avoid relying on the legacy behavior. Any `config_setting` that is intended to -be used outside the current package should have an explicit `visibility`, if the -package does not already specify a suitable `default_visibility`. - -### Package group target visibility - -`package_group` targets do not have a `visibility` attribute. They are always -publicly visible. - -### Visibility of implicit dependencies - -Some rules have [implicit dependencies](/extending/rules#private_attributes_and_implicit_dependencies) — -dependencies that are not spelled out in a `BUILD` file but are inherent to -every instance of that rule. For example, a `cc_library` rule might create an -implicit dependency from each of its rule targets to an executable target -representing a C++ compiler. - -The visibility of such an implicit dependency is checked with respect to the -package containing the `.bzl` file in which the rule (or aspect) is defined. In -our example, the C++ compiler could be private so long as it lives in the same -package as the definition of the `cc_library` rule. As a fallback, if the -implicit dependency is not visible from the definition, it is checked with -respect to the `cc_library` target. - -You can change this behavior by disabling -[`--incompatible_visibility_private_attributes_at_definition`](https://github.com/bazelbuild/proposals/blob/master/designs/2019-10-15-tool-visibility.md). -When disabled, implicit dependencies are treated like any other dependency. -This means that the target being depended on (such as our C++ compiler) must be -visible to every instance of the rule. In practice this usually means the target -must have public visibility. - -If you want to restrict the usage of a rule to certain packages, use -[load visibility](#load-visibility) instead. - -## Load visibility - -**Load visibility** controls whether a `.bzl` file may be loaded from other -`BUILD` or `.bzl` files. - -In the same way that target visibility protects source code that is encapsulated -by targets, load visibility protects build logic that is encapsulated by `.bzl` -files. For instance, a `BUILD` file author might wish to factor some repetitive -target definitions into a macro in a `.bzl` file. 
Without the protection of load -visibility, they might find their macro reused by other collaborators in the -same workspace, so that modifying the macro breaks other teams' builds. - -Note that a `.bzl` file may or may not have a corresponding source file target. -If it does, there is no guarantee that the load visibility and the target -visibility coincide. That is, the same `BUILD` file might be able to load the -`.bzl` file but not list it in the `srcs` of a [`filegroup`](/reference/be/general#filegroup), -or vice versa. This can sometimes cause problems for rules that wish to consume -`.bzl` files as source code, such as for documentation generation or testing. - -For prototyping, you may disable load visibility enforcement by setting -`--check_bzl_visibility=false`. As with `--check_visibility=false`, this should -not be done for submitted code. - -Load visibility is available as of Bazel 6.0. - -### Declaring load visibility - -To set the load visibility of a `.bzl` file, call the -[`visibility()`](/rules/lib/globals#visibility) function from within the file. -The argument to `visibility()` is a list of package specifications, just like -the [`packages`](/reference/be/functions#package_group.packages) attribute of -`package_group`. However, `visibility()` does not accept negative package -specifications. - -The call to `visibility()` must only occur once per file, at the top level (not -inside a function), and ideally immediately following the `load()` statements. - -Unlike target visibility, the default load visibility is always public. Files -that do not call `visibility()` are always loadable from anywhere in the -workspace. It is a good idea to add `visibility("private")` to the top of any -new `.bzl` file that is not specifically intended for use outside the package. - -### Example - -```python -# //mylib/internal_defs.bzl - -# Available to subpackages and to mylib's tests. -visibility(["//mylib/...", "//tests/mylib/..."]) - -def helper(...): - ... -``` - -```python -# //mylib/rules.bzl - -load(":internal_defs.bzl", "helper") -# Set visibility explicitly, even though public is the default. -# Note the [] can be omitted when there's only one entry. -visibility("public") - -myrule = rule( - ... -) -``` - -```python -# //someclient/BUILD - -load("//mylib:rules.bzl", "myrule") # ok -load("//mylib:internal_defs.bzl", "helper") # error - -... -``` - -### Load visibility practices - -This section describes tips for managing load visibility declarations. - -#### Factoring visibilities - -When multiple `.bzl` files should have the same visibility, it can be helpful to -factor their package specifications into a common list. For example: - -```python -# //mylib/internal_defs.bzl - -visibility("private") - -clients = [ - "//foo", - "//bar/baz/...", - ... -] -``` - -```python -# //mylib/feature_A.bzl - -load(":internal_defs.bzl", "clients") -visibility(clients) - -... -``` - -```python -# //mylib/feature_B.bzl - -load(":internal_defs.bzl", "clients") -visibility(clients) - -... -``` - -This helps prevent accidental skew between the various `.bzl` files' -visibilities. It also is more readable when the `clients` list is large. - -#### Composing visibilities - -Sometimes a `.bzl` file might need to be visible to an allowlist that is -composed of multiple smaller allowlists. This is analogous to how a -`package_group` can incorporate other `package_group`s via its -[`includes`](/reference/be/functions#package_group.includes) attribute. - -Suppose you are deprecating a widely used macro. 
You want it to be visible only
-to existing users and to the packages owned by your own team. You might write:
-
-```python
-# //mylib/macros.bzl
-
-load(":internal_defs.bzl", "our_packages")
-load("//some_big_client:defs.bzl", "their_remaining_uses")
-
-# List concatenation. Duplicates are fine.
-visibility(our_packages + their_remaining_uses)
-```
-
-#### Deduplicating with package groups
-
-Unlike target visibility, you cannot define a load visibility in terms of a
-`package_group`. If you want to reuse the same allowlist for both target
-visibility and load visibility, it's best to move the list of package
-specifications into a `.bzl` file, where both kinds of declarations may refer to
-it. Building off the example in [Factoring visibilities](#factoring-visibilities)
-above, you might write:
-
-```python
-# //mylib/BUILD
-
-load(":internal_defs.bzl", "clients")
-
-package_group(
-    name = "my_pkg_grp",
-    packages = clients,
-)
-```
-
-This only works if the list does not contain any negative package
-specifications.
-
-#### Protecting individual symbols
-
-Any Starlark symbol whose name begins with an underscore cannot be loaded from
-another file. This makes it easy to create private symbols, but does not allow
-you to share these symbols with a limited set of trusted files. On the other
-hand, load visibility gives you control over what other packages may see your
-`.bzl` file, but does not allow you to prevent any non-underscored symbol from
-being loaded.
-
-Luckily, you can combine these two features to get fine-grained control.
-
-```python
-# //mylib/internal_defs.bzl
-
-# Can't be public, because internal_helper shouldn't be exposed to the world.
-visibility("private")
-
-# Can't be underscore-prefixed, because this is
-# needed by other .bzl files in mylib.
-def internal_helper(...):
-    ...
-
-def public_util(...):
-    ...
-```
-
-```python
-# //mylib/defs.bzl
-
-load(":internal_defs.bzl", "internal_helper", _public_util="public_util")
-visibility("public")
-
-# internal_helper, as a loaded symbol, is available for use in this file but
-# can't be imported by clients who load this file.
-...
-
-# Re-export public_util from this file by assigning it to a global variable.
-# We needed to import it under a different name ("_public_util") in order for
-# this assignment to be legal.
-public_util = _public_util
-```
-
-#### bzl-visibility Buildifier lint
-
-There is a [Buildifier lint](https://github.com/bazelbuild/buildtools/blob/master/WARNINGS.md#bzl-visibility)
-that provides a warning if users load a file from a directory named `internal`
-or `private`, when the user's file is not itself underneath the parent of that
-directory. This lint predates the load visibility feature and is unnecessary in
-workspaces where `.bzl` files declare visibilities.
diff --git a/6.5.0/configure/attributes.mdx b/6.5.0/configure/attributes.mdx
deleted file mode 100644
index 86e7117..0000000
--- a/6.5.0/configure/attributes.mdx
+++ /dev/null
@@ -1,1093 +0,0 @@
----
-title: 'Configurable Build Attributes'
----
-
-
-**_Configurable attributes_**, commonly known as [`select()`](
-/reference/be/functions#select), is a Bazel feature that lets users toggle the values
-of build rule attributes at the command line.
-
-This can be used, for example, for a multiplatform library that automatically
-chooses the appropriate implementation for the architecture, or for a
-feature-configurable binary that can be customized at build time. 
-
-## Example
-
-```python
-# myapp/BUILD
-
-cc_binary(
-    name = "mybinary",
-    srcs = ["main.cc"],
-    deps = select({
-        ":arm_build": [":arm_lib"],
-        ":x86_debug_build": [":x86_dev_lib"],
-        "//conditions:default": [":generic_lib"],
-    }),
-)
-
-config_setting(
-    name = "arm_build",
-    values = {"cpu": "arm"},
-)
-
-config_setting(
-    name = "x86_debug_build",
-    values = {
-        "cpu": "x86",
-        "compilation_mode": "dbg",
-    },
-)
-```
-
-This declares a `cc_binary` that "chooses" its deps based on the flags at the
-command line. Specifically, `deps` becomes:
-
-| Command | `deps =` |
-| ------- | -------- |
-| `bazel build //myapp:mybinary --cpu=arm` | `[":arm_lib"]` |
-| `bazel build //myapp:mybinary -c dbg --cpu=x86` | `[":x86_dev_lib"]` |
-| `bazel build //myapp:mybinary --cpu=ppc` | `[":generic_lib"]` |
-| `bazel build //myapp:mybinary -c dbg --cpu=ppc` | `[":generic_lib"]` |
-
-`select()` serves as a placeholder for a value that will be chosen based on
-*configuration conditions*, which are labels referencing [`config_setting`](/reference/be/general#config_setting)
-targets. By using `select()` in a configurable attribute, the attribute
-effectively adopts different values when different conditions hold.
-
-Matches must be unambiguous: if multiple conditions match, then either:
-* They all resolve to the same value. For example, when building on Linux x86_64,
-  `{"@platforms//os:linux": "Hello", "@platforms//cpu:x86_64": "Hello"}` is
-  unambiguous because both branches resolve to "Hello".
-* One's `values` is a strict superset of all others'. For example, `values = {"cpu": "x86", "compilation_mode": "dbg"}`
-  is an unambiguous specialization of `values = {"cpu": "x86"}`.
-
-The built-in condition [`//conditions:default`](#default-condition) automatically matches when
-nothing else does.
-
-While this example uses `deps`, `select()` works just as well on `srcs`,
-`resources`, `cmd`, and most other attributes. Only a small number of attributes
-are *non-configurable*, and these are clearly annotated. For example,
-`config_setting`'s own
-[`values`](/reference/be/general#config_setting.values) attribute is non-configurable.
-
-## `select()` and dependencies
-
-Certain attributes change the build parameters for all transitive dependencies
-under a target. For example, `genrule`'s `tools` changes `--cpu` to the CPU of
-the machine running Bazel (which, thanks to cross-compilation, may be different
-from the CPU the target is built for). This is known as a
-[configuration transition](/reference/glossary#transition).
-
-Given
-
-```python
-#myapp/BUILD
-
-config_setting(
-    name = "arm_cpu",
-    values = {"cpu": "arm"},
-)
-
-config_setting(
-    name = "x86_cpu",
-    values = {"cpu": "x86"},
-)
-
-genrule(
-    name = "my_genrule",
-    srcs = select({
-        ":arm_cpu": ["g_arm.src"],
-        ":x86_cpu": ["g_x86.src"],
-    }),
-    tools = select({
-        ":arm_cpu": [":tool1"],
-        ":x86_cpu": [":tool2"],
-    }),
-)
-
-cc_binary(
-    name = "tool1",
-    srcs = select({
-        ":arm_cpu": ["armtool.cc"],
-        ":x86_cpu": ["x86tool.cc"],
-    }),
-)
-```
-
-running
-
-```sh
-$ bazel build //myapp:my_genrule --cpu=arm
-```
-
-on an `x86` developer machine binds the build to `g_arm.src`, `tool1`, and
-`x86tool.cc`. Both of the `select`s attached to `my_genrule` use `my_genrule`'s
-build parameters, which include `--cpu=arm`. The `tools` attribute changes
-`--cpu` to `x86` for `tool1` and its transitive dependencies. The `select` on
-`tool1` uses `tool1`'s build parameters, which include `--cpu=x86`.
-
-## Configuration conditions
-
-Each key in a configurable attribute is a label reference to a
-[`config_setting`](/reference/be/general#config_setting) or
-[`constraint_value`](/reference/be/platform#constraint_value).
-
-`config_setting` is just a collection of
-expected command line flag settings. By encapsulating these in a target, it's
-easy to maintain "standard" conditions users can reference from multiple places.
-
-`constraint_value` provides support for [multi-platform behavior](#platforms).
-
-### Built-in flags
-
-Flags like `--cpu` are built into Bazel: the build tool natively understands
-them for all builds in all projects. These are specified with
-[`config_setting`](/reference/be/general#config_setting)'s
-[`values`](/reference/be/general#config_setting.values) attribute:
-
-```python
-config_setting(
-    name = "meaningful_condition_name",
-    values = {
-        "flag1": "value1",
-        "flag2": "value2",
-        ...
- }, -) -``` - -`flagN` is a flag name (without `--`, so `"cpu"` instead of `"--cpu"`). `valueN` -is the expected value for that flag. `:meaningful_condition_name` matches if -*every* entry in `values` matches. Order is irrelevant. - -`valueN` is parsed as if it was set on the command line. This means: - -* `values = { "compilation_mode": "opt" }` matches `bazel build -c opt` -* `values = { "force_pic": "true" }` matches `bazel build --force_pic=1` -* `values = { "force_pic": "0" }` matches `bazel build --noforce_pic` - -`config_setting` only supports flags that affect target behavior. For example, -[`--show_progress`](/docs/user-manual#show-progress) isn't allowed because -it only affects how Bazel reports progress to the user. Targets can't use that -flag to construct their results. The exact set of supported flags isn't -documented. In practice, most flags that "make sense" work. - -### Custom flags - -You can model your own project-specific flags with -[Starlark build settings][BuildSettings]. Unlike built-in flags, these are -defined as build targets, so Bazel references them with target labels. - -These are triggered with [`config_setting`](/reference/be/general#config_setting)'s -[`flag_values`](/reference/be/general#config_setting.flag_values) -attribute: - -```python -config_setting( - name = "meaningful_condition_name", - flag_values = { - "//myflags:flag1": "value1", - "//myflags:flag2": "value2", - ... - }, -) -``` - -Behavior is the same as for [built-in flags](#built-in-flags). See [here](https://github.com/bazelbuild/examples/tree/HEAD/configurations/select_on_build_setting) -for a working example. - -[`--define`](/reference/command-line-reference#flag--define) -is an alternative legacy syntax for custom flags (for example -`--define foo=bar`). This can be expressed either in the -[values](/reference/be/general#config_setting.values) attribute -(`values = {"define": "foo=bar"}`) or the -[define_values](/reference/be/general#config_setting.define_values) attribute -(`define_values = {"foo": "bar"}`). `--define` is only supported for backwards -compatibility. Prefer Starlark build settings whenever possible. - -`values`, `flag_values`, and `define_values` evaluate independently. The -`config_setting` matches if all values across all of them match. - -## The default condition - -The built-in condition `//conditions:default` matches when no other condition -matches. - -Because of the "exactly one match" rule, a configurable attribute with no match -and no default condition emits a `"no matching conditions"` error. This can -protect against silent failures from unexpected settings: - -```python -# myapp/BUILD - -config_setting( - name = "x86_cpu", - values = {"cpu": "x86"}, -) - -cc_library( - name = "x86_only_lib", - srcs = select({ - ":x86_cpu": ["lib.cc"], - }), -) -``` - -```sh -$ bazel build //myapp:x86_only_lib --cpu=arm -ERROR: Configurable attribute "srcs" doesn't match this configuration (would -a default condition help?). -Conditions checked: - //myapp:x86_cpu -``` - -For even clearer errors, you can set custom messages with `select()`'s -[`no_match_error`](#custom-error-messages) attribute. - -## Platforms - -While the ability to specify multiple flags on the command line provides -flexibility, it can also be burdensome to individually set each one every time -you want to build a target. - [Platforms](/docs/platforms) -let you consolidate these into simple bundles. 
-
-```python
-# myapp/BUILD
-
-sh_binary(
-    name = "my_rocks",
-    srcs = select({
-        ":basalt": ["pyroxene.sh"],
-        ":marble": ["calcite.sh"],
-        "//conditions:default": ["feldspar.sh"],
-    }),
-)
-
-config_setting(
-    name = "basalt",
-    constraint_values = [
-        ":black",
-        ":igneous",
-    ],
-)
-
-config_setting(
-    name = "marble",
-    constraint_values = [
-        ":white",
-        ":metamorphic",
-    ],
-)
-
-# constraint_setting acts as an enum type, and constraint_value as an enum value.
-constraint_setting(name = "color")
-constraint_value(name = "black", constraint_setting = "color")
-constraint_value(name = "white", constraint_setting = "color")
-constraint_setting(name = "texture")
-constraint_value(name = "smooth", constraint_setting = "texture")
-constraint_setting(name = "type")
-constraint_value(name = "igneous", constraint_setting = "type")
-constraint_value(name = "metamorphic", constraint_setting = "type")
-
-platform(
-    name = "basalt_platform",
-    constraint_values = [
-        ":black",
-        ":igneous",
-    ],
-)
-
-platform(
-    name = "marble_platform",
-    constraint_values = [
-        ":white",
-        ":smooth",
-        ":metamorphic",
-    ],
-)
-```
-
-The platform can be specified on the command line. It activates the
-`config_setting`s that contain a subset of the platform's `constraint_values`,
-allowing those `config_setting`s to match in `select()` expressions.
-
-For example, in order to set the `srcs` attribute of `my_rocks` to `calcite.sh`,
-you can simply run:
-
-```sh
-bazel build //myapp:my_rocks --platforms=//myapp:marble_platform
-```
-
-Without platforms, this might look something like:
-
-```sh
-bazel build //myapp:my_rocks --define color=white --define texture=smooth --define type=metamorphic
-```
-
-`select()` can also directly read `constraint_value`s:
-
-```python
-constraint_setting(name = "type")
-constraint_value(name = "igneous", constraint_setting = "type")
-constraint_value(name = "metamorphic", constraint_setting = "type")
-sh_binary(
-    name = "my_rocks",
-    srcs = select({
-        ":igneous": ["igneous.sh"],
-        ":metamorphic": ["metamorphic.sh"],
-    }),
-)
-```
-
-This saves the need for boilerplate `config_setting`s when you only need to
-check against single values.
-
-Platforms are still under development. See the
-[documentation](/concepts/platforms-intro) for details.
-
-## Combining `select()`s
-
-`select` can appear multiple times in the same attribute:
-
-```python
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"] +
-           select({
-               ":armeabi_mode": ["armeabi_src.sh"],
-               ":x86_mode": ["x86_src.sh"],
-           }) +
-           select({
-               ":opt_mode": ["opt_extras.sh"],
-               ":dbg_mode": ["dbg_extras.sh"],
-           }),
-)
-```
-
-Note: Some restrictions apply to what can be combined in the values of
-`select`s (see the sketch at the end of this section):
- - Duplicate labels can appear in different paths of the same `select`.
- - Duplicate labels can *not* appear within the same path of a `select`.
- - Duplicate labels can *not* appear across multiple combined `select`s (no matter what path).
-
-`select` cannot appear inside another `select`. If you need to nest `select`s
-and your attribute takes other targets as values, use an intermediate target:
-
-```python
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"],
-    deps = select({
-        ":armeabi_mode": [":armeabi_lib"],
-        ...
-    }),
-)
-
-sh_library(
-    name = "armeabi_lib",
-    srcs = select({
-        ":opt_mode": ["armeabi_with_opt.sh"],
-        ...
-    }),
-)
-```
-
-If you need a `select` to match when multiple conditions match, consider [AND
-chaining](#and-chaining).
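-
-To make the duplicate-label restrictions above concrete, here is a minimal
-sketch (the `:config1`/`:config2` conditions and the `:lib` target are
-illustrative):
-
-```python
-# OK: the same label may appear in different paths of the same select().
-sh_library(
-    name = "ok",
-    deps = select({
-        ":config1": [":lib"],
-        "//conditions:default": [":lib"],
-    }),
-)
-
-# Error: the same label appears twice within one path.
-sh_library(
-    name = "bad_same_path",
-    deps = select({
-        ":config1": [":lib", ":lib"],
-    }),
-)
-
-# Error: the same label appears across two select()s combined
-# into the same attribute.
-sh_library(
-    name = "bad_across_selects",
-    deps = select({
-        ":config1": [":lib"],
-    }) + select({
-        ":config2": [":lib"],
-    }),
-)
-```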
- -## OR chaining - -Consider the following: - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1": [":standard_lib"], - ":config2": [":standard_lib"], - ":config3": [":standard_lib"], - ":config4": [":special_lib"], - }), -) -``` - -Most conditions evaluate to the same dep. But this syntax is hard to read and -maintain. It would be nice to not have to repeat `[":standard_lib"]` multiple -times. - -One option is to predefine the value as a BUILD variable: - -```python -STANDARD_DEP = [":standard_lib"] - -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1": STANDARD_DEP, - ":config2": STANDARD_DEP, - ":config3": STANDARD_DEP, - ":config4": [":special_lib"], - }), -) -``` - -This makes it easier to manage the dependency. But it still causes unnecessary -duplication. - -For more direct support, use one of the following: - -### `selects.with_or` - -The -[with_or](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectswith_or) -macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md) -module supports `OR`ing conditions directly inside a `select`: - -```python -load("@bazel_skylib//lib:selects.bzl", "selects") -``` - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = selects.with_or({ - (":config1", ":config2", ":config3"): [":standard_lib"], - ":config4": [":special_lib"], - }), -) -``` - -### `selects.config_setting_group` - - -The -[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group) -macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md) -module supports `OR`ing multiple `config_setting`s: - -```python -load("@bazel_skylib//lib:selects.bzl", "selects") -``` - - -```python -config_setting( - name = "config1", - values = {"cpu": "arm"}, -) -config_setting( - name = "config2", - values = {"compilation_mode": "dbg"}, -) -selects.config_setting_group( - name = "config1_or_2", - match_any = [":config1", ":config2"], -) -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1_or_2": [":standard_lib"], - "//conditions:default": [":other_lib"], - }), -) -``` - -Unlike `selects.with_or`, different targets can share `:config1_or_2` across -different attributes. - -It's an error for multiple conditions to match unless one is an unambiguous -"specialization" of the others or they all resolve to the same value. See [here](#configurable-build-example) for details. 
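-
-For example, a minimal sketch of an unambiguous specialization (the names and
-flag values here are illustrative):
-
-```python
-config_setting(
-    name = "x86_any",
-    values = {"cpu": "x86"},
-)
-
-config_setting(
-    name = "x86_debug",
-    # A strict superset of :x86_any's values: when both conditions match
-    # (bazel build -c dbg --cpu=x86), this branch is an unambiguous
-    # specialization and wins.
-    values = {
-        "cpu": "x86",
-        "compilation_mode": "dbg",
-    },
-)
-
-cc_library(
-    name = "lib",
-    srcs = select({
-        ":x86_any": ["generic_x86.cc"],
-        ":x86_debug": ["debug_x86.cc"],
-    }),
-)
-```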
-
-## AND chaining
-
-If you need a `select` branch to match when multiple conditions match, use the
-[Skylib](https://github.com/bazelbuild/bazel-skylib) macro
-[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group):
-
-```python
-config_setting(
-    name = "config1",
-    values = {"cpu": "arm"},
-)
-config_setting(
-    name = "config2",
-    values = {"compilation_mode": "dbg"},
-)
-selects.config_setting_group(
-    name = "config1_and_2",
-    match_all = [":config1", ":config2"],
-)
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"],
-    deps = select({
-        ":config1_and_2": [":standard_lib"],
-        "//conditions:default": [":other_lib"],
-    }),
-)
-```
-
-Unlike OR chaining, existing `config_setting`s can't be directly `AND`ed
-inside a `select`. You have to explicitly wrap them in a `config_setting_group`.
-
-## Custom error messages
-
-By default, when no condition matches, the target the `select()` is attached to
-fails with the error:
-
-```sh
-ERROR: Configurable attribute "deps" doesn't match this configuration (would
-a default condition help?).
-Conditions checked:
-  //tools/cc_target_os:darwin
-  //tools/cc_target_os:android
-```
-
-This can be customized with the [`no_match_error`](/reference/be/functions#select)
-attribute:
-
-```python
-cc_library(
-    name = "my_lib",
-    deps = select(
-        {
-            "//tools/cc_target_os:android": [":android_deps"],
-            "//tools/cc_target_os:windows": [":windows_deps"],
-        },
-        no_match_error = "Please build with an Android or Windows toolchain",
-    ),
-)
-```
-
-```sh
-$ bazel build //myapp:my_lib
-ERROR: Configurable attribute "deps" doesn't match this configuration: Please
-build with an Android or Windows toolchain
-```
-
-## Rules compatibility
-
-Rule implementations receive the *resolved values* of configurable
-attributes. For example, given:
-
-```python
-# myapp/BUILD
-
-some_rule(
-    name = "my_target",
-    some_attr = select({
-        ":foo_mode": [":foo"],
-        ":bar_mode": [":bar"],
-    }),
-)
-```
-
-```sh
-$ bazel build //myapp:my_target --define mode=foo
-```
-
-Rule implementation code sees `ctx.attr.some_attr` as `[":foo"]`.
-
-Macros can accept `select()` clauses and pass them through to native
-rules. But *they cannot directly manipulate them*. For example, there's no way
-for a macro to convert
-
-```python
-select({"foo": "val"}, ...)
-```
-
-to
-
-```python
-select({"foo": "val_with_suffix"}, ...)
-```
-
-This is for two reasons.
-
-First, macros that need to know which path a `select` will choose *cannot work*
-because macros are evaluated in Bazel's [loading phase](/docs/build#loading),
-which occurs before flag values are known.
-This is a core Bazel design restriction that's unlikely to change any time soon.
-
-Second, macros that just need to iterate over *all* `select` paths, while
-technically feasible, lack a coherent UI. Further design is necessary to change
-this.
-
-## Bazel query and cquery
-
-Bazel [`query`](/docs/query-how-to) operates over Bazel's
-[loading phase](/reference/glossary#loading-phase).
-This means it doesn't know what command line flags a target uses since those
-flags aren't evaluated until later in the build (in the
-[analysis phase](/reference/glossary#analysis-phase)).
-So it can't determine which `select()` branches are chosen.
-
-Bazel [`cquery`](/docs/cquery) operates after Bazel's analysis phase, so it has
-all this information and can accurately resolve `select()`s.
-
-Consider:
-
-```python
-load("@bazel_skylib//rules:common_settings.bzl", "string_flag")
-```
-```python
-# myapp/BUILD
-
-string_flag(
-    name = "dog_type",
-    build_setting_default = "cat"
-)
-
-cc_library(
-    name = "my_lib",
-    deps = select({
-        ":long": [":foo_dep"],
-        ":short": [":bar_dep"],
-    }),
-)
-
-config_setting(
-    name = "long",
-    flag_values = {":dog_type": "dachshund"},
-)
-
-config_setting(
-    name = "short",
-    flag_values = {":dog_type": "pug"},
-)
-```
-
-`query` overapproximates `:my_lib`'s dependencies:
-
-```sh
-$ bazel query 'deps(//myapp:my_lib)'
-//myapp:my_lib
-//myapp:foo_dep
-//myapp:bar_dep
-```
-
-while `cquery` shows its exact dependencies:
-
-```sh
-$ bazel cquery 'deps(//myapp:my_lib)' --//myapp:dog_type=pug
-//myapp:my_lib
-//myapp:bar_dep
-```
-
-## FAQ
-
-### Why doesn't select() work in macros?
-
-select() *does* work in rules! See [Rules compatibility](#rules-compatibility) for
-details.
-
-The issue behind this question is usually that select() doesn't work in
-*macros*. These are different from *rules*. See the
-documentation on [rules](/rules/rules) and [macros](/rules/macros)
-to understand the difference.
-Here's an end-to-end example:
-
-Define a rule and macro:
-
-```python
-# myapp/defs.bzl
-
-# Rule implementation: when an attribute is read, all select()s have already
-# been resolved. So it looks like a plain old attribute just like any other.
-def _impl(ctx):
-    name = ctx.attr.name
-    allcaps = ctx.attr.my_config_string.upper() # This works fine on all values.
-    print("My name is " + name + " with custom message: " + allcaps)
-
-# Rule declaration:
-my_custom_bazel_rule = rule(
-    implementation = _impl,
-    attrs = {"my_config_string": attr.string()},
-)
-
-# Macro declaration:
-def my_custom_bazel_macro(name, my_config_string):
-    allcaps = my_config_string.upper() # This line won't work with select(s).
-    print("My name is " + name + " with custom message: " + allcaps)
-```
-
-Instantiate the rule and macro:
-
-```python
-# myapp/BUILD
-
-load("//myapp:defs.bzl", "my_custom_bazel_rule")
-load("//myapp:defs.bzl", "my_custom_bazel_macro")
-
-my_custom_bazel_rule(
-    name = "happy_rule",
-    my_config_string = select({
-        "//tools/target_cpu:x86": "first string",
-        "//tools/target_cpu:ppc": "second string",
-    }),
-)
-
-my_custom_bazel_macro(
-    name = "happy_macro",
-    my_config_string = "fixed string",
-)
-
-my_custom_bazel_macro(
-    name = "sad_macro",
-    my_config_string = select({
-        "//tools/target_cpu:x86": "first string",
-        "//tools/target_cpu:ppc": "other string",
-    }),
-)
-```
-
-Building fails because `sad_macro` can't process the `select()`:
-
-```sh
-$ bazel build //myapp:all
-ERROR: /myworkspace/myapp/BUILD:17:1: Traceback
-  (most recent call last):
-File "/myworkspace/myapp/BUILD", line 17
-my_custom_bazel_macro(name = "sad_macro", my_config_stri..."}))
-File "/myworkspace/myapp/defs.bzl", line 4, in
-  my_custom_bazel_macro
-my_config_string.upper()
-type 'select' has no method upper().
-ERROR: error loading package 'myapp': Package 'myapp' contains errors.
-```
-
-Building succeeds when you comment out `sad_macro`:
-
-```sh
-# Comment out sad_macro so it doesn't mess up the build.
-$ bazel build //myapp:all
-DEBUG: /myworkspace/myapp/defs.bzl:5:3: My name is happy_macro with custom message: FIXED STRING.
-DEBUG: /myworkspace/myapp/defs.bzl:15:3: My name is happy_rule with custom message: FIRST STRING.
-```
-
-This is impossible to change because *by definition* macros are evaluated before
-Bazel reads the build's command line flags. That means there isn't enough
-information to evaluate select()s.
-
-Macros can, however, pass `select()`s as opaque blobs to rules:
-
-```python
-# myapp/defs.bzl
-
-def my_custom_bazel_macro(name, my_config_string):
-    print("Invoking macro " + name)
-    my_custom_bazel_rule(
-        name = name + "_as_target",
-        my_config_string = my_config_string,
-    )
-```
-
-```sh
-$ bazel build //myapp:sad_macro_less_sad
-DEBUG: /myworkspace/myapp/defs.bzl:23:3: Invoking macro sad_macro_less_sad.
-DEBUG: /myworkspace/myapp/defs.bzl:15:3: My name is sad_macro_less_sad with custom message: FIRST STRING.
-```
-
-### Why does select() always return true?
-
-Because *macros* (but not rules) by definition
-[can't evaluate `select()`s](#faq-select-macro), any attempt to do so
-usually produces an error:
-
-```sh
-ERROR: /myworkspace/myapp/BUILD:17:1: Traceback
-  (most recent call last):
-File "/myworkspace/myapp/BUILD", line 17
-my_custom_bazel_macro(name = "sad_macro", my_config_stri..."}))
-File "/myworkspace/myapp/defs.bzl", line 4, in
-  my_custom_bazel_macro
-my_config_string.upper()
-type 'select' has no method upper().
-```
-
-Booleans are a special case that fails silently, so you should be particularly
-vigilant with them:
-
-```sh
-$ cat myapp/defs.bzl
-def my_boolean_macro(boolval):
-  print("TRUE" if boolval else "FALSE")
-
-$ cat myapp/BUILD
-load("//myapp:defs.bzl", "my_boolean_macro")
-my_boolean_macro(
-    boolval = select({
-        "//tools/target_cpu:x86": True,
-        "//tools/target_cpu:ppc": False,
-    }),
-)
-
-$ bazel build //myapp:all --cpu=x86
-DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE.
-$ bazel build //myapp:all --cpu=ppc
-DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE.
-```
-
-This happens because macros don't understand the contents of `select()`.
-So what they're really evaluating is the `select()` object itself. According to
-[Pythonic](https://docs.python.org/release/2.5.2/lib/truth.html) design
-standards, all objects aside from a very small number of exceptions
-automatically evaluate to true.
-
-### Can I read select() like a dict?
-
-Macros [can't](#faq-select-macro) evaluate `select()`s because macros evaluate before
-Bazel knows what the build's command line parameters are. Can they at least read
-the `select()`'s dictionary to, for example, add a suffix to each value?
-
-Conceptually this is possible, but it isn't yet a Bazel feature.
-What you *can* do today is prepare a straight dictionary, then feed it into a
-`select()`:
-
-```sh
-$ cat myapp/defs.bzl
-def selecty_genrule(name, select_cmd):
-  for key in select_cmd.keys():
-    select_cmd[key] += " WITH SUFFIX"
-  # Add the default condition with a dict assignment; Starlark no longer
-  # supports concatenating dicts with "+".
-  select_cmd["//conditions:default"] = "default"
-  native.genrule(
-      name = name,
-      outs = [name + ".out"],
-      srcs = [],
-      cmd = "echo " + select(select_cmd) + " > $@",
-  )
-
-$ cat myapp/BUILD
-selecty_genrule(
-    name = "selecty",
-    select_cmd = {
-        "//tools/target_cpu:x86": "x86 mode",
-    },
-)
-
-$ bazel build //myapp:selecty --cpu=x86 && cat bazel-genfiles/myapp/selecty.out
-x86 mode WITH SUFFIX
-```
-
-If you'd like to support both `select()` and native types, you can do this:
-
-```sh
-$ cat myapp/defs.bzl
-def selecty_genrule(name, select_cmd):
-    cmd_suffix = ""
-    if type(select_cmd) == "string":
-        cmd_suffix = select_cmd + " WITH SUFFIX"
-    elif type(select_cmd) == "dict":
-        for key in select_cmd.keys():
-            select_cmd[key] += " WITH SUFFIX"
-        select_cmd["//conditions:default"] = "default"
-        cmd_suffix = select(select_cmd)
-
-    native.genrule(
-        name = name,
-        outs = [name + ".out"],
-        srcs = [],
-        cmd = "echo " + cmd_suffix + " > $@",
-    )
-```
-
-### Why doesn't select() work with bind()?
-
-Because [`bind()`](/reference/be/workspace#bind) is a WORKSPACE rule, not a BUILD rule.
-
-Workspace rules do not have a specific configuration, and aren't evaluated in
-the same way as BUILD rules. Therefore, a `select()` in a `bind()` can't
-actually evaluate to any specific branch.
-
-Instead, you should use [`alias()`](/reference/be/general#alias), with a `select()` in
-the `actual` attribute, to perform this type of build-time determination. This
-works correctly, since `alias()` is a BUILD rule, and is evaluated with a
-specific configuration.
-
-You can even have a `bind()` target point to an `alias()`, if needed.
-
-```sh
-$ cat WORKSPACE
-workspace(name = "myapp")
-bind(name = "openssl", actual = "//:ssl")
-http_archive(name = "alternative", ...)
-http_archive(name = "boringssl", ...)
-
-$ cat BUILD
-config_setting(
-    name = "alt_ssl",
-    define_values = {
-        "ssl_library": "alternative",
-    },
-)
-
-alias(
-    name = "ssl",
-    actual = select({
-        "//:alt_ssl": "@alternative//:ssl",
-        "//conditions:default": "@boringssl//:ssl",
-    }),
-)
-```
-
-With this setup, you can pass `--define ssl_library=alternative`, and any target
-that depends on either `//:ssl` or `//external:ssl` will see the alternative
-located at `@alternative//:ssl`.
-
-### Why doesn't my select() choose what I expect?
-
-If `//myapp:foo` has a `select()` that doesn't choose the condition you expect,
-use [cquery](/docs/cquery) and `bazel config` to debug:
-
-If `//myapp:foo` is the top-level target you're building, run:
-
-```sh
-$ bazel cquery //myapp:foo
-//myapp:foo (12e23b9a2b534a)
-```
-
-If you're building some other target `//bar` that depends on
-`//myapp:foo` somewhere in its subgraph, run:
-
-```sh
-$ bazel cquery 'somepath(//bar, //myapp:foo)'
-//bar:bar (3ag3193fee94a2)
-//bar:intermediate_dep (12e23b9a2b534a)
-//myapp:foo (12e23b9a2b534a)
-```
-
-The `(12e23b9a2b534a)` next to `//myapp:foo` is a *hash* of the
-configuration that resolves `//myapp:foo`'s `select()`. You can inspect its
-values with `bazel config`:
-
-```sh
-$ bazel config 12e23b9a2b534a
-BuildConfigurationValue 12e23b9a2b534a
-Fragment com.google.devtools.build.lib.analysis.config.CoreOptions {
-  cpu: darwin
-  compilation_mode: fastbuild
-  ...
-}
-Fragment com.google.devtools.build.lib.rules.cpp.CppOptions {
-  linkopt: [-Dfoo=bar]
-  ...
-}
-...
-```
-
-Then compare this output against the settings expected by each `config_setting`.
-
-`//myapp:foo` may exist in different configurations in the same build. See the
-[cquery docs](/docs/cquery) for guidance on using `somepath` to get the right
-one.
-
-Caution: To prevent restarting the Bazel server, invoke `bazel config` with the
-same command line flags as the `bazel cquery`. The `config` command relies on
-the configuration nodes from the still-running server of the previous command.
-
-### Why doesn't `select()` work with platforms?
-
-Bazel doesn't support configurable attributes checking whether a given platform
-is the target platform because the semantics are unclear.
-
-For example:
-
-```py
-platform(
-    name = "x86_linux_platform",
-    constraint_values = [
-        "@platforms//cpu:x86",
-        "@platforms//os:linux",
-    ],
-)
-
-cc_library(
-    name = "lib",
-    srcs = [...],
-    linkopts = select({
-        ":x86_linux_platform": ["--enable_x86_optimizations"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-In this `BUILD` file, which `select()` should be used if the target platform has both the
-`@platforms//cpu:x86` and `@platforms//os:linux` constraints, but is **not** the
-`:x86_linux_platform` defined here? The author of the `BUILD` file and the user
-who defined the separate platform may have different ideas.
-
-#### What should I do instead?
-
-Instead, define a `config_setting` that matches **any** platform with
-these constraints:
-
-```py
-config_setting(
-    name = "is_x86_linux",
-    constraint_values = [
-        "@platforms//cpu:x86",
-        "@platforms//os:linux",
-    ],
-)
-
-cc_library(
-    name = "lib",
-    srcs = [...],
-    linkopts = select({
-        ":is_x86_linux": ["--enable_x86_optimizations"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-This process defines specific semantics, making it clearer to users what
-platforms meet the desired conditions.
-
-#### What if I really, really want to `select` on the platform?
-
-If your build requirements specifically require checking the platform, you
-can flip the value of the `--platforms` flag in a `config_setting`:
-
-```py
-config_setting(
-    name = "is_specific_x86_linux_platform",
-    values = {
-        "platforms": "//package:x86_linux_platform",
-    },
-)
-
-cc_library(
-    name = "lib",
-    srcs = [...],
-    linkopts = select({
-        ":is_specific_x86_linux_platform": ["--enable_x86_optimizations"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-The Bazel team doesn't endorse doing this; it overly constrains your build and
-confuses users when the expected condition does not match.
-
-[BuildSettings]: /rules/config#user-defined-build-settings
diff --git a/6.5.0/configure/best-practices.mdx b/6.5.0/configure/best-practices.mdx
deleted file mode 100644
index 8522f14..0000000
--- a/6.5.0/configure/best-practices.mdx
+++ /dev/null
@@ -1,87 +0,0 @@
----
-title: 'Best Practices'
----
-
-
-This page assumes you are familiar with Bazel and provides guidelines and
-advice on structuring your projects to take full advantage of Bazel's features.
-
-The overall goals are:
-
-- To use fine-grained dependencies to allow parallelism and incrementality.
-- To keep dependencies well-encapsulated.
-- To make code well-structured and testable.
-- To create a build configuration that is easy to understand and maintain.
-
-These guidelines are not requirements: few projects will be able to adhere to
-all of them. As the man page for lint says, "A special reward will be presented
-to the first person to produce a real program that produces no errors with
-strict checking." However, incorporating as many of these principles as possible
-should make a project more readable, less error-prone, and faster to build.
-
-This page uses the requirement levels described in
-[this RFC](https://www.ietf.org/rfc/rfc2119.txt).
-
-## Running builds and tests
-
-A project should always be able to run `bazel build //...` and
-`bazel test //...` successfully on its stable branch. Targets that are necessary
-but do not build under certain circumstances (such as requiring specific build
-flags, not building on a certain platform, or requiring license agreements)
-should be tagged as specifically as possible (for example, "`requires-osx`"). This
-tagging allows targets to be filtered at a more fine-grained level than the
-"manual" tag and allows someone inspecting the `BUILD` file to understand what
-a target's restrictions are.
-
-## Third-party dependencies
-
-You may declare third-party dependencies in either of two ways:
-
-* Declare them as remote repositories in the `WORKSPACE` file.
-* Put them in a directory called `third_party/` under your workspace directory.
-
-## Depending on binaries
-
-Everything should be built from source whenever possible. Generally this means
-that, instead of depending on a library `some-library.so`, you'd create a
-`BUILD` file and build `some-library.so` from its sources, then depend on that
-target.
-
-Always building from source ensures that a build is not using a library that
-was built with incompatible flags or a different architecture. There are also
-some features like coverage, static analysis, or dynamic analysis that only
-work on the source.
-
-## Versioning
-
-Prefer building all code from head whenever possible. When versions must be
-used, avoid including the version in the target name (for example, `//guava`,
-not `//guava-20.0`). This naming makes the library easier to update (only one
-target needs to be updated). It's also more resilient to diamond dependency
-issues: if one library depends on `guava-19.0` and one depends on `guava-20.0`,
-you could end up with a library that tries to depend on two different versions.
-If you created a misleading alias to point both targets to one `guava` library,
-then the `BUILD` files are misleading.
-
-## Using the `.bazelrc` file
-
-For project-specific options, use the configuration file
-`workspace/.bazelrc` (see [bazelrc format](/docs/bazelrc)).
-
-If you want to support per-user options for your project that you **do not**
-want to check into source control, include the line:
-
-```
-try-import %workspace%/user.bazelrc
-```
-(or any other file name) in your `workspace/.bazelrc`
-and add `user.bazelrc` to your `.gitignore`.
-
-## Packages
-
-Every directory that contains buildable files should be a package. If a `BUILD`
-file refers to files in subdirectories (such as `srcs = ["a/b/C.java"]`) it's
-a sign that a `BUILD` file should be added to that subdirectory. The longer
-this structure exists, the more likely circular dependencies will be
-inadvertently created, a target's scope will creep, and an increasing number
-of reverse dependencies will have to be updated.
diff --git a/6.5.0/configure/coverage.mdx b/6.5.0/configure/coverage.mdx
deleted file mode 100644
index 6cca70a..0000000
--- a/6.5.0/configure/coverage.mdx
+++ /dev/null
@@ -1,254 +0,0 @@
----
-title: 'Code coverage with Bazel'
----
-
-
-Bazel features a `coverage` sub-command to produce code coverage
-reports on repositories that can be tested with `bazel coverage`. Due
-to the idiosyncrasies of the various language ecosystems, it is not
-always trivial to make this work for a given project.
-
-This page documents the general process for creating and viewing
-coverage reports, and also features some language-specific notes for
-languages whose configuration is well-known. It is best read by first
-reading [the general section](#creating-a-coverage-report), and then
-reading about the requirements for a specific language. Note also the
-[remote execution section](#remote-execution), which requires some
-additional considerations.
-
-While a lot of customization is possible, this document focuses on
-producing and consuming [`lcov`][lcov] reports, which is currently the
-most well-supported route.
-
-## Creating a coverage report
-
-### Preparation
-
-The basic workflow for creating coverage reports requires the
-following:
-
-- A basic repository with test targets
-- A toolchain with the language-specific code coverage tools installed
-- A correct "instrumentation" configuration
-
-The first two are language-specific and mostly straightforward;
-however, the last can be more difficult for complex projects.
-
-"Instrumentation" in this case refers to the coverage tools that are
-used for a specific target. Bazel allows turning this on for a
-specific subset of files using the
-[`--instrumentation_filter`](/reference/command-line-reference#flag--instrumentation_filter)
-flag, which specifies a filter for targets that are tested with the
-instrumentation enabled. To enable instrumentation for tests, the
-[`--instrument_test_targets`](/reference/command-line-reference#flag--instrument_test_targets)
-flag is required.
-
-By default, Bazel tries to match the target package(s), and prints the
-relevant filter as an `INFO` message.
-
-### Running coverage
-
-To produce a coverage report, use [`bazel coverage
---combined_report=lcov
-[target]`](/reference/command-line-reference#coverage). This runs the
-tests for the target, generating coverage reports in the lcov format
-for each file.
-
-Once finished, Bazel runs an action that collects all the produced
-coverage files, and merges them into one, which is then finally
-created under `$(bazel info
-output_path)/_coverage/_coverage_report.dat`.
-
-Coverage reports are also produced if tests fail, though note that
-this does not extend to the failed tests - only passing tests are
-reported.
-
-### Viewing coverage
-
-The coverage report is only output in the non-human-readable `lcov`
-format. From this, we can use the `genhtml` utility (part of [the lcov
-project][lcov]) to produce a report that can be viewed in a web
-browser:
-
-```console
-genhtml --output genhtml "$(bazel info output_path)/_coverage/_coverage_report.dat"
-```
-
-Note that `genhtml` reads the source code as well, to annotate missing
-coverage in these files. For this to work, it is expected that
-`genhtml` is executed in the root of the Bazel project.
-
-To view the result, simply open the `index.html` file produced in the
-`genhtml` directory in any web browser.
-
-For further help and information around the `genhtml` tool, or the
-`lcov` coverage format, see [the lcov project][lcov].
-
-## Remote execution
-
-Running with remote test execution currently has a few caveats:
-
-- The report combination action cannot yet run remotely. This is
-  because Bazel does not consider the coverage output files as part of
-  its graph (see [this issue][remote_report_issue]), and can therefore
-  not correctly treat them as inputs to the combination action. To
-  work around this, use `--strategy=CoverageReport=local`.
-
-  Note: It may be necessary to specify something like
-  `--strategy=CoverageReport=local,remote` instead, if Bazel is set
-  up to try `local,remote`, due to how Bazel resolves strategies.
-- `--remote_download_minimal` and similar flags can also not be used
-  as a consequence of the former.
-- Bazel will currently fail to create coverage information if tests
-  have been cached previously. To work around this,
-  `--nocache_test_results` can be set specifically for coverage runs,
-  although this of course incurs a heavy cost in terms of test times.
-- `--experimental_split_coverage_postprocessing` and
-  `--experimental_fetch_all_coverage_outputs`
-
-  Usually coverage is run as part of the test action, and so, by
-  default, not all coverage data comes back as outputs of the remote
-  execution. These flags override the default and obtain the coverage
-  data. See [this issue][split_coverage_issue] for more details.
-
-## Language-specific configuration
-
-### Java
-
-Java should work out-of-the-box with the default configuration. The
-[bazel toolchains][bazel_toolchains] also contain everything necessary
-for remote execution, including JUnit.
-
-### Python
-
-#### Prerequisites
-
-Running coverage with Python has some prerequisites:
-
-- A Bazel binary that includes [b01c859][python_coverage_commit],
-  which should be any Bazel > 3.0.
-- A [modified version of coverage.py][modified_coveragepy] (its lcov
-  support has not yet been upstreamed).
-
-#### Consuming the modified coverage.py
-
-One way to do this is via [rules_python][rules_python], which provides
-the ability to use a `requirements.txt` file; the requirements listed
-in the file are then created as Bazel targets using the
-[pip_install][pip_install_rule] repository rule.
-
-The `requirements.txt` should have the following entry:
-
-```text
-git+https://github.com/ulfjack/coveragepy.git@lcov-support
-```
-
-`rules_python`, `pip_install`, and the `requirements.txt` file can then be used in the `WORKSPACE` file as:
-
-```python
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-
-http_archive(
-    name = "rules_python",
-    url = "https://github.com/bazelbuild/rules_python/releases/download/0.5.0/rules_python-0.5.0.tar.gz",
-    sha256 = "cd6730ed53a002c56ce4e2f396ba3b3be262fd7cb68339f0377a45e8227fe332",
-)
-
-load("@rules_python//python:pip.bzl", "pip_install")
-
-pip_install(
-    name = "python_deps",
-    requirements = "//:requirements.txt",
-)
-```
-
-Note: The version of `rules_python` is incidental - this was simply
-the latest at the time of writing. Refer to the
-[upstream][rules_python] for up-to-date instructions.
-
-The coverage.py requirement can then be consumed by test targets by
-setting the following in `BUILD` files:
-
-```python
-load("@python_deps//:requirements.bzl", "entry_point")
-
-alias(
-    name = "python_coverage_tools",
-    actual = entry_point("coverage"),
-)
-
-py_test(
-    name = "test",
-    srcs = ["test.py"],
-    env = {
-        "PYTHON_COVERAGE": "$(location :python_coverage_tools)",
-    },
-    deps = [
-        ":main",
-        ":python_coverage_tools",
-    ],
-)
-```
-
-If you are using a hermetic Python toolchain, instead of adding the coverage
-dependency to every `py_test` target, you can add the coverage tool to
-the toolchain configuration.
-
-Because the [pip_install][pip_install_rule] rule depends on the Python
-toolchain, it cannot be used to fetch the `coverage` module.
-Instead, add the following to your `WORKSPACE`, for example:
-
-```starlark
-http_archive(
-    name = "coverage_linux_x86_64",
-    build_file_content = """
-py_library(
-    name = "coverage",
-    srcs = ["coverage/__main__.py"],
-    data = glob(["coverage/*", "coverage/**/*.py"]),
-    visibility = ["//visibility:public"],
-)
-""",
-    sha256 = "84631e81dd053e8a0d4967cedab6db94345f1c36107c71698f746cb2636c63e3",
-    type = "zip",
-    urls = [
-        "https://files.pythonhosted.org/packages/74/0d/0f3c522312fd27c32e1abe2fb5c323b583a5c108daf2c26d6e8dfdd5a105/coverage-6.4.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl",
-    ],
-)
-```
-
-Then configure your Python toolchain, for example:
-
-```starlark
-py_runtime(
-    name = "py3_runtime_linux_x86_64",
-    coverage_tool = "@coverage_linux_x86_64//:coverage",
-    files = ["@python3_9_x86_64-unknown-linux-gnu//:files"],
-    interpreter = "@python3_9_x86_64-unknown-linux-gnu//:bin/python3",
-    python_version = "PY3",
-)
-
-py_runtime_pair(
-    name = "python_runtimes_linux_x86_64",
-    py2_runtime = None,
-    py3_runtime = ":py3_runtime_linux_x86_64",
-)
-
-toolchain(
-    name = "python_toolchain_linux_x86_64",
-    exec_compatible_with = [
-        "@platforms//os:linux",
-        "@platforms//cpu:x86_64",
-    ],
-    toolchain = ":python_runtimes_linux_x86_64",
-    toolchain_type = "@bazel_tools//tools/python:toolchain_type",
-)
-```
-
-[lcov]: https://github.com/linux-test-project/lcov
-[rules_python]: https://github.com/bazelbuild/rules_python
-[bazel_toolchains]: https://github.com/bazelbuild/bazel-toolchains
-[remote_report_issue]: https://github.com/bazelbuild/bazel/issues/4685
-[split_coverage_issue]: https://github.com/bazelbuild/bazel/issues/4685
-[python_coverage_commit]: https://github.com/bazelbuild/bazel/commit/b01c85962d88661ec9f6c6704c47d8ce67ca4d2a
-[modified_coveragepy]: https://github.com/ulfjack/coveragepy/tree/lcov-support
-[pip_install_rule]: https://github.com/bazelbuild/rules_python#installing-pip-dependencies
diff --git a/6.5.0/configure/memory.mdx b/6.5.0/configure/memory.mdx
deleted file mode 100644
index 0d0e1c9..0000000
--- a/6.5.0/configure/memory.mdx
+++ /dev/null
@@ -1,34 +0,0 @@
----
-title: 'Running Bazel with Limited RAM'
----
-
-
-This page describes how to use flags to run Bazel with limited RAM.
-
-In certain situations, you may want Bazel to use minimal memory. You can set the
-maximum heap via the startup flag
-[`--host_jvm_args`](/docs/user-manual#host-jvm-args),
-like `--host_jvm_args=-Xmx2g`.
-
-However, if your builds are big enough, Bazel may throw an `OutOfMemoryError`
-(OOM) when it doesn't have enough memory. 
You can make Bazel use less memory, at -the cost of slower incremental builds, by passing the following command flags: -[`--discard_analysis_cache`](/docs/user-manual#discard-analysis-cache), -`--nokeep_state_after_build`, and `--notrack_incremental_state`. - -These flags will minimize the memory that Bazel uses in a build, at the cost of -making future builds slower than a standard incremental build would be. - -You can also pass any one of these flags individually: - - * `--discard_analysis_cache` will reduce the memory used during execution (not -analysis). Incremental builds will not have to redo package loading, but will -have to redo analysis and execution (although the on-disk action cache can -prevent most re-execution). - * `--notrack_incremental_state` will not store any edges in Bazel's internal - dependency graph, so that it is unusable for incremental builds. The next build - will discard that data, but it is preserved until then, for internal debugging, - unless `--nokeep_state_after_build` is specified. - * `--nokeep_state_after_build` will discard all data after the build, so that - incremental builds have to build from scratch (except for the on-disk action - cache). Alone, it does not affect the high-water mark of the current build. diff --git a/6.5.0/configure/toolchain-resolution.mdx b/6.5.0/configure/toolchain-resolution.mdx deleted file mode 100644 index da4f922..0000000 --- a/6.5.0/configure/toolchain-resolution.mdx +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: 'Toolchain Resolution Implementation Details' ---- - - -**Note:** This section is intended for Bazel developers, and is not needed by -rule authors. - -Several SkyFunction classes implement the [toolchain resolution](/docs/toolchains) process: - -1. [`RegisteredToolchainsFunction`](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/skyframe/RegisteredToolchainsFunction.java) and - [`RegisteredExecutionPlatformsFunction`](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/skyframe/RegisteredExecutionPlatformsFunction.java) - find available toolchains and execution platforms, based on the current - configuration and WORKSPACE file. - -1. [`SingleToolchainResolutionFunction`](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/skyframe/SingleToolchainResolutionFunction.java) - resolves a single toolchain type for every execution platform. That is, for - every execution platform it finds the best registered toolchain to use based - on the following criteria: - - 1. Make sure the toolchain and target platform are compatible, by checking - the `target_compatible_with` attribute. - 1. Make sure the toolchain and execution platform are compatible, by - checking the `exec_compatible_with` attribute. - 1. If multiple toolchains are left, choose the highest-priority one (the - one that was registered first). - -1. [`ToolchainResolutionFunction`](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/skyframe/ToolchainResolutionFunction.java) calls - `SingleToolchainResolutionFunction` for each requested toolchain type, and - then determines the best execution platform to use. - - 1. First, remove any execution platform that does not have a valid - toolchain for each requested toolchain type. - 2. If multiple execution platforms are left, choose the highest-priority - one (the one that was registered first). - 1. 
If the execution platform is already set by the toolchain - transition, it will be selected first as described below. - -As discussed in [Toolchains and Configurations](/docs/toolchains#toolchains_and_configurations), -the dependency from a target to a toolchain uses a special configuration that -forces the execution platform to be the same for both. Despite the name -"toolchain transition", this is not implemented as a configuration -transition, but instead as a special subclass of -[`ConfiguredTargetKey`](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/skyframe/ConfiguredTargetKey.java), called -[`ToolchainDependencyConfiguredTargetKey`](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/skyframe/ConfiguredTargetKey.java;bpv=1;bpt=1;l=164?ss=bazel&q=ConfiguredTargetKey&gsn=ToolchainDependencyConfiguredTargetKey&gs=kythe%3A%2F%2Fgithub.com%2Fbazelbuild%2Fbazel%3Flang%3Djava%3Fpath%3Dcom.google.devtools.build.lib.skyframe.ConfiguredTargetKey.ToolchainDependencyConfiguredTargetKey%2336c7e68f8cd5ea0b5a21b3769e63e6b2d489b9ca8c6f79798839e7f40cf2a19e). -In addition to the other data in `ConfiguredTargetKey`, this subclass also holds -the label of the execution platform. When `ToolchainResolutionFunction` is -considering which execution platform to use, if the forced execution platform -from the `ToolchainDependencyConfiguredTargetKey` is valid, it will be used even -if it is not the highest-priority. - -**Note:** If the forced execution platform is not valid (because there are no -valid toolchains, or because of execution constraints from the rule or target), -then the highest-priority valid execution platform will be used instead. diff --git a/6.5.0/contribute/breaking-changes.mdx b/6.5.0/contribute/breaking-changes.mdx deleted file mode 100644 index 744488d..0000000 --- a/6.5.0/contribute/breaking-changes.mdx +++ /dev/null @@ -1,137 +0,0 @@ ---- -title: 'Guide for rolling out breaking changes' ---- - - -It is inevitable that we will make breaking changes to Bazel. We will have to -change our designs and fix the things that do not quite work. However, we need -to make sure that community and Bazel ecosystem can follow along. To that end, -Bazel project has adopted a -[backward compatibility policy](/release/backward-compatibility). -This document describes the process for Bazel contributors to make a breaking -change in Bazel to adhere to this policy. - -1. Follow the [design document policy](/contribute/design-documents). - -1. [File a GitHub issue.](#github-issue) - -1. [Implement the change.](#implementation) - -1. [Update labels](#labels) - -1. [Flip the incompatible flag.](#flip-flag) - -## GitHub issue - -[File a GitHub issue](https://github.com/bazelbuild/bazel/issues) -in the Bazel repository. -[See example.](https://github.com/bazelbuild/bazel/issues/6611) - -We recommend that: - -* The title starts with the name of the flag (the flag name will start with - `incompatible_`). - -* You add the label - [`incompatible-change`](https://github.com/bazelbuild/bazel/labels/incompatible-change). - -* The description contains a description of the change and a link to relevant - design documents. - -* The description contains a migration recipe, to explain users how they should - update their code. Ideally, when the change is mechanical, include a link to a - migration tool. - -* The description includes an example of the error message users will get if - they don't migrate. 
This will make the GitHub issue more discoverable from
-  search engines. Make sure that the error message is helpful and actionable.
-  When possible, the error message should include the name of the incompatible
-  flag.
-
-For the migration tool, consider contributing to
-[Buildifier](https://github.com/bazelbuild/buildtools/blob/master/buildifier/README.md).
-It is able to apply automated fixes to `BUILD`, `WORKSPACE`, and `.bzl` files.
-It may also report warnings.
-
-## Implementation
-
-Create a new flag in Bazel. The default value must be false. The help text
-should contain the URL of the GitHub issue. As the flag name starts with
-`incompatible_`, it needs metadata tags:
-
-```java
-  metadataTags = {
-    OptionMetadataTag.INCOMPATIBLE_CHANGE,
-  },
-```
-
-In the commit description, add a brief summary of the flag.
-Also add [`RELNOTES:`](release-notes.md) in the following form:
-`RELNOTES: --incompatible_name_of_flag has been added. See #xyz for details`
-
-The commit should also update the relevant documentation, so that there is no
-window of commits in which the code is inconsistent with the docs. Since our
-documentation is versioned, changes to the docs will not be inadvertently
-released prematurely.
-
-## Labels
-
-Once the commit is merged and the incompatible change is ready to be adopted,
-add the label
-[`migration-ready`](https://github.com/bazelbuild/bazel/labels/migration-ready)
-to the GitHub issue.
-
-If a problem is found with the flag and users are not expected to migrate yet,
-remove the `migration-ready` label.
-
-If you plan to flip the flag in the next major release, add the label
-`breaking-change-X.0` to the issue.
-
-## Updating repositories
-
-Bazel CI tests a list of important projects at
-[Bazel@HEAD + Downstream](https://buildkite.com/bazel/bazel-at-head-plus-downstream).
-Most of them are often dependencies of other Bazel projects, so it's important
-to migrate them to unblock the migration for the broader community.
-
-To monitor the migration status of those projects, you can use the
-[`bazelisk-plus-incompatible-flags` pipeline](https://buildkite.com/bazel/bazelisk-plus-incompatible-flags);
-see how this pipeline works [here](https://github.com/bazelbuild/continuous-integration/tree/master/buildkite#checking-incompatible-changes-status-for-downstream-projects).
-
-Migrating projects in the downstream pipeline is NOT entirely the
-responsibility of the incompatible change author. But you can do the following
-to accelerate the migration and make life easier for both Bazel users and the
-Bazel Green Team.
-
-1. File GitHub issues to notify the owners of the downstream projects broken
-   by your incompatible change.
-1. Send PRs to fix downstream projects.
-1. Reach out to the Bazel community for help on migration (e.g. the
-   [Bazel Rules Authors SIG](https://bazel-contrib.github.io/SIG-rules-authors/)).
-
-## Flipping the flag
-
-Before flipping the default value of the flag to true, please make sure that:
-
-* Core repositories in the ecosystem are migrated.
-
-  On the [`bazelisk-plus-incompatible-flags` pipeline](https://buildkite.com/bazel/bazelisk-plus-incompatible-flags),
-  the flag should appear under `The following flags didn't break any passing Bazel team owned/co-owned projects`.
-
-* User concerns and questions have been resolved.
-
-When the flag is ready to flip in Bazel, but blocked on internal migration at
-Google, please consider setting the flag value to false in the internal
-`blazerc` file to unblock the flag flip.
By doing this, we can ensure Bazel users depend on the new behavior by default
-as early as possible.
-
-When changing the flag default to true, please:
-
-* Use `RELNOTES[INC]` in the commit description, with the
-  following format:
-  `RELNOTES[INC]: --incompatible_name_of_flag is flipped to true. See #xyz for
-  details`
-  You can include additional information in the rest of the commit description.
-* Use `Fixes #xyz` in the description, so that the GitHub issue gets closed
-  when the commit is merged.
-* Review and update documentation if needed.
-* File a new issue `#abc` to track the removal of the flag.
-
-## Removing the flag
-
-After the flag is flipped at HEAD, it should be removed from Bazel eventually.
-When you plan to remove the incompatible flag:
-
-* Consider leaving more time for users to migrate if it's a major incompatible
-  change. Ideally, the flag should be available in at least one major release.
-* For the commit that removes the flag, use `Fixes #abc` in the description
-  so that the GitHub issue gets closed when the commit is merged.
diff --git a/6.5.0/contribute/codebase.mdx b/6.5.0/contribute/codebase.mdx
deleted file mode 100644
index e5365f0..0000000
--- a/6.5.0/contribute/codebase.mdx
+++ /dev/null
@@ -1,1679 +0,0 @@
----
-title: 'The Bazel Code Base'
----
-
-
-This document is a description of the code base and how Bazel is structured. It
-is intended for people willing to contribute to Bazel, not for end-users.
-
-## Introduction
-
-The code base of Bazel is large (~350 KLOC production code and ~260 KLOC test
-code) and no one is familiar with the whole landscape: everyone knows their
-particular valley very well, but few know what lies over the hills in every
-direction.
-
-In order for people midway upon the journey not to find themselves within a
-forest dark with the straightforward pathway being lost, this document tries to
-give an overview of the code base so that it's easier to get started with
-working on it.
-
-The public version of the source code of Bazel lives on GitHub at
-[github.com/bazelbuild/bazel](http://github.com/bazelbuild/bazel). This is not
-the “source of truth”; it’s derived from a Google-internal source tree that
-contains additional functionality that is not useful outside Google. The
-long-term goal is to make GitHub the source of truth.
-
-Contributions are accepted through the regular GitHub pull request mechanism,
-and manually imported by a Googler into the internal source tree, then
-re-exported back out to GitHub.
-
-## Client/server architecture
-
-The bulk of Bazel resides in a server process that stays in RAM between builds.
-This allows Bazel to maintain state between builds.
-
-This is why the Bazel command line has two kinds of options: startup and
-command. In a command line like this:
-
-```
-  bazel --host_jvm_args=-Xmx8G build -c opt //foo:bar
-```
-
-some options (`--host_jvm_args=`) are before the name of the command to be run
-and some are after (`-c opt`); the former kind is called a "startup option" and
-affects the server process as a whole, whereas the latter kind, the "command
-option", only affects a single command.
-
-Each server instance has a single associated source tree ("workspace") and each
-workspace usually has a single active server instance. This can be circumvented
-by specifying a custom output base (see the "Directory layout" section for more
-information).
-
-Bazel is distributed as a single ELF executable that is also a valid .zip file.
-When you type `bazel`, the above ELF executable implemented in C++ (the
-"client") gets control. It sets up an appropriate server process using the
-following steps:
-
-1. Checks whether it has already extracted itself. If not, it does that. This
-   is where the implementation of the server comes from.
-2. Checks whether there is an active server instance that works: it is running,
-   it has the right startup options and uses the right workspace directory. It
-   finds the running server by looking at the directory `$OUTPUT_BASE/server`
-   where there is a lock file with the port the server is listening on.
-3. If needed, kills the old server process.
-4. If needed, starts up a new server process.
-
-After a suitable server process is ready, the command that needs to be run is
-communicated to it over a gRPC interface, then the output of Bazel is piped back
-to the terminal. Only one command can be running at the same time. This is
-implemented using an elaborate locking mechanism with parts in C++ and parts in
-Java. There is some infrastructure for running multiple commands in parallel,
-since the inability to run `bazel version` in parallel with another command
-is somewhat embarrassing. The main blocker is the life cycle of `BlazeModule`s
-and some state in `BlazeRuntime`.
-
-At the end of a command, the Bazel server transmits the exit code the client
-should return. An interesting wrinkle is the implementation of `bazel run`: the
-job of this command is to run something Bazel just built, but it can't do that
-from the server process because it doesn't have a terminal. So instead it tells
-the client what binary it should exec() and with what arguments.
-
-When one presses Ctrl-C, the client translates it to a Cancel call on the gRPC
-connection, which tries to terminate the command as soon as possible. After the
-third Ctrl-C, the client sends a SIGKILL to the server instead.
-
-The source code of the client is under `src/main/cpp` and the protocol used to
-communicate with the server is in `src/main/protobuf/command_server.proto`.
-
-The main entry point of the server is `BlazeRuntime.main()` and the gRPC calls
-from the client are handled by `GrpcServerImpl.run()`.
-
-## Directory layout
-
-Bazel creates a somewhat complicated set of directories during a build. A full
-description is available in [Output directory layout](/remote/output-directories).
-
-The "workspace" is the source tree Bazel is run in. It usually corresponds to
-something you checked out from source control.
-
-Bazel puts all of its data under the "output user root". This is usually
-`$HOME/.cache/bazel/_bazel_${USER}`, but can be overridden using the
-`--output_user_root` startup option.
-
-The "install base" is where Bazel is extracted to. This is done automatically
-and each Bazel version gets a subdirectory based on its checksum under the
-install base. It's at `$OUTPUT_USER_ROOT/install` by default and can be changed
-using the `--install_base` command line option.
-
-The "output base" is the place where the Bazel instance attached to a specific
-workspace writes to. Each output base has at most one Bazel server instance
-running at any time. It's usually at
-`$OUTPUT_USER_ROOT/<checksum of the workspace path>`. It can be changed using
-the `--output_base` startup option, which is, among other things, useful for
-getting around the limitation that only one Bazel instance can be running in
-any workspace at any given time.
-
-The output directory contains, among other things:
-
-* The fetched external repositories at `$OUTPUT_BASE/external`.
-* The exec root, a directory that contains symlinks to all the source
-  code for the current build. It's located at `$OUTPUT_BASE/execroot`. During
-  the build, the working directory is
-  `$EXECROOT/<name of the main repository>`. We are planning to change this to
-  `$EXECROOT`, although it's a long term plan because it's a very incompatible
-  change.
-* Files built during the build.
-
-## The process of executing a command
-
-Once the Bazel server gets control and is informed about a command it needs to
-execute, the following sequence of events happens:
-
-1. `BlazeCommandDispatcher` is informed about the new request. It decides
-   whether the command needs a workspace to run in (almost every command except
-   for ones that don't have anything to do with source code, such as version or
-   help) and whether another command is running.
-
-2. The right command is found. Each command must implement the interface
-   `BlazeCommand` and must have the `@Command` annotation (this is a bit of an
-   antipattern; it would be nice if all the metadata a command needs was
-   described by methods on `BlazeCommand`).
-
-3. The command line options are parsed. Each command has different command line
-   options, which are described in the `@Command` annotation.
-
-4. An event bus is created. The event bus is a stream for events that happen
-   during the build. Some of these are exported to outside of Bazel under the
-   aegis of the Build Event Protocol in order to tell the world how the build
-   goes.
-
-5. The command gets control. The most interesting commands are those that run a
-   build: build, test, run, coverage and so on: this functionality is
-   implemented by `BuildTool`.
-
-6. The set of target patterns on the command line is parsed and wildcards like
-   `//pkg:all` and `//pkg/...` are resolved. This is implemented in
-   `AnalysisPhaseRunner.evaluateTargetPatterns()` and reified in Skyframe as
-   `TargetPatternPhaseValue`.
-
-7. The loading/analysis phase is run to produce the action graph (a directed
-   acyclic graph of commands that need to be executed for the build).
-
-8. The execution phase is run. This means that every action required to build
-   the top-level targets that are requested is run.
-
-## Command line options
-
-The command line options for a Bazel invocation are described in an
-`OptionsParsingResult` object, which in turn contains a map from "option
-classes" to the values of the options. An "option class" is a subclass of
-`OptionsBase` and groups command line options together that are related to each
-other. For example:
-
-1. Options related to a programming language (`CppOptions` or `JavaOptions`).
-   These should be a subclass of `FragmentOptions` and are eventually wrapped
-   into a `BuildOptions` object.
-2. Options related to the way Bazel executes actions (`ExecutionOptions`).
-
-These options are designed to be consumed in the analysis phase (either
-through `RuleContext.getFragment()` in Java or `ctx.fragments` in Starlark).
-Some of them (for example, whether to do C++ include scanning or not) are read
-in the execution phase, but that always requires explicit plumbing since
-`BuildConfiguration` is not available then. For more information, see the
-section “Configurations”.
-
-**WARNING:** We like to pretend that `OptionsBase` instances are immutable and
-use them that way (such as part of `SkyKeys`). This is not the case and
-modifying them is a really good way to break Bazel in subtle ways that are hard
-to debug. Unfortunately, making them actually immutable is a large endeavor.
-(Modifying a `FragmentOptions` immediately after construction before anyone else
-gets a chance to keep a reference to it and before `equals()` or `hashCode()` is
-called on it is okay.)
-
-Bazel learns about option classes in the following ways:
-
-1. Some are hard-wired into Bazel (`CommonCommandOptions`).
-2. From the `@Command` annotation on each Bazel command.
-3. From `ConfiguredRuleClassProvider` (these are command line options related
-   to individual programming languages).
-4. Starlark rules can also define their own options (see
-   [here](/rules/config)).
-
-Each option (excluding Starlark-defined options) is a member variable of a
-`FragmentOptions` subclass that has the `@Option` annotation, which specifies
-the name and the type of the command line option along with some help text.
-
-The Java type of the value of a command line option is usually something simple
-(a string, an integer, a Boolean, a label, etc.). However, we also support
-options of more complicated types; in this case, the job of converting from the
-command line string to the data type falls to an implementation of
-`com.google.devtools.common.options.Converter`.
-
-## The source tree, as seen by Bazel
-
-Bazel is in the business of building software, which happens by reading and
-interpreting the source code. The totality of the source code Bazel operates on
-is called "the workspace" and it is structured into repositories, packages and
-rules.
-
-### Repositories
-
-A "repository" is a source tree on which a developer works; it usually
-represents a single project. Bazel's ancestor, Blaze, operated on a monorepo,
-that is, a single source tree that contains all source code used to run the build.
-Bazel, in contrast, supports projects whose source code spans multiple
-repositories. The repository from which Bazel is invoked is called the “main
-repository”, the others are called “external repositories”.
-
-A repository is marked by a file called `WORKSPACE` (or `WORKSPACE.bazel`) in
-its root directory. This file contains information that is "global" to the whole
-build, for example, the set of available external repositories. It works like a
-regular Starlark file, which means that one can `load()` other Starlark files.
-This is commonly used to pull in repositories that are needed by a repository
-that's explicitly referenced (we call this the "`deps.bzl` pattern").
-
-Code of external repositories is symlinked or downloaded under
-`$OUTPUT_BASE/external`.
-
-When running the build, the whole source tree needs to be pieced together; this
-is done by `SymlinkForest`, which symlinks every package in the main repository to
-`$EXECROOT` and every external repository to either `$EXECROOT/external` or
-`$EXECROOT/..` (the former of course makes it impossible to have a package
-called `external` in the main repository; that's why we are migrating away from
-it).
-
-### Packages
-
-Every repository is composed of packages, a collection of related files and
-a specification of the dependencies. These are specified by a file called
-`BUILD` or `BUILD.bazel`. If both exist, Bazel prefers `BUILD.bazel`; the reason
-why `BUILD` files are still accepted is that Bazel’s ancestor, Blaze, used this
-file name. However, `BUILD` turned out to be a commonly used path segment,
-especially on Windows, where file names are case-insensitive.
-
-Packages are independent of each other: changes to the `BUILD` file of a package
-cannot cause other packages to change.
The addition or removal of `BUILD` files
-_can_ change other packages, since recursive globs stop at package boundaries
-and thus the presence of a `BUILD` file stops the recursion.
-
-The evaluation of a `BUILD` file is called "package loading". It's implemented
-in the class `PackageFactory`, works by calling the Starlark interpreter and
-requires knowledge of the set of available rule classes. The result of package
-loading is a `Package` object. It's mostly a map from a string (the name of a
-target) to the target itself.
-
-A large chunk of complexity during package loading is globbing: Bazel does not
-require every source file to be explicitly listed and instead can run globs
-(such as `glob(["**/*.java"])`). Unlike the shell, it supports recursive globs that
-descend into subdirectories (but not into subpackages). This requires access to
-the file system and since that can be slow, we implement all sorts of tricks to
-make it run in parallel and as efficiently as possible.
-
-Globbing is implemented in the following classes:
-
-* `LegacyGlobber`, a fast and blissfully Skyframe-unaware globber
-* `SkyframeHybridGlobber`, a version that uses Skyframe and reverts to
-  the legacy globber in order to avoid “Skyframe restarts” (described below)
-
-The `Package` class itself contains some members that are exclusively used to
-parse the WORKSPACE file and which do not make sense for real packages. This is
-a design flaw because objects describing regular packages should not contain
-fields that describe something else. These include:
-
-* The repository mappings
-* The registered toolchains
-* The registered execution platforms
-
-Ideally, there would be more separation between parsing the WORKSPACE file and
-parsing regular packages so that `Package` does not need to cater for the needs
-of both. This is unfortunately difficult to do because the two are intertwined
-quite deeply.
-
-### Labels, Targets, and Rules
-
-Packages are composed of targets, which have the following types:
-
-1. **Files:** things that are either the input or the output of the build. In
-   Bazel parlance, we call them _artifacts_ (discussed elsewhere). Not all
-   files created during the build are targets; it’s common for an output of
-   Bazel not to have an associated label.
-2. **Rules:** these describe steps to derive their outputs from their inputs.
-   They are generally associated with a programming language (such as
-   `cc_library`, `java_library` or `py_library`), but there are some
-   language-agnostic ones (such as `genrule` or `filegroup`).
-3. **Package groups:** discussed in the [Visibility](#visibility) section.
-
-The name of a target is called a _Label_. The syntax of labels is
-`@repo//pac/kage:name`, where `repo` is the name of the repository the Label is
-in, `pac/kage` is the directory its `BUILD` file is in and `name` is the path of
-the file (if the label refers to a source file) relative to the directory of the
-package. When referring to a target on the command line, some parts of the label
-can be omitted:
-
-1. If the repository is omitted, the label is taken to be in the main
-   repository.
-2. If the package part is omitted (such as `name` or `:name`), the label is
-   taken to be in the package of the current working directory (relative paths
-   containing uplevel references (..) are not allowed).
-
-A kind of a rule (such as "C++ library") is called a "rule class". Rule classes may
-be implemented either in Starlark (the `rule()` function) or in Java (so-called
-“native rules”, type `RuleClass`).
In the long term, every language-specific
-rule will be implemented in Starlark, but some legacy rule families (such as Java
-or C++) are still in Java for the time being.
-
-Starlark rule classes need to be imported at the beginning of `BUILD` files
-using the `load()` statement, whereas Java rule classes are "innately" known by
-Bazel, by virtue of being registered with the `ConfiguredRuleClassProvider`.
-
-Rule classes contain information such as:
-
-1. Their attributes (such as `srcs`, `deps`): their types, default values,
-   constraints, etc.
-2. The configuration transitions and aspects attached to each attribute, if any.
-3. The implementation of the rule.
-4. The transitive info providers the rule "usually" creates.
-
-**Terminology note:** In the code base, we often use “Rule” to mean the target
-created by a rule class. But in Starlark and in user-facing documentation,
-“Rule” should be used exclusively to refer to the rule class itself; the target
-is just a “target”. Also note that despite `RuleClass` having “class” in its
-name, there is no Java inheritance relationship between a rule class and targets
-of that type.
-
-## Skyframe
-
-The evaluation framework underlying Bazel is called Skyframe. Its model is that
-everything that needs to be built during a build is organized into a directed
-acyclic graph with edges pointing from any piece of data to its dependencies,
-that is, the other pieces of data that need to be known to construct it.
-
-The nodes in the graph are called `SkyValue`s and their names are called
-`SkyKey`s. Both are deeply immutable; only immutable objects should be
-reachable from them. This invariant almost always holds, and in case it doesn't
-(such as for the individual options classes `BuildOptions`, which is a member of
-`BuildConfigurationValue` and its `SkyKey`) we try really hard not to change
-them, or to change them only in ways that are not observable from the outside.
-From this it follows that everything that is computed within Skyframe (such as
-configured targets) must also be immutable.
-
-The most convenient way to observe the Skyframe graph is to run `bazel dump
---skyframe=detailed`, which dumps the graph, one `SkyValue` per line. It's best
-to do it for tiny builds, since it can get pretty large.
-
-Skyframe lives in the `com.google.devtools.build.skyframe` package. The
-similarly-named package `com.google.devtools.build.lib.skyframe` contains the
-implementation of Bazel on top of Skyframe. More information about Skyframe is
-available [here](/reference/skyframe).
-
-To evaluate a given `SkyKey` into a `SkyValue`, Skyframe will invoke the
-`SkyFunction` corresponding to the type of the key. During the function's
-evaluation, it may request other dependencies from Skyframe by calling the
-various overloads of `SkyFunction.Environment.getValue()`. This has the
-side-effect of registering those dependencies into Skyframe's internal graph, so
-that Skyframe will know to re-evaluate the function when any of its dependencies
-change. In other words, Skyframe's caching and incremental computation work at
-the granularity of `SkyFunction`s and `SkyValue`s.
-
-Whenever a `SkyFunction` requests a dependency that is unavailable, `getValue()`
-will return null. The function should then yield control back to Skyframe by
-itself returning null. At some later point, Skyframe will evaluate the
-unavailable dependency, then restart the function from the beginning — only this
-time the `getValue()` call will succeed with a non-null result.
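-
-To make the contract concrete, here is a minimal sketch of this protocol. The
-`SkyFunction`, `SkyKey`, `SkyValue` and `Environment` types are the real
-interfaces discussed above; everything else (`DepValue`, `ResultValue` and the
-key construction) is hypothetical, made up purely for illustration.
-
-```java
-// A sketch, assuming Bazel's Skyframe interfaces: request one dependency,
-// and if it is not available yet, return null so Skyframe restarts us.
-class ExampleFunction implements SkyFunction {
-  @Override
-  public SkyValue compute(SkyKey key, Environment env)
-      throws SkyFunctionException, InterruptedException {
-    // Requesting the value also registers a dependency edge in the graph.
-    SkyValue dep = env.getValue(DepValue.keyFor(key)); // hypothetical key factory
-    if (dep == null) {
-      // Not computed yet: yield; Skyframe evaluates the dependency and then
-      // re-runs compute() from the beginning.
-      return null;
-    }
-    return ResultValue.of(dep); // hypothetical SkyValue subclass
-  }
-}
-```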
-
-A consequence of this is that any computation performed inside the `SkyFunction`
-prior to the restart must be repeated. But this does not include work done to
-evaluate dependency `SkyValues`, which are cached. Therefore, we commonly work
-around this issue by:
-
-1. Declaring dependencies in batches (by using `getValuesAndExceptions()`) to
-   limit the number of restarts.
-2. Breaking up a `SkyValue` into separate pieces computed by different
-   `SkyFunction`s, so that they can be computed and cached independently. This
-   should be done strategically, since it has the potential to increase memory
-   usage.
-3. Storing state between restarts, either using
-   `SkyFunction.Environment.getState()`, or keeping an ad hoc static cache
-   "behind the back of Skyframe".
-
-Fundamentally, we need these types of workarounds because we routinely have
-hundreds of thousands of in-flight Skyframe nodes, and Java doesn't support
-lightweight threads.
-
-## Starlark
-
-Starlark is the domain-specific language people use to configure and extend
-Bazel. It's conceived as a restricted subset of Python that has far fewer types,
-more restrictions on control flow, and most importantly, strong immutability
-guarantees to enable concurrent reads. It is not Turing-complete, which
-discourages some (but not all) users from trying to accomplish general
-programming tasks within the language.
-
-Starlark is implemented in the `net.starlark.java` package.
-It also has an independent Go implementation
-[here](https://github.com/google/starlark-go). The Java
-implementation used in Bazel is currently an interpreter.
-
-Starlark is used in several contexts, including:
-
-1. **The `BUILD` language.** This is where new targets are defined. Starlark
-   code running in this context only has access to the contents of the `BUILD`
-   file itself and `.bzl` files loaded by it.
-2. **Rule definitions.** This is how new rules (such as support for a new
-   language) are defined. Starlark code running in this context has access to
-   the configuration and data provided by its direct dependencies (more on this
-   later).
-3. **The WORKSPACE file.** This is where external repositories (code that's not
-   in the main source tree) are defined.
-4. **Repository rule definitions.** This is where new external repository types
-   are defined. Starlark code running in this context can run arbitrary code on
-   the machine where Bazel is running, and reach outside the workspace.
-
-The dialects available for `BUILD` and `.bzl` files are slightly different
-because they express different things. A list of differences is available
-[here](/rules/language#differences-between-build-and-bzl-files).
-
-More information about Starlark is available
-[here](/rules/language).
-
-## The loading/analysis phase
-
-The loading/analysis phase is where Bazel determines what actions are needed to
-build a particular rule. Its basic unit is a "configured target", which is,
-quite sensibly, a (target, configuration) pair.
-
-It's called the "loading/analysis phase" because it can be split into two
-distinct parts, which used to be serialized, but they can now overlap in time:
-
-1. Loading packages, that is, turning `BUILD` files into the `Package` objects
-   that represent them
-2. 
Analyzing configured targets, that is, running the implementation of the
-   rules to produce the action graph
-
-Each configured target in the transitive closure of the configured targets
-requested on the command line must be analyzed bottom-up; that is, leaf nodes
-first, then up to the ones on the command line. The inputs to the analysis of
-a single configured target are:
-
-1. **The configuration.** ("how" to build that rule; for example, the target
-   platform but also things like command line options the user wants to be
-   passed to the C++ compiler)
-2. **The direct dependencies.** Their transitive info providers are available
-   to the rule being analyzed. They are called like that because they provide a
-   "roll-up" of the information in the transitive closure of the configured
-   target, such as all the .jar files on the classpath or all the .o files that
-   need to be linked into a C++ binary.
-3. **The target itself**. This is the result of loading the package the target
-   is in. For rules, this includes its attributes, which is usually what
-   matters.
-4. **The implementation of the configured target.** For rules, this can either
-   be in Starlark or in Java. All non-rule configured targets are implemented
-   in Java.
-
-The output of analyzing a configured target is:
-
-1. The transitive info providers that configured targets that depend on it can
-   access
-2. The artifacts it can create and the actions that produce them.
-
-The API offered to Java rules is `RuleContext`, which is the equivalent of the
-`ctx` argument of Starlark rules. Its API is more powerful, but at the same
-time, it's easier to do Bad Things™, for example to write code whose time or
-space complexity is quadratic (or worse), to make the Bazel server crash with a
-Java exception or to violate invariants (such as by inadvertently modifying an
-`Options` instance or by making a configured target mutable).
-
-The algorithm that determines the direct dependencies of a configured target
-lives in `DependencyResolver.dependentNodeMap()`.
-
-### Configurations
-
-Configurations are the "how" of building a target: for what platform, with what
-command line options, etc.
-
-The same target can be built for multiple configurations in the same build. This
-is useful, for example, when the same code is used for a tool that's run during
-the build and for the target code while cross-compiling, or when we are
-building a fat Android app (one that contains native code for multiple CPU
-architectures).
-
-Conceptually, the configuration is a `BuildOptions` instance. However, in
-practice, `BuildOptions` is wrapped by `BuildConfiguration`, which provides
-additional sundry pieces of functionality. It propagates from the top of the
-dependency graph to the bottom. If it changes, the build needs to be
-re-analyzed.
-
-This results in anomalies like having to re-analyze the whole build if, for
-example, the number of requested test runs changes, even though that only
-affects test targets (we have plans to "trim" configurations so that this is
-not the case, but it's not ready yet).
-
-When a rule implementation needs part of the configuration, it needs to declare
-it in its definition using `RuleClass.Builder.requiresConfigurationFragments()`.
-This is both to avoid mistakes (such as Python rules using the Java fragment) and
-to facilitate configuration trimming so that, for example, if Python options
-change, C++ targets don't need to be re-analyzed.
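-
-As a concrete sketch of the two sides of that contract (`CppConfiguration`
-stands in for any fragment class; the `builder` and `ruleContext` objects come
-from the surrounding rule-definition and factory code, which is elided here):
-
-```java
-// In the rule class definition: declare which fragments the rule may read.
-builder.requiresConfigurationFragments(CppConfiguration.class);
-
-// Later, in the configured target factory, during analysis:
-CppConfiguration cppConfig = ruleContext.getFragment(CppConfiguration.class);
-```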
-
-The configuration of a rule is not necessarily the same as that of its "parent"
-rule. The process of changing the configuration in a dependency edge is called a
-"configuration transition". It can happen in two places:
-
-1. On a dependency edge. These transitions are specified in
-   `Attribute.Builder.cfg()` and are functions from a `Rule` (where the
-   transition happens) and a `BuildOptions` (the original configuration) to one
-   or more `BuildOptions` (the output configuration).
-2. On any incoming edge to a configured target. These are specified in
-   `RuleClass.Builder.cfg()`.
-
-The relevant classes are `TransitionFactory` and `ConfigurationTransition`.
-
-Configuration transitions are used, for example:
-
-1. To declare that a particular dependency is used during the build and it
-   should thus be built for the execution architecture.
-2. To declare that a particular dependency must be built for multiple
-   architectures (such as for native code in fat Android APKs).
-
-If a configuration transition results in multiple configurations, it's called a
-_split transition._
-
-Configuration transitions can also be implemented in Starlark (documentation
-[here](/rules/config)).
-
-### Transitive info providers
-
-Transitive info providers are a way (and the _only_ way) for configured targets
-to tell things about other configured targets that depend on them. The reason why
-"transitive" is in their name is that this is usually some sort of roll-up of
-the transitive closure of a configured target.
-
-There is generally a 1:1 correspondence between Java transitive info providers
-and Starlark ones (the exception is `DefaultInfo`, which is an amalgamation of
-`FileProvider`, `FilesToRunProvider` and `RunfilesProvider` because that API was
-deemed to be more Starlark-ish than a direct transliteration of the Java one).
-Their key is one of the following things:
-
-1. A Java Class object. This is only available for providers that are not
-   accessible from Starlark. These providers are a subclass of
-   `TransitiveInfoProvider`.
-2. A string. This is legacy and heavily discouraged since it's susceptible to
-   name clashes. Such transitive info providers are direct subclasses of
-   `build.lib.packages.Info`.
-3. A provider symbol. This can be created from Starlark using the `provider()`
-   function and is the recommended way to create new providers. The symbol is
-   represented by a `Provider.Key` instance in Java.
-
-New providers implemented in Java should be implemented using `BuiltinProvider`.
-`NativeProvider` is deprecated (we haven't had time to remove it yet) and
-`TransitiveInfoProvider` subclasses cannot be accessed from Starlark.
-
-### Configured targets
-
-Configured targets are implemented as `RuleConfiguredTargetFactory`. There is a
-subclass for each rule class implemented in Java. Starlark configured targets
-are created through `StarlarkRuleConfiguredTargetUtil.buildRule()`.
-
-Configured target factories should use `RuleConfiguredTargetBuilder` to
-construct their return value. It consists of the following things (a sketch
-follows the list):
-
-1. Their `filesToBuild`, the hazy concept of "the set of files this rule
-   represents." These are the files that get built when the configured target
-   is on the command line or in the srcs of a genrule.
-2. Their runfiles, regular and data.
-3. Their output groups. These are various "other sets of files" the rule can
-   build. They can be accessed using the `output_group` attribute of the
-   `filegroup` rule in `BUILD` files and using the `OutputGroupInfo` provider
-   in Java.
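-
-Schematically, a Java configured target factory ends with something like the
-following (the method names mirror the real builder API, but treat the details
-and the local variables as illustrative assumptions, not working code):
-
-```java
-// Assemble the three pieces listed above into a configured target.
-return new RuleConfiguredTargetBuilder(ruleContext)
-    .setFilesToBuild(filesToBuild) // "the set of files this rule represents"
-    .addProvider(RunfilesProvider.class, RunfilesProvider.simple(runfiles))
-    .addOutputGroup("my_output_group", extraOutputs) // an extra set of files
-    .build();
-```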
-
-### Runfiles
-
-Some binaries need data files to run. A prominent example is tests that need
-input files. This is represented in Bazel by the concept of "runfiles". A
-"runfiles tree" is a directory tree of the data files for a particular binary.
-It is created in the file system as a symlink tree with individual symlinks
-pointing to the files in the source or output trees.
-
-A set of runfiles is represented as a `Runfiles` instance. It is conceptually a
-map from the path of a file in the runfiles tree to the `Artifact` instance that
-represents it. It's a little more complicated than a single `Map` for two
-reasons:
-
-* Most of the time, the runfiles path of a file is the same as its execpath.
-  We use this to save some RAM.
-* There are various legacy kinds of entries in runfiles trees, which also need
-  to be represented.
-
-Runfiles are collected using `RunfilesProvider`: an instance of this class
-represents the runfiles that a configured target (such as a library) and its
-transitive closure need, and they are gathered like a nested set (in fact, they
-are implemented using nested sets under the cover): each target unions the
-runfiles of its dependencies, adds some of its own, then sends the resulting
-set upwards in the dependency graph. A `RunfilesProvider` instance contains two
-`Runfiles` instances, one for when the rule is depended on through the "data"
-attribute and one for every other kind of incoming dependency. This is because
-a target sometimes presents different runfiles when depended on through a data
-attribute than otherwise. This is undesired legacy behavior that we haven't
-gotten around to removing yet.
-
-Runfiles of binaries are represented as an instance of `RunfilesSupport`. This
-is different from `Runfiles` because `RunfilesSupport` has the capability of
-actually being built (unlike `Runfiles`, which is just a mapping). This
-necessitates the following additional components:
-
-* **The input runfiles manifest.** This is a serialized description of the
-  runfiles tree. It is used as a proxy for the contents of the runfiles tree
-  and Bazel assumes that the runfiles tree changes if and only if the contents
-  of the manifest change.
-* **The output runfiles manifest.** This is used by runtime libraries that
-  handle runfiles trees, notably on Windows, which sometimes doesn't support
-  symbolic links.
-* **The runfiles middleman.** In order for a runfiles tree to exist, one needs
-  to build the symlink tree and the artifact the symlinks point to. In order
-  to decrease the number of dependency edges, the runfiles middleman can be
-  used to represent all these.
-* **Command line arguments** for running the binary whose runfiles the
-  `RunfilesSupport` object represents.
-
-### Aspects
-
-Aspects are a way to "propagate computation down the dependency graph". They are
-described for users of Bazel
-[here](/rules/aspects). A good
-motivating example is protocol buffers: a `proto_library` rule should not know
-about any particular language, but building the implementation of a protocol
-buffer message (the “basic unit” of protocol buffers) in any programming
-language should be coupled to the `proto_library` rule so that if two targets in
-the same language depend on the same protocol buffer, it gets built only once.
-
-Just like configured targets, they are represented in Skyframe as a `SkyValue`
-and the way they are constructed is very similar to how configured targets are
-built: they have a factory class called `ConfiguredAspectFactory` that has
-access to a `RuleContext`, but unlike configured target factories, it also knows
-about the configured target it is attached to and its providers.
-
-The set of aspects propagated down the dependency graph is specified for each
-attribute using the `Attribute.Builder.aspects()` function. There are a few
-confusingly-named classes that participate in the process:
-
-1. `AspectClass` is the implementation of the aspect. It can be either in Java
-   (in which case it's a subclass) or in Starlark (in which case it's an
-   instance of `StarlarkAspectClass`). It's analogous to
-   `RuleConfiguredTargetFactory`.
-2. `AspectDefinition` is the definition of the aspect; it includes the
-   providers it requires, the providers it provides and contains a reference to
-   its implementation, such as the appropriate `AspectClass` instance. It's
-   analogous to `RuleClass`.
-3. `AspectParameters` is a way to parametrize an aspect that is propagated down
-   the dependency graph. It's currently a string to string map. A good example
-   of why it's useful is protocol buffers: if a language has multiple APIs, the
-   information as to which API the protocol buffers should be built for should
-   be propagated down the dependency graph.
-4. `Aspect` represents all the data that's needed to compute an aspect that
-   propagates down the dependency graph. It consists of the aspect class, its
-   definition and its parameters.
-5. `RuleAspect` is the function that determines which aspects a particular rule
-   should propagate. It's a `Rule` -> `Aspect` function.
-
-A somewhat unexpected complication is that aspects can attach to other aspects;
-for example, an aspect collecting the classpath for a Java IDE will probably
-want to know about all the .jar files on the classpath, but some of them are
-protocol buffers. In that case, the IDE aspect will want to attach to the
-(`proto_library` rule + Java proto aspect) pair.
-
-The complexity of aspects on aspects is captured in the class
-`AspectCollection`.
-
-### Platforms and toolchains
-
-Bazel supports multi-platform builds, that is, builds where there may be
-multiple architectures where build actions run and multiple architectures for
-which code is built. These architectures are referred to as _platforms_ in Bazel
-parlance (full documentation
-[here](/docs/platforms)).
-
-A platform is described by a key-value mapping from _constraint settings_ (such as
-the concept of "CPU architecture") to _constraint values_ (such as a particular CPU
-like x86\_64). We have a "dictionary" of the most commonly used constraint
-settings and values in the `@platforms` repository.
-
-The concept of _toolchain_ comes from the fact that depending on what platforms
-the build is running on and what platforms are targeted, one may need to use
-different compilers; for example, a particular C++ toolchain may run on a
-specific OS and be able to target some other OSes. Bazel must determine the C++
-compiler that is used based on the execution and target platforms that are set
-(documentation for toolchains
-[here](/docs/toolchains)).
-
-To do this, toolchains are annotated with the set of execution and target
-platform constraints they support. To make this possible, the definition of
-a toolchain is split into two parts:
-
-1. 
A `toolchain()` rule that describes the set of execution and target
-   constraints a toolchain supports and tells what kind (such as C++ or Java) of
-   toolchain it is (the latter is represented by the `toolchain_type()` rule).
-2. A language-specific rule that describes the actual toolchain (such as
-   `cc_toolchain()`).
-
-This is done in this way because we need to know the constraints for every
-toolchain in order to do toolchain resolution, and language-specific
-`*_toolchain()` rules contain much more information than that, so they take more
-time to load.
-
-Execution platforms are specified in one of the following ways:
-
-1. In the WORKSPACE file using the `register_execution_platforms()` function
-2. On the command line using the `--extra_execution_platforms` command line
-   option
-
-The set of available execution platforms is computed in
-`RegisteredExecutionPlatformsFunction`.
-
-The target platform for a configured target is determined by
-`PlatformOptions.computeTargetPlatform()`. It's a list of platforms because we
-eventually want to support multiple target platforms, but it's not implemented
-yet.
-
-The set of toolchains to be used for a configured target is determined by
-`ToolchainResolutionFunction`. It is a function of:
-
-* The set of registered toolchains (in the WORKSPACE file and the
-  configuration)
-* The desired execution and target platforms (in the configuration)
-* The set of toolchain types that are required by the configured target (in
-  `UnloadedToolchainContextKey`)
-* The set of execution platform constraints of the configured target (the
-  `exec_compatible_with` attribute) and the configuration
-  (`--experimental_add_exec_constraints_to_targets`), in
-  `UnloadedToolchainContextKey`
-
-Its result is an `UnloadedToolchainContext`, which is essentially a map from
-toolchain type (represented as a `ToolchainTypeInfo` instance) to the label of
-the selected toolchain. It's called "unloaded" because it does not contain the
-toolchains themselves, only their labels.
-
-Then the toolchains are actually loaded using `ResolvedToolchainContext.load()`
-and used by the implementation of the configured target that requested them.
-
-We also have a legacy system that relies on there being one single "host"
-configuration and target configurations being represented by various
-configuration flags, such as `--cpu`. We are gradually transitioning to the above
-system. In order to handle cases where people rely on the legacy configuration
-values, we have implemented
-[platform mappings](https://docs.google.com/document/d/1Vg_tPgiZbSrvXcJ403vZVAGlsWhH9BUDrAxMOYnO0Ls)
-to translate between the legacy flags and the new-style platform constraints.
-Their code is in `PlatformMappingFunction` and uses a non-Starlark "little
-language".
-
-### Constraints
-
-Sometimes one wants to designate a target as being compatible with only a few
-platforms. Bazel has (unfortunately) multiple mechanisms to achieve this end:
-
-* Rule-specific constraints
-* `environment_group()` / `environment()`
-* Platform constraints
-
-Rule-specific constraints are mostly used within Google for Java rules; they are
-on their way out and they are not available in Bazel, but the source code may
-contain references to them. The attribute that governs this is called
-`constraints=`.
-
-#### environment_group() and environment()
-
-These rules are a legacy mechanism and are not widely used.
-
-All build rules can declare which "environments" they can be built for, where an
-"environment" is an instance of the `environment()` rule.
-
-There are various ways supported environments can be specified for a rule:
-
-1. Through the `restricted_to=` attribute. This is the most direct form of
-   specification; it declares the exact set of environments the rule supports
-   for this group.
-2. Through the `compatible_with=` attribute. This declares environments a rule
-   supports in addition to "standard" environments that are supported by
-   default.
-3. Through the package-level attributes `default_restricted_to=` and
-   `default_compatible_with=`.
-4. Through default specifications in `environment_group()` rules. Every
-   environment belongs to a group of thematically related peers (such as "CPU
-   architectures", "JDK versions" or "mobile operating systems"). The
-   definition of an environment group includes which of these environments
-   should be supported by "default" if not otherwise specified by the
-   `restricted_to=` / `environment()` attributes. A rule with no such
-   attributes inherits all defaults.
-5. Through a rule class default. This overrides global defaults for all
-   instances of the given rule class. This can be used, for example, to make
-   all `*_test` rules testable without each instance having to explicitly
-   declare this capability.
-
-`environment()` is implemented as a regular rule whereas `environment_group()`
-is both a subclass of `Target` but not of `Rule` (`EnvironmentGroup`) and a
-function that is available by default from Starlark
-(`StarlarkLibrary.environmentGroup()`), which eventually creates an eponymous
-target. This is to avoid a cyclic dependency that would arise because each
-environment needs to declare the environment group it belongs to and each
-environment group needs to declare its default environments.
-
-A build can be restricted to a certain environment with the
-`--target_environment` command line option.
-
-The implementation of the constraint check is in
-`RuleContextConstraintSemantics` and `TopLevelConstraintSemantics`.
-
-#### Platform constraints
-
-The current "official" way to describe what platforms a target is compatible
-with is by using the same constraints used to describe toolchains and platforms.
-It's under review in pull request
-[#10945](https://github.com/bazelbuild/bazel/pull/10945).
-
-### Visibility
-
-If you work on a large codebase with a lot of developers (like at Google), you
-want to take care to prevent everyone else from arbitrarily depending on your
-code. Otherwise, as per [Hyrum's law](https://www.hyrumslaw.com/),
-people _will_ come to rely on behaviors that you considered to be implementation
-details.
-
-Bazel supports this through a mechanism called _visibility_: you can declare
-that a particular target can only be depended on using the
-[visibility](/reference/be/common-definitions#common-attributes) attribute. This
-attribute is a little special because, although it holds a list of labels, these
-labels may encode a pattern over package names rather than a pointer to any
-particular target. (Yes, this is a design flaw.)
-
-This is implemented in the following places:
-
-* The `RuleVisibility` interface represents a visibility declaration. It can
-  be either a constant (fully public or fully private) or a list of labels.
-* Labels can refer to either package groups (predefined list of packages), to
-  packages directly (`//pkg:__pkg__`) or subtrees of packages
-  (`//pkg:__subpackages__`).
This is different from the command line syntax,
-  which uses `//pkg:*` or `//pkg/...`.
-* Package groups are implemented as their own target (`PackageGroup`) and
-  configured target (`PackageGroupConfiguredTarget`). We could probably
-  replace these with simple rules if we wanted to. Their logic is implemented
-  with the help of: `PackageSpecification`, which corresponds to a
-  single pattern like `//pkg/...`; `PackageGroupContents`, which corresponds
-  to a single `package_group`'s `packages` attribute; and
-  `PackageSpecificationProvider`, which aggregates over a `package_group` and
-  its transitive `includes`.
-* The conversion from visibility label lists to dependencies is done in
-  `DependencyResolver.visitTargetVisibility` and a few other miscellaneous
-  places.
-* The actual check is done in
-  `CommonPrerequisiteValidator.validateDirectPrerequisiteVisibility()`.
-
-### Nested sets
-
-Oftentimes, a configured target aggregates a set of files from its dependencies,
-adds its own, and wraps the aggregate set into a transitive info provider so
-that configured targets that depend on it can do the same. Examples:
-
-* The C++ header files used for a build
-* The object files that represent the transitive closure of a `cc_library`
-* The set of .jar files that need to be on the classpath for a Java rule to
-  compile or run
-* The set of Python files in the transitive closure of a Python rule
-
-If we did this the naive way by using, for example, `List` or `Set`, we'd end up with
-quadratic memory usage: if there is a chain of N rules and each rule adds a
-file, we'd have 1+2+...+N collection members.
-
-In order to get around this problem, we came up with the concept of a
-`NestedSet`. It's a data structure that is composed of other `NestedSet`
-instances and some members of its own, thereby forming a directed acyclic graph
-of sets. They are immutable and their members can be iterated over. We define
-multiple iteration orders (`NestedSet.Order`): preorder, postorder, topological
-(a node always comes after its ancestors) and "don't care, but it should be the
-same each time".
-
-The same data structure is called `depset` in Starlark.
-
-### Artifacts and Actions
-
-The actual build consists of a set of commands that need to be run to produce
-the output the user wants. The commands are represented as instances of the
-class `Action` and the files are represented as instances of the class
-`Artifact`. They are arranged in a bipartite, directed, acyclic graph called the
-"action graph".
-
-Artifacts come in two kinds: source artifacts (ones that are available
-before Bazel starts executing) and derived artifacts (ones that need to be
-built). Derived artifacts can themselves be of multiple kinds:
-
-1. **Regular artifacts.** These are checked for up-to-dateness by computing
-   their checksum, with mtime as a shortcut; we don't checksum the file if its
-   ctime hasn't changed.
-2. **Unresolved symlink artifacts.** These are checked for up-to-dateness by
-   calling readlink(). Unlike regular artifacts, these can be dangling
-   symlinks. Usually used in cases where one then packs up some files into an
-   archive of some sort.
-3. **Tree artifacts.** These are not single files, but directory trees. They
-   are checked for up-to-dateness by checking the set of files in them and
-   their contents. They are represented as a `TreeArtifact`.
-4. **Constant metadata artifacts.** Changes to these artifacts don't trigger a
-   rebuild.
This is used exclusively for build stamp information: we don't want
-   to do a rebuild just because the current time changed.
-
-There is no fundamental reason why source artifacts cannot be tree artifacts or
-unresolved symlink artifacts; it's just that we haven't implemented it yet (we
-should, though -- referencing a source directory in a `BUILD` file is one of the
-few known long-standing incorrectness issues with Bazel; we have an
-implementation that kind of works, which is enabled by the
-`BAZEL_TRACK_SOURCE_DIRECTORIES=1` JVM property).
-
-A notable kind of `Artifact` is the middleman. Middlemen are indicated by
-`Artifact` instances that are the outputs of `MiddlemanAction`. They are used to
-special-case some things:
-
-* Aggregating middlemen are used to group artifacts together. This is so that
-  if a lot of actions use the same large set of inputs, we don't have N\*M
-  dependency edges, only N+M (they are being replaced with nested sets)
-* Scheduling dependency middlemen ensure that an action runs before another.
-  They are mostly used for linting but also for C++ compilation (see
-  `CcCompilationContext.createMiddleman()` for an explanation)
-* Runfiles middlemen are used to ensure the presence of a runfiles tree so
-  that one does not separately need to depend on the output manifest and every
-  single artifact referenced by the runfiles tree.
-
-Actions are best understood as a command that needs to be run, the environment
-it needs and the set of outputs it produces. The following things are the main
-components of the description of an action:
-
-* The command line that needs to be run
-* The input artifacts it needs
-* The environment variables that need to be set
-* Annotations that describe the environment (such as platform) it needs to run in
-
-There are also a few other special cases, like writing a file whose content is
-known to Bazel. They are a subclass of `AbstractAction`. Most of the actions are
-a `SpawnAction` or a `StarlarkAction` (these are essentially the same and should
-arguably not be separate classes), although Java and C++ have their own action
-types (`JavaCompileAction`, `CppCompileAction` and `CppLinkAction`).
-
-We eventually want to move everything to `SpawnAction`; `JavaCompileAction` is
-pretty close, but C++ is a bit of a special-case due to .d file parsing and
-include scanning.
-
-The action graph is mostly "embedded" into the Skyframe graph: conceptually, the
-execution of an action is represented as an invocation of
-`ActionExecutionFunction`. The mapping from an action graph dependency edge to a
-Skyframe dependency edge is described in
-`ActionExecutionFunction.getInputDeps()` and `Artifact.key()` and has a few
-optimizations in order to keep the number of Skyframe edges low:
-
-* Derived artifacts do not have their own `SkyValue`s. Instead,
-  `Artifact.getGeneratingActionKey()` is used to find out the key for the
-  action that generates them
-* Nested sets have their own Skyframe key.
-
-### Shared actions
-
-Some actions are generated by multiple configured targets; Starlark rules are
-more limited since they are only allowed to put their derived actions into a
-directory determined by their configuration and their package (but even so,
-rules in the same package can conflict), but rules implemented in Java can put
-derived artifacts anywhere.
-
-This is considered to be a misfeature, but getting rid of it is really hard
-because it produces significant savings in execution time when, for example, a
-source file needs to be processed somehow and that file is referenced by
-multiple rules (handwave-handwave). This comes at the cost of some RAM: each
-instance of a shared action needs to be stored in memory separately.
-
-If two actions generate the same output file, they must be exactly the same:
-have the same inputs, the same outputs and run the same command line. This
-equivalence relation is implemented in `Actions.canBeShared()` and it is
-verified between the analysis and execution phases by looking at every Action.
-This is implemented in `SkyframeActionExecutor.findAndStoreArtifactConflicts()`
-and is one of the few places in Bazel that requires a "global" view of the
-build.
-
-## The execution phase
-
-This is when Bazel actually starts running build actions, such as commands that
-produce outputs.
-
-The first thing Bazel does after the analysis phase is to determine what
-Artifacts need to be built. The logic for this is encoded in
-`TopLevelArtifactHelper`; roughly speaking, it's the `filesToBuild` of the
-configured targets on the command line and the contents of a special output
-group for the explicit purpose of expressing "if this target is on the command
-line, build these artifacts".
-
-The next step is creating the execution root. Since Bazel has the option to read
-source packages from different locations in the file system (`--package_path`),
-it needs to provide locally executed actions with a full source tree. This is
-handled by the class `SymlinkForest` and works by taking note of every target
-used in the analysis phase and building up a single directory tree that symlinks
-every package with a used target from its actual location. An alternative would
-be to pass the correct paths to commands (taking `--package_path` into account).
-This is undesirable because:
-
-* It changes action command lines when a package is moved from a package path
-  entry to another (which used to be a common occurrence)
-* It results in different command lines if an action is run remotely than if
-  it's run locally
-* It requires a command line transformation specific to the tool in use
-  (consider, for example, the difference between Java classpaths and C++
-  include paths)
-* Changing the command line of an action invalidates its action cache entry
-* `--package_path` is slowly and steadily being deprecated
-
-Then, Bazel starts traversing the action graph (the bipartite, directed graph
-composed of actions and their input and output artifacts) and running actions.
-The execution of each action is represented by an instance of the `SkyValue`
-class `ActionExecutionValue`.
-
-Since running an action is expensive, we have a few layers of caching that can
-be hit behind Skyframe:
-
-* `ActionExecutionFunction.stateMap` contains data to make Skyframe restarts
-  of `ActionExecutionFunction` cheap
-* The local action cache contains data about the state of the file system
-* Remote execution systems usually also contain their own cache
-
-### The local action cache
-
-This cache is another layer that sits behind Skyframe; even if an action is
-re-executed in Skyframe, it can still be a hit in the local action cache. It
-represents the state of the local file system and it's serialized to disk, which
-means that when one starts up a new Bazel server, one can get local action cache
-hits even though the Skyframe graph is empty.
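-
-Conceptually, an entry in this cache has roughly the following shape (a
-simplified sketch for illustration, not Bazel's real classes; the precise
-contents are described next):
-
-```java
-import java.util.Map;
-
-// What a local action cache entry must capture so that a fresh server can
-// decide whether an action needs to re-run: content digests of its inputs
-// and outputs, plus an "action key" fingerprinting everything else.
-record ActionCacheEntry(
-    Map<String, byte[]> inputDigests,   // input exec path -> content digest
-    Map<String, byte[]> outputDigests,  // output exec path -> content digest
-    String actionKey) {}                // typically the executed command line
-```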
-
-This cache is checked for hits using the method
-`ActionCacheChecker.getTokenIfNeedToExecute()`.
-
-Contrary to its name, it's a map from the path of a derived artifact to the
-action that emitted it. The action is described as:
-
-1. The set of its input and output files and their checksum
-2. Its "action key", which is usually the command line that was executed, but
-   in general, represents everything that's not captured by the checksum of the
-   input files (for example, for `FileWriteAction`, it's the checksum of the
-   data that's written)
-
-There is also a highly experimental "top-down action cache" that is still under
-development, which uses transitive hashes to avoid going to the cache as many
-times.
-
-### Input discovery and input pruning
-
-Some actions are more complicated than just having a set of inputs. Changes to
-the set of inputs of an action come in two forms:
-
-* An action may discover new inputs before its execution or decide that some
-  of its inputs are not actually necessary. The canonical example is C++,
-  where it's better to make an educated guess about what header files a C++
-  file uses from its transitive closure so that we don't need to send every
-  file to remote executors; therefore, we have an option not to register every
-  header file as an "input", but scan the source file for transitively
-  included headers and only mark those header files as inputs that are
-  mentioned in `#include` statements (we overestimate so that we don't need to
-  implement a full C preprocessor). This option is currently hard-wired to
-  "false" in Bazel and is only used at Google.
-* An action may realize that some files were not used during its execution. In
-  C++, this is called ".d files": the compiler tells which header files were
-  used after the fact, and in order to avoid the embarrassment of having worse
-  incrementality than Make, Bazel makes use of this fact. This offers a better
-  estimate than the include scanner because it relies on the compiler.
-
-These are implemented using methods on `Action`:
-
-1. `Action.discoverInputs()` is called. It should return a nested set of
-   Artifacts that are determined to be required. These must be source artifacts
-   so that there are no dependency edges in the action graph that don't have an
-   equivalent in the configured target graph.
-2. The action is executed by calling `Action.execute()`.
-3. At the end of `Action.execute()`, the action can call
-   `Action.updateInputs()` to tell Bazel that not all of its inputs were
-   needed. This can result in incorrect incremental builds if a used input is
-   reported as unused.
-
-When an action cache returns a hit on a fresh Action instance (such as one
-created after a server restart), Bazel calls `updateInputs()` itself so that the
-set of inputs reflects the result of input discovery and pruning done before.
-
-Starlark actions can make use of the facility to declare some inputs as unused
-using the `unused_inputs_list=` argument of
-`ctx.actions.run()`.
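-
-As a sketch, such a rule implementation could look like this (the `_pruner`
-tool and its argument convention are invented; the tool is expected to write
-the paths of the inputs it did not read into the file passed as
-`unused_inputs_list`):
-
-```python
-def _pruning_impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name + ".out")
-    unused = ctx.actions.declare_file(ctx.label.name + ".unused")
-    ctx.actions.run(
-        executable = ctx.executable._pruner,
-        arguments = [out.path, unused.path] +
-                    [f.path for f in ctx.files.srcs],
-        inputs = ctx.files.srcs,
-        outputs = [out, unused],
-        # Bazel prunes the inputs listed in this file from later builds.
-        unused_inputs_list = unused,
-    )
-    return [DefaultInfo(files = depset([out]))]
-```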
-
-### Various ways to run actions: Strategies/ActionContexts
-
-Some actions can be run in different ways. For example, a command line can be
-executed locally, locally but in various kinds of sandboxes, or remotely. The
-concept that embodies this is called an `ActionContext` (or `Strategy`, since we
-successfully went only halfway with a rename...)
-
-The life cycle of an action context is as follows:
-
-1. When the execution phase is started, `BlazeModule` instances are asked what
-   action contexts they have. This happens in the constructor of
-   `ExecutionTool`. Action context types are identified by a Java `Class`
-   instance that refers to a sub-interface of `ActionContext`, which the action
-   context in question must implement.
-2. The appropriate action context is selected from the available ones and is
-   forwarded to `ActionExecutionContext` and `BlazeExecutor`.
-3. Actions request contexts using `ActionExecutionContext.getContext()` and
-   `BlazeExecutor.getStrategy()` (there should really be only one way to do
-   it…)
-
-Strategies are free to call other strategies to do their jobs; this is used, for
-example, in the dynamic strategy that starts actions both locally and remotely,
-then uses whichever finishes first.
-
-One notable strategy is the one that implements persistent worker processes
-(`WorkerSpawnStrategy`). The idea is that some tools have a long startup time
-and should therefore be reused between actions instead of starting one anew for
-every action. (This does represent a potential correctness issue, since Bazel
-relies on the promise of the worker process that it doesn't carry observable
-state between individual requests.)
-
-If the tool changes, the worker process needs to be restarted. Whether a worker
-can be reused is determined by computing a checksum for the tool used using
-`WorkerFilesHash`. It relies on knowing which inputs of the action represent
-part of the tool and which represent inputs; this is determined by the creator
-of the Action: `Spawn.getToolFiles()` and the runfiles of the `Spawn` are
-counted as parts of the tool.
-
-More information about strategies (or action contexts!):
-
-* Information about various strategies for running actions is available
-  [here](https://jmmv.dev/2019/12/bazel-strategies.html).
-* Information about the dynamic strategy, one where we run an action both
-  locally and remotely to see which finishes first, is available
-  [here](https://jmmv.dev/series.html#Bazel%20dynamic%20execution).
-* Information about the intricacies of executing actions locally is available
-  [here](https://jmmv.dev/2019/11/bazel-process-wrapper.html).
-
-### The local resource manager
-
-Bazel _can_ run many actions in parallel. The number of local actions that
-_should_ be run in parallel differs from action to action: the more resources an
-action requires, the fewer instances should be running at the same time to avoid
-overloading the local machine.
-
-This is implemented in the class `ResourceManager`: each action has to be
-annotated with an estimate of the local resources it requires in the form of a
-`ResourceSet` instance (CPU and RAM). Then when action contexts do something
-that requires local resources, they call `ResourceManager.acquireResources()`
-and are blocked until the required resources are available.
-
-A more detailed description of local resource management is available
-[here](https://jmmv.dev/2019/12/bazel-local-resources.html).
-
-### The structure of the output directory
-
-Each action requires a separate place in the output directory where it places
-its outputs. The location of derived artifacts is usually as follows:
-
-```
-$EXECROOT/bazel-out/<configuration>/bin/<package>/<artifact name>
-```
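-
-For instance, the main output of a hypothetical `//foo:bar` target built on
-Linux x86-64 in the default (fastbuild) configuration would typically land at:
-
-```
-$EXECROOT/bazel-out/k8-fastbuild/bin/foo/bar
-```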
-
-How is the name of the directory that is associated with a particular
-configuration determined? There are two conflicting desirable properties:
-
-1. If two configurations can occur in the same build, they should have
-   different directories so that both can have their own version of the same
-   action; otherwise, if the two configurations disagree about, say, the
-   command line of an action producing the same output file, Bazel doesn't know
-   which action to choose (an "action conflict")
-2. If two configurations represent "roughly" the same thing, they should have
-   the same name so that actions executed in one can be reused for the other if
-   the command lines match: for example, changes to the command line options to
-   the Java compiler should not result in C++ compile actions being re-run.
-
-So far, we have not come up with a principled way of solving this problem, which
-has similarities to the problem of configuration trimming. A longer discussion
-of options is available
-[here](https://docs.google.com/document/d/1fZI7wHoaS-vJvZy9SBxaHPitIzXE_nL9v4sS4mErrG4/edit).
-The main problematic areas are Starlark rules (whose authors usually aren't
-intimately familiar with Bazel) and aspects, which add another dimension to the
-space of things that can produce the "same" output file.
-
-The current approach is that the path segment for the configuration is
-`<CPU>-<compilation mode>` with various suffixes added so that configuration
-transitions implemented in Java don't result in action conflicts. In addition, a
-checksum of the set of Starlark configuration transitions is added so that users
-can't cause action conflicts. It is far from perfect. This is implemented in
-`OutputDirectories.buildMnemonic()` and relies on each configuration fragment
-adding its own part to the name of the output directory.
-
-## Tests
-
-Bazel has rich support for running tests. It supports:
-
-* Running tests remotely (if a remote execution backend is available)
-* Running tests multiple times in parallel (for deflaking or gathering timing
-  data)
-* Sharding tests (splitting test cases in the same test over multiple processes
-  for speed)
-* Re-running flaky tests
-* Grouping tests into test suites
-
-Tests are regular configured targets that have a `TestProvider`, which describes
-how the test should be run:
-
-* The artifacts whose building results in the test being run. This is a "cache
-  status" file that contains a serialized `TestResultData` message
-* The number of times the test should be run
-* The number of shards the test should be split into
-* Some parameters about how the test should be run (such as the test timeout)
-
-### Determining which tests to run
-
-Determining which tests are run is an elaborate process.
-
-First, during target pattern parsing, test suites are recursively expanded. The
-expansion is implemented in `TestsForTargetPatternFunction`. A somewhat
-surprising wrinkle is that if a test suite declares no tests, it refers to
-_every_ test in its package. This is implemented in `Package.beforeBuild()` by
-adding an implicit attribute called `$implicit_tests` to test suite rules, as
-the sketch below illustrates.
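-
-As an illustration, in a hypothetical package that contains exactly the tests
-`:a_test` and `:b_test`, the two suites below expand differently:
-
-```python
-test_suite(
-    name = "explicit",
-    tests = [":a_test"],  # expands to exactly :a_test
-)
-
-test_suite(
-    name = "implicit",
-    # No tests declared: expands to every test in this package
-    # (:a_test and :b_test) via the implicit $implicit_tests attribute.
-)
-```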
-
-Then, tests are filtered for size, tags, timeout and language according to the
-command line options. This is implemented in `TestFilter` and is called from
-`TargetPatternPhaseFunction.determineTests()` during target parsing and the
-result is put into `TargetPatternPhaseValue.getTestsToRunLabels()`. The reason
-why rule attributes which can be filtered for are not configurable is that this
-happens before the analysis phase, therefore, the configuration is not
-available.
-
-This is then processed further in `BuildView.createResult()`: targets whose
-analysis failed are filtered out and tests are split into exclusive and
-non-exclusive tests. It's then put into `AnalysisResult`, which is how
-`ExecutionTool` knows which tests to run.
-
-In order to lend some transparency to this elaborate process, the `tests()`
-query operator (implemented in `TestsFunction`) is available to tell which tests
-are run when a particular target is specified on the command line. It's
-unfortunately a reimplementation, so it probably deviates from the above in
-multiple subtle ways.
-
-### Running tests
-
-The way the tests are run is by requesting cache status artifacts. This then
-results in the execution of a `TestRunnerAction`, which eventually calls the
-`TestActionContext` chosen by the `--test_strategy` command line option that
-runs the test in the requested way.
-
-Tests are run according to an elaborate protocol that uses environment variables
-to tell tests what's expected from them. A detailed description of what Bazel
-expects from tests and what tests can expect from Bazel is available
-[here](/reference/test-encyclopedia). At its
-simplest, an exit code of 0 means success, anything else means failure.
-
-In addition to the cache status file, each test process emits a number of other
-files. They are put in the "test log directory" which is the subdirectory called
-`testlogs` of the output directory of the target configuration:
-
-* `test.xml`, a JUnit-style XML file detailing the individual test cases in
-  the test shard
-* `test.log`, the console output of the test. stdout and stderr are not
-  separated.
-* `test.outputs`, the "undeclared outputs directory"; this is used by tests
-  that want to output files in addition to what they print to the terminal.
-
-There are two things that can happen during test execution that cannot during
-building regular targets: exclusive test execution and output streaming.
-
-Some tests need to be executed in exclusive mode, that is, not in parallel with
-other tests. This can be elicited either by adding `tags=["exclusive"]` to the
-test rule or running the test with `--test_strategy=exclusive`. Each exclusive
-test is run by a separate Skyframe invocation requesting the execution of the
-test after the "main" build. This is implemented in
-`SkyframeExecutor.runExclusiveTest()`.
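-
-For example, a hypothetical test that must never overlap with other tests could
-be declared like this:
-
-```python
-sh_test(
-    name = "db_integration_test",
-    srcs = ["db_integration_test.sh"],
-    # Runs in its own Skyframe invocation after the main build,
-    # never in parallel with other tests.
-    tags = ["exclusive"],
-)
-```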
-
-Unlike regular actions, whose terminal output is dumped when the action
-finishes, the user can request the output of tests to be streamed so that they
-get informed about the progress of a long-running test. This is specified by the
-`--test_output=streamed` command line option and implies exclusive test
-execution so that outputs of different tests are not interspersed.
-
-This is implemented in the aptly-named `StreamedTestOutput` class and works by
-polling changes to the `test.log` file of the test in question and dumping new
-bytes to the terminal where Bazel runs.
-
-Results of the executed tests are available on the event bus by observing
-various events (such as `TestAttempt`, `TestResult` or `TestingCompleteEvent`).
-They are dumped to the Build Event Protocol and they are emitted to the console
-by `AggregatingTestListener`.
-
-### Coverage collection
-
-Coverage is reported by the tests in LCOV format in the files
-`bazel-testlogs/$PACKAGE/$TARGET/coverage.dat`.
-
-To collect coverage, each test execution is wrapped in a script called
-`collect_coverage.sh`.
-
-This script sets up the environment of the test to enable coverage collection
-and to determine where the coverage files are written by the coverage
-runtime(s). It then runs the test. A test may itself run multiple subprocesses
-and consist of parts written in multiple different programming languages (with
-separate coverage collection runtimes). The wrapper script is responsible for
-converting the resulting files to LCOV format if necessary, and for merging them
-into a single file.
-
-The interposition of `collect_coverage.sh` is done by the test strategies and
-requires `collect_coverage.sh` to be on the inputs of the test. This is
-accomplished by the implicit attribute `:coverage_support`, which is resolved to
-the value of the configuration flag `--coverage_support` (see
-`TestConfiguration.TestOptions.coverageSupport`).
-
-Some languages do offline instrumentation, meaning that the coverage
-instrumentation is added at compile time (such as C++), and others do online
-instrumentation, meaning that coverage instrumentation is added at execution
-time.
-
-Another core concept is _baseline coverage_. This is the coverage of a library,
-binary, or test if no code in it was run. The problem it solves is that if you
-want to compute the test coverage for a binary, it is not enough to merge the
-coverage of all of the tests because there may be code in the binary that is not
-linked into any test. Therefore, what we do is to emit a coverage file for every
-binary which contains only the files we collect coverage for with no covered
-lines. The baseline coverage file for a target is at
-`bazel-testlogs/$PACKAGE/$TARGET/baseline_coverage.dat`. It is also generated
-for binaries and libraries in addition to tests if you pass the
-`--nobuild_tests_only` flag to Bazel.
-
-Baseline coverage is currently broken.
-
-We track two groups of files for coverage collection for each rule: the set of
-instrumented files and the set of instrumentation metadata files.
-
-The set of instrumented files is just that, a set of files to instrument. For
-online coverage runtimes, this can be used at runtime to decide which files to
-instrument. It is also used to implement baseline coverage.
-
-The set of instrumentation metadata files is the set of extra files a test needs
-to generate the LCOV files Bazel requires from it. In practice, this consists of
-runtime-specific files; for example, gcc emits .gcno files during compilation.
-These are added to the set of inputs of test actions if coverage mode is
-enabled.
-
-Whether or not coverage is being collected is stored in the
-`BuildConfiguration`. This is handy because it is an easy way to change the test
-action and the action graph depending on this bit, but it also means that if
-this bit is flipped, all targets need to be re-analyzed (some languages, such as
-C++, require different compiler options to emit code that can collect coverage,
-which mitigates this issue somewhat, since then a re-analysis is needed anyway).
-
-The coverage support files are depended on through labels in an implicit
-dependency so that they can be overridden by the invocation policy, which allows
-them to differ between the different versions of Bazel. Ideally, these
-differences would be removed, and we would standardize on one of them.
-
-We also generate a "coverage report" which merges the coverage collected for
-every test in a Bazel invocation. This is handled by
-`CoverageReportActionFactory` and is called from `BuildView.createResult()`.
-It gets access to the tools it needs by looking at the
-`:coverage_report_generator` attribute of the first test that is executed.
-
-## The query engine
-
-Bazel has a
-[little language](/docs/query-how-to)
-used to ask it various things about various graphs. The following query kinds
-are provided:
-
-* `bazel query` is used to investigate the target graph
-* `bazel cquery` is used to investigate the configured target graph
-* `bazel aquery` is used to investigate the action graph
-
-Each of these is implemented by subclassing `AbstractBlazeQueryEnvironment`.
-Additional query functions can be implemented by subclassing `QueryFunction`.
-In order to allow streaming query results, instead of collecting them to some
-data structure, a `query2.engine.Callback` is passed to `QueryFunction`, which
-calls it for results it wants to return.
-
-The result of a query can be emitted in various ways: labels, labels and rule
-classes, XML, protobuf and so on. These are implemented as subclasses of
-`OutputFormatter`.
-
-A subtle requirement of some query output formats (proto, definitely) is that
-Bazel needs to emit _all_ the information that package loading provides so that
-one can diff the output and determine whether a particular target has changed.
-As a consequence, attribute values need to be serializable, which is why there
-are only so few attribute types without any attributes having complex Starlark
-values. The usual workaround is to use a label, and attach the complex
-information to the rule with that label. It's not a very satisfying workaround
-and it would be very nice to lift this requirement.
-
-## The module system
-
-Bazel can be extended by adding modules to it. Each module must subclass
-`BlazeModule` (the name is a relic of the history of Bazel when it used to be
-called Blaze) and gets information about various events during the execution of
-a command.
-
-They are mostly used to implement various pieces of "non-core" functionality
-that only some versions of Bazel (such as the one we use at Google) need:
-
-* Interfaces to remote execution systems
-* New commands
-
-The set of extension points `BlazeModule` offers is somewhat haphazard. Don't
-use it as an example of good design principles.
-
-## The event bus
-
-The main way `BlazeModule`s communicate with the rest of Bazel is by an event
-bus (`EventBus`): a new instance is created for every build, various parts of
-Bazel can post events to it and modules can register listeners for the events
-they are interested in. For example, the following things are represented as
-events:
-
-* The list of build targets to be built has been determined
-  (`TargetParsingCompleteEvent`)
-* The top-level configurations have been determined
-  (`BuildConfigurationEvent`)
-* A target was built, successfully or not (`TargetCompleteEvent`)
-* A test was run (`TestAttempt`, `TestSummary`)
-
-Some of these events are represented outside of Bazel in the
-[Build Event Protocol](/remote/bep)
-(they are `BuildEvent`s). This allows not only `BlazeModule`s, but also things
-outside the Bazel process to observe the build. They are accessible either as a
-file that contains protocol messages or Bazel can connect to a server (called
-the Build Event Service) to stream events.
-
-This is implemented in the `build.lib.buildeventservice` and
-`build.lib.buildeventstream` Java packages.
-
-## External repositories
-
-Whereas Bazel was originally designed to be used in a monorepo (a single source
-tree containing everything one needs to build), Bazel lives in a world where
-this is not necessarily true. "External repositories" are an abstraction used to
-bridge these two worlds: they represent code that is necessary for the build but
-is not in the main source tree.
-
-### The WORKSPACE file
-
-The set of external repositories is determined by parsing the WORKSPACE file.
-For example, a declaration like this:
-
-```
-local_repository(name = "foo", path = "/foo/bar")
-```
-
-results in the repository called `@foo` being available. Where this gets
-complicated is that one can define new repository rules in Starlark files, which
-can then be used to load new Starlark code, which can be used to define new
-repository rules and so on…
-
-To handle this case, the parsing of the WORKSPACE file (in
-`WorkspaceFileFunction`) is split up into chunks delineated by `load()`
-statements. The chunk index is indicated by `WorkspaceFileKey.getIndex()` and
-computing `WorkspaceFileFunction` until index X means evaluating it until the
-Xth `load()` statement.
-
-### Fetching repositories
-
-Before the code of the repository is available to Bazel, it needs to be
-_fetched_. This results in Bazel creating a directory under
-`$OUTPUT_BASE/external/`.
-
-Fetching the repository happens in the following steps:
-
-1. `PackageLookupFunction` realizes that it needs a repository and creates a
-   `RepositoryName` as a `SkyKey`, which invokes `RepositoryLoaderFunction`
-2. `RepositoryLoaderFunction` forwards the request to
-   `RepositoryDelegatorFunction` for unclear reasons (the code says it's to
-   avoid re-downloading things in case of Skyframe restarts, but it's not
-   very solid reasoning)
-3. `RepositoryDelegatorFunction` finds out the repository rule it's asked to
-   fetch by iterating over the chunks of the WORKSPACE file until the requested
-   repository is found
-4. The appropriate `RepositoryFunction` is found that implements the repository
-   fetching; it's either the Starlark implementation of the repository (a
-   sketch of one follows below) or a hard-coded map for repositories that are
-   implemented in Java.
-
-There are various layers of caching since fetching a repository can be very
-expensive:
-
-1. There is a cache for downloaded files that is keyed by their checksum
-   (`RepositoryCache`). This requires the checksum to be available in the
-   WORKSPACE file, but that's good for hermeticity anyway. This is shared by
-   every Bazel server instance on the same workstation, regardless of which
-   workspace or output base they are running in.
-2. A "marker file" is written for each repository under `$OUTPUT_BASE/external`
-   that contains a checksum of the rule that was used to fetch it. If the Bazel
-   server restarts but the checksum does not change, it's not re-fetched. This
-   is implemented in `RepositoryDelegatorFunction.DigestWriter`.
-3. The `--distdir` command line option designates another cache that is used to
-   look up artifacts to be downloaded. This is useful in enterprise settings
-   where Bazel should not fetch random things from the Internet. This is
-   implemented by `DownloadManager`.
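-
-As for the Starlark implementation mentioned in step 4 above, a bare-bones
-custom repository rule could look like the following sketch (the rule name and
-the file contents are invented):
-
-```python
-def _hello_repo_impl(repository_ctx):
-    # Everything created here ends up in the repository's directory
-    # under $OUTPUT_BASE/external/.
-    repository_ctx.file("hello.txt", "Hello from an external repository!\n")
-    repository_ctx.file("BUILD", 'exports_files(["hello.txt"])\n')
-
-hello_repo = repository_rule(implementation = _hello_repo_impl)
-```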
-
-Once a repository is downloaded, the artifacts in it are treated as source
-artifacts. This poses a problem because Bazel usually checks for up-to-dateness
-of source artifacts by calling `stat()` on them, and these artifacts are also
-invalidated when the definition of the repository they are in changes. Thus,
-`FileStateValue`s for an artifact in an external repository need to depend on
-their external repository. This is handled by `ExternalFilesHelper`.
-
-### Managed directories
-
-Sometimes, external repositories need to modify files under the workspace root
-(such as a package manager that houses the downloaded packages in a subdirectory
-of the source tree). This is at odds with the assumption Bazel makes that source
-files are only modified by the user and not by itself, and with the fact that
-packages are allowed to refer to every directory under the workspace root. In
-order to make this kind of external repository work, Bazel does two things:
-
-1. Allows the user to specify subdirectories of the workspace Bazel is not
-   allowed to reach into. They are listed in a file called `.bazelignore` and
-   the functionality is implemented in `BlacklistedPackagePrefixesFunction`.
-2. We encode the mapping from the subdirectory of the workspace to the external
-   repository it is handled by into `ManagedDirectoriesKnowledge` and handle
-   `FileStateValue`s referring to them in the same way as those for regular
-   external repositories.
-
-### Repository mappings
-
-It can happen that multiple repositories want to depend on the same repository,
-but in different versions (this is an instance of the "diamond dependency
-problem"). For example, if two binaries in separate repositories in the build
-want to depend on Guava, they will presumably both refer to Guava with labels
-starting with `@guava//` and expect that to mean different versions of it.
-
-Therefore, Bazel allows one to re-map external repository labels so that the
-string `@guava//` can refer to one Guava repository (such as `@guava1//`) in the
-repository of one binary and another Guava repository (such as `@guava2//`) in
-the repository of the other.
-
-Alternatively, this can also be used to **join** diamonds. If a repository
-depends on `@guava1//`, and another depends on `@guava2//`, repository mapping
-allows one to re-map both repositories to use a canonical `@guava//` repository.
-
-The mapping is specified in the WORKSPACE file as the `repo_mapping` attribute
-of individual repository definitions (a short example follows the list below).
-It then appears in Skyframe as a member of `WorkspaceFileValue`, where it is
-plumbed to:
-
-* `Package.Builder.repositoryMapping`, which is used to transform label-valued
-  attributes of rules in the package by
-  `RuleClass.populateRuleAttributeValues()`
-* `Package.repositoryMapping`, which is used in the analysis phase (for
-  resolving things like `$(location)` which are not parsed in the loading
-  phase)
-* `BzlLoadFunction` for resolving labels in load() statements
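-
-As a sketch, a hypothetical WORKSPACE entry that performs such a re-mapping
-might read:
-
-```python
-local_repository(
-    name = "binary1",
-    path = "../binary1",
-    # Within @binary1, labels starting with @guava// resolve to @guava1//.
-    repo_mapping = {"@guava": "@guava1"},
-)
-```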
-
-## JNI bits
-
-The server of Bazel is _mostly_ written in Java. The exceptions are the parts
-that Java cannot do by itself, or couldn't do by itself when we implemented
-them. This is mostly limited to interaction with the file system, process
-control and various other low-level things.
-
-The C++ code lives under `src/main/native` and the Java classes with native
-methods are:
-
-* `NativePosixFiles` and `NativePosixFileSystem`
-* `ProcessUtils`
-* `WindowsFileOperations` and `WindowsFileProcesses`
-* `com.google.devtools.build.lib.platform`
-
-## Console output
-
-Emitting console output seems like a simple thing, but the confluence of running
-multiple processes (sometimes remotely), fine-grained caching, the desire to
-have a nice and colorful terminal output and having a long-running server makes
-it non-trivial.
-
-Right after the RPC call comes in from the client, two `RpcOutputStream`
-instances are created (for stdout and stderr) that forward the data printed into
-them to the client. These are then wrapped in an `OutErr` (an (stdout, stderr)
-pair). Anything that needs to be printed on the console goes through these
-streams. Then these streams are handed over to
-`BlazeCommandDispatcher.execExclusively()`.
-
-Output is by default printed with ANSI escape sequences. When these are not
-desired (`--color=no`), they are stripped by an `AnsiStrippingOutputStream`. In
-addition, `System.out` and `System.err` are redirected to these output streams.
-This is so that debugging information can be printed using
-`System.err.println()` and still end up in the terminal output of the client
-(which is different from that of the server). Care is taken that if a process
-produces binary output (such as `bazel query --output=proto`), no munging of
-stdout takes place.
-
-Short messages (errors, warnings and the like) are expressed through the
-`EventHandler` interface. Notably, these are different from what one posts to
-the `EventBus` (this is confusing). Each `Event` has an `EventKind` (error,
-warning, info, and a few others) and they may have a `Location` (the place in
-the source code that caused the event to happen).
-
-Some `EventHandler` implementations store the events they received. This is used
-to replay information to the UI caused by various kinds of cached processing,
-for example, the warnings emitted by a cached configured target.
-
-Some `EventHandler`s also allow posting events that eventually find their way to
-the event bus (regular `Event`s do _not_ appear there). These are
-implementations of `ExtendedEventHandler` and their main use is to replay cached
-`EventBus` events. These `EventBus` events all implement `Postable`, but not
-everything that is posted to `EventBus` necessarily implements this interface;
-only those that are cached by an `ExtendedEventHandler` (it would be nice, and
-most of them do, but it's not enforced).
-
-Terminal output is _mostly_ emitted through `UiEventHandler`, which is
-responsible for all the fancy output formatting and progress reporting Bazel
-does. It has two inputs:
-
-* The event bus
-* The event stream piped into it through `Reporter`
-
-The only direct connection the command execution machinery (that is, the rest of
-Bazel) has to the RPC stream to the client is through `Reporter.getOutErr()`,
-which allows direct access to these streams. It's only used when a command needs
-to dump large amounts of possibly binary data (such as `bazel query`).
-
-## Profiling Bazel
-
-Bazel is fast. Bazel is also slow, because builds tend to grow until just the
-edge of what's bearable. For this reason, Bazel includes a profiler which can be
-used to profile builds and Bazel itself. It's implemented in a class that's
-aptly named `Profiler`. It's turned on by default, although it records only
-abridged data so that its overhead is tolerable; the command line option
-`--record_full_profiler_data` makes it record everything it can.
-
-It emits a profile in the Chrome profiler format; it's best viewed in Chrome.
-Its data model is that of task stacks: one can start tasks and end tasks and
-they are supposed to be neatly nested within each other. Each Java thread gets
-its own task stack. **TODO:** How does this work with actions and
-continuation-passing style?
-
-The profiler is started and stopped in `BlazeRuntime.initProfiler()` and
-`BlazeRuntime.afterCommand()` respectively and attempts to be live for as long
-as possible so that we can profile everything. To add something to the profile,
-call `Profiler.instance().profile()`. It returns a `Closeable`, whose closure
-represents the end of the task. It's best used with try-with-resources
-statements.
-
-We also do rudimentary memory profiling in `MemoryProfiler`. It's also always on
-and it mostly records maximum heap sizes and GC behavior.
-
-## Testing Bazel
-
-Bazel has two main kinds of tests: ones that observe Bazel as a "black box" and
-ones that only run the analysis phase. We call the former "integration tests"
-and the latter "unit tests", although they are more like integration tests that
-are, well, less integrated. We also have some actual unit tests, where they are
-necessary.
-
-Of integration tests, we have two kinds:
-
-1. Ones implemented using a very elaborate bash test framework under
-   `src/test/shell`
-2. Ones implemented in Java. These are implemented as subclasses of
-   `BuildIntegrationTestCase`
-
-`BuildIntegrationTestCase` is the preferred integration testing framework as it
-is well-equipped for most testing scenarios. As it is a Java framework, it
-provides debuggability and seamless integration with many common development
-tools. There are many examples of `BuildIntegrationTestCase` classes in the
-Bazel repository.
-
-Analysis tests are implemented as subclasses of `BuildViewTestCase`. There is a
-scratch file system you can use to write `BUILD` files, then various helper
-methods can request configured targets, change the configuration and assert
-various things about the result of the analysis.
diff --git a/6.5.0/contribute/design-documents.mdx b/6.5.0/contribute/design-documents.mdx
deleted file mode 100644
index 79c6fc4..0000000
--- a/6.5.0/contribute/design-documents.mdx
+++ /dev/null
@@ -1,253 +0,0 @@
----
-title: 'Design Documents'
----
-
-
-If you're planning to add, change, or remove a user-facing feature, or make a
-*significant architectural change* to Bazel, you **must** write a design
-document and have it reviewed before you can submit the change.
-
-Here are some examples of significant changes:
-
-* Addition or deletion of native build rules
-* Breaking changes to native rules
-* Changes to native build rule semantics that affect the behavior of more
-  than a single rule
-* Changes to Bazel's rule definition API
-* Changes to the APIs that Bazel uses to connect to other systems
-* Changes to the Starlark language, semantics, or APIs
-* Changes that could have a pervasive effect on Bazel performance or memory
-  usage (for better or for worse)
-* Changes to widely used internal APIs
-* Changes to flags and the command-line interface.
-
-## Reasons for design reviews
-
-When you write a design document, you can coordinate with other Bazel developers
-and seek guidance from Bazel's core team. For example, when a proposal adds,
-removes, or modifies any function or object available in BUILD, WORKSPACE, or
-bzl files, add the [Starlark team](maintainers-guide.md) as reviewers.
-Design documents are reviewed before submission because:
-
-* Bazel is a very complex system; seemingly innocuous local changes can have
-  significant global consequences.
-* The team gets many feature requests from users; such requests need to be
-  evaluated not only for technical feasibility but for importance with regard
-  to other feature requests.
-* Bazel features are frequently implemented by people outside the core team;
-  such contributors have widely varying levels of Bazel expertise.
-* The Bazel team itself has varying levels of expertise; no single team member
-  has a complete understanding of every corner of Bazel.
-* Changes to Bazel must account for backward compatibility and avoid breaking
-  changes.
-
-Bazel's design review policy helps to maximize the likelihood that:
-
-* all feature requests get a baseline level of scrutiny.
-* the right people will weigh in on designs before we've invested in an
-  implementation that may not work.
-
-To help you get started, take a look at the design documents in the
-[Bazel Proposals Repository](https://github.com/bazelbuild/proposals).
-Designs are works in progress, so implementation details can change over time
-and with feedback. The published design documents capture the initial design,
-and *not* the ongoing changes as designs are implemented. Always go to the
-documentation for descriptions of current Bazel functionality.
-
-## Contributor Workflow
-
-As a contributor, you can write a design document, send pull requests and
-request reviewers for your proposal.
-
-### Write the design document
-
-All design documents must have a header that includes:
-
-* author
-* date of last major change
-* list of reviewers, including one (and only one)
-  [lead reviewer](#lead-reviewer)
-* current status (_draft_, _in review_, _approved_, _rejected_,
-  _being implemented_, _implemented_)
-* link to discussion thread (_to be added after the announcement_)
-
-The document can be written either [as a world-readable Google Doc](#gdocs)
-or [using Markdown](#markdown). Read below for a
-[Markdown / Google Docs comparison](#markdown-versus-gdocs).
-
-Proposals that have a user-visible impact must have a section documenting the
-impact on backward compatibility (and a rollout plan if needed).
-
-### Create a Pull Request
-
-Share your design doc by creating a pull request (PR) to add the document to
-[the design index](https://github.com/bazelbuild/proposals). Add
-your markdown file or a document link to your PR.
-
-When possible, [choose a lead reviewer](#lead-reviewer)
-and cc other reviewers. If you don't choose a lead reviewer, a Bazel
-maintainer will assign one to your PR.
-
-After you create your PR, reviewers can make preliminary comments during the
-code review. For example, the lead reviewer can suggest extra reviewers, or
-point out missing information. The lead reviewer approves the PR when they
-believe the review process can start. This doesn't mean the proposal is perfect
-or will be approved; it means that the proposal contains enough information to
-start the discussion.
-
-### Announce the new proposal
-
-Send an announcement to
-[bazel-dev](https://groups.google.com/forum/#!forum/bazel-dev) when
-the PR is submitted.
-
-You may copy other groups (for example,
-[bazel-discuss](https://groups.google.com/forum/#!forum/bazel-discuss))
-to get feedback from Bazel end-users.
-
-### Iterate with reviewers
-
-Anyone interested can comment on your proposal. Try to answer questions,
-clarify the proposal, and address concerns.
-
-Discussion should happen on the announcement thread. If the proposal is in a
-Google Doc, comments may be used instead (note that anonymous comments are
-allowed).
-
-### Update the status
-
-When iteration is complete, create a new PR to update the status of the
-proposal. Send the PR to the same lead reviewer and cc the other reviewers.
-
-To officially accept the proposal, the lead reviewer approves the PR after
-ensuring that the other reviewers agree with the decision.
-
-There must be at least 1 week between the first announcement and the approval of
-a proposal. This ensures that users have enough time to read the document and
-share their concerns.
-
-Implementation can begin before the proposal is accepted, for example as a
-proof of concept or an experiment. However, you cannot submit the change
-before the review is complete.
-
-### Choosing a lead reviewer
-
-A lead reviewer should be a domain expert who is:
-
-* Knowledgeable of the relevant subsystems
-* Objective and capable of providing constructive feedback
-* Available for the entire review period to lead the process
-
-Consider checking the contacts for various [team
-labels](/contribute/maintainers-guide#team-labels).
-
-## Markdown vs Google Docs
-
-Decide what works best for you, since both are accepted.
-
-Benefits of using Google Docs:
-
-* Effective for brainstorming, since it is easy to get started with.
-* Collaborative editing.
-* Quick iteration.
-* Easy way to suggest edits.
-
-Benefits of using Markdown files:
-
-* Clean URLs for linking.
-* Explicit record of revisions.
-* No forgetting to set up access rights before publicizing a link.
-* Easily searchable with search engines.
-* Future-proof: Plain text is not at the mercy of any specific tool
-  and doesn't require an Internet connection.
-* It is possible to update them even if the author is not around anymore.
-* They can be processed automatically (update/detect dead links, fetch
-  list of authors, etc.).
-
-You can choose to first iterate on a Google Doc, and then convert it to
-Markdown for posterity.
-
-### Using Google Docs
-
-For consistency, use the [Bazel design doc template](
-https://docs.google.com/document/d/1cE5zrjrR40RXNg64XtRFewSv6FrLV6slGkkqxBumS1w/edit).
-It includes the necessary header and creates visual
-consistency with other Bazel related documents. To do that, click on **File** >
-**Make a copy** or click this link to [make a copy of the design doc
-template](https://docs.google.com/document/d/1cE5zrjrR40RXNg64XtRFewSv6FrLV6slGkkqxBumS1w/copy).
-
-To make your document readable to the world, click on
-**Share** > **Advanced** > **Change…**, and
-choose "On - Anyone with the link". If you allow comments on the document,
-anyone can comment anonymously, even without a Google account.
-
-### Using Markdown
-
-Documents are stored on GitHub and use the
-[GitHub flavor of Markdown](https://guides.github.com/features/mastering-markdown/)
-([Specification](https://github.github.com/gfm/)).
-
-Create a PR to update an existing document. Significant changes should be
-reviewed by the document reviewers. Trivial changes (such as typos, formatting)
-can be approved by anyone.
-
-## Reviewer workflow
-
-A reviewer comments, reviews and approves design documents.
-
-### General reviewer responsibilities
-
-You're responsible for reviewing design documents, asking for additional
-information if needed, and approving a design that passes the review process.
-
-#### When you receive a new proposal
-
-1. Take a quick look at the document.
-1. Comment if critical information is missing, or if the design doesn't fit
-   with the goals of the project.
-1. Suggest additional reviewers.
-1. Approve the PR when it is ready for review.
-
-#### During the review process
-
-1. Engage in a dialogue with the design author about issues that are problematic
-   or require clarification.
-1. If appropriate, invite comments from non-reviewers who should be aware of
-   the design.
-1. Decide which comments must be addressed by the author as a prerequisite to
-   approval.
-1. Write "LGTM" (_Looks Good To Me_) in the discussion thread when you are
-   happy with the current state of the proposal.
-
-Follow this process for all design review requests. Do not approve designs
-affecting Bazel if they are not in the
-[design index](https://github.com/bazelbuild/proposals).
-
-### Lead reviewer responsibilities
-
-You're responsible for making the go / no-go decision on implementation
-of a pending design. If you're not able to do this, you should identify a
-suitable delegate (reassign the PR to the delegate), or reassign the bug to a
-Bazel manager for further disposition.
-
-#### During the review process
-
-1. Ensure that the comment and design iteration process moves forward
-   constructively.
-1. Prior to approval, ensure that concerns from other reviewers have been
-   resolved.
-
-#### After approval by all reviewers
-
-1. Make sure there has been at least 1 week since the announcement on the
-   mailing list.
-1. Make sure the PR updates the status.
-1. Approve the PR sent by the proposal author.
-
-#### Rejecting designs
-
-1. Make sure the design author sends a PR that updates the status of the
-   document, or send them one.
-1. Add a comment to the document explaining why the design can't be approved in
-   its current state, and outlining next steps, if any (such as "revisit invalid
-   assumptions and resubmit").
diff --git a/6.5.0/contribute/docs-style-guide.mdx b/6.5.0/contribute/docs-style-guide.mdx
deleted file mode 100644
index 5255a70..0000000
--- a/6.5.0/contribute/docs-style-guide.mdx
+++ /dev/null
@@ -1,216 +0,0 @@
----
-title: 'Bazel docs style guide'
----
-
-
-Thank you for contributing to Bazel's documentation. This serves as a quick
-documentation style guide to get you started. For any style questions not
-answered by this guide, follow the
-[Google developer documentation style guide](https://developers.google.com/style).
-
-## Defining principles
-
-Bazel docs should uphold these principles:
-
-- **Concise.** Use as few words as possible.
-- **Clear.** Use plain language. Write without jargon for a fifth-grade
-  reading level.
-- **Consistent.** Use the same words or phrases for repeated concepts
-  throughout the docs.
-- **Correct.** Write in a way where the content stays correct for as long as
-  possible by avoiding time-based information and promises for the future.
-
-## Writing
-
-This section contains basic writing tips.
-
-### Headings
-
-- Page-level headings start at H2. (H1 headings are used as page titles.)
-- Make headers as short as is sensible. This way, they fit in the TOC
-  without wrapping.
-
-  - Yes: Permissions
-  - No: A brief note on permissions
-
-- Use sentence case for headings.
-
-  - Yes: Set up your workspace
-  - No: Set Up Your Workspace
-
-- Try to make headings task-based or actionable. If a heading is conceptual,
-  it may be based around understanding, but write to what the user does.
-
-  - Yes: Preserving graph order
-  - No: On the preservation of graph order
-
-### Names
-
-- Capitalize proper nouns, such as Bazel and Starlark.
-
-  - Yes: At the end of the build, Bazel prints the requested targets.
-  - No: At the end of the build, bazel prints the requested targets.
-
-- Keep it consistent. Don't introduce new names for existing concepts. Where
-  applicable, use the term defined in the
-  [Glossary](/reference/glossary).
-
-  - For example, if you're writing about issuing commands on a
-    terminal, don't use both terminal and command line on the page.
-
-### Page scope
-
-- Each page should have one purpose and that should be defined at the
-  beginning. This helps readers find what they need quicker.
-
-  - Yes: This page covers how to install Bazel on Windows.
-  - No: (No introductory sentence.)
-
-- At the end of the page, tell the reader what to do next. For pages where
-  there is no clear action, you can include links to similar concepts,
-  examples, or other avenues for exploration.
-
-### Subject
-
-In Bazel documentation, the audience should primarily be users: the people
-using Bazel to build their software.
-
-- Address your reader as "you". (If for some reason you can't use "you",
-  use gender-neutral language, such as they.)
-
-  - Yes: To build Java code using Bazel, you must install a JDK.
-  - **MAYBE:** For users to build Java code with Bazel, they must install a
-    JDK.
-  - No: For a user to build Java code with Bazel, he or she must install a
-    JDK.
-
-- If your audience is NOT general Bazel users, define the audience at the
-  beginning of the page or in the section. Other audiences can include
-  maintainers, contributors, migrators, or other roles.
-- Avoid "we". In user docs, there is no author; just tell people what's
-  possible.
-
-  - Yes: As Bazel evolves, you should update your code base to maintain
-    compatibility.
-  - No: Bazel is evolving, and we will make changes to Bazel that at
-    times will be incompatible and require some changes from Bazel users.
-
-### Temporal
-
-Where possible, avoid terms that orient things in time, such as referencing
-specific dates (Q2 2022) or saying "now", "currently", or "soon." These go
-stale quickly and could be incorrect if it's a future projection. Instead,
-specify a version level, such as "Bazel X.x and higher supports \<feature\>",
-or a GitHub issue link.
-
-- Yes: Bazel 0.10.0 or later supports remote caching.
-- No: Bazel will soon support remote caching, likely in October 2017.
-
-### Tense
-
-- Use present tense. Avoid past or future tense unless absolutely necessary
-  for clarity.
-
-  - Yes: Bazel issues an error when it finds dependencies that don't conform
-    to this rule.
-  - No: If Bazel finds a dependency that does not conform to this rule, Bazel
-    will issue an error.
-
-- Where possible, use active voice (where a subject acts upon an object) not
-  passive voice (where an object is acted upon by a subject). Generally,
-  active voice makes sentences clearer because it shows who is responsible. If
-  using active voice detracts from clarity, use passive voice.
-
-  - Yes: Bazel initiates X and uses the output to build Y.
-  - No: X is initiated by Bazel and then afterward Y will be built with the
-    output.
-
-### Tone
-
-Write with a business-friendly tone.
-
-- Avoid colloquial language. It's harder to translate phrases that are
-  specific to English.
-
-  - Yes: Good rulesets
-  - No: So what is a good ruleset?
-
-- Avoid overly formal language. Write as though you're explaining the
-  concept to someone who is curious about tech, but doesn't know the details.
-
-## Formatting
-
-### File type
-
-For readability, wrap lines at 80 characters. Long links or code snippets
-may be longer, but should start on a new line.
-
-Note: Where possible, use Markdown instead of HTML in your files. Follow the
-[GitHub Markdown Syntax Guide](https://guides.github.com/features/mastering-markdown/#syntax)
-for recommended Markdown style.
-
-### Links
-
-- Use descriptive link text instead of "here" or "below". This practice
-  makes it easier to scan a doc and is better for screen readers.
-
-  - Yes: For more details, see [Installing Bazel].
-  - No: For more details, see [here].
-
-- End the sentence with the link, if possible.
-
-  - Yes: For more details, see [link].
-  - No: See [link] for more information.
-
-### Lists
-
-- Use an ordered list to describe how to accomplish a task with steps.
-- Use an unordered list to list things that aren't task based. (There should
-  still be an order of sorts, such as alphabetical, importance, etc.)
-- Write with parallel structure. For example:
-  1. Make all the list items sentences.
-  1. Start with verbs that are the same tense.
-  1. Use an ordered list if there are steps to follow.
-
-### Placeholders
-
-- Use angle brackets to denote a variable that users should change.
-  In Markdown, escape the angle brackets with a backslash: `\<example\>`.
-
-  - Yes: `bazel help <command>`: Prints help and options for `<command>`
-  - No: bazel help _command_: Prints help and options for "command"
-
-- Especially for complicated code samples, use placeholders that make sense
-  in context.
-
-### Table of contents
-
-Use the auto-generated TOC supported by the site. Don't add a manual TOC.
-
-## Code
-
-Code samples are developers' best friends. You probably know how to write these
-already, but here are a few tips.
-
-If you're referencing a small snippet of code, you can embed it in a sentence.
-If you want the reader to use the code, such as copying a command, use a code
-block.
-
-### Code blocks
-
-- Keep it short. Eliminate all redundant or unnecessary text from a code
-  sample.
-- In Markdown, specify the type of code block by adding the sample's language:
-
-```shell
-...
-```
-
-- Separate commands and output into different code blocks.
-
-### Inline code formatting
-
-- Use code style for filenames, directories, paths, and small bits of code.
-- Use inline code styling instead of _italics_, "quotes," or **bolding**.
-
-  - Yes: `bazel help <command>`: Prints help and options for `<command>`
-  - No: bazel help _command_: Prints help and options for "command"
diff --git a/6.5.0/contribute/docs.mdx b/6.5.0/contribute/docs.mdx
deleted file mode 100644
index cf98790..0000000
--- a/6.5.0/contribute/docs.mdx
+++ /dev/null
@@ -1,42 +0,0 @@
----
-title: 'Contribute to Bazel documentation'
----
-
-
-Thank you for contributing to Bazel's documentation! There are a few ways to
-help create better docs for our community.
-
-## Documentation types
-
-This site includes a few types of content.
-
- - *Narrative documentation*, which is written by technical writers and
-   engineers. Most of this site is narrative documentation that covers
-   conceptual and task-based guides.
- - *Reference documentation*, which is documentation generated from code
-   comments. You can't make changes to the reference doc pages directly, but
-   instead need to change their source.
-
-## Documentation infrastructure
-
-Bazel documentation is served from Google and the source files are mirrored in
-Bazel's GitHub repository. You can make changes to the source files in GitHub.
-If approved, you can merge the changes and a Bazel maintainer will update the
-website source to publish your updates.
-
-
-## Small changes
-
-You can approach small changes, such as fixing errors or typos, in a couple of
-ways.
-
- - **Pull request**. You can create a pull request in GitHub with the
-   [web-based editor](https://docs.github.com/repositories/working-with-files/managing-files/editing-files) or on a branch.
- - **Bug**. You can file a bug with details and suggested changes and the Bazel
-   documentation owners will make the update.
-
-## Large changes
-
-If you want to make substantial changes to existing documentation or propose
-new documentation, you can either create a pull request or start with a Google
-doc and contact the Bazel Owners to collaborate.
diff --git a/6.5.0/contribute/index.mdx b/6.5.0/contribute/index.mdx
deleted file mode 100644
index 0ee330f..0000000
--- a/6.5.0/contribute/index.mdx
+++ /dev/null
@@ -1,81 +0,0 @@
----
-title: 'Contributing to Bazel'
----
-
-
-There are many ways to help the Bazel project and ecosystem.
-
-## Provide feedback
-
-As you use Bazel, you may find things that can be improved.
-You can help by [reporting issues](http://github.com/bazelbuild/bazel/issues)
-when:
-
- - Bazel crashes or you encounter a bug that can [only be resolved using `bazel
-   clean`](/docs/build#correct-incremental-rebuilds).
- - The documentation is incomplete or unclear. You can also report issues
-   from the page you are viewing by using the "Create issue"
-   link at the top right corner of the page.
- - An error message could be improved.
-
-## Participate in the community
-
-You can engage with the Bazel community by:
-
- - Answering questions [on Stack Overflow](
   https://stackoverflow.com/questions/tagged/bazel).
- - Helping other users [on Slack](https://slack.bazel.build).
- - Improving documentation or [contributing examples](
   https://github.com/bazelbuild/examples).
- - Sharing your experience or your tips, for example, on a blog or social media.
-
-## Contribute code
-
-Bazel is a large project and making a change to the Bazel source code
-can be difficult.
-
-You can contribute to the Bazel ecosystem by:
-
- - Helping rules maintainers by contributing pull requests.
- - Creating new rules and open-sourcing them.
- - Contributing to Bazel-related tools, for example, migration tools.
- - Improving Bazel integration with other IDEs and tools.
-
-Before making a change, [create a GitHub
-issue](http://github.com/bazelbuild/bazel/issues)
-or email [bazel-dev@](mailto:bazel-dev@googlegroups.com).
-
-The most helpful contributions fix bugs or add features (as opposed
-to stylistic, refactoring, or "cleanup" changes). Your change should
-include tests and documentation, keeping in mind backward-compatibility,
-portability, and the impact on memory usage and performance.
-
-To learn about how to submit a change, see the
-[patch acceptance process](/contribute/patch-acceptance).
-
-## Bazel's code description
-
-Bazel has a large codebase with code in multiple locations. See the
-[codebase guide](/contribute/codebase) for more details.
-
-Bazel is organized as follows:
-
-* Client code is in `src/main/cpp` and provides the command-line interface.
-* Protocol buffers are in `src/main/protobuf`.
-* Server code is in `src/main/java` and `src/test/java`.
-  * Core code, which is mostly composed of [Skyframe](/reference/skyframe)
-    and some utilities.
-  * Built-in rules are in `com.google.devtools.build.lib.rules` and in
-    `com.google.devtools.build.lib.bazel.rules`. You might want to read about
-    the [Challenges of Writing Rules](/docs/rule-challenges) first.
-* Java native interfaces are in `src/main/native`.
-
-* Various tooling for language support is described in the list in the
-  [compiling Bazel](/install/compile-source) section.
-
-
-### Searching Bazel's source code
-
-To quickly search through Bazel's source code, use
-[Bazel Code Search](https://source.bazel.build/). You can navigate Bazel's
-repositories, branches, and files. You can also view history, diffs, and blame
-information. To learn more, see the
-[Bazel Code Search User Guide](/contribute/searching-codebase).
diff --git a/6.5.0/contribute/maintainers-guide.mdx b/6.5.0/contribute/maintainers-guide.mdx
deleted file mode 100644
index 4d636c2..0000000
--- a/6.5.0/contribute/maintainers-guide.mdx
+++ /dev/null
@@ -1,206 +0,0 @@
----
-title: 'Guide for Bazel Maintainers'
----
-
-
-This is a guide for the maintainers of the Bazel open source project.
-
-If you are looking to contribute to Bazel, please read [Contributing to
-Bazel](/contribute) instead.
-
-The objectives of this page are to:
-
-1. Serve as the maintainers' source of truth for the project's contribution
-   process.
-1. Set expectations between the community contributors and the project
-   maintainers.
-
-Bazel's [core group of contributors](/contribute/contribution-policy) has
-dedicated subteams to manage aspects of the open source project. These are:
-
-* **Release Process**: Manage Bazel's release process.
-* **Green Team**: Grow a healthy ecosystem of rules and tools.
-* **Developer Experience Gardeners**: Encourage external contributions, review
-  issues and pull requests, and make our development workflow more open.
-
-## Releases
-
-* [Release Playbook](https://github.com/bazelbuild/continuous-integration/blob/master/docs/release-playbook.md)
-* [Testing local changes with downstream projects](https://github.com/bazelbuild/continuous-integration/blob/master/docs/downstream-testing.md)
-
-## Continuous Integration
-
-Read the Green team's guide to Bazel's CI infrastructure on the
-[bazelbuild/continuous-integration](https://github.com/bazelbuild/continuous-integration/blob/master/buildkite/README.md)
-repository.
-
-## Lifecycle of an Issue
-
-1. A user creates an issue using the [Issue
-   Template](https://github.com/bazelbuild/bazel/blob/master/ISSUE_TEMPLATE.md)
-   and it enters the pool of [unreviewed open
-   issues](https://github.com/bazelbuild/bazel/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+-label%3Auntriaged+-label%3Ap2+-label%3Ap1+-label%3Ap3+-label%3Ap4+-label%3Ateam-Starlark+-label%3Ateam-Rules-CPP+-label%3Ateam-Rules-Java+-label%3Ateam-XProduct+-label%3Ateam-Android+-label%3Ateam-Apple+-label%3Ateam-Configurability++-label%3Ateam-Performance+-label%3Ateam-Rules-Server+-label%3Ateam-Core+-label%3Ateam-Rules-Python+-label%3Ateam-Remote-Exec+-label%3Ateam-Local-Exec+-label%3Ateam-Bazel).
-1. A member on the Developer Experience (DevEx) subteam rotation reviews the
-   issue.
-   1. If the issue is **not a bug** or a **feature request**, the DevEx member
-      will usually close the issue and redirect the user to
-      [StackOverflow](https://stackoverflow.com/questions/tagged/bazel) and
-      [bazel-discuss](https://groups.google.com/forum/#!forum/bazel-discuss) for
-      higher visibility on the question.
-   1. If the issue belongs in one of the rules repositories owned by the
-      community, like [rules_apple](https://github.com/bazelbuild/rules_apple),
-      the DevEx member will [transfer this issue](https://docs.github.com/en/free-pro-team@latest/github/managing-your-work-on-github/transferring-an-issue-to-another-repository)
-      to the correct repository.
If the issue is vague or has missing information, the DevEx member will - assign the issue back to the user to request more information before - continuing. This usually occurs when the user does not follow the [Issue - Template](https://github.com/bazelbuild/bazel/blob/master/ISSUE_TEMPLATE.md). -1. After reviewing the issue, the DevEx member decides if the issue requires - immediate attention. If it does, they will assign the **P0** - [priority](#priority) label and an owner from the list of team leads. -1. The DevEx member assigns the `untriaged` label and exactly one [team - label](#team-labels) for routing. -1. The DevEx member also assigns exactly one `type:` label, such as `type: bug` - or `type: feature request`, according to the type of the issue. -1. For platform-specific issues, the DevEx member assigns one `platform:` label, - such as `platform:apple` for Mac-specific issues. -At this stage, the issue enters the pool of [untriaged open -issues](https://github.com/bazelbuild/bazel/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged). - -Each Bazel subteam will triage all issues under labels they own, preferably on a -weekly basis. The subteam will review and evaluate the issue and provide a -resolution, if possible. If you are an owner of a team label, see [this section -](#label-own) for more information. - -When an issue is resolved, it can be closed. - -## Lifecycle of a Pull Request - -1. A user creates a pull request. -1. If you are a member of a Bazel team and are sending a PR against your own - area, you are responsible for assigning your team label and finding the best - reviewer. -1. Otherwise, during daily triage, a DevEx member assigns one - [team label](#team-labels) and the team's technical lead (TL) for routing. - 1. The TL may optionally assign someone else to review the PR. -1. The assigned reviewer reviews the PR and works with the author until it is - approved or dropped. -1. If approved, the reviewer **imports** the PR's commit(s) into Google's - internal version control system for further tests. As Bazel is the same build - system used internally at Google, we need to test all PR commits against the - internal test suite. This is the reason why we do not merge PRs directly. -1. If the imported commit passes all internal tests, the commit will be squashed - and exported back out to GitHub. -1. When the commit merges into master, GitHub automatically closes the PR. - - -## My team owns a label. What should I do? - -Subteams need to triage all issues in the [labels they own](#team-labels), -preferably on a weekly basis. - -### Issues - -1. Filter the list of issues by your team label **and** the `untriaged` label. -1. Review the issue. -1. Identify a [priority level](#priority) and assign the label. - 1. The issue may have already been prioritized by the DevEx subteam if it's a - P0. Re-prioritize if needed. - 1. Each issue needs to have exactly one [priority label](#priority). If an - issue is either P0 or P1, we assume that it is actively being worked on. -1. Remove the `untriaged` label. - -Note that you need to be in the [bazelbuild -organization](https://github.com/bazelbuild) to be able to add or remove labels. - -### Pull Requests - -1. Filter the list of pull requests by your team label. -1. Review open pull requests. - 1. **Optional**: If you are assigned to the review but are not the right fit - for it, re-assign it to an appropriate reviewer to perform the code review. -1. Work with the pull request creator to complete a code review. -1. Approve the PR. -1. 
Ensure that all tests pass. -1. Import the patch to the internal version control system and run the internal - presubmits. -1. Submit the internal patch. If the patch submits and exports successfully, the - PR will be closed automatically by GitHub. - -## Priority - -The following definitions for priority will be used by the maintainers to triage -issues. - -* [**P0**](https://github.com/bazelbuild/bazel/labels/P0) - Major broken - functionality that causes a Bazel release (minus release candidates) to be - unusable, or a downed service that severely impacts development of the Bazel - project. This includes regressions introduced in a new release that block a - significant number of users, or an incompatible breaking change that was not - compliant with the [Breaking - Change](https://docs.google.com/document/d/1q5GGRxKrF_mnwtaPKI487P8OdDRh2nN7jX6U-FXnHL0/edit?pli=1#heading=h.ceof6vpkb3ik) - policy. No practical workaround exists. -* [**P1**](https://github.com/bazelbuild/bazel/labels/P1) - Critical defect or - feature which should be addressed in the next release, or a serious issue that - impacts many users (including the development of the Bazel project), but a - practical workaround exists. Typically does not require immediate action. In - high demand and planned in the current quarter's roadmap. -* [**P2**](https://github.com/bazelbuild/bazel/labels/P2) - Defect or feature - that should be addressed but that we are not currently working on. Moderate - live issue in a released Bazel version that is inconvenient for users, needs - to be addressed in a future release, and/or has an easy workaround. -* [**P3**](https://github.com/bazelbuild/bazel/labels/P3) - Desirable minor bug - fix or enhancement with small impact. Not prioritized into Bazel roadmaps or - any imminent release; however, community contributions are encouraged. -* [**P4**](https://github.com/bazelbuild/bazel/labels/P4) - Low priority defect - or feature request that is unlikely to get closed. Can also be kept open for a - potential re-prioritization if more users are impacted. -* [**ice-box**](https://github.com/bazelbuild/bazel/issues?q=label%3Aice-box+is%3Aclosed) - - Issues that we currently don't have time to deal with, nor the - time to accept contributions for. We will close these issues to indicate that - nobody is working on them, but will continue to monitor their validity over - time and revive them if enough people are impacted and if we happen to have - resources to deal with them. As always, feel free to comment or add reactions - to these issues even when closed. - -## Team labels - -* [`team-Android`](https://github.com/bazelbuild/bazel/labels/team-Android): Issues for Android team - * Contact: [ahumesky](https://github.com/ahumesky) -* [`team-Bazel`](https://github.com/bazelbuild/bazel/labels/team-Bazel): General Bazel product/strategy issues - * Contact: [sventiffe](https://github.com/sventiffe) -* [`team-Build-Language`](https://github.com/bazelbuild/bazel/labels/team-Build-Language): Issues for the BUILD, .bzl APIs and Stardoc. 
- * Contact: [brandjon](https://github.com/brandjon) -* [`team-Configurability`](https://github.com/bazelbuild/bazel/labels/team-Configurability): Issues for Configurability team - * Contact: [gregestren](https://github.com/gregestren) -* [`team-Core`](https://github.com/bazelbuild/bazel/labels/team-Core): Issues for Core team - * Contact: [haxorz](https://github.com/haxorz) -* [`team-Documentation`](https://github.com/bazelbuild/bazel/labels/team-Documentation): Issues for Documentation team - * Contact: [communikit](https://github.com/communikit) -* [`team-ExternalDeps`](https://github.com/bazelbuild/bazel/labels/team-ExternalDeps): External dependency handling, Bzlmod, remote repositories, WORKSPACE file - * Contact: [meteorcloudy](https://github.com/meteorcloudy) -* [`team-Local-Exec`](https://github.com/bazelbuild/bazel/labels/team-Local-Exec): Issues for Execution (Local) team - * Contact: [meisterT](https://github.com/meisterT) -* [`team-OSS`](https://github.com/bazelbuild/bazel/labels/team-OSS): Issues for Bazel OSS team: installation, release process, Bazel packaging, website, docs infrastructure - * Contact: [meteorcloudy](https://github.com/meteorcloudy) -* [`team-Performance`](https://github.com/bazelbuild/bazel/labels/team-Performance): Issues for Bazel Performance team - * Contact: [meisterT](https://github.com/meisterT) -* [`team-Remote-Exec`](https://github.com/bazelbuild/bazel/labels/team-Remote-Exec): Issues for Execution (Remote) team - * Contact: [coeuvre](https://github.com/coeuvre) -* [`team-Rules-CPP`](https://github.com/bazelbuild/bazel/labels/team-Rules-CPP): Issues for C++ rules, including native Apple rule logic - * Contact: [oquenchil](https://github.com/oquenchil) -* [`team-Rules-Java`](https://github.com/bazelbuild/bazel/labels/team-Rules-Java): Issues for Java rules - * Contact: [comius](https://github.com/comius) -* [`team-Rules-Python`](https://github.com/bazelbuild/bazel/labels/team-Rules-Python): Issues for the native Python rules - * Contact: [comius](https://github.com/comius) -* [`team-Rules-Server`](https://github.com/bazelbuild/bazel/labels/team-Rules-Server): Issues for server-side rules included with Bazel - * Contact: [comius](https://github.com/comius) -* [`team-Starlark-integration`](https://github.com/bazelbuild/bazel/labels/team-Starlark-integration): Non-API Bazel + Starlark integration. Includes: how Bazel triggers the Starlark interpreter, Stardoc, builtins injection, character encoding. Does *not* include: BUILD or .bzl language issues. - * Contact: [brandjon](https://github.com/brandjon) -* [`team-Starlark-interpreter`](https://github.com/bazelbuild/bazel/labels/team-Starlark-interpreter): Issues for the Starlark interpreter (anything in [java.net.starlark](https://github.com/bazelbuild/bazel/tree/master/src/main/java/net/starlark/java)). BUILD and .bzl API issues (which represent Bazel's *integration* with Starlark) go in `team-Build-Language`. - * Contact: [brandjon](https://github.com/brandjon) - -For new issues, we deprecated the `category: *` labels in favor of the team -labels. - -See the full list of labels [here](https://github.com/bazelbuild/bazel/labels). diff --git a/6.5.0/contribute/naming.mdx b/6.5.0/contribute/naming.mdx deleted file mode 100644 index 2b1ded4..0000000 --- a/6.5.0/contribute/naming.mdx +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: 'Naming a Bazel related project' ---- - - -First, thank you for contributing to the Bazel ecosystem! 
Please reach out to -the Bazel community on the -[bazel-discuss mailing list](https://groups.google.com/forum/#!forum/bazel-discuss -) to share your project and its suggested name. - -If you are building a Bazel related tool or sharing your Starlark rules, -we recommend following these guidelines for the name of your project: - -## Naming Starlark rules - -See [Deploying new Starlark rules](/rules/deploying) -in the docs. - -## Naming other Bazel related tools - -This section applies if you are building a tool to enrich the Bazel ecosystem. -For example, a new IDE plugin or a new build system migrator. - -Picking a good name for your tool can be hard. If we’re not careful and use too -many codenames, the Bazel ecosystem could become very difficult to understand -for newcomers. - -Follow these guidelines for naming Bazel tools: - -1. Prefer **not introducing a new brand name**: "*Bazel*" is already a new brand -for our users; we should avoid confusing them with too many new names. - -2. Prefer **using a name that includes "Bazel"**: This helps to express that it -is a Bazel related tool, and it also helps people find it with a search engine. - -3. Prefer **using names that are descriptive about what the tool is doing**: -Ideally, the name should not need a subtitle for users to have a first good -guess at what the tool does. Using English words separated by spaces is a good -way to achieve this. - -4. **It is not a requirement to use a floral or food theme**: Bazel evokes -[basil](https://en.wikipedia.org/wiki/Basil), the plant. You do not need to -look for a name that is a plant, food or that relates to "basil." - -5. **If your tool relates to another third party brand, use it only as a -descriptor**: For example, use "Bazel migrator for CMake" instead of -"CMake Bazel migrator". - -These guidelines also apply to the GitHub repository URL. Reading the repository -URL should help people understand what the tool does. Of course, the repository -name can be shorter, and it must use lowercase letters and dashes instead of spaces. - - -Examples of good names: - -* *Bazel for Eclipse*: Users will understand that if they want to use Bazel - with Eclipse, this is where they should be looking. It uses a third party brand - as a descriptor. -* *Bazel buildfarm*: A "buildfarm" is a - [compile farm](https://en.wikipedia.org/wiki/Compile_farm). Users - will understand that this project relates to building on servers. - -Examples of names to avoid: - -* *Ocimum*: The [scientific name of basil](https://en.wikipedia.org/wiki/Ocimum) - does not relate enough to the Bazel project. -* *Bazelizer*: The tool behind this name could do a lot of things, this name is - not descriptive enough. - -Note that these recommendations are aligned with the -[guidelines](https://opensource.google.com/docs/releasing/preparing/#name) -Google uses when open sourcing a project. diff --git a/6.5.0/contribute/patch-acceptance.mdx b/6.5.0/contribute/patch-acceptance.mdx deleted file mode 100644 index ad95245..0000000 --- a/6.5.0/contribute/patch-acceptance.mdx +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: 'Patch Acceptance Process' ---- - - -This page outlines how contributors can propose and make changes to the Bazel -code base. - -1. Read the [Bazel Contribution policy](/contribute/contribution-policy). -1. Create a [GitHub issue](https://github.com/bazelbuild/bazel/) to - discuss your plan and design. Pull requests that change or add behavior - need a corresponding issue for tracking. -1. 
If you're proposing significant changes, write a - [design document](/contribute/design-documents). -1. Ensure you've signed a [Contributor License - Agreement](https://cla.developers.google.com). -1. Prepare a git commit that implements the feature. Don't forget to add tests - and update the documentation. If your change has user-visible effects, please - [add release notes](/contribute/release-notes). If it is an incompatible change, - read the [guide for rolling out breaking changes](/contribute/breaking-changes). -1. Create a pull request on - [GitHub](https://github.com/bazelbuild/bazel/pulls). If you're new to GitHub, - read [about pull - requests](https://help.github.com/articles/about-pull-requests/). Note that - we restrict permissions to create branches on the main Bazel repository, so - you will need to push your commit to [your own fork of the - repository](https://help.github.com/articles/working-with-forks/). -1. A Bazel maintainer should assign you a reviewer within two business days - (excluding holidays in the USA and Germany). If you aren't assigned a - reviewer in that time, you can request one by emailing - [bazel-dev@googlegroups.com](mailto:bazel-dev@googlegroups.com). -1. Work with the reviewer to complete a code review. For each change, create a - new commit and push it to make changes to your pull request. If the review - takes too long (for instance, if the reviewer is unresponsive), send an email to - [bazel-dev@googlegroups.com](mailto:bazel-dev@googlegroups.com). -1. After your review is complete, a Bazel maintainer applies your patch to - Google's internal version control system. - - This triggers internal presubmit checks - that may suggest more changes. If you haven't expressed a preference, the - maintainer submitting your change adds "trivial" changes (such as - [linting](https://en.wikipedia.org/wiki/Lint_(software))) that don't affect - design. If deeper changes are required or you'd prefer to apply - changes directly, you and the reviewer should communicate preferences - clearly in review comments. - - After internal submission, the patch is exported as a Git commit, - at which point the GitHub pull request is closed. All final changes - are attributed to you. diff --git a/6.5.0/contribute/policy.mdx b/6.5.0/contribute/policy.mdx deleted file mode 100644 index 804c1c2..0000000 --- a/6.5.0/contribute/policy.mdx +++ /dev/null @@ -1,77 +0,0 @@ -translation: human -page_type: lcat ---- -title: 'Contribution policy' ---- - - -This page covers Bazel's governance model and contribution policy. - -## Governance model - -The [Bazel project](https://github.com/bazelbuild) is led and managed by Google -and has a large community of contributors outside of Google. Some Bazel -components (such as specific rules repositories under the -[bazelbuild](https://github.com/bazelbuild) organization) are led, -maintained, and managed by members of the community. The Google Bazel team -reviews suggestions to add community-owned repositories (such as rules) to the -[bazelbuild](https://github.com/bazelbuild) GitHub organization. - -### Contributor roles - -Here are outlines of the roles in the Bazel project, including their -responsibilities: - -* **Owners**: The Google Bazel team. Owners are responsible for: - * Strategy, maintenance, and leadership of the Bazel project. - * Building and maintaining Bazel's core functionality. - * Appointing Maintainers and approving new repositories. -* **Maintainers**: The Google Bazel team and designated GitHub users. 
- Maintainers are responsible for: - * Building and maintaining the primary functionality of their repository. - * Reviewing and approving contributions to areas of the Bazel code base. - * Supporting users and contributors with timely and transparent issue - management, PR review, and documentation. - * Releasing, testing, and collaborating with Bazel Owners. -* **Contributors**: All users who contribute code or documentation to the - Bazel project. - * Creating well-written PRs to contribute to Bazel's codebase and - documentation. - * Using standard channels, such as GitHub Issues, to propose changes and - report issues. - -### Becoming a Maintainer - -Bazel Owners may appoint Maintainers to lead well-defined areas of code, such as -rule sets. Contributors with a record of consistent, responsible past -contributions who are planning major contributions in the future could be -considered to become qualified Maintainers. - -## Contribution policy - -The Bazel project accepts contributions from external contributors. Here are the -contribution policies for Google-managed and Community-managed areas of code. - -* **Licensing**. All Maintainers and Contributors must sign - [Google’s Contributor License Agreement](https://cla.developers.google.com/clas). -* **Contributions**. Owners and Maintainers should make every effort to accept - worthwhile contributions. All contributions must be: - * Well written and well tested - * Discussed and approved by the Maintainers of the relevant area of code. - Discussions and approvals happen on GitHub Issues and in GitHub PRs. - Larger contributions require a - [design review](/contribute/design-documents). - * Added to Bazel's Continuous Integration system if not already present. - * Supportable and aligned with Bazel product direction -* **Code review**. All changes in all `bazelbuild` repositories require - review: - * All PRs must be approved by an Owner or Maintainer. - * Only Owners and Maintainers can merge PRs. -* **Compatibility**. Owners may need to reject or request modifications to PRs - in the unlikely event that the change requires substantial modifications to - internal Google systems. -* **Documentation**. Where relevant, feature contributions should include - documentation updates. - -For more details on contributing to Bazel, see our -[contribution guidelines](/contribute/). diff --git a/6.5.0/contribute/release-notes.mdx b/6.5.0/contribute/release-notes.mdx deleted file mode 100644 index f1cac0e..0000000 --- a/6.5.0/contribute/release-notes.mdx +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: 'Writing release notes' ---- - - -This document is targeted at Bazel contributors. - -Commit descriptions in Bazel include a `RELNOTES:` tag followed by a release -note. This is used by the Bazel team to track changes in each release and write -the release announcement. - -## Overview - -* Is your change a bugfix? In that case, you don't need a release note. Please - include a reference to the GitHub issue. - -* If the change adds, removes, or changes Bazel behavior in a user-visible way, - then it may be advantageous to mention it. - -If the change is significant, follow the [design document -policy](/contribute/design-documents) first. - -## Guidelines - -Release notes will be read by our users, so they should be short (ideally one -sentence), avoid jargon (Bazel-internal terminology), and focus on what the -change is about. - -* Include a link to the relevant documentation. Almost any release note should - contain a link. 
If the description mentions a flag, a feature, or a command name, - users will probably want to know more about it. - -* Use backquotes around code, symbols, flags, or any word containing an - underscore. - -* Do not just copy and paste bug descriptions. They are often cryptic, make - sense only to us, and leave the user scratching their head. Release notes are - meant to explain what has changed and why in user-understandable language. - -* Always use present tense and the format "Bazel now supports Y" or "X now does - Z." We don't want our release notes to sound like bug entries. All release - note entries should be informative and use a consistent style and language. - -* If something has been deprecated or removed, use "X has been deprecated" or "X - has been removed." Not "is removed" or "was removed." - -* If Bazel now does something differently, use "X now $newBehavior instead of - $oldBehavior" in present tense. This lets the user know in detail what to - expect when they use the new release. - -* If Bazel now supports or no longer supports something, use "Bazel now supports - / no longer supports X". - -* Explain why something has been removed / deprecated / changed. One sentence is - enough, but we want the user to be able to evaluate impact on their builds. - -* Do NOT make any promises about future functionality. Avoid "this flag will be - removed" or "this will be changed." It introduces uncertainty. The first thing - the user will wonder is "when?" and we don't want them to start worrying about - their current builds breaking at some unknown time. - -## Process - -As part of the [release -process](https://github.com/bazelbuild/continuous-integration/blob/master/docs/release-playbook.md), -we collect the `RELNOTES` tags of every commit. We copy everything into a [Google -Doc](https://docs.google.com/document/d/1wDvulLlj4NAlPZamdlEVFORks3YXJonCjyuQMUQEmB0/edit) -where we review, edit, and organize the notes. - -The release manager sends an email to the -[bazel-dev](https://groups.google.com/forum/#!forum/bazel-dev) mailing list. -Bazel contributors are invited to contribute to the document and make sure -their changes are correctly reflected in the announcement. - -Later, the announcement will be submitted to the [Bazel -blog](https://blog.bazel.build/), using the [bazel-blog -repository](https://github.com/bazelbuild/bazel-blog/tree/master/_posts). diff --git a/6.5.0/contribute/support.mdx b/6.5.0/contribute/support.mdx deleted file mode 100644 index 7ad6772..0000000 --- a/6.5.0/contribute/support.mdx +++ /dev/null @@ -1,19 +0,0 @@ ---- -title: 'Support Policy' ---- - - -The Bazel team generally avoids making backwards-incompatible changes. However, -these changes are sometimes necessary to fix bugs, make improvements (such as -improving performance or usability) to the system, or to lock down APIs that -are known to be brittle. - -Major changes are announced in advance on the -[bazel-discuss](https://groups.google.com/forum/#!forum/bazel-discuss) mailing -list. Both undocumented features (attributes, rules, "Make" variables, and -flags) and documented features that are marked *experimental* are subject to -change at any time without prior notice. - -Report any bugs or regressions you find on -[GitHub](https://github.com/bazelbuild/bazel/issues). The repository maintainers -make an effort to triage reported issues within 2 business days. 
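To make the `RELNOTES` guidance above concrete, here is a sketch of a commit description that follows it; the flag name, issue number, and documentation anchor are invented for illustration:

```
Add --foo_bar to control widget caching.

RELNOTES: Bazel now supports `--foo_bar`, which enables caching of
widgets. See /docs/user-manual#foo-bar for details.

Fixes #12345.
```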
diff --git a/6.5.0/contribute/windows-chocolatey-maintenance.mdx b/6.5.0/contribute/windows-chocolatey-maintenance.mdx deleted file mode 100644 index 464cfeb..0000000 --- a/6.5.0/contribute/windows-chocolatey-maintenance.mdx +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: 'Maintaining Bazel Chocolatey package on Windows' ---- - - -Note: The Chocolatey package is experimental; please provide feedback -(`@petemounce` in issue tracker). - -## Prerequisites - -You need: - -* [chocolatey package manager](https://chocolatey.org) installed -* (to publish) a chocolatey API key granting you permission to publish the - `bazel` package - * [@petemounce](https://github.com/petemounce) currently - maintains this unofficial package. -* (to publish) to have set up that API key for the chocolatey source locally - via `choco apikey -k <apikey> -s https://chocolatey.org/` - -## Build - -Compile bazel with msys2 shell and `compile.sh`. - -```powershell -pushd scripts/packages/chocolatey - ./build.ps1 -version 0.3.2 -mode local -popd -``` - -Should result in `scripts/packages/chocolatey/bazel.<version>.nupkg` being -created. - -The `build.ps1` script supports `mode` values `local`, `rc` and `release`. - -## Test - -0. Build the package (with `-mode local`) - - * run a webserver (`python -m SimpleHTTPServer` in - `scripts/packages/chocolatey` is convenient and starts one on - `http://localhost:8000`) - -0. Test the install - - The `test.ps1` should install the package cleanly (and error if it did not - install cleanly), then tell you what to do next. - -0. Test the uninstall - - ```sh - choco uninstall bazel - # should remove bazel from the system - ``` - -Chocolatey's moderation process automates checks here as well. - -## Release - -Modify `tools/parameters.json` for the new release's URI and checksum once the -release has been published to GitHub releases. - -```powershell -./build.ps1 -version <version> -isRelease -./test.ps1 -version <version> -# if the test.ps1 passes -choco push bazel.x.y.z.nupkg --source https://chocolatey.org/ -``` - -Chocolatey.org will then run automated checks and respond to the push via email -to the maintainers. diff --git a/6.5.0/contribute/windows-scoop-maintenance.mdx b/6.5.0/contribute/windows-scoop-maintenance.mdx deleted file mode 100644 index 2451dce..0000000 --- a/6.5.0/contribute/windows-scoop-maintenance.mdx +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: 'Maintaining Bazel Scoop package on Windows' ---- - - -Note: The Scoop package is experimental. To provide feedback, go to -`@excitoon` in issue tracker. - -## Prerequisites - -You need: - -* [Scoop package manager](https://scoop.sh/) installed -* A GitHub account in order to publish and create pull requests to - [scoopinstaller/scoop-main](https://github.com/scoopinstaller/scoop-main) - * [@excitoon](https://github.com/excitoon) currently maintains this - unofficial package. Feel free to ask questions by - [e-mail](mailto:vladimir.chebotarev@gmail.com) or - [Telegram](http://telegram.me/excitoon). - -## Release process - -Scoop packages are very easy to maintain. Once you have the URL of released -Bazel, you need to make appropriate changes in -[this file](https://github.com/scoopinstaller/scoop-main/blob/master/bucket/bazel.json): - -- update version -- update dependencies if needed -- update URL -- update hash (`sha256` by default) - -In your filesystem, `bazel.json` is located in the directory -`%UserProfile%/scoop/buckets/main/bucket` by default. 
This directory belongs to -your clone of a Git repository -[scoopinstaller/scoop-main](https://github.com/scoopinstaller/scoop-main). - -Test the result: - -``` -scoop uninstall bazel -scoop install bazel -bazel version -bazel something_else -``` - -The first time, make a fork of -[scoopinstaller/scoop-main](https://github.com/scoopinstaller/scoop-main) and -specify it as your own remote for `%UserProfile%/scoop/buckets/main`: - -``` -git remote add mine FORK_URL -``` - -Push your changes to your fork and create a pull request. diff --git a/6.5.0/docs/android-build-performance.mdx b/6.5.0/docs/android-build-performance.mdx deleted file mode 100644 index 1fc2d26..0000000 --- a/6.5.0/docs/android-build-performance.mdx +++ /dev/null @@ -1,96 +0,0 @@ ---- -title: 'Android Build Performance' ---- - - -This page contains information on optimizing build performance for Android -apps specifically. For general build performance optimization with Bazel, see -[Optimizing Performance](/rules/performance). - -## Recommended flags - -The flags are in the -[`bazelrc` configuration syntax](/docs/bazelrc#bazelrc-syntax-semantics), so -they can be pasted directly into a `bazelrc` file and invoked with -`--config=<config name>` on the command line. - -**Profiling performance** - -Bazel writes a JSON trace profile by default to a file called -`command.profile.gz` in Bazel's output base. -See the [JSON Profile documentation](/rules/performance#performance-profiling) for -how to read and interact with the profile. - -**Persistent workers for Android build actions** - -A subset of Android build actions has support for -[persistent workers](https://blog.bazel.build/2015/12/10/java-workers.html). - -These actions' mnemonics are: - -* DexBuilder -* Javac -* Desugar -* AaptPackage -* AndroidResourceParser -* AndroidResourceValidator -* AndroidResourceCompiler -* RClassGenerator -* AndroidResourceLink -* AndroidAapt2 -* AndroidAssetMerger -* AndroidResourceMerger -* AndroidCompiledResourceMerger - -Enabling workers can result in better build performance by saving on JVM -startup costs from invoking each of these tools, but at the cost of increased -memory usage on the system by persisting them. - -To enable workers for these actions, apply these flags with -`--config=android_workers` on the command line: - -``` -build:android_workers --strategy=DexBuilder=worker -build:android_workers --strategy=Javac=worker -build:android_workers --strategy=Desugar=worker - -# A wrapper flag for these resource processing actions: -# - AndroidResourceParser -# - AndroidResourceValidator -# - AndroidResourceCompiler -# - RClassGenerator -# - AndroidResourceLink -# - AndroidAapt2 -# - AndroidAssetMerger -# - AndroidResourceMerger -# - AndroidCompiledResourceMerger -build:android_workers --persistent_android_resource_processor -``` - -The default number of persistent workers created per action is `4`. We have -[measured improved build performance](https://github.com/bazelbuild/bazel/issues/8586#issuecomment-500070549) -by capping the number of instances for each action to `1` or `2`, although this -may vary depending on the system Bazel is running on, and the project being -built. - -To cap the number of instances for an action, apply these flags: - -``` -build:android_workers --worker_max_instances=DexBuilder=2 -build:android_workers --worker_max_instances=Javac=2 -build:android_workers --worker_max_instances=Desugar=2 -build:android_workers --worker_max_instances=AaptPackage=2 -# .. and so on for each action you're interested in. 
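-# A sketch (not part of the original file): the same Mnemonic=count syntax can
-# also cap the resource-processing workers listed above, for example:
-build:android_workers --worker_max_instances=AndroidResourceMerger=2
-build:android_workers --worker_max_instances=AndroidCompiledResourceMerger=2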
-``` - -**Using AAPT2** - -[`aapt2`](https://developer.android.com/studio/command-line/aapt2) has improved -performance over `aapt` and also creates smaller APKs. To use `aapt2`, use the -`--android_aapt=aapt2` flag, or set the `aapt_version` attribute to `aapt2` on -`android_binary` and `android_local_test`. - -**SSD optimizations** - -The `--experimental_multi_threaded_digest` flag is useful for optimizing digest -computation on SSDs. diff --git a/6.5.0/docs/android-instrumentation-test.mdx b/6.5.0/docs/android-instrumentation-test.mdx deleted file mode 100644 index f4f0b58..0000000 --- a/6.5.0/docs/android-instrumentation-test.mdx +++ /dev/null @@ -1,578 +0,0 @@ ---- -title: 'Android Instrumentation Tests' ---- - - -_If you're new to Bazel, start with the [Building Android with -Bazel](/tutorials/android-app) tutorial._ - -![Running Android instrumentation tests in parallel](/docs/images/android_test.gif "Android instrumentation test") - -**Figure 1.** Running parallel Android instrumentation tests. - -[`android_instrumentation_test`](/reference/be/android#android_instrumentation_test) -allows developers to test their apps on Android emulators and devices. -It utilizes real Android framework APIs and the Android Test Library. - -For hermeticity and reproducibility, Bazel creates and launches Android -emulators in a sandbox, ensuring that tests always run from a clean state. Each -test gets an isolated emulator instance, allowing tests to run in parallel -without passing states between them. - -For more information on Android instrumentation tests, check out the [Android -developer -documentation](https://developer.android.com/training/testing/unit-testing/instrumented-unit-tests.html). - -Please file issues in the [GitHub issue tracker](https://github.com/bazelbuild/bazel/issues). - -## How it works - -When you run `bazel test` on an `android_instrumentation_test` target for the -first time, Bazel performs the following steps: - -1. Builds the test APK, APK under test, and their transitive dependencies -2. Creates, boots, and caches clean emulator states -3. Starts the emulator -4. Installs the APKs -5. Runs tests utilizing the [Android Test Orchestrator](https://developer.android.com/training/testing/junit-runner.html#using-android-test-orchestrator) -6. Shuts down the emulator -7. Reports the results - -In subsequent test runs, Bazel boots the emulator from the clean, cached state -created in step 2, so there are no leftover states from previous runs. Caching -emulator state also speeds up test runs. - -## Prerequisites - -Ensure your environment satisfies the following prerequisites: - -- **Linux**. Tested on Ubuntu 16.04 and 18.04. - -- **Bazel 0.12.0** or later. Verify the version by running `bazel info release`. - -```posix-terminal -bazel info release -``` -This results in output similar to the following: - -```none {:.devsite-disable-click-to-copy} -release 4.1.0 -``` - -- **KVM**. Bazel requires emulators to have [hardware - acceleration](https://developer.android.com/studio/run/emulator-acceleration.html#accel-check) - with KVM on Linux. You can follow these - [installation instructions](https://help.ubuntu.com/community/KVM/Installation) - for Ubuntu. - -To verify that KVM has the correct configuration, run: - -```posix-terminal -apt-get install cpu-checker && kvm-ok -``` - -If it prints the following message, you have the correct configuration: - -```none {:.devsite-disable-click-to-copy} -INFO: /dev/kvm exists -KVM acceleration can be used -``` - -- **Xvfb**. 
To run headless tests (for example, on CI servers), Bazel requires - the [X virtual framebuffer](https://www.x.org/archive/X11R7.6/doc/man/man1/Xvfb.1.xhtml). - -To install it, run: - -```posix-terminal -apt-get install xvfb -``` -Verify that `Xvfb` is installed correctly and is located at `/usr/bin/Xvfb` -by running: - -```posix-terminal -which Xvfb -``` -The output is the following: - -```{:.devsite-disable-click-to-copy} -/usr/bin/Xvfb -``` - -- **32-bit Libraries**. Some of the binaries used by the test infrastructure are - 32-bit, so on 64-bit machines, ensure that 32-bit binaries can be run. For - Ubuntu, install these 32-bit libraries: - -```posix-terminal -sudo apt-get install libc6:i386 libncurses5:i386 libstdc++6:i386 lib32z1 libbz2-1.0:i386 -``` - -## Getting started - -Here is a typical target dependency graph of an `android_instrumentation_test`: - -![The target dependency graph on an Android instrumentation test](/docs/images/android_instrumentation_test.png "Target dependency graph") - -**Figure 2.** Target dependency graph of an `android_instrumentation_test`. - - -### BUILD file - -The graph translates into a `BUILD` file like this: - -```python -android_instrumentation_test( - name = "my_test", - test_app = ":my_test_app", - target_device = "@android_test_support//tools/android/emulated_devices/generic_phone:android_23_x86", -) - -# Test app and library -android_binary( - name = "my_test_app", - instruments = ":my_app", - manifest = "AndroidTestManifest.xml", - deps = [":my_test_lib"], - # ... -) - -android_library( - name = "my_test_lib", - srcs = glob(["javatest/**/*.java"]), - deps = [ - ":my_app_lib", - "@maven//:androidx_test_core", - "@maven//:androidx_test_runner", - "@maven//:androidx_test_espresso_espresso_core", - ], - # ... -) - -# Target app and library under test -android_binary( - name = "my_app", - manifest = "AndroidManifest.xml", - deps = [":my_app_lib"], - # ... -) - -android_library( - name = "my_app_lib", - srcs = glob(["java/**/*.java"]), - deps = [ - "@maven//:androidx_appcompat_appcompat", - "@maven//:androidx_annotation_annotation", - ], - # ... -) -``` - -The main attributes of the rule `android_instrumentation_test` are: - -- `test_app`: An `android_binary` target. This target contains test code and - dependencies like Espresso and UIAutomator. The selected `android_binary` - target is required to specify an `instruments` attribute pointing to another - `android_binary`, which is the app under test. - -- `target_device`: An `android_device` target. This target describes the - specifications of the Android emulator which Bazel uses to create, launch and - run the tests. See the [section on choosing an Android - device](#android-device-target) for more information. - -The test app's `AndroidManifest.xml` must include [an `<instrumentation>` -tag](https://developer.android.com/studio/test/#configure_instrumentation_manifest_settings). -This tag must specify the attributes for the **package of the target app** and -the **fully qualified class name of the instrumentation test runner**, -`androidx.test.runner.AndroidJUnitRunner`. - -Here is an example `AndroidTestManifest.xml` for the test app (the package -names are illustrative placeholders): - -```xml -<?xml version="1.0" encoding="UTF-8"?> -<manifest xmlns:android="http://schemas.android.com/apk/res/android" - package="com.example.android.app.test" - android:versionCode="1" - android:versionName="1.0"> - - <instrumentation - android:name="androidx.test.runner.AndroidJUnitRunner" - android:targetPackage="com.example.android.app" /> - - <uses-sdk - android:minSdkVersion="16" - android:targetSdkVersion="27" /> - - <application> - <uses-library android:name="android.test.runner" /> - </application> - -</manifest> -``` - -### WORKSPACE dependencies - -In order to use this rule, your project needs to depend on these external -repositories: - -- `@androidsdk`: The Android SDK. Download this through Android Studio. - -- `@android_test_support`: Hosts the test runner, emulator launcher, and - `android_device` targets. 
You can find the [latest release - here](https://github.com/android/android-test/releases). - -Enable these dependencies by adding the following lines to your `WORKSPACE` -file: - -```python -# Android SDK -android_sdk_repository( - name = "androidsdk", - path = "/path/to/sdk", # or set ANDROID_HOME -) - -# Android Test Support -ATS_COMMIT = "$COMMIT_HASH" -http_archive( - name = "android_test_support", - strip_prefix = "android-test-%s" % ATS_COMMIT, - urls = ["https://github.com/android/android-test/archive/%s.tar.gz" % ATS_COMMIT], -) -load("@android_test_support//:repo.bzl", "android_test_repositories") -android_test_repositories() -``` - -## Maven dependencies - -For managing dependencies on Maven artifacts from repositories, such as [Google -Maven](https://maven.google.com) or [Maven Central](https://central.maven.org), -you should use a Maven resolver, such as -[`rules_jvm_external`](https://github.com/bazelbuild/rules_jvm_external). - -The rest of this page shows how to use `rules_jvm_external` to -resolve and fetch dependencies from Maven repositories. - -## Choosing an android_device target - -`android_instrumentation_test.target_device` specifies which Android device to -run the tests on. These `android_device` targets are defined in -[`@android_test_support`](https://github.com/google/android-testing-support-library/tree/master/tools/android/emulated_devices). - -For example, you can query for the sources for a particular target by running: - -```posix-terminal -bazel query --output=build @android_test_support//tools/android/emulated_devices/generic_phone:android_23_x86 -``` -Which results in output that looks similar to: - -```python -# .../external/android_test_support/tools/android/emulated_devices/generic_phone/BUILD:43:1 -android_device( - name = "android_23_x86", - visibility = ["//visibility:public"], - tags = ["requires-kvm"], - generator_name = "generic_phone", - generator_function = "make_device", - generator_location = "tools/android/emulated_devices/generic_phone/BUILD:43", - vertical_resolution = 800, - horizontal_resolution = 480, - ram = 2048, - screen_density = 240, - cache = 32, - vm_heap = 256, - system_image = "@android_test_support//tools/android/emulated_devices/generic_phone:android_23_x86_images", - default_properties = "@android_test_support//tools/android/emulated_devices/generic_phone:_android_23_x86_props", -) -``` - -The device target names use this template: - -``` -@android_test_support//tools/android/emulated_devices/<device_type>:<system>_<api_level>_x86_qemu2 -``` - -In order to launch an `android_device`, the `system_image` for the selected API -level is required. To download the system image, use Android SDK's -`tools/bin/sdkmanager`. For example, to download the system image for -`generic_phone:android_23_x86`, run `$sdk/tools/bin/sdkmanager -"system-images;android-23;default;x86"`. - -To see the full list of supported `android_device` targets in -`@android_test_support`, run the following command: - -```posix-terminal -bazel query 'filter("x86_qemu2$", kind(android_device, @android_test_support//tools/android/emulated_devices/...:*))' -``` - -Bazel currently supports x86-based emulators only. For better performance, use -`QEMU2` `android_device` targets instead of `QEMU` ones. - -## Running tests - -To run tests, add these lines to your project's -`<project root>/.bazelrc` file. 
- -``` -# Configurations for testing with Bazel -# Select a configuration by running -# `bazel test //my:target --config={headless, gui, local_device}` - -# Headless instrumentation tests (No GUI) -test:headless --test_arg=--enable_display=false - -# Graphical instrumentation tests. Ensure that $DISPLAY is set. -test:gui --test_env=DISPLAY -test:gui --test_arg=--enable_display=true - -# Testing with a local emulator or device. Ensure that `adb devices` lists the -# device. -# Run tests serially. -test:local_device --test_strategy=exclusive -# Use the local device broker type, as opposed to WRAPPED_EMULATOR. -test:local_device --test_arg=--device_broker_type=LOCAL_ADB_SERVER -# Uncomment and set $device_id if there is more than one connected device. -# test:local_device --test_arg=--device_serial_number=$device_id -``` - -Then, use one of the configurations to run tests: - -- `bazel test //my/test:target --config=gui` -- `bazel test //my/test:target --config=headless` -- `bazel test //my/test:target --config=local_device` - -Use __only one configuration__ or tests will fail. - -### Headless testing - -With `Xvfb`, it is possible to test with emulators without the graphical -interface, also known as headless testing. To disable the graphical interface -when running tests, pass the test argument `--enable_display=false` to Bazel: - -```posix-terminal -bazel test //my/test:target --test_arg=--enable_display=false -``` - -### GUI testing - -If the `$DISPLAY` environment variable is set, it's possible to enable the -graphical interface of the emulator while the test is running. To do this, pass -these test arguments to Bazel: - -```posix-terminal -bazel test //my/test:target --test_arg=--enable_display=true --test_env=DISPLAY -``` - -### Testing with a local emulator or device - -Bazel also supports testing directly on a locally launched emulator or connected -device. Pass the flags -`--test_strategy=exclusive` and -`--test_arg=--device_broker_type=LOCAL_ADB_SERVER` to enable local testing mode. -If there is more than one connected device, pass the flag -`--test_arg=--device_serial_number=$device_id` where `$device_id` is the id of -the device/emulator listed in `adb devices`. - -## Sample projects - -If you are looking for canonical project samples, see the [Android testing -samples](https://github.com/googlesamples/android-testing#experimental-bazel-support) -for projects using Espresso and UIAutomator. 
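As a concrete sketch combining the local-device flags above (the target label and device serial are placeholders):

```posix-terminal
# Run serially against one specific device listed by `adb devices`.
bazel test //my/test:target \
    --test_strategy=exclusive \
    --test_arg=--device_broker_type=LOCAL_ADB_SERVER \
    --test_arg=--device_serial_number=emulator-5554
```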
- -## Espresso setup - -If you write UI tests with [Espresso](https://developer.android.com/training/testing/espresso/) -(`androidx.test.espresso`), you can use the following snippets to set up your -Bazel workspace with the list of commonly used Espresso artifacts and their -dependencies: - -``` -androidx.test.espresso:espresso-core -androidx.test:rules -androidx.test:runner -javax.inject:javax.inject -org.hamcrest:java-hamcrest -junit:junit -``` - -One way to organize these dependencies is to create a `//:test_deps` shared -library in your `<project root>/BUILD.bazel` file: - -```python -java_library( - name = "test_deps", - visibility = ["//visibility:public"], - exports = [ - "@maven//:androidx_test_espresso_espresso_core", - "@maven//:androidx_test_rules", - "@maven//:androidx_test_runner", - "@maven//:javax_inject_javax_inject", - "@maven//:org_hamcrest_java_hamcrest", - "@maven//:junit_junit", - ], -) -``` - -Then, add the required dependencies in `<project root>/WORKSPACE`: - -```python -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") - -RULES_JVM_EXTERNAL_TAG = "2.8" -RULES_JVM_EXTERNAL_SHA = "79c9850690d7614ecdb72d68394f994fef7534b292c4867ce5e7dec0aa7bdfad" - -http_archive( - name = "rules_jvm_external", - strip_prefix = "rules_jvm_external-%s" % RULES_JVM_EXTERNAL_TAG, - sha256 = RULES_JVM_EXTERNAL_SHA, - url = "https://github.com/bazelbuild/rules_jvm_external/archive/%s.zip" % RULES_JVM_EXTERNAL_TAG, -) - -load("@rules_jvm_external//:defs.bzl", "maven_install") - -maven_install( - artifacts = [ - "junit:junit:4.12", - "javax.inject:javax.inject:1", - "org.hamcrest:java-hamcrest:2.0.0.0", - "androidx.test.espresso:espresso-core:3.1.1", - "androidx.test:rules:aar:1.1.1", - "androidx.test:runner:aar:1.1.1", - ], - repositories = [ - "https://maven.google.com", - "https://repo1.maven.org/maven2", - ], -) -``` - -Finally, in your test `android_binary` target, add the `//:test_deps` -dependency: - -```python -android_binary( - name = "my_test_app", - instruments = "//path/to:app", - deps = [ - "//:test_deps", - # ... - ], - # ... -) -``` - -## Tips - -### Reading test logs - -Use `--test_output=errors` to print logs for failing tests, or -`--test_output=all` to print all test output. If you're looking for an -individual test log, go to -`$PROJECT_ROOT/bazel-testlogs/path/to/InstrumentationTestTargetName`. - -For example, the test logs for the `BasicSample` canonical project are in -`bazel-testlogs/ui/espresso/BasicSample/BasicSampleInstrumentationTest`. To view -them, run: - -```posix-terminal -tree bazel-testlogs/ui/espresso/BasicSample/BasicSampleInstrumentationTest -``` -This results in the following output: - -```none - -$ tree bazel-testlogs/ui/espresso/BasicSample/BasicSampleInstrumentationTest -. 
-├── adb.409923.log -├── broker_logs -│   ├── aapt_binary.10.ok.txt -│   ├── aapt_binary.11.ok.txt -│   ├── adb.12.ok.txt -│   ├── adb.13.ok.txt -│   ├── adb.14.ok.txt -│   ├── adb.15.fail.txt -│   ├── adb.16.ok.txt -│   ├── adb.17.fail.txt -│   ├── adb.18.ok.txt -│   ├── adb.19.fail.txt -│   ├── adb.20.ok.txt -│   ├── adb.21.ok.txt -│   ├── adb.22.ok.txt -│   ├── adb.23.ok.txt -│   ├── adb.24.fail.txt -│   ├── adb.25.ok.txt -│   ├── adb.26.fail.txt -│   ├── adb.27.ok.txt -│   ├── adb.28.fail.txt -│   ├── adb.29.ok.txt -│   ├── adb.2.ok.txt -│   ├── adb.30.ok.txt -│   ├── adb.3.ok.txt -│   ├── adb.4.ok.txt -│   ├── adb.5.ok.txt -│   ├── adb.6.ok.txt -│   ├── adb.7.ok.txt -│   ├── adb.8.ok.txt -│   ├── adb.9.ok.txt -│   ├── android_23_x86.1.ok.txt -│   └── exec-1 -│   ├── adb-2.txt -│   ├── emulator-2.txt -│   └── mksdcard-1.txt -├── device_logcat -│   └── logcat1635880625641751077.txt -├── emulator_itCqtc.log -├── outputs.zip -├── pipe.log.txt -├── telnet_pipe.log.txt -└── tmpuRh4cy - ├── watchdog.err - └── watchdog.out - -4 directories, 41 files -``` - -### Reading emulator logs - -The emulator logs for `android_device` targets are stored in the `/tmp/` -directory with the name `emulator_xxxxx.log`, where `xxxxx` is a -randomly-generated sequence of characters. - -Use this command to find the latest emulator log: - -```posix-terminal -ls -1t /tmp/emulator_*.log | head -n 1 -``` - -### Testing against multiple API levels - -If you would like to test against multiple API levels, you can use a list -comprehension to create test targets for each API level. For example: - -```python -API_LEVELS = [ - "19", - "20", - "21", - "22", -] - -[android_instrumentation_test( - name = "my_test_%s" % API_LEVEL, - test_app = ":my_test_app", - target_device = "@android_test_support//tools/android/emulated_devices/generic_phone:android_%s_x86_qemu2" % API_LEVEL, -) for API_LEVEL in API_LEVELS] -``` - -## Known issues - -- [Forked adb server processes are not terminated after - tests](https://github.com/bazelbuild/bazel/issues/4853) -- While APK building works on all platforms (Linux, macOS, Windows), testing - only works on Linux. -- Even with `--config=local_adb`, users still need to specify - `android_instrumentation_test.target_device`. -- If using a local device or emulator, Bazel does not uninstall the APKs after - the test. Clean the packages by running this command: - -```posix-terminal -adb shell pm list -packages com.example.android.testing | cut -d ':' -f 2 | tr -d '\r' | xargs --L1 -t adb uninstall -``` diff --git a/6.5.0/docs/android-ndk.mdx b/6.5.0/docs/android-ndk.mdx deleted file mode 100644 index 137cfa3..0000000 --- a/6.5.0/docs/android-ndk.mdx +++ /dev/null @@ -1,419 +0,0 @@ ---- -title: 'Using the Android Native Development Kit with Bazel' ---- - - -_If you're new to Bazel, please start with the [Building Android with -Bazel](/tutorials/android-app) tutorial._ - -## Overview - -Bazel can run in many different build configurations, including several that use -the Android Native Development Kit (NDK) toolchain. This means that normal -`cc_library` and `cc_binary` rules can be compiled for Android directly within -Bazel. Bazel accomplishes this by using the `android_ndk_repository` repository -rule. - -## Prerequisites - -Please ensure that you have installed the Android SDK and NDK. - -To set up the SDK and NDK, add the following snippet to your `WORKSPACE`: - -```python -android_sdk_repository( - name = "androidsdk", # Required. Name *must* be "androidsdk". 
- path = "/path/to/sdk", # Optional. Can be omitted if `ANDROID_HOME` environment variable is set. -) - -android_ndk_repository( - name = "androidndk", # Required. Name *must* be "androidndk". - path = "/path/to/ndk", # Optional. Can be omitted if `ANDROID_NDK_HOME` environment variable is set. -) -``` - -For more information on the `android_ndk_repository` rule, see the [Build -Encyclopedia entry](/reference/be/android#android_ndk_repository). - -## Quick start - -To build C++ for Android, simply add `cc_library` dependencies to your -`android_binary` or `android_library` rules. - -For example, given the following `BUILD` file for an Android app: - -```python -# In /app/src/main/BUILD.bazel - -cc_library( - name = "jni_lib", - srcs = ["cpp/native-lib.cpp"], -) - -android_library( - name = "lib", - srcs = ["java/com/example/android/bazel/MainActivity.java"], - resource_files = glob(["res/**/*"]), - custom_package = "com.example.android.bazel", - manifest = "LibraryManifest.xml", - deps = [":jni_lib"], -) - -android_binary( - name = "app", - deps = [":lib"], - manifest = "AndroidManifest.xml", -) -``` - -This `BUILD` file results in the following target graph: - -![Example results](/docs/images/android_ndk.png "Build graph results") - -**Figure 1.** Build graph of Android project with cc_library dependencies. - -To build the app, simply run: - -```posix-terminal -bazel build //app/src/main:app -``` - -The `bazel build` command compiles the Java files, Android resource files, and -`cc_library` rules, and packages everything into an APK: - -```posix-terminal -$ zipinfo -1 bazel-bin/app/src/main/app.apk -nativedeps -lib/armeabi-v7a/libapp.so -classes.dex -AndroidManifest.xml -... -res/... -... -META-INF/CERT.SF -META-INF/CERT.RSA -META-INF/MANIFEST.MF -``` - -Bazel compiles all of the cc_libraries into a single shared object (`.so`) file, -targeted for the `armeabi-v7a` ABI by default. To change this or build for -multiple ABIs at the same time, see the section on [configuring the target -ABI](#configuring-target-abi). - -## Example setup - -This example is available in the [Bazel examples -repository](https://github.com/bazelbuild/examples/tree/master/android/ndk). - -In the `BUILD.bazel` file, three targets are defined with the `android_binary`, -`android_library`, and `cc_library` rules. - -The `android_binary` top-level target builds the APK. - -The `cc_library` target contains a single C++ source file with a JNI function -implementation: - -```c++ -#include <jni.h> -#include <string> - -extern "C" -JNIEXPORT jstring - -JNICALL -Java_com_example_android_bazel_MainActivity_stringFromJNI( - JNIEnv *env, - jobject /* this */) { - std::string hello = "Hello from C++"; - return env->NewStringUTF(hello.c_str()); -} -``` - -The `android_library` target specifies the Java sources, resource files, and the -dependency on a `cc_library` target. For this example, `MainActivity.java` loads -the shared object file `libapp.so`, and defines the method signature for the JNI -function: - -```java -public class MainActivity extends AppCompatActivity { - - static { - System.loadLibrary("app"); - } - - @Override - protected void onCreate(Bundle savedInstanceState) { - // ... - } - - public native String stringFromJNI(); - -} -``` - -Note: The name of the native library is derived from the name of the top -level `android_binary` target. In this example, it is `app`. - -## Configuring the STL - -To configure the C++ STL, use the flag `--android_crosstool_top`. 
- -```posix-terminal -bazel build //:app --android_crosstool_top=<target label> -``` - -The available STLs in `@androidndk` are: - -| STL | Target label | -|---------|-----------------------------------------| -| STLport | `@androidndk//:toolchain-stlport` | -| libc++ | `@androidndk//:toolchain-libcpp` | -| gnustl | `@androidndk//:toolchain-gnu-libstdcpp` | - -For r16 and below, the default STL is `gnustl`. For r17 and above, it is -`libc++`. For convenience, the target `@androidndk//:default_crosstool` is -aliased to the respective default STLs. - -Please note that from r18 onwards, [STLport and gnustl will be -removed](https://android.googlesource.com/platform/ndk/+/master/docs/Roadmap.md#ndk-r18), -making `libc++` the only STL in the NDK. - -See the [NDK -documentation](https://developer.android.com/ndk/guides/cpp-support) -for more information on these STLs. - -## Configuring the target ABI - -To configure the target ABI, use the `--fat_apk_cpu` flag as follows: - -```posix-terminal -bazel build //:app --fat_apk_cpu=<comma-separated list of ABIs> -``` - -By default, Bazel builds native Android code for `armeabi-v7a`. To build for x86 -(such as for emulators), pass `--fat_apk_cpu=x86`. To create a fat APK for multiple -architectures, you can specify multiple CPUs: `--fat_apk_cpu=armeabi-v7a,x86`. - -If more than one ABI is specified, Bazel will build an APK containing a shared -object for each ABI. - -Depending on the NDK revision and Android API level, the following ABIs are -available: - -| NDK revision | ABIs | -|--------------|-------------------------------------------------------------| -| 16 and lower | armeabi, armeabi-v7a, arm64-v8a, mips, mips64, x86, x86\_64 | -| 17 and above | armeabi-v7a, arm64-v8a, x86, x86\_64 | - -See [the NDK docs](https://developer.android.com/ndk/guides/abis.html) -for more information on these ABIs. - -Multi-ABI Fat APKs are not recommended for release builds since they increase -the size of the APK, but can be useful for development and QA builds. - -## Selecting a C++ standard - -Use the following flags to build according to a C++ standard: - -| C++ Standard | Flag | -|--------------|-------------------------| -| C++98 | Default, no flag needed | -| C++11 | `--cxxopt=-std=c++11` | -| C++14 | `--cxxopt=-std=c++14` | - -For example: - -```posix-terminal -bazel build //:app --cxxopt=-std=c++11 -``` - -Read more about passing compiler and linker flags with `--cxxopt`, `--copt`, and -`--linkopt` in the [User Manual](/docs/user-manual#cxxopt). - -Compiler and linker flags can also be specified as attributes in `cc_library` -using `copts` and `linkopts`. For example: - -```python -cc_library( - name = "jni_lib", - srcs = ["cpp/native-lib.cpp"], - copts = ["-std=c++11"], - linkopts = ["-ldl"], # link against libdl -) -``` - -## Integration with platforms and toolchains - -Bazel's configuration model is moving towards -[platforms](/docs/platforms) and -[toolchains](/docs/toolchains). If your -build uses the `--platforms` flag to select the architecture or operating system -to build for, you will need to pass the `--extra_toolchains` flag to Bazel in -order to use the NDK. - -For example, to integrate with the `android_arm64_cgo` toolchain provided by -the Go rules, pass `--extra_toolchains=@androidndk//:all` in addition to the -`--platforms` flag. 
-
-```posix-terminal
-bazel build //my/cc:lib \
-  --platforms=@io_bazel_rules_go//go/toolchain:android_arm64_cgo \
-  --extra_toolchains=@androidndk//:all
-```
-
-You can also register them directly in the `WORKSPACE` file:
-
-```python
-android_ndk_repository(name = "androidndk")
-register_toolchains("@androidndk//:all")
-```
-
-Registering these toolchains tells Bazel to look for them in the NDK `BUILD`
-file (for NDK 20) when resolving architecture and operating system constraints:
-
-```python
-toolchain(
-    name = "x86-clang8.0.7-libcpp_toolchain",
-    toolchain_type = "@bazel_tools//tools/cpp:toolchain_type",
-    target_compatible_with = [
-        "@platforms//os:android",
-        "@platforms//cpu:x86_32",
-    ],
-    toolchain = "@androidndk//:x86-clang8.0.7-libcpp",
-)
-
-toolchain(
-    name = "x86_64-clang8.0.7-libcpp_toolchain",
-    toolchain_type = "@bazel_tools//tools/cpp:toolchain_type",
-    target_compatible_with = [
-        "@platforms//os:android",
-        "@platforms//cpu:x86_64",
-    ],
-    toolchain = "@androidndk//:x86_64-clang8.0.7-libcpp",
-)
-
-toolchain(
-    name = "arm-linux-androideabi-clang8.0.7-v7a-libcpp_toolchain",
-    toolchain_type = "@bazel_tools//tools/cpp:toolchain_type",
-    target_compatible_with = [
-        "@platforms//os:android",
-        "@platforms//cpu:arm",
-    ],
-    toolchain = "@androidndk//:arm-linux-androideabi-clang8.0.7-v7a-libcpp",
-)
-
-toolchain(
-    name = "aarch64-linux-android-clang8.0.7-libcpp_toolchain",
-    toolchain_type = "@bazel_tools//tools/cpp:toolchain_type",
-    target_compatible_with = [
-        "@platforms//os:android",
-        "@platforms//cpu:aarch64",
-    ],
-    toolchain = "@androidndk//:aarch64-linux-android-clang8.0.7-libcpp",
-)
-```
-
-## How it works: introducing Android configuration transitions
-
-The `android_binary` rule can explicitly ask Bazel to build its dependencies in
-an Android-compatible configuration so that the Bazel build *just works* without
-any special flags, except for `--fat_apk_cpu` and `--android_crosstool_top` for
-ABI and STL configuration.
-
-Behind the scenes, this automatic configuration uses Android [configuration
-transitions](/rules/rules#configurations).
-
-A compatible rule, like `android_binary`, automatically changes the
-configuration of its dependencies to an Android configuration, so only
-Android-specific subtrees of the build are affected. Other parts of the build
-graph are processed using the top-level target configuration. It may even
-process a single target in both configurations, if there are paths through the
-build graph to support that.
-
-Once Bazel is in an Android-compatible configuration, either specified at the
-top level or due to a higher-level transition point, additional transition
-points encountered do not further modify the configuration.
-
-The only built-in location that triggers the transition to the Android
-configuration is `android_binary`'s `deps` attribute.
-
-Note: The `data` attribute of `android_binary` intentionally does *not*
-trigger the transition. Additionally, `android_local_test` and `android_library`
-intentionally do *not* trigger the transition at all.
-
-For example, if you try to build an `android_library` target with a `cc_library`
-dependency without any flags, you may encounter an error about a missing JNI
-header:
-
-```
-ERROR: <project>/app/src/main/BUILD.bazel:16:1: C++ compilation of rule '//app/src/main:jni_lib' failed (Exit 1)
-app/src/main/cpp/native-lib.cpp:1:10: fatal error: 'jni.h' file not found
-#include <jni.h>
-         ^~~~~~~
-1 error generated.
-Target //app/src/main:lib failed to build
-Use --verbose_failures to see the command lines of failed build steps.
-```
-
-Ideally, these automatic transitions should make Bazel do the right thing in the
-majority of cases. However, if the target on the Bazel command line is already
-below any of these transition rules, such as C++ developers testing a specific
-`cc_library`, then a custom `--crosstool_top` must be used.
-
-## Building a `cc_library` for Android without using `android_binary`
-
-To build a standalone `cc_binary` or `cc_library` for Android without using an
-`android_binary`, use the `--crosstool_top`, `--cpu` and `--host_crosstool_top`
-flags.
-
-For example:
-
-```posix-terminal
-bazel build //my/cc/jni:target \
-  --crosstool_top=@androidndk//:default_crosstool \
-  --cpu=<abi> \
-  --host_crosstool_top=@bazel_tools//tools/cpp:toolchain
-```
-
-In this example, the top-level `cc_library` and `cc_binary` targets are built
-using the NDK toolchain. However, building with only `--crosstool_top` would
-also cause Bazel's own host tools to be built with the NDK toolchain (and thus
-for Android), because the host toolchain is copied from the target toolchain.
-To work around this, set `--host_crosstool_top` to
-`@bazel_tools//tools/cpp:toolchain` to explicitly select the host's C++
-toolchain.
-
-With this approach, the entire build tree is affected.
-
-Note: All of the targets on the command line must be compatible with
-building for Android when specifying these flags, which may make it difficult to
-use [Bazel wild-cards](/docs/build#specifying-build-targets) like
-`/...` and `:all`.
-
-These flags can be put into a `bazelrc` config (one for each ABI), in
-`<project>/.bazelrc`:
-
-```
-common:android_x86 --crosstool_top=@androidndk//:default_crosstool
-common:android_x86 --cpu=x86
-common:android_x86 --host_crosstool_top=@bazel_tools//tools/cpp:toolchain
-
-common:android_armeabi-v7a --crosstool_top=@androidndk//:default_crosstool
-common:android_armeabi-v7a --cpu=armeabi-v7a
-common:android_armeabi-v7a --host_crosstool_top=@bazel_tools//tools/cpp:toolchain
-
-# In general
-common:android_<abi> --crosstool_top=@androidndk//:default_crosstool
-common:android_<abi> --cpu=<abi>
-common:android_<abi> --host_crosstool_top=@bazel_tools//tools/cpp:toolchain
-```
-
-Then, to build a `cc_library` for `x86` for example, run:
-
-```posix-terminal
-bazel build //my/cc/jni:target --config=android_x86
-```
-
-In general, use this method for low-level targets (like `cc_library`) or when
-you know exactly what you're building; rely on the automatic configuration
-transitions from `android_binary` for high-level targets where you're expecting
-to build a lot of targets you don't control.
diff --git a/6.5.0/docs/bazel-and-android.mdx b/6.5.0/docs/bazel-and-android.mdx
deleted file mode 100644
index b2d5d62..0000000
--- a/6.5.0/docs/bazel-and-android.mdx
+++ /dev/null
@@ -1,44 +0,0 @@
----
-title: 'Android and Bazel'
----
-
-
-This page contains resources that help you use Bazel with Android projects. It
-links to a tutorial, build rules, and other information specific to building
-Android projects with Bazel.
-
-## Getting started
-
-The following resources will help you work with Bazel on Android projects:
-
-* [Tutorial: Building an Android app](/tutorials/android-app). This
-  tutorial is a good place to start learning about Bazel commands and concepts,
-  and how to build Android apps with Bazel.
-* [Codelab: Building Android Apps with Bazel](https://developer.android.com/codelabs/bazel-android-intro#0).
- This codelab explains how to build Android apps with Bazel. - -## Features - -Bazel has Android rules for building and testing Android apps, integrating with -the SDK/NDK, and creating emulator images. There are also Bazel plugins for -Android Studio and IntelliJ. - -* [Android rules](/reference/be/android). The Build Encyclopedia describes the rules - for building and testing Android apps with Bazel. -* [Integration with Android Studio](/install/ide). Bazel is compatible with - Android Studio using the [Android Studio with Bazel](https://ij.bazel.build/) - plugin. -* [`mobile-install` for Android](/docs/mobile-install). Bazel's `mobile-install` - feature provides automated build-and-deploy functionality for building and - testing Android apps directly on Android devices and emulators. -* [Android instrumentation testing](/docs/android-instrumentation-test) on - emulators and devices. -* [Android NDK integration](/docs/android-ndk). Bazel supports compiling to - native code through direct NDK integration and the C++ rules. -* [Android build performance](/docs/android-build-performance). This page - provides information on optimizing build performance for Android apps. - -## Further reading - -* Integrating with dependencies from Google Maven and Maven Central with [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external). -* Learn [How Android Builds Work in Bazel](https://blog.bazel.build/2018/02/14/how-android-builds-work-in-bazel.html). diff --git a/6.5.0/docs/bazel-and-apple.mdx b/6.5.0/docs/bazel-and-apple.mdx deleted file mode 100644 index 53b1de5..0000000 --- a/6.5.0/docs/bazel-and-apple.mdx +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: 'Apple Apps and Bazel' ---- - - -This page contains resources that help you use Bazel to build macOS and iOS -projects. It links to a tutorial, build rules, and other information specific to -using Bazel to build and test for those platforms. - -## Working with Bazel - -The following resources will help you work with Bazel on macOS and iOS projects: - -* [Tutorial: Building an iOS app](/tutorials/ios-app) -* [Objective-C build rules](/reference/be/objective-c) -* [General Apple rules](https://github.com/bazelbuild/rules_apple) -* [Integration with Xcode](/install/ide) - -## Migrating to Bazel - -If you currently build your macOS and iOS projects with Xcode, follow the steps -in the migration guide to start building them with Bazel: - -* [Migrating from Xcode to Bazel](/migrate/xcode) - -## Apple apps and new rules - -**Note**: Creating new rules is for advanced build and test scenarios. -You do not need it when getting started with Bazel. - -The following modules, configuration fragments, and providers will help you -[extend Bazel's capabilities](/rules/concepts) -when building your macOS and iOS projects: - -* Modules: - - * [`apple_bitcode_mode`](/rules/lib/apple_bitcode_mode) - * [`apple_common`](/rules/lib/apple_common) - * [`apple_platform`](/rules/lib/apple_platform) - * [`apple_platform_type`](/rules/lib/apple_platform_type) - * [`apple_toolchain`](/rules/lib/apple_toolchain) - * [`XcodeVersionConfig`](/rules/lib/XcodeVersionConfig) - -* Configuration fragments: - - * [`apple`](/rules/lib/apple) - -* Providers: - - * [`ObjcProvider`](/rules/lib/ObjcProvider) - -## Xcode selection - -If your build requires Xcode, Bazel will select an appropriate version based on -the `--xcode_config` and `--xcode_version` flags. 
The `--xcode_config` consumes -the set of available Xcode versions and sets a default version if -`--xcode_version` is not passed. This default is overridden by the -`--xcode_version` flag, as long as it is set to an Xcode version that is -represented in the `--xcode_config` target. - -If you do not pass `--xcode_config`, Bazel will use the autogenerated -[`XcodeVersionConfig`](/rules/lib/XcodeVersionConfig) that represents the -Xcode versions available on your host machine. The default version is -the newest available Xcode version. This is appropriate for local execution. - -If you are performing remote builds, you should set `--xcode_config` to an -[`xcode_config`](/reference/be/objective-c#xcode_config) -target whose `versions` attribute is a list of remotely available -[`xcode_version`](/reference/be/objective-c#xcode_version) -targets, and whose `default` attribute is one of these -[`xcode_versions`](/reference/be/objective-c#xcode_version). - -If you are using dynamic execution, you should set `--xcode_config` to an -[`xcode_config`](/reference/be/objective-c#xcode_config) -target whose `remote_versions` attribute is an -[`available_xcodes`](/reference/be/workspace#available_xcodes) -target containing the remotely available Xcode versions, and whose -`local_versions` attribute is an -[`available_xcodes`](/reference/be/workspace#available_xcodes) -target containing the locally available Xcode versions. For `local_versions`, -you probably want to use the autogenerated -`@local_config_xcode//:host_available_xcodes`. The default Xcode version is the -newest mutually available version, if there is one, otherwise the default of the -`local_versions` target. If you prefer to use the `local_versions` default -as the default, you can pass `--experimental_prefer_mutual_default=false`. diff --git a/6.5.0/docs/bazel-and-cpp.mdx b/6.5.0/docs/bazel-and-cpp.mdx deleted file mode 100644 index 98a2ec8..0000000 --- a/6.5.0/docs/bazel-and-cpp.mdx +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: 'C++ and Bazel' ---- - - -This page contains resources that help you use Bazel with C++ projects. It links -to a tutorial, build rules, and other information specific to building C++ -projects with Bazel. - -## Working with Bazel - -The following resources will help you work with Bazel on C++ projects: - -* [Tutorial: Building a C++ project](/tutorials/cpp) -* [C++ common use cases](/tutorials/cpp-use-cases) -* [C/C++ rules](/reference/be/c-cpp) -* [C++ toolchain configuration](/docs/cc-toolchain-config-reference) -* [Tutorial: Configuring C++ toolchains](/tutorials/cc-toolchain-config) -* [Integrating with C++ rules](/docs/integrating-with-rules-cc) - -## Best practices - -In addition to [general Bazel best practices](/docs/best-practices), below are -best practices specific to C++ projects. - -### BUILD files - -Follow the guidelines below when creating your BUILD files: - -* Each `BUILD` file should contain one [`cc_library`](/reference/be/c-cpp#cc_library) - rule target per compilation unit in the directory. - -* You should granularize your C++ libraries as much as - possible to maximize incrementality and parallelize the build. - -* If there is a single source file in `srcs`, name the library the same as - that C++ file's name. This library should contain C++ file(s), any matching - header file(s), and the library's direct dependencies. 
For example:
-
-    ```python
-    cc_library(
-        name = "mylib",
-        srcs = ["mylib.cc"],
-        hdrs = ["mylib.h"],
-        deps = [":lower-level-lib"]
-    )
-    ```
-
-* Use one `cc_test` rule target per `cc_library` target in the file. Name the
-  target `[library-name]_test` and the source file `[library-name]_test.cc`.
-  For example, a test target for the `mylib` library target shown above would
-  look like this:
-
-    ```python
-    cc_test(
-        name = "mylib_test",
-        srcs = ["mylib_test.cc"],
-        deps = [":mylib"]
-    )
-    ```
-
-### Include paths
-
-Follow these guidelines for include paths:
-
-* Make all include paths relative to the workspace directory.
-
-* Use quoted includes (`#include "foo/bar/baz.h"`) for non-system headers, not
-  angle-brackets (`#include <foo/bar/baz.h>`).
-
-* Avoid using UNIX directory shortcuts, such as `.` (current directory) or `..`
-  (parent directory).
-
-* For legacy or `third_party` code that requires includes pointing outside the
-  project repository, such as external repository includes requiring a prefix,
-  use the [`include_prefix`](/reference/be/c-cpp#cc_library.include_prefix) and
-  [`strip_include_prefix`](/reference/be/c-cpp#cc_library.strip_include_prefix)
-  arguments on the `cc_library` rule target.
diff --git a/6.5.0/docs/bazel-and-java.mdx b/6.5.0/docs/bazel-and-java.mdx
deleted file mode 100644
index ce100d6..0000000
--- a/6.5.0/docs/bazel-and-java.mdx
+++ /dev/null
@@ -1,342 +0,0 @@
----
-title: 'Java and Bazel'
----
-
-
-This page contains resources that help you use Bazel with Java projects. It
-links to a tutorial, build rules, and other information specific to building
-Java projects with Bazel.
-
-## Working with Bazel
-
-The following resources will help you work with Bazel on Java projects:
-
-* [Tutorial: Building a Java Project](/tutorials/java)
-* [Java rules](/reference/be/java)
-
-## Migrating to Bazel
-
-If you currently build your Java projects with Maven, follow the steps in the
-migration guide to start building your Maven projects with Bazel:
-
-* [Migrating from Maven to Bazel](/migrate/maven)
-
-## Java versions
-
-There are two relevant versions of Java that are set with configuration flags:
-
-* the version of the source files in the repository
-* the version of the Java runtime that is used to execute the code and to test
-  it
-
-### Configuring the version of the source code in your repository
-
-Without additional configuration, Bazel assumes all Java source files in the
-repository are written in a single Java version. To specify the version of the
-sources in the repository, add `build --java_language_version={ver}` to the
-`.bazelrc` file, where `{ver}` is, for example, `11`. Bazel repository owners
-should set this flag so that Bazel and its users can reference the source code's
-Java version number. For more details, see the
-[Java language version flag](/docs/user-manual#java-language-version).
-
-### Configuring the JVM used to execute and test the code
-
-Bazel uses one JDK for compilation and another JVM to execute and test the code.
-
-By default, Bazel compiles the code using a JDK it downloads, and it executes
-and tests the code with the JVM installed on the local machine. Bazel searches
-for the JVM using `JAVA_HOME` or the path.
-
-The resulting binaries are compatible with the locally installed JVM and its
-system libraries, which means the resulting binaries depend on what is installed
-on the machine.
-
-To configure the JVM used for execution and testing, use the
-`--java_runtime_version` flag. The default value is `local_jdk`.
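-
-For example, a minimal `.bazelrc` sketch (the version number is a placeholder
-you would adjust for your repository) that sets both flags, making the
-`local_jdk` default explicit:
-
-```python
-# Hypothetical .bazelrc snippet: sources are written for Java 11, and the code
-# is executed and tested on the JVM found via JAVA_HOME (local_jdk).
-build --java_language_version=11
-build --java_runtime_version=local_jdk
-```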
-
-### Hermetic testing and compilation
-
-To create a hermetic compile, you can use the command line flag
-`--java_runtime_version=remotejdk_11`. The code is compiled for, executed, and
-tested on the JVM downloaded from a remote repository. For more details, see
-the [Java runtime version flag](/docs/user-manual#java_runtime_version).
-
-### Configuring compilation and execution of build tools in Java
-
-There is a second pair of JDK and JVM used to build and execute tools, which
-are used in the build process but are not in the build results. That JDK and
-JVM are controlled using `--tool_java_language_version` and
-`--tool_java_runtime_version`. Default values are `11` and `remotejdk_11`,
-respectively.
-
-#### Compiling using locally installed JDK
-
-By default, Bazel compiles using a remote JDK, because it overrides the JDK's
-internals. The compilation toolchains using the locally installed JDK are
-configured, but not used.
-
-To compile using the locally installed JDK, that is, to use the compilation
-toolchains for the local JDK, pass the additional flag
-`--extra_toolchains=@local_jdk//:all`. Note, however, that this may not work
-for JDKs from arbitrary vendors.
-
-For more details, see
-[configuring Java toolchains](#config-java-toolchains).
-
-## Best practices
-
-In addition to [general Bazel best practices](/docs/best-practices), below are
-best practices specific to Java projects.
-
-### Directory structure
-
-Prefer Maven's standard directory layout (sources under `src/main/java`, tests
-under `src/test/java`).
-
-### BUILD files
-
-Follow these guidelines when creating your `BUILD` files:
-
-* Use one `BUILD` file per directory containing Java sources, because this
-  improves build performance.
-
-* Every `BUILD` file should contain one `java_library` rule that looks like
-  this:
-
-    ```python
-    java_library(
-        name = "directory-name",
-        srcs = glob(["*.java"]),
-        deps = [...],
-    )
-    ```
-
-* The name of the library should be the name of the directory containing the
-  `BUILD` file. This makes the label of the library shorter: use
-  `"//package"` instead of `"//package:package"`.
-
-* The sources should be a non-recursive [`glob`](/reference/be/functions#glob) of
-  all Java files in the directory.
-
-* Tests should be in a matching directory under `src/test` and depend on this
-  library, as sketched below.
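-
-For example, a test `BUILD` file might look like the following. This is a
-hypothetical sketch: the package path, the test class name, and the
-test-framework dependency are illustrative placeholders, not part of the
-guidelines above:
-
-```python
-# In src/test/java/com/example/BUILD (hypothetical path)
-java_test(
-    name = "ExampleTest",
-    srcs = ["ExampleTest.java"],
-    test_class = "com.example.ExampleTest",
-    deps = [
-        "//src/main/java/com/example",  # the library under test
-        # ...plus your test framework, such as a JUnit artifact.
-    ],
-)
-```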
-
-## Creating new rules for advanced Java builds
-
-**Note**: Creating new rules is for advanced build and test scenarios. You do
-not need it when getting started with Bazel.
-
-The following modules, configuration fragments, and providers will help you
-[extend Bazel's capabilities](/rules/concepts) when building your Java
-projects:
-
-* Main Java provider: [`java_common`](/rules/lib/java_common)
-* Main Java module: [`JavaInfo`](/rules/lib/JavaInfo)
-* Configuration fragment: [`java`](/rules/lib/java)
-* Other modules:
-
-    * [`java_annotation_processing`](/rules/lib/java_annotation_processing)
-    * [`java_compilation_info`](/rules/lib/java_compilation_info)
-    * [`java_output`](/rules/lib/java_output)
-    * [`java_output_jars`](/rules/lib/java_output_jars)
-    * [`JavaRuntimeInfo`](/rules/lib/JavaRuntimeInfo)
-    * [`JavaToolchainInfo`](/rules/lib/JavaToolchainInfo)
-
-## Configuring the Java toolchains
-
-Bazel uses two types of Java toolchains:
-- execution, used to execute and test Java binaries, controlled with the
-  `--java_runtime_version` flag
-- compilation, used to compile Java sources, controlled with the
-  `--java_language_version` flag
-
-### Configuring additional execution toolchains
-
-An execution toolchain is the JVM, either local or from a repository, with some
-additional information about its version, operating system, and CPU
-architecture.
-
-Java execution toolchains may be added using the `local_java_repository` or
-`remote_java_repository` rules in the `WORKSPACE` file. Adding the rule makes
-the JVM available using a flag. When multiple definitions for the same
-operating system and CPU architecture are given, the first one is used.
-
-Example configuration of a local JVM:
-
-```python
-load("@bazel_tools//tools/jdk:local_java_repository.bzl", "local_java_repository")
-
-local_java_repository(
-    name = "additionaljdk",  # Can be used with --java_runtime_version=additionaljdk, --java_runtime_version=11 or --java_runtime_version=additionaljdk_11
-    version = 11,  # Optional, if not set it is autodetected
-    java_home = "/usr/lib/jdk-15/",  # Path to directory containing bin/java
-)
-```
-
-Example configuration of a remote JVM:
-
-```python
-load("@bazel_tools//tools/jdk:remote_java_repository.bzl", "remote_java_repository")
-
-remote_java_repository(
-    name = "openjdk_canary_linux_arm",
-    prefix = "openjdk_canary",  # Can be used with --java_runtime_version=openjdk_canary_11
-    version = "11",  # or --java_runtime_version=11
-    target_compatible_with = [  # Specifies constraints this JVM is compatible with
-        "@platforms//cpu:arm",
-        "@platforms//os:linux",
-    ],
-    urls = ...,  # Other parameters are from http_repository rule.
-    sha256 = ...,
-    strip_prefix = ...,
-)
-```
-
-### Configuring additional compilation toolchains
-
-The compilation toolchain is composed of a JDK and multiple tools that Bazel
-uses during compilation and that provide additional features, such as: Error
-Prone, strict Java dependencies, header compilation, Android desugaring,
-coverage instrumentation, and genclass handling for IDEs.
-
-JavaBuilder is a Bazel-bundled tool that executes compilation and provides the
-aforementioned features. Actual compilation is executed using the internal
-compiler of the JDK. The JDK used for compilation is specified by the
-`java_runtime` attribute of the toolchain.
-
-Bazel overrides some JDK internals. For JDK versions > 9, the
-`java.compiler` and `jdk.compiler` modules are patched using the JDK's flag
-`--patch_module`. For JDK version 8, the Java compiler is patched using the
-`-Xbootclasspath` flag.
-
-VanillaJavaBuilder is a second implementation of JavaBuilder, which does not
-modify the JDK's internal compiler and does not have any of the additional
-features.
-VanillaJavaBuilder is not used by any of the built-in toolchains.
-
-In addition to JavaBuilder, Bazel uses several other tools during compilation.
-
-The `ijar` tool processes `jar` files to remove everything except call
-signatures. The resulting jars are called header jars. They are used to improve
-compilation incrementality by only recompiling downstream dependents when the
-body of a function changes.
-
-The `singlejar` tool packs together multiple `jar` files into a single one.
-
-The `genclass` tool post-processes the output of a Java compilation and
-produces a `jar` containing only the class files for sources that were
-generated by annotation processors.
-
-The `JacocoRunner` tool runs Jacoco over instrumented files and outputs results
-in LCOV format.
-
-The `TestRunner` tool executes JUnit 4 tests in a controlled environment.
-
-You can reconfigure the compilation by adding the `default_java_toolchain`
-macro to a `BUILD` file and registering it either by adding a
-`register_toolchains` rule to the `WORKSPACE` file or by using the
-[`--extra_toolchains`](/docs/user-manual#extra-toolchains) flag.
-
-The toolchain is only used when the `source_version` attribute matches the
-value specified by the `--java_language_version` flag.
-
-Example toolchain configuration:
-
-```python
-load(
-    "@bazel_tools//tools/jdk:default_java_toolchain.bzl",
-    "default_java_toolchain", "DEFAULT_TOOLCHAIN_CONFIGURATION", "BASE_JDK9_JVM_OPTS", "DEFAULT_JAVACOPTS"
-)
-
-default_java_toolchain(
-    name = "repository_default_toolchain",
-    configuration = DEFAULT_TOOLCHAIN_CONFIGURATION,  # One of the predefined configurations
-    # Other parameters are from the java_toolchain rule:
-    java_runtime = "@bazel_tools//tools/jdk:remote_jdk11",  # JDK to use for compilation and execution of the toolchain's tools
-    jvm_opts = BASE_JDK9_JVM_OPTS + ["--enable_preview"],  # Additional JDK options
-    javacopts = DEFAULT_JAVACOPTS + ["--enable_preview"],  # Additional javac options
-    source_version = "9",
-)
-```
-
-The toolchain can be used with
-`--extra_toolchains=//:repository_default_toolchain_definition`
-or by adding `register_toolchains("//:repository_default_toolchain_definition")`
-to the workspace.
-
-Predefined configurations:
-
-- `DEFAULT_TOOLCHAIN_CONFIGURATION`: all features, supports JDK versions >= 9
-- `VANILLA_TOOLCHAIN_CONFIGURATION`: no additional features, supports JDKs of
-  arbitrary vendors.
-- `PREBUILT_TOOLCHAIN_CONFIGURATION`: same as default, but only uses prebuilt
-  tools (`ijar`, `singlejar`)
-- `NONPREBUILT_TOOLCHAIN_CONFIGURATION`: same as default, but all tools are
-  built from sources (this may be useful on operating systems with a different
-  libc)
-
-#### Configuring JVM and Java compiler flags
-
-You may configure JVM and javac flags either with command line flags or with
-`default_java_toolchain` attributes.
-
-The relevant command line flags are `--jvmopt`, `--host_jvmopt`, `--javacopt`,
-and `--host_javacopt`.
-
-The relevant `default_java_toolchain` attributes are `javacopts`, `jvm_opts`,
-`javabuilder_jvm_opts`, and `turbine_jvm_opts`.
-
-#### Package specific Java compiler flags configuration
-
-You can configure different Java compiler flags for specific source
-files using the `package_configuration` attribute of `default_java_toolchain`.
-Refer to the example below.
-
-```python
-load("@bazel_tools//tools/jdk:default_java_toolchain.bzl", "default_java_toolchain")
-
-# This is a convenience macro that inherits values from Bazel's default java_toolchain
-default_java_toolchain(
-    name = "toolchain",
-    package_configuration = [
-        ":error_prone",
-    ],
-    visibility = ["//visibility:public"],
-)
-
-# This associates a set of javac flags with a set of packages
-java_package_configuration(
-    name = "error_prone",
-    javacopts = [
-        "-Xep:MissingOverride:ERROR",
-    ],
-    packages = ["error_prone_packages"],
-)
-
-# This is a regular package_group, which is used to specify a set of packages to apply flags to
-package_group(
-    name = "error_prone_packages",
-    packages = [
-        "//foo/...",
-        "-//foo/bar/...",  # this is an exclusion
-    ],
-)
-```
-
-#### Multiple versions of Java source code in a single repository
-
-Bazel only supports compiling a single version of Java sources in a build.
-This means that when building a Java test or an application, all
-dependencies are built against the same Java version.
-
-However, separate builds may be executed using different flags.
-
-To make the task of using different flags easier, sets of flags for a specific
-version may be grouped with `.bazelrc` configs:
-
-```python
-build:java8 --java_language_version=8
-build:java8 --java_runtime_version=localjdk_8
-build:java11 --java_language_version=11
-build:java11 --java_runtime_version=remotejdk_11
-```
-
-These configs can be used with the `--config` flag, for example
-`bazel test --config=java11 //:java11_test`.
diff --git a/6.5.0/docs/bazel-and-javascript.mdx b/6.5.0/docs/bazel-and-javascript.mdx
deleted file mode 100644
index f0a58ab..0000000
--- a/6.5.0/docs/bazel-and-javascript.mdx
+++ /dev/null
@@ -1,15 +0,0 @@
----
-title: 'JavaScript and Bazel'
----
-
-
-This page contains resources that help you use Bazel with JavaScript projects.
-It links to build rules and other information specific to building JavaScript
-with Bazel.
-
-The following resources will help you work with Bazel on JavaScript projects:
-
-* [NodeJS rules](https://github.com/bazelbuild/rules_nodejs/)
-* [NodeJS rules API documentation](https://bazelbuild.github.io/rules_nodejs/)
-* [Angular rules](https://www.npmjs.com/package/@angular/bazel)
-* [Angular rules API documentation](https://angular.github.io/bazel-builds/)
diff --git a/6.5.0/docs/configurable-attributes.mdx b/6.5.0/docs/configurable-attributes.mdx
deleted file mode 100644
index 86e7117..0000000
--- a/6.5.0/docs/configurable-attributes.mdx
+++ /dev/null
@@ -1,1093 +0,0 @@
----
-title: 'Configurable Build Attributes'
----
-
-
-**_Configurable attributes_**, commonly known as [`select()`](
-/reference/be/functions#select), is a Bazel feature that lets users toggle the
-values of build rule attributes at the command line.
-
-This can be used, for example, for a multiplatform library that automatically
-chooses the appropriate implementation for the architecture, or for a
-feature-configurable binary that can be customized at build time.
-
-## Example
-
-```python
-# myapp/BUILD
-
-cc_binary(
-    name = "mybinary",
-    srcs = ["main.cc"],
-    deps = select({
-        ":arm_build": [":arm_lib"],
-        ":x86_debug_build": [":x86_dev_lib"],
-        "//conditions:default": [":generic_lib"],
-    }),
-)
-
-config_setting(
-    name = "arm_build",
-    values = {"cpu": "arm"},
-)
-
-config_setting(
-    name = "x86_debug_build",
-    values = {
-        "cpu": "x86",
-        "compilation_mode": "dbg",
-    },
-)
-```
-
-This declares a `cc_binary` that "chooses" its deps based on the flags at the
-command line. Specifically, `deps` becomes:
-
-| Command                                         | deps =             |
-|-------------------------------------------------|--------------------|
-| `bazel build //myapp:mybinary --cpu=arm`        | `[":arm_lib"]`     |
-| `bazel build //myapp:mybinary -c dbg --cpu=x86` | `[":x86_dev_lib"]` |
-| `bazel build //myapp:mybinary --cpu=ppc`        | `[":generic_lib"]` |
-| `bazel build //myapp:mybinary -c dbg --cpu=ppc` | `[":generic_lib"]` |
- -`select()` serves as a placeholder for a value that will be chosen based on -*configuration conditions*, which are labels referencing [`config_setting`](/reference/be/general#config_setting) -targets. By using `select()` in a configurable attribute, the attribute -effectively adopts different values when different conditions hold. - -Matches must be unambiguous: if multiple conditions match then either -* They all resolve to the same value. For example, when running on linux x86, this is unambiguous - `{"@platforms//os:linux": "Hello", "@platforms//cpu:x86_64": "Hello"}` because both branches resolve to "hello". -* One's `values` is a strict superset of all others'. For example, `values = {"cpu": "x86", "compilation_mode": "dbg"}` - is an unambiguous specialization of `values = {"cpu": "x86"}`. - -The built-in condition [`//conditions:default`](#default-condition) automatically matches when -nothing else does. - -While this example uses `deps`, `select()` works just as well on `srcs`, -`resources`, `cmd`, and most other attributes. Only a small number of attributes -are *non-configurable*, and these are clearly annotated. For example, -`config_setting`'s own -[`values`](/reference/be/general#config_setting.values) attribute is non-configurable. - -## `select()` and dependencies - -Certain attributes change the build parameters for all transitive dependencies -under a target. For example, `genrule`'s `tools` changes `--cpu` to the CPU of -the machine running Bazel (which, thanks to cross-compilation, may be different -than the CPU the target is built for). This is known as a -[configuration transition](/reference/glossary#transition). - -Given - -```python -#myapp/BUILD - -config_setting( - name = "arm_cpu", - values = {"cpu": "arm"}, -) - -config_setting( - name = "x86_cpu", - values = {"cpu": "x86"}, -) - -genrule( - name = "my_genrule", - srcs = select({ - ":arm_cpu": ["g_arm.src"], - ":x86_cpu": ["g_x86.src"], - }), - tools = select({ - ":arm_cpu": [":tool1"], - ":x86_cpu": [":tool2"], - }), -) - -cc_binary( - name = "tool1", - srcs = select({ - ":arm_cpu": ["armtool.cc"], - ":x86_cpu": ["x86tool.cc"], - }), -) -``` - -running - -```sh -$ bazel build //myapp:my_genrule --cpu=arm -``` - -on an `x86` developer machine binds the build to `g_arm.src`, `tool1`, and -`x86tool.cc`. Both of the `select`s attached to `my_genrule` use `my_genrule`'s -build parameters, which include `--cpu=arm`. The `tools` attribute changes -`--cpu` to `x86` for `tool1` and its transitive dependencies. The `select` on -`tool1` uses `tool1`'s build parameters, which include `--cpu=x86`. - -## Configuration conditions - -Each key in a configurable attribute is a label reference to a -[`config_setting`](/reference/be/general#config_setting) or -[`constraint_value`](/reference/be/platform#constraint_value). - -`config_setting` is just a collection of -expected command line flag settings. By encapsulating these in a target, it's -easy to maintain "standard" conditions users can reference from multiple places. - -`constraint_value` provides support for [multi-platform behavior](#platforms). - -### Built-in flags - -Flags like `--cpu` are built into Bazel: the build tool natively understands -them for all builds in all projects. These are specified with -[`config_setting`](/reference/be/general#config_setting)'s -[`values`](/reference/be/general#config_setting.values) attribute: - -```python -config_setting( - name = "meaningful_condition_name", - values = { - "flag1": "value1", - "flag2": "value2", - ... 
- }, -) -``` - -`flagN` is a flag name (without `--`, so `"cpu"` instead of `"--cpu"`). `valueN` -is the expected value for that flag. `:meaningful_condition_name` matches if -*every* entry in `values` matches. Order is irrelevant. - -`valueN` is parsed as if it was set on the command line. This means: - -* `values = { "compilation_mode": "opt" }` matches `bazel build -c opt` -* `values = { "force_pic": "true" }` matches `bazel build --force_pic=1` -* `values = { "force_pic": "0" }` matches `bazel build --noforce_pic` - -`config_setting` only supports flags that affect target behavior. For example, -[`--show_progress`](/docs/user-manual#show-progress) isn't allowed because -it only affects how Bazel reports progress to the user. Targets can't use that -flag to construct their results. The exact set of supported flags isn't -documented. In practice, most flags that "make sense" work. - -### Custom flags - -You can model your own project-specific flags with -[Starlark build settings][BuildSettings]. Unlike built-in flags, these are -defined as build targets, so Bazel references them with target labels. - -These are triggered with [`config_setting`](/reference/be/general#config_setting)'s -[`flag_values`](/reference/be/general#config_setting.flag_values) -attribute: - -```python -config_setting( - name = "meaningful_condition_name", - flag_values = { - "//myflags:flag1": "value1", - "//myflags:flag2": "value2", - ... - }, -) -``` - -Behavior is the same as for [built-in flags](#built-in-flags). See [here](https://github.com/bazelbuild/examples/tree/HEAD/configurations/select_on_build_setting) -for a working example. - -[`--define`](/reference/command-line-reference#flag--define) -is an alternative legacy syntax for custom flags (for example -`--define foo=bar`). This can be expressed either in the -[values](/reference/be/general#config_setting.values) attribute -(`values = {"define": "foo=bar"}`) or the -[define_values](/reference/be/general#config_setting.define_values) attribute -(`define_values = {"foo": "bar"}`). `--define` is only supported for backwards -compatibility. Prefer Starlark build settings whenever possible. - -`values`, `flag_values`, and `define_values` evaluate independently. The -`config_setting` matches if all values across all of them match. - -## The default condition - -The built-in condition `//conditions:default` matches when no other condition -matches. - -Because of the "exactly one match" rule, a configurable attribute with no match -and no default condition emits a `"no matching conditions"` error. This can -protect against silent failures from unexpected settings: - -```python -# myapp/BUILD - -config_setting( - name = "x86_cpu", - values = {"cpu": "x86"}, -) - -cc_library( - name = "x86_only_lib", - srcs = select({ - ":x86_cpu": ["lib.cc"], - }), -) -``` - -```sh -$ bazel build //myapp:x86_only_lib --cpu=arm -ERROR: Configurable attribute "srcs" doesn't match this configuration (would -a default condition help?). -Conditions checked: - //myapp:x86_cpu -``` - -For even clearer errors, you can set custom messages with `select()`'s -[`no_match_error`](#custom-error-messages) attribute. - -## Platforms - -While the ability to specify multiple flags on the command line provides -flexibility, it can also be burdensome to individually set each one every time -you want to build a target. - [Platforms](/docs/platforms) -let you consolidate these into simple bundles. 
-
-```python
-# myapp/BUILD
-
-sh_binary(
-    name = "my_rocks",
-    srcs = select({
-        ":basalt": ["pyroxene.sh"],
-        ":marble": ["calcite.sh"],
-        "//conditions:default": ["feldspar.sh"],
-    }),
-)
-
-config_setting(
-    name = "basalt",
-    constraint_values = [
-        ":black",
-        ":igneous",
-    ],
-)
-
-config_setting(
-    name = "marble",
-    constraint_values = [
-        ":white",
-        ":metamorphic",
-    ],
-)
-
-# constraint_setting acts as an enum type, and constraint_value as an enum value.
-constraint_setting(name = "color")
-constraint_value(name = "black", constraint_setting = "color")
-constraint_value(name = "white", constraint_setting = "color")
-constraint_setting(name = "texture")
-constraint_value(name = "smooth", constraint_setting = "texture")
-constraint_setting(name = "type")
-constraint_value(name = "igneous", constraint_setting = "type")
-constraint_value(name = "metamorphic", constraint_setting = "type")
-
-platform(
-    name = "basalt_platform",
-    constraint_values = [
-        ":black",
-        ":igneous",
-    ],
-)
-
-platform(
-    name = "marble_platform",
-    constraint_values = [
-        ":white",
-        ":smooth",
-        ":metamorphic",
-    ],
-)
-```
-
-The platform can be specified on the command line. It activates the
-`config_setting`s that contain a subset of the platform's `constraint_values`,
-allowing those `config_setting`s to match in `select()` expressions.
-
-For example, to set the `srcs` attribute of `my_rocks` to `calcite.sh`,
-you can run
-
-```sh
-bazel build //my_app:my_rocks --platforms=//myapp:marble_platform
-```
-
-Without platforms, this might look something like
-
-```sh
-bazel build //my_app:my_rocks --define color=white --define texture=smooth --define type=metamorphic
-```
-
-`select()` can also directly read `constraint_value`s:
-
-```python
-constraint_setting(name = "type")
-constraint_value(name = "igneous", constraint_setting = "type")
-constraint_value(name = "metamorphic", constraint_setting = "type")
-sh_binary(
-    name = "my_rocks",
-    srcs = select({
-        ":igneous": ["igneous.sh"],
-        ":metamorphic": ["metamorphic.sh"],
-    }),
-)
-```
-
-This saves the need for boilerplate `config_setting`s when you only need to
-check against single values.
-
-Platforms are still under development. See the
-[documentation](/concepts/platforms-intro) for details.
-
-## Combining `select()`s
-
-`select` can appear multiple times in the same attribute:
-
-```python
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"] +
-           select({
-               ":armeabi_mode": ["armeabi_src.sh"],
-               ":x86_mode": ["x86_src.sh"],
-           }) +
-           select({
-               ":opt_mode": ["opt_extras.sh"],
-               ":dbg_mode": ["dbg_extras.sh"],
-           }),
-)
-```
-
-Note: Some restrictions apply on what can be combined in the `select`s values:
-
- - Duplicate labels can appear in different paths of the same `select`.
- - Duplicate labels can *not* appear within the same path of a `select`.
- - Duplicate labels can *not* appear across multiple combined `select`s (no
-   matter what path).
-
-`select` cannot appear inside another `select`. If you need to nest `selects`
-and your attribute takes other targets as values, use an intermediate target:
-
-```python
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"],
-    deps = select({
-        ":armeabi_mode": [":armeabi_lib"],
-        ...
-    }),
-)
-
-sh_library(
-    name = "armeabi_lib",
-    srcs = select({
-        ":opt_mode": ["armeabi_with_opt.sh"],
-        ...
-    }),
-)
-```
-
-If you need a `select` to match when multiple conditions match, consider [AND
-chaining](#and-chaining).
- -## OR chaining - -Consider the following: - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1": [":standard_lib"], - ":config2": [":standard_lib"], - ":config3": [":standard_lib"], - ":config4": [":special_lib"], - }), -) -``` - -Most conditions evaluate to the same dep. But this syntax is hard to read and -maintain. It would be nice to not have to repeat `[":standard_lib"]` multiple -times. - -One option is to predefine the value as a BUILD variable: - -```python -STANDARD_DEP = [":standard_lib"] - -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1": STANDARD_DEP, - ":config2": STANDARD_DEP, - ":config3": STANDARD_DEP, - ":config4": [":special_lib"], - }), -) -``` - -This makes it easier to manage the dependency. But it still causes unnecessary -duplication. - -For more direct support, use one of the following: - -### `selects.with_or` - -The -[with_or](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectswith_or) -macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md) -module supports `OR`ing conditions directly inside a `select`: - -```python -load("@bazel_skylib//lib:selects.bzl", "selects") -``` - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = selects.with_or({ - (":config1", ":config2", ":config3"): [":standard_lib"], - ":config4": [":special_lib"], - }), -) -``` - -### `selects.config_setting_group` - - -The -[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group) -macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md) -module supports `OR`ing multiple `config_setting`s: - -```python -load("@bazel_skylib//lib:selects.bzl", "selects") -``` - - -```python -config_setting( - name = "config1", - values = {"cpu": "arm"}, -) -config_setting( - name = "config2", - values = {"compilation_mode": "dbg"}, -) -selects.config_setting_group( - name = "config1_or_2", - match_any = [":config1", ":config2"], -) -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1_or_2": [":standard_lib"], - "//conditions:default": [":other_lib"], - }), -) -``` - -Unlike `selects.with_or`, different targets can share `:config1_or_2` across -different attributes. - -It's an error for multiple conditions to match unless one is an unambiguous -"specialization" of the others or they all resolve to the same value. See [here](#configurable-build-example) for details. 
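-
-For example, the following sketch (the condition names are hypothetical) is
-legal even though both conditions can match at once, because `:x86_dbg`'s
-`values` is a strict superset of `:x86`'s, making it an unambiguous
-specialization:
-
-```python
-config_setting(
-    name = "x86",
-    values = {"cpu": "x86"},
-)
-
-# Matches strictly more settings than :x86, so when both match, the
-# specialization :x86_dbg is chosen without error.
-config_setting(
-    name = "x86_dbg",
-    values = {
-        "cpu": "x86",
-        "compilation_mode": "dbg",
-    },
-)
-```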
- -## AND chaining - -If you need a `select` branch to match when multiple conditions match, use the -[Skylib](https://github.com/bazelbuild/bazel-skylib) macro -[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group): - -```python -config_setting( - name = "config1", - values = {"cpu": "arm"}, -) -config_setting( - name = "config2", - values = {"compilation_mode": "dbg"}, -) -selects.config_setting_group( - name = "config1_and_2", - match_all = [":config1", ":config2"], -) -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1_and_2": [":standard_lib"], - "//conditions:default": [":other_lib"], - }), -) -``` - -Unlike OR chaining, existing `config_setting`s can't be directly `AND`ed -inside a `select`. You have to explicitly wrap them in a `config_setting_group`. - -## Custom error messages - -By default, when no condition matches, the target the `select()` is attached to -fails with the error: - -```sh -ERROR: Configurable attribute "deps" doesn't match this configuration (would -a default condition help?). -Conditions checked: - //tools/cc_target_os:darwin - //tools/cc_target_os:android -``` - -This can be customized with the [`no_match_error`](/reference/be/functions#select) -attribute: - -```python -cc_library( - name = "my_lib", - deps = select( - { - "//tools/cc_target_os:android": [":android_deps"], - "//tools/cc_target_os:windows": [":windows_deps"], - }, - no_match_error = "Please build with an Android or Windows toolchain", - ), -) -``` - -```sh -$ bazel build //myapp:my_lib -ERROR: Configurable attribute "deps" doesn't match this configuration: Please -build with an Android or Windows toolchain -``` - -## Rules compatibility - -Rule implementations receive the *resolved values* of configurable -attributes. For example, given: - -```python -# myapp/BUILD - -some_rule( - name = "my_target", - some_attr = select({ - ":foo_mode": [":foo"], - ":bar_mode": [":bar"], - }), -) -``` - -```sh -$ bazel build //myapp/my_target --define mode=foo -``` - -Rule implementation code sees `ctx.attr.some_attr` as `[":foo"]`. - -Macros can accept `select()` clauses and pass them through to native -rules. But *they cannot directly manipulate them*. For example, there's no way -for a macro to convert - -```python -select({"foo": "val"}, ...) -``` - -to - -```python -select({"foo": "val_with_suffix"}, ...) -``` - -This is for two reasons. - -First, macros that need to know which path a `select` will choose *cannot work* -because macros are evaluated in Bazel's [loading phase](/docs/build#loading), -which occurs before flag values are known. -This is a core Bazel design restriction that's unlikely to change any time soon. - -Second, macros that just need to iterate over *all* `select` paths, while -technically feasible, lack a coherent UI. Further design is necessary to change -this. - -## Bazel query and cquery - -Bazel [`query`](/docs/query-how-to) operates over Bazel's -[loading phase](/reference/glossary#loading-phase). -This means it doesn't know what command line flags a target uses since those -flags aren't evaluated until later in the build (in the -[analysis phase](/reference/glossary#analysis-phase)). -So it can't determine which `select()` branches are chosen. - -Bazel [`cquery`](/docs/cquery) operates after Bazel's analysis phase, so it has -all this information and can accurately resolve `select()`s. 
- -Consider: - -```python -load("@bazel_skylib//rules:common_settings.bzl", "string_flag") -``` -```python -# myapp/BUILD - -string_flag( - name = "dog_type", - build_setting_default = "cat" -) - -cc_library( - name = "my_lib", - deps = select({ - ":long": [":foo_dep"], - ":short": [":bar_dep"], - }), -) - -config_setting( - name = "long", - flag_values = {":dog_type": "dachshund"}, -) - -config_setting( - name = "short", - flag_values = {":dog_type": "pug"}, -) -``` - -`query` overapproximates `:my_lib`'s dependencies: - -```sh -$ bazel query 'deps(//myapp:my_lib)' -//myapp:my_lib -//myapp:foo_dep -//myapp:bar_dep -``` - -while `cquery` shows its exact dependencies: - -```sh -$ bazel cquery 'deps(//myapp:my_lib)' --//myapp:dog_type=pug -//myapp:my_lib -//myapp:bar_dep -``` - -## FAQ - -### Why doesn't select() work in macros? - -select() *does* work in rules! See [Rules compatibility](#rules-compatibility) for -details. - -The key issue this question usually means is that select() doesn't work in -*macros*. These are different than *rules*. See the -documentation on [rules](/rules/rules) and [macros](/rules/macros) -to understand the difference. -Here's an end-to-end example: - -Define a rule and macro: - -```python -# myapp/defs.bzl - -# Rule implementation: when an attribute is read, all select()s have already -# been resolved. So it looks like a plain old attribute just like any other. -def _impl(ctx): - name = ctx.attr.name - allcaps = ctx.attr.my_config_string.upper() # This works fine on all values. - print("My name is " + name + " with custom message: " + allcaps) - -# Rule declaration: -my_custom_bazel_rule = rule( - implementation = _impl, - attrs = {"my_config_string": attr.string()}, -) - -# Macro declaration: -def my_custom_bazel_macro(name, my_config_string): - allcaps = my_config_string.upper() # This line won't work with select(s). - print("My name is " + name + " with custom message: " + allcaps) -``` - -Instantiate the rule and macro: - -```python -# myapp/BUILD - -load("//myapp:defs.bzl", "my_custom_bazel_rule") -load("//myapp:defs.bzl", "my_custom_bazel_macro") - -my_custom_bazel_rule( - name = "happy_rule", - my_config_string = select({ - "//tools/target_cpu:x86": "first string", - "//tools/target_cpu:ppc": "second string", - }), -) - -my_custom_bazel_macro( - name = "happy_macro", - my_config_string = "fixed string", -) - -my_custom_bazel_macro( - name = "sad_macro", - my_config_string = select({ - "//tools/target_cpu:x86": "first string", - "//tools/target_cpu:ppc": "other string", - }), -) -``` - -Building fails because `sad_macro` can't process the `select()`: - -```sh -$ bazel build //myapp:all -ERROR: /myworkspace/myapp/BUILD:17:1: Traceback - (most recent call last): -File "/myworkspace/myapp/BUILD", line 17 -my_custom_bazel_macro(name = "sad_macro", my_config_stri..."})) -File "/myworkspace/myapp/defs.bzl", line 4, in - my_custom_bazel_macro -my_config_string.upper() -type 'select' has no method upper(). -ERROR: error loading package 'myapp': Package 'myapp' contains errors. -``` - -Building succeeds when you comment out `sad_macro`: - -```sh -# Comment out sad_macro so it doesn't mess up the build. -$ bazel build //myapp:all -DEBUG: /myworkspace/myapp/defs.bzl:5:3: My name is happy_macro with custom message: FIXED STRING. -DEBUG: /myworkspace/myapp/hi.bzl:15:3: My name is happy_rule with custom message: FIRST STRING. -``` - -This is impossible to change because *by definition* macros are evaluated before -Bazel reads the build's command line flags. 
That means there isn't enough
-information to evaluate select()s.
-
-Macros can, however, pass `select()`s as opaque blobs to rules:
-
-```python
-# myapp/defs.bzl
-
-def my_custom_bazel_macro(name, my_config_string):
-    print("Invoking macro " + name)
-    my_custom_bazel_rule(
-        name = name + "_as_target",
-        my_config_string = my_config_string,
-    )
-```
-
-```sh
-$ bazel build //myapp:sad_macro_less_sad
-DEBUG: /myworkspace/myapp/defs.bzl:23:3: Invoking macro sad_macro_less_sad.
-DEBUG: /myworkspace/myapp/defs.bzl:15:3: My name is sad_macro_less_sad with custom message: FIRST STRING.
-```
-
-### Why does select() always return true?
-
-Because *macros* (but not rules) by definition
-[can't evaluate `select()`s](#faq-select-macro), any attempt to do so
-usually produces an error:
-
-```sh
-ERROR: /myworkspace/myapp/BUILD:17:1: Traceback
-  (most recent call last):
-File "/myworkspace/myapp/BUILD", line 17
-my_custom_bazel_macro(name = "sad_macro", my_config_stri..."}))
-File "/myworkspace/myapp/defs.bzl", line 4, in
-  my_custom_bazel_macro
-my_config_string.upper()
-type 'select' has no method upper().
-```
-
-Booleans are a special case that fail silently, so you should be particularly
-vigilant with them:
-
-```sh
-$ cat myapp/defs.bzl
-def my_boolean_macro(boolval):
-  print("TRUE" if boolval else "FALSE")
-
-$ cat myapp/BUILD
-load("//myapp:defs.bzl", "my_boolean_macro")
-my_boolean_macro(
-    boolval = select({
-        "//tools/target_cpu:x86": True,
-        "//tools/target_cpu:ppc": False,
-    }),
-)
-
-$ bazel build //myapp:all --cpu=x86
-DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE.
-$ bazel build //myapp:all --cpu=ppc
-DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE.
-```
-
-This happens because macros don't understand the contents of `select()`.
-So what they're really evaluating is the `select()` object itself. According to
-[Pythonic](https://docs.python.org/release/2.5.2/lib/truth.html) design
-standards, all objects aside from a very small number of exceptions
-automatically return true.
-
-### Can I read select() like a dict?
-
-Macros [can't](#faq-select-macro) evaluate select(s) because macros evaluate
-before Bazel knows what the build's command line parameters are. Can they at
-least read the `select()`'s dictionary to, for example, add a suffix to each
-value?
-
-Conceptually this is possible, but it isn't yet a Bazel feature.
-What you *can* do today is prepare a straight dictionary, then feed it into a -`select()`: - -```sh -$ cat myapp/defs.bzl -def selecty_genrule(name, select_cmd): - for key in select_cmd.keys(): - select_cmd[key] += " WITH SUFFIX" - native.genrule( - name = name, - outs = [name + ".out"], - srcs = [], - cmd = "echo " + select(select_cmd + {"//conditions:default": "default"}) - + " > $@" - ) - -$ cat myapp/BUILD -selecty_genrule( - name = "selecty", - select_cmd = { - "//tools/target_cpu:x86": "x86 mode", - }, -) - -$ bazel build //testapp:selecty --cpu=x86 && cat bazel-genfiles/testapp/selecty.out -x86 mode WITH SUFFIX -``` - -If you'd like to support both `select()` and native types, you can do this: - -```sh -$ cat myapp/defs.bzl -def selecty_genrule(name, select_cmd): - cmd_suffix = "" - if type(select_cmd) == "string": - cmd_suffix = select_cmd + " WITH SUFFIX" - elif type(select_cmd) == "dict": - for key in select_cmd.keys(): - select_cmd[key] += " WITH SUFFIX" - cmd_suffix = select(select_cmd + {"//conditions:default": "default"}) - - native.genrule( - name = name, - outs = [name + ".out"], - srcs = [], - cmd = "echo " + cmd_suffix + "> $@", - ) -``` - -### Why doesn't select() work with bind()? - -Because [`bind()`](/reference/be/workspace#bind) is a WORKSPACE rule, not a BUILD rule. - -Workspace rules do not have a specific configuration, and aren't evaluated in -the same way as BUILD rules. Therefore, a `select()` in a `bind()` can't -actually evaluate to any specific branch. - -Instead, you should use [`alias()`](/reference/be/general#alias), with a `select()` in -the `actual` attribute, to perform this type of run-time determination. This -works correctly, since `alias()` is a BUILD rule, and is evaluated with a -specific configuration. - -You can even have a `bind()` target point to an `alias()`, if needed. - -```sh -$ cat WORKSPACE -workspace(name = "myapp") -bind(name = "openssl", actual = "//:ssl") -http_archive(name = "alternative", ...) -http_archive(name = "boringssl", ...) - -$ cat BUILD -config_setting( - name = "alt_ssl", - define_values = { - "ssl_library": "alternative", - }, -) - -alias( - name = "ssl", - actual = select({ - "//:alt_ssl": "@alternative//:ssl", - "//conditions:default": "@boringssl//:ssl", - }), -) -``` - -With this setup, you can pass `--define ssl_library=alternative`, and any target -that depends on either `//:ssl` or `//external:ssl` will see the alternative -located at `@alternative//:ssl`. - -### Why doesn't my select() choose what I expect? - -If `//myapp:foo` has a `select()` that doesn't choose the condition you expect, -use [cquery](/docs/cquery) and `bazel config` to debug: - -If `//myapp:foo` is the top-level target you're building, run: - -```sh -$ bazel cquery //myapp:foo -//myapp:foo (12e23b9a2b534a) -``` - -If you're building some other target `//bar` that depends on -//myapp:foo somewhere in its subgraph, run: - -```sh -$ bazel cquery 'somepath(//bar, //myapp:foo)' -//bar:bar (3ag3193fee94a2) -//bar:intermediate_dep (12e23b9a2b534a) -//myapp:foo (12e23b9a2b534a) -``` - -The `(12e23b9a2b534a)` next to `//myapp:foo` is a *hash* of the -configuration that resolves `//myapp:foo`'s `select()`. You can inspect its -values with `bazel config`: - -```sh -$ bazel config 12e23b9a2b534a -BuildConfigurationValue 12e23b9a2b534a -Fragment com.google.devtools.build.lib.analysis.config.CoreOptions { - cpu: darwin - compilation_mode: fastbuild - ... -} -Fragment com.google.devtools.build.lib.rules.cpp.CppOptions { - linkopt: [-Dfoo=bar] - ... -} -... 
-``` - -Then compare this output against the settings expected by each `config_setting`. - -`//myapp:foo` may exist in different configurations in the same build. See the -[cquery docs](/docs/cquery) for guidance on using `somepath` to get the right -one. - -Caution: To prevent restarting the Bazel server, invoke `bazel config` with the -same command line flags as the `bazel cquery`. The `config` command relies on -the configuration nodes from the still-running server of the previous command. - -### Why doesn't `select()` work with platforms? - -Bazel doesn't support configurable attributes checking whether a given platform -is the target platform because the semantics are unclear. - -For example: - -```py -platform( - name = "x86_linux_platform", - constraint_values = [ - "@platforms//cpu:x86", - "@platforms//os:linux", - ], -) - -cc_library( - name = "lib", - srcs = [...], - linkopts = select({ - ":x86_linux_platform": ["--enable_x86_optimizations"], - "//conditions:default": [], - }), -) -``` - -In this `BUILD` file, which `select()` should be used if the target platform has both the -`@platforms//cpu:x86` and `@platforms//os:linux` constraints, but is **not** the -`:x86_linux_platform` defined here? The author of the `BUILD` file and the user -who defined the separate platform may have different ideas. - -#### What should I do instead? - -Instead, define a `config_setting` that matches **any** platform with -these constraints: - -```py -config_setting( - name = "is_x86_linux", - constraint_values = [ - "@platforms//cpu:x86", - "@platforms//os:linux", - ], -) - -cc_library( - name = "lib", - srcs = [...], - linkopts = select({ - ":is_x86_linux": ["--enable_x86_optimizations"], - "//conditions:default": [], - }), -) -``` - -This process defines specific semantics, making it clearer to users what -platforms meet the desired conditions. - -#### What if I really, really want to `select` on the platform? - -If your build requirements specifically require checking the platform, you -can flip the value of the `--platforms` flag in a `config_setting`: - -```py -config_setting( - name = "is_specific_x86_linux_platform", - values = { - "platforms": ["//package:x86_linux_platform"], - }, -) - -cc_library( - name = "lib", - srcs = [...], - linkopts = select({ - ":is_specific_x86_linux_platform": ["--enable_x86_optimizations"], - "//conditions:default": [], - }), -) -``` - -The Bazel team doesn't endorse doing this; it overly constrains your build and -confuses users when the expected condition does not match. - -[BuildSettings]: /rules/config#user-defined-build-settings diff --git a/6.5.0/docs/sandboxing.mdx b/6.5.0/docs/sandboxing.mdx deleted file mode 100644 index 9d40351..0000000 --- a/6.5.0/docs/sandboxing.mdx +++ /dev/null @@ -1,216 +0,0 @@ ---- -title: 'Sandboxing' ---- - - -This article covers sandboxing in Bazel, installing `sandboxfs`, and debugging -your sandboxing environment. - -*Sandboxing* is a permission restricting strategy that isolates processes from -each other or from resources in a system. For Bazel, this means restricting file -system access. - -Bazel's file system sandbox runs processes in a working directory that only -contains known inputs, such that compilers and other tools don't see source -files they should not access, unless they know the absolute paths to them. - -Sandboxing doesn't hide the host environment in any way. Processes can freely -access all files on the file system. 
However, on platforms that support user -namespaces, processes can't modify any files outside their working directory. -This ensures that the build graph doesn't have hidden dependencies that could -affect the reproducibility of the build. - -More specifically, Bazel constructs an `execroot/` directory for each action, -which acts as the action's work directory at execution time. `execroot/` -contains all input files to the action and serves as the container for any -generated outputs. Bazel then uses an operating-system-provided technique, -containers on Linux and `sandbox-exec` on macOS, to constrain the action within -`execroot/`. - -## Reasons for sandboxing - -- Without action sandboxing, Bazel doesn't know if a tool uses undeclared - input files (files that are not explicitly listed in the dependencies of an - action). When one of the undeclared input files changes, Bazel still - believes that the build is up-to-date and won’t rebuild the action. This can - result in an incorrect incremental build. - -- Incorrect reuse of cache entries creates problems during remote caching. A - bad cache entry in a shared cache affects every developer on the project, - and wiping the entire remote cache is not a feasible solution. - -- Sandboxing mimics the behavior of remote execution — if a build works well - with sandboxing, it will likely also work with remote execution. By making - remote execution upload all necessary files (including local tools), you can - significantly reduce maintenance costs for compile clusters compared to - having to install the tools on every machine in the cluster every time you - want to try out a new compiler or make a change to an existing tool. - -## What sandbox strategy to use - -You can choose which kind of sandboxing to use, if any, with the -[strategy flags](user-manual.html#strategy-options). Using the `sandboxed` -strategy makes Bazel pick one of the sandbox implementations listed below, -preferring an OS-specific sandbox to the less hermetic generic one. -[Persistent workers](persistent-workers.md) run in a generic sandbox if you pass -the `--worker_sandboxing` flag. - -The `local` (a.k.a. `standalone`) strategy does not do any kind of sandboxing. -It simply executes the action's command line with the working directory set to -the execroot of your workspace. - -`processwrapper-sandbox` is a sandboxing strategy that does not require any -"advanced" features - it should work on any POSIX system out of the box. It -builds a sandbox directory consisting of symlinks that point to the original -source files, executes the action's command line with the working directory set -to this directory instead of the execroot, then moves the known output artifacts -out of the sandbox into the execroot and deletes the sandbox. This prevents the -action from accidentally using any input files that are not declared and from -littering the execroot with unknown output files. - -`linux-sandbox` goes one step further and builds on top of the -`processwrapper-sandbox`. Similar to what Docker does under the hood, it uses -Linux Namespaces (User, Mount, PID, Network and IPC namespaces) to isolate the -action from the host. That is, it makes the entire filesystem read-only except -for the sandbox directory, so the action cannot accidentally modify anything on -the host filesystem. This prevents situations like a buggy test accidentally rm --rf'ing your $HOME directory. Optionally, you can also prevent the action from -accessing the network. 
`linux-sandbox` uses PID namespaces to prevent the action
-from seeing any other processes and to reliably kill all processes (even daemons
-spawned by the action) at the end.
-
-`darwin-sandbox` is similar, but for macOS. It uses Apple's `sandbox-exec` tool
-to achieve roughly the same as the Linux sandbox.
-
-Neither `linux-sandbox` nor `darwin-sandbox` works in a "nested" scenario due
-to restrictions in the mechanisms provided by the operating systems. Because
-Docker also uses Linux namespaces for its container magic, you cannot easily
-run `linux-sandbox` inside a Docker container, unless you use
-`docker run --privileged`. On macOS, you cannot run `sandbox-exec` inside a
-process that's already being sandboxed. Thus, in these cases, Bazel
-automatically falls back to using `processwrapper-sandbox`.
-
-If you would rather get a build error (for example, to avoid accidentally
-building with a less strict execution strategy), explicitly modify the list of
-execution strategies that Bazel tries to use (for example, `bazel build
---spawn_strategy=worker,linux-sandbox`).
-
-Dynamic execution usually requires sandboxing for local execution. To opt out,
-pass the `--experimental_local_lockfree_output` flag. Dynamic execution silently
-sandboxes [persistent workers](persistent-workers.md).
-
-## Downsides to sandboxing
-
-- Sandboxing incurs extra setup and teardown cost. How big this cost is
-  depends on many factors, including the shape of the build and the
-  performance of the host OS. For Linux, sandboxed builds are rarely more than
-  a few percent slower. Setting `--reuse_sandbox_directories` can
-  mitigate the setup and teardown cost.
-
-- Sandboxing effectively disables any cache the tool may have. You can
-  mitigate this by using [persistent workers](persistent-workers.md), at
-  the cost of weaker sandbox guarantees.
-
-- [Multiplex workers](multiplex-worker.md) require explicit worker support
-  to be sandboxed. Workers that do not support multiplex sandboxing run as
-  singleplex workers under dynamic execution, which can cost extra memory.
-
-## sandboxfs
-
-`sandboxfs` is a FUSE file system that exposes an arbitrary view of the
-underlying file system without time penalties. Bazel uses `sandboxfs` to
-generate `execroot/` instantaneously for each action, avoiding the cost of
-issuing thousands of system calls. Note that further I/O within `execroot/` may
-be slower due to FUSE overhead.
-
-### Install sandboxfs
-
-Use the following steps to install `sandboxfs` and perform a Bazel build with
-it:
-
-**Download**
-
-[Download and install](https://github.com/bazelbuild/sandboxfs/blob/master/INSTALL.md)
-`sandboxfs` so that the `sandboxfs` binary ends up in your `PATH`.
-
-**Run `sandboxfs`**
-
-1. (macOS-only) [Install OSXFUSE](https://osxfuse.github.io/).
-2. (macOS-only) Run:
-
-   ```posix-terminal
-   sudo sysctl -w vfs.generic.osxfuse.tunables.allow_other=1
-   ```
-
-   You will need to do this after installation and after every reboot to ensure
-   core macOS system services work through sandboxfs.
-
-3. Run a Bazel build with `--experimental_use_sandboxfs`.
-
-   ```posix-terminal
-   bazel build <target> --experimental_use_sandboxfs
-   ```
-
-**Troubleshooting**
-
-If you see `local` instead of `darwin-sandbox` or `linux-sandbox` as an
-annotation for the actions that are executed, this may mean that sandboxing is
-disabled. Pass `--genrule_strategy=sandboxed --spawn_strategy=sandboxed` to
-enable it.
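-
-If you want sandboxing enforced on every build instead of passing flags on each
-command line, a minimal `.bazelrc` sketch (assuming `linux-sandbox` is
-available on your machine) is:
-
-```
-# .bazelrc: always sandbox, and fail instead of silently falling back
-# to a less strict strategy when linux-sandbox is unavailable.
-build --spawn_strategy=linux-sandbox
-build --worker_sandboxing
-```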
- -## Debugging - -Follow the strategies below to debug issues with sandboxing. - -### Deactivated namespaces - -On some platforms, such as -[Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/) -cluster nodes or Debian, user namespaces are deactivated by default due to -security concerns. If the `/proc/sys/kernel/unprivileged_userns_clone` file -exists and contains a 0, you can activate user namespaces by running: - -```posix-terminal - sudo sysctl kernel.unprivileged_userns_clone=1 -``` - -### Rule execution failures - -The sandbox may fail to execute rules because of the system setup. If you see a -message like `namespace-sandbox.c:633: execvp(argv[0], argv): No such file or -directory`, try to deactivate the sandbox with `--strategy=Genrule=local` for -genrules, and `--spawn_strategy=local` for other rules. - -### Detailed debugging for build failures - -If your build failed, use `--verbose_failures` and `--sandbox_debug` to make -Bazel show the exact command it ran when your build failed, including the part -that sets up the sandbox. - -Example error message: - -``` -ERROR: path/to/your/project/BUILD:1:1: compilation of rule -'//path/to/your/project:all' failed: - -Sandboxed execution failed, which may be legitimate (such as a compiler error), -or due to missing dependencies. To enter the sandbox environment for easier -debugging, run the following command in parentheses. On command failure, a bash -shell running inside the sandbox will then automatically be spawned - -namespace-sandbox failed: error executing command - (cd /some/path && \ - exec env - \ - LANG=en_US \ - PATH=/some/path/bin:/bin:/usr/bin \ - PYTHONPATH=/usr/local/some/path \ - /some/path/namespace-sandbox @/sandbox/root/path/this-sandbox-name.params -- - /some/path/to/your/some-compiler --some-params some-target) -``` - -You can now inspect the generated sandbox directory and see which files Bazel -created and run the command again to see how it behaves. - -Note that Bazel does not delete the sandbox directory when you use -`--sandbox_debug`. Unless you are actively debugging, you should disable -`--sandbox_debug` because it fills up your disk over time. diff --git a/6.5.0/extending/aspects.mdx b/6.5.0/extending/aspects.mdx deleted file mode 100644 index 4abaa0d..0000000 --- a/6.5.0/extending/aspects.mdx +++ /dev/null @@ -1,397 +0,0 @@ ---- -title: 'Aspects' ---- - - -This page explains the basics and benefits of using aspects and provides -simple and advanced examples. - -Aspects allow augmenting build dependency graphs with additional information -and actions. Some typical scenarios when aspects can be useful: - -* IDEs that integrate Bazel can use aspects to collect information about the - project. -* Code generation tools can leverage aspects to execute on their inputs in - *target-agnostic* manner. As an example, `BUILD` files can specify a hierarchy - of [protobuf](https://developers.google.com/protocol-buffers/) library - definitions, and language-specific rules can use aspects to attach - actions generating protobuf support code for a particular language. - -## Aspect basics - -`BUILD` files provide a description of a project’s source code: what source -files are part of the project, what artifacts (_targets_) should be built from -those files, what the dependencies between those files are, etc. Bazel uses -this information to perform a build, that is, it figures out the set of actions -needed to produce the artifacts (such as running compiler or linker) and -executes those actions. 
Bazel accomplishes this by constructing a _dependency -graph_ between targets and visiting this graph to collect those actions. - -Consider the following `BUILD` file: - -```python -java_library(name = 'W', ...) -java_library(name = 'Y', deps = [':W'], ...) -java_library(name = 'Z', deps = [':W'], ...) -java_library(name = 'Q', ...) -java_library(name = 'T', deps = [':Q'], ...) -java_library(name = 'X', deps = [':Y',':Z'], runtime_deps = [':T'], ...) -``` - -This `BUILD` file defines a dependency graph shown in the following figure: - -![Build graph](/rules/build-graph.png "Build graph") - -**Figure 1.** `BUILD` file dependency graph. - -Bazel analyzes this dependency graph by calling an implementation function of -the corresponding [rule](/rules/rules) (in this case "java_library") for every -target in the above example. Rule implementation functions generate actions that -build artifacts, such as `.jar` files, and pass information, such as locations -and names of those artifacts, to the reverse dependencies of those targets in -[providers](/rules/rules#providers). - -Aspects are similar to rules in that they have an implementation function that -generates actions and returns providers. However, their power comes from -the way the dependency graph is built for them. An aspect has an implementation -and a list of all attributes it propagates along. Consider an aspect A that -propagates along attributes named "deps". This aspect can be applied to -a target X, yielding an aspect application node A(X). During its application, -aspect A is applied recursively to all targets that X refers to in its "deps" -attribute (all attributes in A's propagation list). - -Thus a single act of applying aspect A to a target X yields a "shadow graph" of -the original dependency graph of targets shown in the following figure: - -![Build Graph with Aspect](/rules/build-graph-aspects.png "Build graph with aspects") - -**Figure 2.** Build graph with aspects. - -The only edges that are shadowed are the edges along the attributes in -the propagation set, thus the `runtime_deps` edge is not shadowed in this -example. An aspect implementation function is then invoked on all nodes in -the shadow graph similar to how rule implementations are invoked on the nodes -of the original graph. - -## Simple example - -This example demonstrates how to recursively print the source files for a -rule and all of its dependencies that have a `deps` attribute. It shows -an aspect implementation, an aspect definition, and how to invoke the aspect -from the Bazel command line. - -```python -def _print_aspect_impl(target, ctx): - # Make sure the rule has a srcs attribute. - if hasattr(ctx.rule.attr, 'srcs'): - # Iterate through the files that make up the sources and - # print their paths. - for src in ctx.rule.attr.srcs: - for f in src.files.to_list(): - print(f.path) - return [] - -print_aspect = aspect( - implementation = _print_aspect_impl, - attr_aspects = ['deps'], -) -``` - -Let's break the example up into its parts and examine each one individually. - -### Aspect definition - -```python -print_aspect = aspect( - implementation = _print_aspect_impl, - attr_aspects = ['deps'], -) -``` -Aspect definitions are similar to rule definitions, and defined using -the [`aspect`](/rules/lib/globals#aspect) function. - -Just like a rule, an aspect has an implementation function which in this case is -``_print_aspect_impl``. - -``attr_aspects`` is a list of rule attributes along which the aspect propagates. 
-
-In this case, the aspect will propagate along the ``deps`` attribute of the
-rules that it is applied to.
-
-Another common argument for `attr_aspects` is `['*']` which would propagate the
-aspect to all attributes of a rule.
-
-### Aspect implementation
-
-```python
-def _print_aspect_impl(target, ctx):
-    # Make sure the rule has a srcs attribute.
-    if hasattr(ctx.rule.attr, 'srcs'):
-        # Iterate through the files that make up the sources and
-        # print their paths.
-        for src in ctx.rule.attr.srcs:
-            for f in src.files.to_list():
-                print(f.path)
-    return []
-```
-
-Aspect implementation functions are similar to rule implementation
-functions. They return [providers](/rules/rules#providers), can generate
-[actions](/rules/rules#actions), and take two arguments:
-
-* `target`: the [target](/rules/lib/Target) the aspect is being applied to.
-* `ctx`: a [`ctx`](/rules/lib/ctx) object that can be used to access attributes
-  and generate outputs and actions.
-
-The implementation function can access the attributes of the target rule via
-[`ctx.rule.attr`](/rules/lib/ctx#rule). It can examine providers that are
-provided by the target to which it is applied (via the `target` argument).
-
-Aspects are required to return a list of providers. In this example, the aspect
-does not provide anything, so it returns an empty list.
-
-### Invoking the aspect using the command line
-
-The simplest way to apply an aspect is from the command line using the
-[`--aspects`](/reference/command-line-reference#flag--aspects)
-argument. Assuming the aspect above is defined in a file named `print.bzl`,
-this:
-
-```bash
-bazel build //MyExample:example --aspects print.bzl%print_aspect
-```
-
-would apply the `print_aspect` to the target `example` and all of the
-target rules that are accessible recursively via the `deps` attribute.
-
-The `--aspects` flag takes one argument, which is a specification of the aspect
-in the format `<extension file label>%<aspect top-level name>`.
-
-## Advanced example
-
-The following example demonstrates using an aspect from a target rule
-that counts files in targets, potentially filtering them by extension.
-It shows how to use a provider to return values, how to use parameters to pass
-an argument into an aspect implementation, and how to invoke an aspect from a rule.
-
-`file_count.bzl` file:
-
-```python
-FileCountInfo = provider(
-    fields = {
-        'count' : 'number of files'
-    }
-)
-
-def _file_count_aspect_impl(target, ctx):
-    count = 0
-    # Make sure the rule has a srcs attribute.
-    if hasattr(ctx.rule.attr, 'srcs'):
-        # Iterate through the sources counting files
-        for src in ctx.rule.attr.srcs:
-            for f in src.files.to_list():
-                if ctx.attr.extension == '*' or ctx.attr.extension == f.extension:
-                    count = count + 1
-    # Get the counts from our dependencies.
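-    # (ctx.rule.attr.deps here are the aspect-applied Target objects, so each
-    # one carries the FileCountInfo provider returned by this same aspect.)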
- for dep in ctx.rule.attr.deps: - count = count + dep[FileCountInfo].count - return [FileCountInfo(count = count)] - -file_count_aspect = aspect( - implementation = _file_count_aspect_impl, - attr_aspects = ['deps'], - attrs = { - 'extension' : attr.string(values = ['*', 'h', 'cc']), - } -) - -def _file_count_rule_impl(ctx): - for dep in ctx.attr.deps: - print(dep[FileCountInfo].count) - -file_count_rule = rule( - implementation = _file_count_rule_impl, - attrs = { - 'deps' : attr.label_list(aspects = [file_count_aspect]), - 'extension' : attr.string(default = '*'), - }, -) -``` - -`BUILD.bazel` file: - -```python -load('//:file_count.bzl', 'file_count_rule') - -cc_library( - name = 'lib', - srcs = [ - 'lib.h', - 'lib.cc', - ], -) - -cc_binary( - name = 'app', - srcs = [ - 'app.h', - 'app.cc', - 'main.cc', - ], - deps = ['lib'], -) - -file_count_rule( - name = 'file_count', - deps = ['app'], - extension = 'h', -) -``` - -### Aspect definition - -```python -file_count_aspect = aspect( - implementation = _file_count_aspect_impl, - attr_aspects = ['deps'], - attrs = { - 'extension' : attr.string(values = ['*', 'h', 'cc']), - } -) -``` - -This example shows how the aspect propagates through the ``deps`` attribute. - -``attrs`` defines a set of attributes for an aspect. Public aspect attributes -are of type ``string`` and are called parameters. Parameters must have a``values`` -attribute specified on them. This example has a parameter called ``extension`` -that is allowed to have '``*``', '``h``', or '``cc``' as a value. - -Parameter values for the aspect are taken from the string attribute with the same -name of the rule requesting the aspect (see the definition of ``file_count_rule``). -Aspects with parameters cannot be used via the command line because there is no -syntax to define the parameters. - -Aspects are also allowed to have private attributes of types ``label`` or -``label_list``. Private label attributes can be used to specify dependencies on -tools or libraries that are needed for actions generated by aspects. There is not -a private attribute defined in this example, but the following code snippet -demonstrates how you could pass in a tool to an aspect: - -```python -... - attrs = { - '_protoc' : attr.label( - default = Label('//tools:protoc'), - executable = True, - cfg = "exec" - ) - } -... -``` - -### Aspect implementation - -```python -FileCountInfo = provider( - fields = { - 'count' : 'number of files' - } -) - -def _file_count_aspect_impl(target, ctx): - count = 0 - # Make sure the rule has a srcs attribute. - if hasattr(ctx.rule.attr, 'srcs'): - # Iterate through the sources counting files - for src in ctx.rule.attr.srcs: - for f in src.files.to_list(): - if ctx.attr.extension == '*' or ctx.attr.extension == f.extension: - count = count + 1 - # Get the counts from our dependencies. - for dep in ctx.rule.attr.deps: - count = count + dep[FileCountInfo].count - return [FileCountInfo(count = count)] -``` - -Just like a rule implementation function, an aspect implementation function -returns a struct of providers that are accessible to its dependencies. - -In this example, the ``FileCountInfo`` is defined as a provider that has one -field ``count``. It is best practice to explicitly define the fields of a -provider using the ``fields`` attribute. - -The set of providers for an aspect application A(X) is the union of providers -that come from the implementation of a rule for target X and from the -implementation of aspect A. 
The providers that a rule implementation propagates -are created and frozen before aspects are applied and cannot be modified from an -aspect. It is an error if a target and an aspect that is applied to it each -provide a provider with the same type, with the exceptions of -[`OutputGroupInfo`](/rules/lib/OutputGroupInfo) -(which is merged, so long as the -rule and aspect specify different output groups) and -[`InstrumentedFilesInfo`](/rules/lib/InstrumentedFilesInfo) -(which is taken from the aspect). This means that aspect implementations may -never return [`DefaultInfo`](/rules/lib/DefaultInfo). - -The parameters and private attributes are passed in the attributes of the -``ctx``. This example references the ``extension`` parameter and determines -what files to count. - -For returning providers, the values of attributes along which -the aspect is propagated (from the `attr_aspects` list) are replaced with -the results of an application of the aspect to them. For example, if target -X has Y and Z in its deps, `ctx.rule.attr.deps` for A(X) will be [A(Y), A(Z)]. -In this example, ``ctx.rule.attr.deps`` are Target objects that are the -results of applying the aspect to the 'deps' of the original target to which -the aspect has been applied. - -In the example, the aspect accesses the ``FileCountInfo`` provider from the -target's dependencies to accumulate the total transitive number of files. - -### Invoking the aspect from a rule - -```python -def _file_count_rule_impl(ctx): - for dep in ctx.attr.deps: - print(dep[FileCountInfo].count) - -file_count_rule = rule( - implementation = _file_count_rule_impl, - attrs = { - 'deps' : attr.label_list(aspects = [file_count_aspect]), - 'extension' : attr.string(default = '*'), - }, -) -``` - -The rule implementation demonstrates how to access the ``FileCountInfo`` -via the ``ctx.attr.deps``. - -The rule definition demonstrates how to define a parameter (``extension``) -and give it a default value (``*``). Note that having a default value that -was not one of '``cc``', '``h``', or '``*``' would be an error due to the -restrictions placed on the parameter in the aspect definition. - -### Invoking an aspect through a target rule - -```python -load('//:file_count.bzl', 'file_count_rule') - -cc_binary( - name = 'app', -... -) - -file_count_rule( - name = 'file_count', - deps = ['app'], - extension = 'h', -) -``` - -This demonstrates how to pass the ``extension`` parameter into the aspect -via the rule. Since the ``extension`` parameter has a default value in the -rule implementation, ``extension`` would be considered an optional parameter. - -When the ``file_count`` target is built, our aspect will be evaluated for -itself, and all of the targets accessible recursively via ``deps``. diff --git a/6.5.0/extending/concepts.mdx b/6.5.0/extending/concepts.mdx deleted file mode 100644 index 1e242cf..0000000 --- a/6.5.0/extending/concepts.mdx +++ /dev/null @@ -1,103 +0,0 @@ ---- -title: 'Extension Overview' ---- - - - -This page describes how to extend the BUILD language using macros -and rules. - -Bazel extensions are files ending in `.bzl`. Use a -[load statement](/concepts/build-files#load) to import a symbol from an extension. - -Before learning the more advanced concepts, first: - -* Read about the [Starlark language](/rules/language), used in both the - `BUILD` and `.bzl` files. - -* Learn how you can [share variables](/rules/tutorial-sharing-variables) - between two `BUILD` files. 
-
-## Macros and rules
-
-A [macro](/rules/macros) is a function that instantiates rules. It is useful when a
-`BUILD` file is getting too repetitive or too complex, as it allows you to reuse
-some code. The function is evaluated as soon as the `BUILD` file is read. After
-the evaluation of the `BUILD` file, Bazel has little information about macros:
-if your macro generates a `genrule`, Bazel will behave as if you wrote the
-`genrule`. As a result, `bazel query` will only list the generated `genrule`.
-
-A [rule](/rules/rules) is more powerful than a macro. It can access Bazel
-internals and have full control over what is going on. It may, for example,
-pass information to other rules.
-
-If you want to reuse simple logic, start with a macro. If a macro becomes
-complex, it is often a good idea to make it a rule. Support for a new language
-is typically added with a rule. Rules are for advanced users, and most
-users will never have to write one; they will only load and call existing
-rules.
-
-## Evaluation model
-
-A build consists of three phases.
-
-* **Loading phase**. First, load and evaluate all extensions and all `BUILD`
-  files that are needed for the build. The execution of the `BUILD` files simply
-  instantiates rules (each time a rule is called, it gets added to a graph).
-  This is where macros are evaluated.
-
-* **Analysis phase**. The code of the rules is executed (their `implementation`
-  function), and actions are instantiated. An action describes how to generate
-  a set of outputs from a set of inputs, such as "run gcc on hello.c and get
-  hello.o". You must list explicitly which files will be generated before
-  executing the actual commands. In other words, the analysis phase takes
-  the graph generated by the loading phase and generates an action graph.
-
-* **Execution phase**. Actions are executed when at least one of their outputs is
-  required. If a file is missing or if a command fails to generate one output,
-  the build fails. Tests are also run during this phase.
-
-Bazel uses parallelism to read, parse and evaluate the `.bzl` files and `BUILD`
-files. A file is read at most once per build and the result of the evaluation is
-cached and reused. A file is evaluated only once all its dependencies (`load()`
-statements) have been resolved. By design, loading a `.bzl` file has no visible
-side effects: it only defines values and functions.
-
-Bazel tries to be clever: it uses dependency analysis to know which files must
-be loaded, which rules must be analyzed, and which actions must be executed. For
-example, if a rule generates actions that you don't need for the current build,
-they will not be executed.
-
-## Creating extensions
-
-* [Create your first macro](/rules/tutorial-creating-a-macro) in order to
-  reuse some code. Then [learn more about macros](/rules/macros) and
-  [using them to create "custom verbs"](/rules/tutorial-custom-verbs).
-
-* [Follow the rules tutorial](/rules/rules-tutorial) to get started with rules.
-  Next, you can read more about the [rules concepts](/rules/rules).
-
-The two links below will be very useful when writing your own extensions. Keep
-them within reach:
-
-* The [API reference](/rules/lib/starlark-overview)
-
-* [Examples](https://github.com/bazelbuild/examples/tree/master/rules)
-
-## Going further
-
-In addition to [macros](/rules/macros) and [rules](/rules/rules), you may want to write
-[aspects](/rules/aspects) and [repository rules](/rules/repository_rules). 
- -* Use [Buildifier](https://github.com/bazelbuild/buildtools) - consistently to format and lint your code. - -* Follow the [`.bzl` style guide](/rules/bzl-style). - -* [Test](/rules/testing) your code. - -* [Generate documentation](https://skydoc.bazel.build/) to help your users. - -* [Optimize the performance](/rules/performance) of your code. - -* [Deploy](/rules/deploying) your extensions to other people. diff --git a/6.5.0/extending/depsets.mdx b/6.5.0/extending/depsets.mdx deleted file mode 100644 index 1454c37..0000000 --- a/6.5.0/extending/depsets.mdx +++ /dev/null @@ -1,345 +0,0 @@ ---- -title: 'Depsets' ---- - - -[Depsets](/rules/lib/depset) are a specialized data structure for efficiently -collecting data across a target’s transitive dependencies. They are an essential -element of rule processing. - -The defining feature of depset is its time- and space-efficient union operation. -The depset constructor accepts a list of elements ("direct") and a list of other -depsets ("transitive"), and returns a depset representing a set containing all the -direct elements and the union of all the transitive sets. Conceptually, the -constructor creates a new graph node that has the direct and transitive nodes -as its successors. Depsets have a well-defined ordering semantics, based on -traversal of this graph. - -Example uses of depsets include: - -* Storing the paths of all object files for a program’s libraries, which can - then be passed to a linker action through a provider. - -* For an interpreted language, storing the transitive source files that are - included in an executable's runfiles. - -## Description and operations - -Conceptually, a depset is a directed acyclic graph (DAG) that typically looks -similar to the target graph. It is constructed from the leaves up to the root. -Each target in a dependency chain can add its own contents on top of the -previous without having to read or copy them. - -Each node in the DAG holds a list of direct elements and a list of child nodes. -The contents of the depset are the transitive elements, such as the direct elements -of all the nodes. A new depset can be created using the -[depset](/rules/lib/globals#depset) constructor: it accepts a list of direct -elements and another list of child nodes. - -```python -s = depset(["a", "b", "c"]) -t = depset(["d", "e"], transitive = [s]) - -print(s) # depset(["a", "b", "c"]) -print(t) # depset(["d", "e", "a", "b", "c"]) -``` - -To retrieve the contents of a depset, use the -[to_list()](/rules/lib/depset#to_list) method. It returns a list of all transitive -elements, not including duplicates. There is no way to directly inspect the -precise structure of the DAG, although this structure does affect the order in -which the elements are returned. - -```python -s = depset(["a", "b", "c"]) - -print("c" in s.to_list()) # True -print(s.to_list() == ["a", "b", "c"]) # True -``` - -The allowed items in a depset are restricted, just as the allowed keys in -dictionaries are restricted. In particular, depset contents may not be mutable. - -Depsets use reference equality: a depset is equal to itself, but unequal to any -other depset, even if they have the same contents and same internal structure. - -```python -s = depset(["a", "b", "c"]) -t = s -print(s == t) # True - -t = depset(["a", "b", "c"]) -print(s == t) # False - -d = {} -d[s] = None -d[t] = None -print(len(d)) # 2 -``` - -To compare depsets by their contents, convert them to sorted lists. 
- -```python -s = depset(["a", "b", "c"]) -t = depset(["c", "b", "a"]) -print(sorted(s.to_list()) == sorted(t.to_list())) # True -``` - -There is no ability to remove elements from a depset. If this is needed, you -must read out the entire contents of the depset, filter the elements you want to -remove, and reconstruct a new depset. This is not particularly efficient. - -```python -s = depset(["a", "b", "c"]) -t = depset(["b", "c"]) - -# Compute set difference s - t. Precompute t.to_list() so it's not done -# in a loop, and convert it to a dictionary for fast membership tests. -t_items = {e: None for e in t.to_list()} -diff_items = [x for x in s.to_list() if x not in t_items] -# Convert back to depset if it's still going to be used for union operations. -s = depset(diff_items) -print(s) # depset(["a"]) -``` - -### Order - -The `to_list` operation performs a traversal over the DAG. The kind of traversal -depends on the *order* that was specified at the time the depset was -constructed. It is useful for Bazel to support multiple orders because sometimes -tools care about the order of their inputs. For example, a linker action may -need to ensure that if `B` depends on `A`, then `A.o` comes before `B.o` on the -linker’s command line. Other tools might have the opposite requirement. - -Three traversal orders are supported: `postorder`, `preorder`, and -`topological`. The first two work exactly like [tree -traversals](https://en.wikipedia.org/wiki/Tree_traversal#Depth-first_search) -except that they operate on DAGs and skip already visited nodes. The third order -works as a topological sort from root to leaves, essentially the same as -preorder except that shared children are listed only after all of their parents. -Preorder and postorder operate as left-to-right traversals, but note that within -each node direct elements have no order relative to children. For topological -order, there is no left-to-right guarantee, and even the -all-parents-before-child guarantee does not apply in the case that there are -duplicate elements in different nodes of the DAG. - -```python -# This demonstrates different traversal orders. - -def create(order): - cd = depset(["c", "d"], order = order) - gh = depset(["g", "h"], order = order) - return depset(["a", "b", "e", "f"], transitive = [cd, gh], order = order) - -print(create("postorder").to_list()) # ["c", "d", "g", "h", "a", "b", "e", "f"] -print(create("preorder").to_list()) # ["a", "b", "e", "f", "c", "d", "g", "h"] -``` - -```python -# This demonstrates different orders on a diamond graph. - -def create(order): - a = depset(["a"], order=order) - b = depset(["b"], transitive = [a], order = order) - c = depset(["c"], transitive = [a], order = order) - d = depset(["d"], transitive = [b, c], order = order) - return d - -print(create("postorder").to_list()) # ["a", "b", "c", "d"] -print(create("preorder").to_list()) # ["d", "b", "a", "c"] -print(create("topological").to_list()) # ["d", "b", "c", "a"] -``` - -Due to how traversals are implemented, the order must be specified at the time -the depset is created with the constructor’s `order` keyword argument. If this -argument is omitted, the depset has the special `default` order, in which case -there are no guarantees about the order of any of its elements (except that it -is deterministic). - -## Full example - -This example is available at -[https://github.com/bazelbuild/examples/tree/main/rules/depsets](https://github.com/bazelbuild/examples/tree/main/rules/depsets). 
-
-Suppose there is a hypothetical interpreted language Foo. In order to build
-each `foo_binary` you need to know all the `*.foo` files that it directly or
-indirectly depends on.
-
-```python
-# //depsets:BUILD
-
-load(":foo.bzl", "foo_library", "foo_binary")
-
-# Our hypothetical Foo compiler.
-py_binary(
-    name = "foocc",
-    srcs = ["foocc.py"],
-)
-
-foo_library(
-    name = "a",
-    srcs = ["a.foo", "a_impl.foo"],
-)
-
-foo_library(
-    name = "b",
-    srcs = ["b.foo", "b_impl.foo"],
-    deps = [":a"],
-)
-
-foo_library(
-    name = "c",
-    srcs = ["c.foo", "c_impl.foo"],
-    deps = [":a"],
-)
-
-foo_binary(
-    name = "d",
-    srcs = ["d.foo"],
-    deps = [":b", ":c"],
-)
-```
-
-```python
-# //depsets:foocc.py
-
-# "Foo compiler" that just concatenates its inputs to form its output.
-import sys
-
-if __name__ == "__main__":
-    # argv[1] is the output file; the remaining arguments are inputs.
-    assert len(sys.argv) >= 2
-    output = open(sys.argv[1], "wt")
-    for path in sys.argv[2:]:
-        input = open(path, "rt")
-        output.write(input.read())
-```
-
-Here, the transitive sources of the binary `d` are all of the `*.foo` files in
-the `srcs` fields of `a`, `b`, `c`, and `d`. In order for the `foo_binary`
-target to know about any file besides `d.foo`, the `foo_library` targets need to
-pass them along in a provider. Each library receives the providers from its own
-dependencies, adds its own immediate sources, and passes on a new provider with
-the augmented contents. The `foo_binary` rule does the same, except that instead
-of returning a provider, it uses the complete list of sources to construct a
-command line for an action.
-
-Here’s a complete implementation of the `foo_library` and `foo_binary` rules.
-
-```python
-# //depsets/foo.bzl
-
-# A provider with one field, transitive_sources.
-FooFiles = provider(fields = ["transitive_sources"])
-
-def get_transitive_srcs(srcs, deps):
-    """Obtain the source files for a target and its transitive dependencies.
-
-    Args:
-      srcs: a list of source files
-      deps: a list of targets that are direct dependencies
-    Returns:
-      a collection of the transitive sources
-    """
-    return depset(
-        srcs,
-        transitive = [dep[FooFiles].transitive_sources for dep in deps])
-
-def _foo_library_impl(ctx):
-    trans_srcs = get_transitive_srcs(ctx.files.srcs, ctx.attr.deps)
-    return [FooFiles(transitive_sources = trans_srcs)]
-
-foo_library = rule(
-    implementation = _foo_library_impl,
-    attrs = {
-        "srcs": attr.label_list(allow_files = True),
-        "deps": attr.label_list(),
-    },
-)
-
-def _foo_binary_impl(ctx):
-    foocc = ctx.executable._foocc
-    out = ctx.outputs.out
-    trans_srcs = get_transitive_srcs(ctx.files.srcs, ctx.attr.deps)
-    srcs_list = trans_srcs.to_list()
-    ctx.actions.run(executable = foocc,
-                    arguments = [out.path] + [src.path for src in srcs_list],
-                    inputs = srcs_list + [foocc],
-                    outputs = [out])
-
-foo_binary = rule(
-    implementation = _foo_binary_impl,
-    attrs = {
-        "srcs": attr.label_list(allow_files = True),
-        "deps": attr.label_list(),
-        "_foocc": attr.label(default = Label("//depsets:foocc"),
-                             allow_files = True, executable = True, cfg = "host"),
-    },
-    outputs = {"out": "%{name}.out"},
-)
-```
-
-You can test this by copying these files into a fresh package, renaming the
-labels appropriately, creating the source `*.foo` files with dummy content, and
-building the `d` target.
-
-
-## Performance
-
-To see the motivation for using depsets, consider what would happen if
-`get_transitive_srcs()` collected its sources in a list. 
-
-```python
-def get_transitive_srcs(srcs, deps):
-    trans_srcs = []
-    for dep in deps:
-        trans_srcs += dep[FooFiles].transitive_sources
-    trans_srcs += srcs
-    return trans_srcs
-```
-
-This does not take into account duplicates, so the source files for `a`
-will appear twice on the command line and twice in the contents of the output
-file.
-
-An alternative is using a general set, which can be simulated by a
-dictionary where the keys are the elements and all the keys map to `True`.
-
-```python
-def get_transitive_srcs(srcs, deps):
-    trans_srcs = {}
-    for dep in deps:
-        for file in dep[FooFiles].transitive_sources:
-            trans_srcs[file] = True
-    for file in srcs:
-        trans_srcs[file] = True
-    return trans_srcs
-```
-
-This gets rid of the duplicates, but it makes the order of the command line
-arguments (and therefore the contents of the files) unspecified, although still
-deterministic.
-
-Moreover, both approaches are asymptotically worse than the depset-based
-approach. Consider the case where there is a long chain of dependencies on
-Foo libraries. Processing every rule requires copying all of the transitive
-sources that came before it into a new data structure. This means that the
-time and space cost for analyzing an individual library or binary target
-is proportional to its own height in the chain. For a chain of length n,
-foolib_1 ← foolib_2 ← … ← foolib_n, the overall cost is effectively O(n^2).
-
-Generally speaking, depsets should be used whenever you are accumulating
-information through your transitive dependencies. This helps ensure that
-your build scales well as your target graph grows deeper.
-
-Finally, it’s important to not retrieve the contents of the depset
-unnecessarily in rule implementations. One call to `to_list()`
-at the end in a binary rule is fine, since the overall cost is just O(n). It’s
-when many non-terminal targets try to call `to_list()` that quadratic behavior
-occurs.
-
-For more information about using depsets efficiently, see the [performance](/rules/performance) page.
-
-## API Reference
-
-See the [depset API reference](/rules/lib/depset) for more details.
-
diff --git a/6.5.0/extending/exec-groups.mdx b/6.5.0/extending/exec-groups.mdx
deleted file mode 100644
index c942855..0000000
--- a/6.5.0/extending/exec-groups.mdx
+++ /dev/null
@@ -1,238 +0,0 @@
----
-title: 'Execution Groups'
----
-
-
-Execution groups allow for multiple execution platforms within a single target.
-Each execution group has its own [toolchain](/docs/toolchains) dependencies and
-performs its own [toolchain resolution](/docs/toolchains#toolchain-resolution).
-
-## Background
-
-Execution groups allow the rule author to define sets of actions, each with a
-potentially different execution platform. Multiple execution platforms can allow
-actions to execute differently, for example compiling an iOS app on a remote
-(Linux) worker and then linking/code signing on a local Mac worker.
-
-Being able to define groups of actions also reduces reliance on action
-mnemonics as a proxy for specifying actions. Mnemonics are not guaranteed to be
-unique and can only reference a single action. This is especially helpful in
-allocating extra resources to specific memory- and processing-intensive actions
-like linking in C++ builds without over-allocating to less demanding tasks.
-
-## Defining execution groups
-
-During rule definition, rule authors can
-[declare](/rules/lib/globals#exec_group)
-a set of execution groups. 
On each execution group, the rule author can specify
-everything needed to select an execution platform for that execution group,
-namely any constraints via `exec_compatible_with` and toolchain types via
-`toolchains`.
-
-```python
-# foo.bzl
-my_rule = rule(
-    _impl,
-    exec_groups = {
-        "link": exec_group(
-            exec_compatible_with = ["@platforms//os:linux"],
-            toolchains = ["//foo:toolchain_type"],
-        ),
-        "test": exec_group(
-            toolchains = ["//foo_tools:toolchain_type"],
-        ),
-    },
-    attrs = {
-        "_compiler": attr.label(cfg = config.exec("link")),
-    },
-)
-```
-
-In the code snippet above, you can see that tool dependencies can also specify
-a transition for an exec group using the
-[`cfg`](/rules/lib/attr#label)
-attribute param and the
-[`config`](/rules/lib/config)
-module. The module exposes an `exec` function that takes a single string
-parameter: the name of the exec group for which the dependency should be
-built.
-
-As on native rules, the `test` execution group is present by default on Starlark
-test rules.
-
-### Execution group inheritance
-
-In addition to defining its own constraints and toolchains, a new execution
-group can declare that it wants to inherit from the rule's default execution
-group, by passing the `copy_from_rule = True` parameter. It is an error to set
-`copy_from_rule` to true and to also pass `exec_compatible_with` or
-`toolchains`.
-
-An execution group that inherits from the default execution group copies
-constraints, toolchains, and execution properties from the default. This
-includes constraints and execution properties set on the target level, not just
-those specified by the rule itself. In other words, given the following:
-
-```python
-# foo.bzl
-my_rule = rule(
-    _impl,
-    exec_groups = {
-        "copied": exec_group(
-            copy_from_rule = True,
-            # This will inherit exec_compatible_with and toolchains.
-            # Setting them here directly would be an error, however.
-        ),
-    },
-    toolchains = ["//foo_tools:toolchain_type"],
-    exec_compatible_with = ["@platforms//os:linux"],
-)
-
-# BUILD
-
-my_rule(
-    name = "demo",
-    exec_compatible_with = [":local_constraint"],
-)
-```
-
-The `copied` execution group for the configured target `demo` will include all
-of:
-- `//foo_tools:toolchain_type`
-- `@platforms//os:linux`
-- `:local_constraint`
-
-## Accessing execution groups
-
-In the rule implementation, you can declare that actions should be run on the
-execution platform of an execution group. You can do this by using the `exec_group`
-param of action generating methods, specifically
-[`ctx.actions.run`](/rules/lib/actions#run) and
-[`ctx.actions.run_shell`](/rules/lib/actions#run_shell).
-
-```python
-# foo.bzl
-def _impl(ctx):
-    ctx.actions.run(
-        inputs = [ctx.executable._some_tool, ctx.files.srcs[0]],
-        exec_group = "compile",
-        # ...
-    )
-```
-
-Rule authors will also be able to access the [resolved toolchains](/docs/toolchains#toolchain-resolution)
-of execution groups, similarly to how you
-can access the resolved toolchain of a target:
-
-```python
-# foo.bzl
-def _impl(ctx):
-    foo_info = ctx.exec_groups["link"].toolchains["//foo:toolchain_type"].fooinfo
-    ctx.actions.run(
-        inputs = [foo_info, ctx.files.srcs[0]],
-        exec_group = "link",
-        # ...
-    )
-```
-
-Note: If an action uses a toolchain from an execution group, but doesn't specify
-that execution group in the action declaration, that may potentially cause
-issues. A mismatch like this may not immediately cause failures, but is a latent
-problem.
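-
-Putting the two together, a minimal sketch (reusing the `link` exec group and
-the hypothetical `//foo:toolchain_type` from the examples above; the
-`linker_path` provider field is likewise assumed) names the exec group
-consistently in both the toolchain lookup and the action declaration:
-
-```python
-# foo.bzl
-def _impl(ctx):
-    # Toolchain resolved for the "link" exec group...
-    fooinfo = ctx.exec_groups["link"].toolchains["//foo:toolchain_type"].fooinfo
-    out = ctx.actions.declare_file(ctx.label.name + ".out")
-    # ...and the action declared against the same exec group, so the toolchain
-    # and the action agree on the execution platform.
-    ctx.actions.run(
-        executable = fooinfo.linker_path,  # hypothetical provider field
-        inputs = ctx.files.srcs,
-        outputs = [out],
-        exec_group = "link",
-    )
-    return [DefaultInfo(files = depset([out]))]
-
-my_rule = rule(
-    _impl,
-    attrs = {"srcs": attr.label_list(allow_files = True)},
-    exec_groups = {
-        "link": exec_group(toolchains = ["//foo:toolchain_type"]),
-    },
-)
-```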
-
-## Using execution groups to set execution properties
-
-Execution groups are integrated with the
-[`exec_properties`](/reference/be/common-definitions#common-attributes)
-attribute that exists on every rule and allows the target writer to specify a
-string dict of properties that is then passed to the execution machinery. For
-example, if you wanted to set some property, say memory, for the target and give
-certain actions a higher memory allocation, you would write an `exec_properties`
-entry with an execution-group-augmented key, such as:
-
-```python
-# BUILD
-my_rule(
-    name = 'my_target',
-    exec_properties = {
-        'mem': '12g',
-        'link.mem': '16g'
-    }
-    …
-)
-```
-
-All actions with `exec_group = "link"` would see the exec properties
-dictionary as `{"mem": "16g"}`. As you see here, execution-group-level
-settings override target-level settings.
-
-### Execution groups for native rules
-
-The following execution groups are available for actions defined by native rules:
-
-* `test`: Test runner actions.
-* `cpp_link`: C++ linking actions.
-
-### Creating exec groups to set exec properties
-
-Sometimes you want to use an exec group to give specific actions different exec
-properties but don't actually want different toolchains or constraints than the
-rule. For these situations, you can create exec groups using the `copy_from_rule`
-parameter:
-
-```python
-# foo.bzl
-
-# Creating an exec group with `copy_from_rule = True` is the same as explicitly
-# setting the exec group's toolchains and constraints to the same values as the
-# rule's respective parameters.
-my_rule = rule(
-    _impl,
-    exec_compatible_with = ["@platforms//os:linux"],
-    toolchains = ["//foo:toolchain_type"],
-    exec_groups = {
-        # The following two groups have the same toolchains and constraints:
-        "foo": exec_group(copy_from_rule = True),
-        "bar": exec_group(
-            exec_compatible_with = ["@platforms//os:linux"],
-            toolchains = ["//foo:toolchain_type"],
-        ),
-    },
-)
-```
-
-### Execution groups and platform execution properties
-
-It is possible to define `exec_properties` for arbitrary execution groups on
-platform targets (unlike `exec_properties` set directly on a target, where
-properties for unknown execution groups are rejected). Targets then inherit the
-execution platform's `exec_properties` that affect the default execution group
-and any other relevant execution groups.
-
-For example, suppose running a C++ test requires some resource to be available,
-but it isn't required for compiling and linking; this can be modelled as
-follows:
-
-```python
-constraint_setting(name = "resource")
-constraint_value(name = "has_resource", constraint_setting = ":resource")
-
-platform(
-    name = "platform_with_resource",
-    constraint_values = [":has_resource"],
-    exec_properties = {
-        "test.resource": "...",
-    },
-)
-
-cc_test(
-    name = "my_test",
-    srcs = ["my_test.cc"],
-    exec_compatible_with = [":has_resource"],
-)
-```
-
-`exec_properties` defined directly on targets take precedence over those that
-are inherited from the execution platform.
diff --git a/6.5.0/extending/platforms.mdx b/6.5.0/extending/platforms.mdx
deleted file mode 100644
index 29c4d6c..0000000
--- a/6.5.0/extending/platforms.mdx
+++ /dev/null
@@ -1,252 +0,0 @@
----
-title: 'Platforms'
----
-
-
-Bazel can build and test code on a variety of hardware, operating systems, and
-system configurations, using many different versions of build tools such as
-linkers and compilers. 
To help manage this complexity, Bazel has a concept of -*constraints* and *platforms*. A constraint is a dimension in which build or -production environments may differ, such as CPU architecture, the presence or -absence of a GPU, or the version of a system-installed compiler. A platform is a -named collection of choices for these constraints, representing the particular -resources that are available in some environment. - -Modeling the environment as a platform helps Bazel to automatically select the -appropriate -[toolchains](/docs/toolchains) -for build actions. Platforms can also be used in combination with the -[config_setting](/reference/be/general#config_setting) -rule to write [configurable attributes](/docs/configurable-attributes). - -Bazel recognizes three roles that a platform may serve: - -* **Host** - the platform on which Bazel itself runs. -* **Execution** - a platform on which build tools execute build actions to - produce intermediate and final outputs. -* **Target** - a platform on which a final output resides and executes. - -Bazel supports the following build scenarios regarding platforms: - -* **Single-platform builds** (default) - host, execution, and target platforms - are the same. For example, building a Linux executable on Ubuntu running on - an Intel x64 CPU. - -* **Cross-compilation builds** - host and execution platforms are the same, but - the target platform is different. For example, building an iOS app on macOS - running on a MacBook Pro. - -* **Multi-platform builds** - host, execution, and target platforms are all - different. - -## Defining constraints and platforms - -The space of possible choices for platforms is defined by using the - [`constraint_setting`](/reference/be/platform#constraint_setting) and - [`constraint_value`](/reference/be/platform#constraint_value) rules within `BUILD` files. `constraint_setting` creates a new dimension, while -`constraint_value` creates a new value for a given dimension; together they -effectively define an enum and its possible values. For example, the following -snippet of a `BUILD` file introduces a constraint for the system's glibc version -with two possible values. - -```python -constraint_setting(name = "glibc_version") - -constraint_value( - name = "glibc_2_25", - constraint_setting = ":glibc_version", -) - -constraint_value( - name = "glibc_2_26", - constraint_setting = ":glibc_version", -) -``` - -Constraints and their values may be defined across different packages in the -workspace. They are referenced by label and subject to the usual visibility -controls. If visibility allows, you can extend an existing constraint setting by -defining your own value for it. - -The [`platform`](/reference/be/platform#platform) rule introduces a new platform with -certain choices of constraint values. The -following creates a platform named `linux_x86`, and says that it describes any -environment that runs a Linux operating system on an x86_64 architecture with a -glibc version of 2.25. (See below for more on Bazel's built-in constraints.) - -```python -platform( - name = "linux_x86", - constraint_values = [ - "@platforms//os:linux", - "@platforms//cpu:x86_64", - ":glibc_2_25", - ], -) -``` - -Note: It is an error for a platform to specify more than one value of the -same constraint setting, such as `@platforms//cpu:x86_64` and -`@platforms//cpu:arm` for `@platforms//cpu:cpu`. 
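-
-For illustration, you could then ask Bazel to target this platform by passing
-its label to the `--platforms` flag, described in more detail below (the
-`//app` target and the platform's top-level location are assumed here):
-
-```console
-$ bazel build //app --platforms=//:linux_x86
-```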
-
-## Generally useful constraints and platforms
-
-To keep the ecosystem consistent, the Bazel team maintains a repository with
-constraint definitions for the most popular CPU architectures and operating
-systems. These are all located in
-[https://github.com/bazelbuild/platforms](https://github.com/bazelbuild/platforms).
-
-Bazel ships with the following special platform definition:
-`@local_config_platform//:host`. This is the autodetected host platform; it
-represents the platform of the system Bazel is running on.
-
-## Specifying a platform for a build
-
-You can specify the host and target platforms for a build using the following
-command-line flags:
-
-* `--host_platform` - defaults to `@bazel_tools//platforms:host_platform`
-* `--platforms` - defaults to `@bazel_tools//platforms:target_platform`
-
-## Skipping incompatible targets
-
-When building for a specific target platform, it is often desirable to skip
-targets that will never work on that platform. For example, your Windows device
-driver is likely going to generate lots of compiler errors when built on a
-Linux machine with `//...`. Use the
-[`target_compatible_with`](/reference/be/common-definitions#common.target_compatible_with)
-attribute to tell Bazel what target platform constraints your code has.
-
-The simplest use of this attribute restricts a target to a single platform.
-The target will not be built for any platform that doesn't satisfy all of the
-constraints. The following example restricts `win_driver_lib.cc` to 64-bit
-Windows.
-
-```python
-cc_library(
-    name = "win_driver_lib",
-    srcs = ["win_driver_lib.cc"],
-    target_compatible_with = [
-        "@platforms//cpu:x86_64",
-        "@platforms//os:windows",
-    ],
-)
-```
-
-`:win_driver_lib` is compatible *only* when building for 64-bit Windows and
-incompatible with everything else. Incompatibility is transitive. Any targets
-that transitively depend on an incompatible target are themselves considered
-incompatible.
-
-### When are targets skipped?
-
-Targets are skipped when they are considered incompatible and included in the
-build as part of a target pattern expansion. For example, the following two
-invocations skip any incompatible targets found in a target pattern expansion.
-
-```console
-$ bazel build --platforms=//:myplatform //...
-```
-
-```console
-$ bazel build --platforms=//:myplatform //:all
-```
-
-Incompatible tests in a [`test_suite`](/reference/be/general#test_suite) are
-similarly skipped if the `test_suite` is specified on the command line with
-[`--expand_test_suites`](/reference/command-line-reference#flag--expand_test_suites).
-In other words, `test_suite` targets on the command line behave like `:all` and
-`...`. Using `--noexpand_test_suites` prevents expansion and causes
-`test_suite` targets with incompatible tests to also be incompatible.
-
-Explicitly specifying an incompatible target on the command line results in an
-error message and a failed build.
-
-```console
-$ bazel build --platforms=//:myplatform //:target_incompatible_with_myplatform
-...
-ERROR: Target //:target_incompatible_with_myplatform is incompatible and cannot be built, but was explicitly requested.
-...
-FAILED: Build did NOT complete successfully
-```
-
-### More expressive constraints
-
-For more flexibility in expressing constraints, use the
-`@platforms//:incompatible`
-[`constraint_value`](/reference/be/platform#constraint_value) that no platform
-satisfies. 
-
-Use [`select()`](/reference/be/functions#select) in combination with
-`@platforms//:incompatible` to express more complicated restrictions. For
-example, use it to implement basic OR logic. The following marks a library
-compatible with macOS and Linux, but no other platforms.
-
-Note: An empty constraints list is equivalent to "compatible with everything".
-
-```python
-cc_library(
-    name = "unixish_lib",
-    srcs = ["unixish_lib.cc"],
-    target_compatible_with = select({
-        "@platforms//os:osx": [],
-        "@platforms//os:linux": [],
-        "//conditions:default": ["@platforms//:incompatible"],
-    }),
-)
-```
-
-The above can be interpreted as follows:
-
-1. When targeting macOS, the target has no constraints.
-2. When targeting Linux, the target has no constraints.
-3. Otherwise, the target has the `@platforms//:incompatible` constraint. Because
-   `@platforms//:incompatible` is not part of any platform, the target is
-   deemed incompatible.
-
-To make your constraints more readable, use
-[skylib](https://github.com/bazelbuild/bazel-skylib)'s
-[`selects.with_or()`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectswith_or).
-
-You can express inverse compatibility in a similar way. The following example
-describes a library that is compatible with everything _except_ for ARM.
-
-```python
-cc_library(
-    name = "non_arm_lib",
-    srcs = ["non_arm_lib.cc"],
-    target_compatible_with = select({
-        "@platforms//cpu:arm": ["@platforms//:incompatible"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-### Detecting incompatible targets using `bazel cquery`
-
-You can use the
-[`IncompatiblePlatformProvider`](/rules/lib/IncompatiblePlatformProvider)
-in `bazel cquery`'s [Starlark output
-format](/docs/cquery#output-format-definition) to distinguish
-incompatible targets from compatible ones.
-
-This can be used to filter out incompatible targets. The example below will
-only print the labels for targets that are compatible. Incompatible targets are
-not printed.
-
-```console
-$ cat example.cquery
-
-def format(target):
-  if "IncompatiblePlatformProvider" not in providers(target):
-    return target.label
-  return ""
-
-
-$ bazel cquery //... --output=starlark --starlark:file=example.cquery
-```
-
-### Known Issues
-
-Incompatible targets [ignore visibility
-restrictions](https://github.com/bazelbuild/bazel/issues/16044).
diff --git a/6.5.0/extending/repo.mdx b/6.5.0/extending/repo.mdx
deleted file mode 100644
index 8b60398..0000000
--- a/6.5.0/extending/repo.mdx
+++ /dev/null
@@ -1,142 +0,0 @@
----
-title: 'Repository Rules'
----
-
-
-This page covers how to create repository rules and provides examples.
-
-An [external repository](/docs/external) is a rule that can be used only
-in the `WORKSPACE` file and enables non-hermetic operation at the loading phase
-of Bazel. Each external repository rule creates its own workspace, with its
-own `BUILD` files and artifacts. They can be used to depend on third-party
-libraries (such as Maven packaged libraries) but also to generate `BUILD` files
-specific to the host Bazel is running on.
-
-## Repository rule creation
-
-In a `.bzl` file, use the
-[repository_rule](/rules/lib/globals#repository_rule) function to create a new
-repository rule and store it in a global variable.
-
-A custom repository rule can be used just like a native repository rule. It
-has a mandatory `name` attribute and every target present in its build files
-can be referred to as `@<name>//package:target`, where `<name>` is the value
-of the `name` attribute.
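-
-As a minimal sketch (the `hello_repo` rule here is hypothetical, while
-`repository_ctx.file` and `repository_rule` are part of the real API), a
-repository rule definition and its `WORKSPACE` usage look like this:
-
-```python
-# hello.bzl
-def _hello_repo_impl(repository_ctx):
-    # Write a BUILD file and one exported text file into the new repository.
-    repository_ctx.file("BUILD", 'exports_files(["hello.txt"])')
-    repository_ctx.file("hello.txt", "Hello from a repository rule!\n")
-
-hello_repo = repository_rule(
-    implementation = _hello_repo_impl,
-)
-```
-
-```python
-# WORKSPACE
-load("//:hello.bzl", "hello_repo")
-hello_repo(name = "hello")
-# Targets in the new workspace are addressable as @hello//:hello.txt.
-```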
-
-The rule is loaded when you explicitly build it, or if it is a dependency of
-the build. In this case, Bazel will execute its `implementation` function. This
-function describes how to create the repository, its content, and `BUILD` files.
-
-## Attributes
-
-An attribute is a rule argument, such as `url` or `sha256`. You must list
-the attributes and their types when you define a repository rule.
-
-```python
-local_repository = repository_rule(
-    implementation=_impl,
-    local=True,
-    attrs={"path": attr.string(mandatory=True)})
-```
-
-To access an attribute, use `repository_ctx.attr.<attribute_name>`.
-
-All `repository_rule`s have implicitly defined attributes (just like build
-rules). The two implicit attributes are `name` (just like for build rules) and
-`repo_mapping`. The name of a repository rule is accessible with
-`repository_ctx.name`. The meaning of `repo_mapping` is the same as for the
-native repository rules
-[`local_repository`](https://bazel.build/reference/be/workspace#local_repository.repo_mapping)
-and
-[`new_local_repository`](https://bazel.build/reference/be/workspace#new_local_repository.repo_mapping).
-
-If an attribute name starts with `_`, it is private and users cannot set it.
-
-## Implementation function
-
-Every repository rule requires an `implementation` function. It contains the
-actual logic of the rule and is executed strictly in the loading phase.
-
-The function has exactly one input parameter, `repository_ctx`. The function
-returns either `None` to signify that the rule is reproducible given the
-specified parameters, or a dict with a set of parameters for that rule that
-would turn that rule into a reproducible one generating the same repository. For
-example, for a rule tracking a git repository that would mean returning a
-specific commit identifier instead of a floating branch that was originally
-specified.
-
-The input parameter `repository_ctx` can be used to
-access attribute values, and non-hermetic functions (finding a binary,
-executing a binary, creating a file in the repository or downloading a file
-from the Internet). See [the library](/rules/lib/repository_ctx) for more
-context. Example:
-
-```python
-def _impl(repository_ctx):
-  repository_ctx.symlink(repository_ctx.attr.path, "")
-
-local_repository = repository_rule(
-    implementation=_impl,
-    ...)
-```
-
-## When is the implementation function executed?
-
-If the repository is declared as `local`, then any change in a dependency
-in the dependency graph (including the `WORKSPACE` file itself) will
-cause an execution of the implementation function.
-
-The implementation function can be _restarted_ if a dependency it
-requests is _missing_. The beginning of the implementation function
-will be re-executed after the dependency has been resolved. To avoid
-unnecessary restarts (which are expensive, as network access might
-have to be repeated), label arguments are prefetched, provided all
-label arguments can be resolved to an existing file. Note that resolving
-a path from a string or a label that was constructed only during execution
-of the function might still cause a restart.
-
-Finally, for non-`local` repositories, only a change in the following
-dependencies might cause a restart:
-
-- `.bzl` files needed to define the repository rule.
-- Declaration of the repository rule in the `WORKSPACE` file.
-- Value of any environment variable declared with the `environ`
-attribute of the
-[`repository_rule`](/rules/lib/globals#repository_rule)
-function.
The value of those environment variables can be enforced from
-the command line with the
-[`--action_env`](/reference/command-line-reference#flag--action_env)
-flag (but this flag will invalidate every action of the build).
-- Content of any file used and referred to by a label (for example,
-  `//mypkg:label.txt` not `mypkg/label.txt`).
-
-## Forcing refetch of external repositories
-
-Sometimes, an external repository can become outdated without any change to its
-definition or dependencies. For example, a repository fetching sources might
-follow a particular branch of a third-party repository, and new commits are
-available on that branch. In this case, you can ask Bazel to refetch all
-external repositories unconditionally by calling `bazel sync`.
-
-Moreover, some rules inspect the local machine and might become
-outdated if the local machine was upgraded. Here you can ask Bazel to
-refetch only those external repositories where the
-[`repository_rule`](/rules/lib/globals#repository_rule)
-definition has the `configure` attribute set by using `bazel sync --configure`.
-
-
-## Examples
-
-- [C++ auto-configured toolchain](https://cs.opensource.google/bazel/bazel/+/master:tools/cpp/cc_configure.bzl;drc=644b7d41748e09eff9e47cbab2be2263bb71f29a;l=176):
-it uses a repository rule to automatically create the
-C++ configuration files for Bazel by looking for the local C++ compiler, the
-environment and the flags the C++ compiler supports.
-
-- [Go repositories](https://github.com/bazelbuild/rules_go/blob/67bc217b6210a0922d76d252472b87e9a6118fdf/go/private/go_repositories.bzl#L195)
-  uses several `repository_rule`s to define the list of dependencies
-  needed to use the Go rules.
-
-- [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external) creates
-  an external repository called `@maven` by default that generates build targets
-  for every Maven artifact in the transitive dependency tree.
diff --git a/6.5.0/extending/rules.mdx b/6.5.0/extending/rules.mdx
deleted file mode 100644
index 64f0ce0..0000000
--- a/6.5.0/extending/rules.mdx
+++ /dev/null
@@ -1,1269 +0,0 @@
----
-title: 'Rules'
----
-
-
-A **rule** defines a series of [**actions**](#actions) that Bazel performs on
-inputs to produce a set of outputs, which are referenced in
-[**providers**](#providers) returned by the rule's
-[**implementation function**](#implementation_function). For example, a C++
-binary rule might:
-
-1. Take a set of `.cpp` source files (inputs).
-2. Run `g++` on the source files (action).
-3. Return the `DefaultInfo` provider with the executable output and other files
-   to make available at runtime.
-4. Return the `CcInfo` provider with C++-specific information gathered from the
-   target and its dependencies.
-
-From Bazel's perspective, `g++` and the standard C++ libraries are also inputs
-to this rule. As a rule writer, you must consider not only the user-provided
-inputs to a rule, but also all of the tools and libraries required to execute
-the actions.
-
-Before creating or modifying any rule, ensure you are familiar with Bazel's
-[build phases](/rules/concepts). It is important to understand the three
-phases of a build (loading, analysis, and execution). It is also useful to
-learn about [macros](/rules/macros) to understand the difference between rules and
-macros. To get started, first review the [Rules Tutorial](/rules/rules-tutorial).
-Then, use this page as a reference.
-
-A few rules are built into Bazel itself.
These *native rules*, such as -`cc_library` and `java_binary`, provide some core support for certain languages. -By defining your own rules, you can add similar support for languages and tools -that Bazel does not support natively. - -Bazel provides an extensibility model for writing rules using the -[Starlark](/rules/language) language. These rules are written in `.bzl` files, which -can be loaded directly from `BUILD` files. - -When defining your own rule, you get to decide what attributes it supports and -how it generates its outputs. - -The rule's `implementation` function defines its exact behavior during the -[analysis phase](/rules/concepts#evaluation-model). This function does not run any -external commands. Rather, it registers [actions](#actions) that will be used -later during the execution phase to build the rule's outputs, if they are -needed. - -## Rule creation - -In a `.bzl` file, use the [rule](/rules/lib/globals#rule) function to define a new -rule, and store the result in a global variable. The call to `rule` specifies -[attributes](#attributes) and an -[implementation function](#implementation_function): - -```python -example_library = rule( - implementation = _example_library_impl, - attrs = { - "deps": attr.label_list(), - ... - }, -) -``` - -This defines a [kind of rule](/reference/query#kind) named `example_library`. - -The call to `rule` also must specify if the rule creates an -[executable](#executable-rules) output (with `executable=True`), or specifically -a test executable (with `test=True`). If the latter, the rule is a *test rule*, -and the name of the rule must end in `_test`. - -## Target instantiation - -Rules can be [loaded](/concepts/build-files#load) and called in `BUILD` files: - -```python -load('//some/pkg:rules.bzl', 'example_library') - -example_library( - name = "example_target", - deps = [":another_target"], - ... -) -``` - -Each call to a build rule returns no value, but has the side effect of defining -a target. This is called *instantiating* the rule. This specifies a name for the -new target and values for the target's [attributes](#attributes). - -Rules can also be called from Starlark functions and loaded in `.bzl` files. -Starlark functions that call rules are called [Starlark macros](/rules/macros). -Starlark macros must ultimately be called from `BUILD` files, and can only be -called during the [loading phase](/rules/concepts#evaluation-model), when `BUILD` -files are evaluated to instantiate targets. - -## Attributes - -An *attribute* is a rule argument. Attributes can provide specific values to a -target's [implementation](#implementation_function), or they can refer to other -targets, creating a graph of dependencies. - -Rule-specific attributes, such as `srcs` or `deps`, are defined by passing a map -from attribute names to schemas (created using the [`attr`](/rules/lib/attr) -module) to the `attrs` parameter of `rule`. -[Common attributes](/reference/be/common-definitions#common-attributes), such as -`name` and `visibility`, are implicitly added to all rules. Additional -attributes are implicitly added to -[executable and test rules](#executable-rules) specifically. Attributes which -are implicitly added to a rule cannot be included in the dictionary passed to -`attrs`. - -### Dependency attributes - -Rules that process source code usually define the following attributes to handle -various [types of dependencies](/concepts/dependencies#types_of_dependencies): - -* `srcs` specifies source files processed by a target's actions. 
Often, the
-  attribute schema specifies which file extensions are expected for the sort
-  of source file the rule processes. Rules for languages with header files
-  generally specify a separate `hdrs` attribute for headers processed by a
-  target and its consumers.
-* `deps` specifies code dependencies for a target. The attribute schema should
-  specify which [providers](#providers) those dependencies must provide. (For
-  example, `cc_library` provides `CcInfo`.)
-* `data` specifies files to be made available at runtime to any executable
-  which depends on a target. The attribute schema should allow arbitrary files
-  to be specified.
-
-```python
-example_library = rule(
-    implementation = _example_library_impl,
-    attrs = {
-        "srcs": attr.label_list(allow_files = [".example"]),
-        "hdrs": attr.label_list(allow_files = [".header"]),
-        "deps": attr.label_list(providers = [ExampleInfo]),
-        "data": attr.label_list(allow_files = True),
-        ...
-    },
-)
-```
-
-These are examples of *dependency attributes*. Any attribute that specifies
-an input label (those defined with
-[`attr.label_list`](/rules/lib/attr#label_list),
-[`attr.label`](/rules/lib/attr#label), or
-[`attr.label_keyed_string_dict`](/rules/lib/attr#label_keyed_string_dict))
-specifies dependencies of a certain type
-between a target and the targets whose labels (or the corresponding
-[`Label`](/rules/lib/Label) objects) are listed in that attribute when the target
-is defined. The repository, and possibly the path, for these labels is resolved
-relative to the defined target.
-
-```python
-example_library(
-    name = "my_target",
-    deps = [":other_target"],
-)
-
-example_library(
-    name = "other_target",
-    ...
-)
-```
-
-In this example, `other_target` is a dependency of `my_target`, and therefore
-`other_target` is analyzed first. It is an error if there is a cycle in the
-dependency graph of targets.
-
-
-
-### Private attributes and implicit dependencies
-
-A dependency attribute with a default value creates an *implicit dependency*. It
-is implicit because it's a part of the target graph that the user does not
-specify in a `BUILD` file. Implicit dependencies are useful for hard-coding a
-relationship between a rule and a *tool* (a build-time dependency, such as a
-compiler), since most of the time a user is not interested in specifying what
-tool the rule uses. Inside the rule's implementation function, this is treated
-the same as other dependencies.
-
-If you want to provide an implicit dependency without allowing the user to
-override that value, you can make the attribute *private* by giving it a name
-that begins with an underscore (`_`). Private attributes must have default
-values. It generally only makes sense to use private attributes for implicit
-dependencies.
-
-```python
-example_library = rule(
-    implementation = _example_library_impl,
-    attrs = {
-        ...
-        "_compiler": attr.label(
-            default = Label("//tools:example_compiler"),
-            allow_single_file = True,
-            executable = True,
-            cfg = "exec",
-        ),
-    },
-)
-```
-
-In this example, every target of type `example_library` has an implicit
-dependency on the compiler `//tools:example_compiler`. This allows
-`example_library`'s implementation function to generate actions that invoke the
-compiler, even though the user did not pass its label as an input. Since
-`_compiler` is a private attribute, it follows that `ctx.attr._compiler`
-will always point to `//tools:example_compiler` in all targets of this rule
-type.
Alternatively, you can name the attribute `compiler` without the
-underscore and keep the default value. This allows users to substitute a
-different compiler if necessary, but it requires no awareness of the compiler's
-label.
-
-Implicit dependencies are generally used for tools that reside in the same
-repository as the rule implementation. If the tool comes from the
-[execution platform](/docs/platforms) or a different repository instead, the
-rule should obtain that tool from a [toolchain](/docs/toolchains).
-
-### Output attributes
-
-*Output attributes*, such as [`attr.output`](/rules/lib/attr#output) and
-[`attr.output_list`](/rules/lib/attr#output_list), declare an output file that the
-target generates. These differ from dependency attributes in two ways:
-
-* They define output file targets instead of referring to targets defined
-  elsewhere.
-* The output file targets depend on the instantiated rule target, instead of
-  the other way around.
-
-Typically, output attributes are only used when a rule needs to create outputs
-with user-defined names which cannot be based on the target name. If a rule has
-one output attribute, it is typically named `out` or `outs`.
-
-Output attributes are the preferred way of creating *predeclared outputs*, which
-can be specifically depended upon or
-[requested at the command line](#requesting_output_files).
-
-## Implementation function
-
-Every rule requires an `implementation` function. These functions are executed
-strictly in the [analysis phase](/rules/concepts#evaluation-model) and transform the
-graph of targets generated in the loading phase into a graph of
-[actions](#actions) to be performed during the execution phase. As such,
-implementation functions cannot actually read or write files.
-
-Rule implementation functions are usually private (named with a leading
-underscore). Conventionally, they are named the same as their rule, but suffixed
-with `_impl`.
-
-Implementation functions take exactly one parameter: a
-[rule context](/rules/lib/ctx), conventionally named `ctx`. They return a list of
-[providers](#providers).
-
-### Targets
-
-Dependencies are represented at analysis time as [`Target`](/rules/lib/Target)
-objects. These objects contain the [providers](#providers) generated when the
-target's implementation function was executed.
-
-[`ctx.attr`](/rules/lib/ctx#attr) has fields corresponding to the names of each
-dependency attribute, containing `Target` objects representing each direct
-dependency via that attribute. For `label_list` attributes, this is a list of
-`Targets`. For `label` attributes, this is a single `Target` or `None`.
-
-A list of provider objects is returned by a target's implementation function:
-
-```python
-return [ExampleInfo(headers = depset(...))]
-```
-
-Those can be accessed using index notation (`[]`), with the type of provider as
-a key. These can be [custom providers](#custom_providers) defined in Starlark or
-[providers for native rules](/rules/lib/starlark-provider) available as Starlark
-global variables.
-
-For example, if a rule takes header files via a `hdrs` attribute and provides
-them to the compilation actions of the target and its consumers, it could
-collect them like so:
-
-```python
-def _example_library_impl(ctx):
-    ...
-
-    transitive_headers = [hdr[ExampleInfo].headers for hdr in ctx.attr.hdrs]
-```
-
-For the legacy style in which a [`struct`](/rules/lib/struct) is returned from a
-target's implementation function instead of a list of provider objects:
-
-```python
-return struct(example_info = struct(headers = depset(...)))
-```
-
-Providers can be retrieved from the corresponding field of the `Target` object:
-
-```python
-transitive_headers = [hdr.example_info.headers for hdr in ctx.attr.hdrs]
-```
-
-This style is strongly discouraged and rules should be
-[migrated away from it](#migrating_from_legacy_providers).
-
-### Files
-
-Files are represented by [`File`](/rules/lib/File) objects. Since Bazel does not
-perform file I/O during the analysis phase, these objects cannot be used to
-directly read or write file content. Rather, they are passed to action-emitting
-functions (see [`ctx.actions`](/rules/lib/actions)) to construct pieces of the
-action graph.
-
-A `File` can either be a source file or a generated file. Each generated file
-must be an output of exactly one action. Source files cannot be the output of
-any action.
-
-For each dependency attribute, the corresponding field of
-[`ctx.files`](/rules/lib/ctx#files) contains a list of the default outputs of all
-dependencies via that attribute:
-
-```python
-def _example_library_impl(ctx):
-    ...
-    headers = depset(ctx.files.hdrs, transitive=transitive_headers)
-    srcs = ctx.files.srcs
-    ...
-```
-
-[`ctx.file`](/rules/lib/ctx#file) contains a single `File` or `None` for
-dependency attributes whose specs set `allow_single_file=True`.
-[`ctx.executable`](/rules/lib/ctx#executable) behaves the same as `ctx.file`, but only
-contains fields for dependency attributes whose specs set `executable=True`.
-
-### Declaring outputs
-
-During the analysis phase, a rule's implementation function can create outputs.
-Since all labels have to be known during the loading phase, these additional
-outputs have no labels. `File` objects for outputs can be created using
-[`ctx.actions.declare_file`](/rules/lib/actions#declare_file) and
-[`ctx.actions.declare_directory`](/rules/lib/actions#declare_directory). Often,
-the names of outputs are based on the target's name,
-[`ctx.label.name`](/rules/lib/ctx#label):
-
-```python
-def _example_library_impl(ctx):
-    ...
-    output_file = ctx.actions.declare_file(ctx.label.name + ".output")
-    ...
-```
-
-For *predeclared outputs*, like those created for
-[output attributes](#output_attributes), `File` objects instead can be retrieved
-from the corresponding fields of [`ctx.outputs`](/rules/lib/ctx#outputs).
-
-### Actions
-
-An action describes how to generate a set of outputs from a set of inputs, for
-example "run gcc on hello.c and get hello.o". When an action is created, Bazel
-doesn't run the command immediately. It registers it in a graph of dependencies,
-because an action can depend on the output of another action. For example, in C,
-the linker must be called after the compiler.
-
-General-purpose functions that create actions are defined in
-[`ctx.actions`](/rules/lib/actions):
-
-* [`ctx.actions.run`](/rules/lib/actions#run), to run an executable.
-* [`ctx.actions.run_shell`](/rules/lib/actions#run_shell), to run a shell
-  command.
-* [`ctx.actions.write`](/rules/lib/actions#write), to write a string to a file.
-* [`ctx.actions.expand_template`](/rules/lib/actions#expand_template), to
-  generate a file from a template.
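-
-As a minimal sketch (a hypothetical rule, for illustration only), two of these
-functions can be combined to write a file and then process it:
-
-```python
-def _sketch_impl(ctx):
-    # ctx.actions.write: write a string to a declared output file.
-    manifest = ctx.actions.declare_file(ctx.label.name + ".manifest")
-    ctx.actions.write(output = manifest, content = "generated by a rule\n")
-
-    # ctx.actions.run_shell: run a shell command that consumes the manifest
-    # and produces a second declared output.
-    out = ctx.actions.declare_file(ctx.label.name + ".out")
-    ctx.actions.run_shell(
-        inputs = [manifest],
-        outputs = [out],
-        command = "cp '%s' '%s'" % (manifest.path, out.path),
-    )
-    return [DefaultInfo(files = depset([out]))]
-```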
-
-[`ctx.actions.args`](/rules/lib/actions#args) can be used to efficiently
-accumulate the arguments for actions. It avoids flattening depsets until
-execution time:
-
-```python
-def _example_library_impl(ctx):
-    ...
-
-    transitive_headers = [dep[ExampleInfo].headers for dep in ctx.attr.deps]
-    headers = depset(ctx.files.hdrs, transitive=transitive_headers)
-    srcs = ctx.files.srcs
-    inputs = depset(srcs, transitive=[headers])
-    output_file = ctx.actions.declare_file(ctx.label.name + ".output")
-
-    args = ctx.actions.args()
-    args.add_joined("-h", headers, join_with=",")
-    args.add_joined("-s", srcs, join_with=",")
-    args.add("-o", output_file)
-
-    ctx.actions.run(
-        mnemonic = "ExampleCompile",
-        executable = ctx.executable._compiler,
-        arguments = [args],
-        inputs = inputs,
-        outputs = [output_file],
-    )
-    ...
-```
-
-Actions take a list or depset of input files and generate a (non-empty) list of
-output files. The set of input and output files must be known during the
-[analysis phase](/rules/concepts#evaluation-model). It might depend on the value of
-attributes, including providers from dependencies, but it cannot depend on the
-result of the execution. For example, if your action runs the unzip command, you
-must specify which files you expect to be inflated (before running unzip).
-Actions which create a variable number of files internally can wrap those in a
-single file (such as a zip, tar, or other archive format).
-
-Actions must list all of their inputs. Listing inputs that are not used is
-permitted, but inefficient.
-
-Actions must create all of their outputs. They may write other files, but
-anything not in outputs will not be available to consumers. All declared outputs
-must be written by some action.
-
-Actions are comparable to pure functions: They should depend only on the
-provided inputs, and avoid accessing computer information, username, clock,
-network, or I/O devices (except for reading inputs and writing outputs). This is
-important because the output will be cached and reused.
-
-Dependencies are resolved by Bazel, which will decide which actions are
-executed. It is an error if there is a cycle in the dependency graph. Creating
-an action does not guarantee that it will be executed; that depends on whether
-its outputs are needed for the build.
-
-### Providers
-
-Providers are pieces of information that a rule exposes to other rules that
-depend on it. This data can include output files, libraries, parameters to pass
-on a tool's command line, or anything else a target's consumers should know
-about.
-
-Since a rule's implementation function can only read providers from the
-instantiated target's immediate dependencies, rules need to forward any
-information from a target's dependencies that needs to be known by a target's
-consumers, generally by accumulating that into a [`depset`](/rules/lib/depset).
-
-A target's providers are specified by a list of `Provider` objects returned by
-the implementation function.
-
-Old implementation functions can also be written in a legacy style where the
-implementation function returns a [`struct`](/rules/lib/struct) instead of a list of
-provider objects. This style is strongly discouraged and rules should be
-[migrated away from it](#migrating_from_legacy_providers).
-
-#### Default outputs
-
-A target's *default outputs* are the outputs that are requested by default when
-the target is requested for build at the command line.
For example, a
-`java_library` target `//pkg:foo` has `foo.jar` as a default output, so that
-will be built by the command `bazel build //pkg:foo`.
-
-Default outputs are specified by the `files` parameter of
-[`DefaultInfo`](/rules/lib/DefaultInfo):
-
-```python
-def _example_library_impl(ctx):
-    ...
-    return [
-        DefaultInfo(files = depset([output_file]), ...),
-        ...
-    ]
-```
-
-If `DefaultInfo` is not returned by a rule implementation or the `files`
-parameter is not specified, `DefaultInfo.files` defaults to all
-*predeclared outputs* (generally, those created by [output
-attributes](#output_attributes)).
-
-Rules that perform actions should provide default outputs, even if those outputs
-are not expected to be directly used. Actions that are not in the graph of the
-requested outputs are pruned. If an output is only used by a target's consumers,
-those actions will not be performed when the target is built in isolation. This
-makes debugging more difficult because rebuilding just the failing target won't
-reproduce the failure.
-
-#### Runfiles
-
-Runfiles are a set of files used by a target at runtime (as opposed to build
-time). During the [execution phase](/rules/concepts#evaluation-model), Bazel creates
-a directory tree containing symlinks pointing to the runfiles. This stages the
-environment for the binary so it can access the runfiles during runtime.
-
-Runfiles can be added manually during rule creation.
-[`runfiles`](/rules/lib/runfiles) objects can be created by the `runfiles` method
-on the rule context, [`ctx.runfiles`](/rules/lib/ctx#runfiles), and passed to the
-`runfiles` parameter on `DefaultInfo`. The executable output of
-[executable rules](#executable-rules) is implicitly added to the runfiles.
-
-Some rules specify attributes, generally named
-[`data`](/reference/be/common-definitions#common.data), whose outputs are added to
-a target's runfiles. Runfiles should also be merged in from `data`, as well as
-from any attributes which might provide code for eventual execution, generally
-`srcs` (which might contain `filegroup` targets with associated `data`) and
-`deps`.
-
-```python
-def _example_library_impl(ctx):
-    ...
-    runfiles = ctx.runfiles(files = ctx.files.data)
-    transitive_runfiles = []
-    for runfiles_attr in (
-        ctx.attr.srcs,
-        ctx.attr.hdrs,
-        ctx.attr.deps,
-        ctx.attr.data,
-    ):
-        for target in runfiles_attr:
-            transitive_runfiles.append(target[DefaultInfo].default_runfiles)
-    runfiles = runfiles.merge_all(transitive_runfiles)
-    return [
-        DefaultInfo(..., runfiles = runfiles),
-        ...
-    ]
-```
-
-#### Custom providers
-
-Providers can be defined using the [`provider`](/rules/lib/globals#provider)
-function to convey rule-specific information:
-
-```python
-ExampleInfo = provider(
-    "Info needed to compile/link Example code.",
-    fields={
-        "headers": "depset of header Files from transitive dependencies.",
-        "files_to_link": "depset of Files from compilation.",
-    })
-```
-
-Rule implementation functions can then construct and return provider instances:
-
-```python
-def _example_library_impl(ctx):
-  ...
-  return [
-      ...
-      ExampleInfo(
-          headers = headers,
-          files_to_link = depset(
-              [output_file],
-              transitive = [
-                  dep[ExampleInfo].files_to_link for dep in ctx.attr.deps
-              ],
-          ),
-      )
-  ]
-```
-
-##### Custom initialization of providers
-
-It's possible to guard the instantiation of a provider with custom
-preprocessing and validation logic.
This can be used to ensure that all
-provider instances obey certain invariants, or to give users a cleaner API for
-obtaining an instance.
-
-This is done by passing an `init` callback to the
-[`provider`](/rules/lib/globals.html#provider) function. If this callback is given, the
-return type of `provider()` changes to be a tuple of two values: the provider
-symbol that is the ordinary return value when `init` is not used, and a "raw
-constructor".
-
-In this case, when the provider symbol is called, instead of directly returning
-a new instance, it will forward the arguments along to the `init` callback. The
-callback's return value must be a dict mapping field names (strings) to values;
-this is used to initialize the fields of the new instance. Note that the
-callback may have any signature, and if the arguments do not match the signature
-an error is reported as if the callback were invoked directly.
-
-The raw constructor, by contrast, will bypass the `init` callback.
-
-The following example uses `init` to preprocess and validate its arguments:
-
-```python
-# //pkg:exampleinfo.bzl
-
-_core_headers = [...]  # private constant representing standard library files
-
-# It's possible to define an init accepting positional arguments, but
-# keyword-only arguments are preferred.
-def _exampleinfo_init(*, files_to_link, headers = None, allow_empty_files_to_link = False):
-    if not files_to_link and not allow_empty_files_to_link:
-        fail("files_to_link may not be empty")
-    all_headers = depset(_core_headers, transitive = headers)
-    return {"files_to_link": files_to_link, "headers": all_headers}
-
-ExampleInfo, _new_exampleinfo = provider(
-    ...
-    init = _exampleinfo_init)
-```
-
-A rule implementation may then instantiate the provider as follows:
-
-```python
-    ExampleInfo(
-        files_to_link=my_files_to_link,  # may not be empty
-        headers = my_headers,  # will automatically include the core headers
-    )
-```
-
-The raw constructor can be used to define alternative public factory functions
-that do not go through the `init` logic. For example, in exampleinfo.bzl we
-could define:
-
-```python
-def make_barebones_exampleinfo(headers):
-    """Returns an ExampleInfo with no files_to_link and only the specified headers."""
-    return _new_exampleinfo(files_to_link = depset(), headers = headers)
-```
-
-Typically, the raw constructor is bound to a variable whose name begins with an
-underscore (`_new_exampleinfo` above), so that user code cannot load it and
-generate arbitrary provider instances.
-
-Another use for `init` is to simply prevent the user from calling the provider
-symbol altogether, and force them to use a factory function instead:
-
-```python
-def _exampleinfo_init_banned(*args, **kwargs):
-    fail("Do not call ExampleInfo(). Use make_exampleinfo() instead.")
-
-ExampleInfo, _new_exampleinfo = provider(
-    ...
-    init = _exampleinfo_init_banned)
-
-def make_exampleinfo(...):
-    ...
-    return _new_exampleinfo(...)
-```
-
-
-
-## Executable rules and test rules
-
-Executable rules define targets that can be invoked by a `bazel run` command.
-Test rules are a special kind of executable rule whose targets can also be
-invoked by a `bazel test` command. Executable and test rules are created by
-setting the respective [`executable`](/rules/lib/globals#rule.executable) or
-[`test`](/rules/lib/globals#rule.test) argument to `True` in the call to `rule`:
-
-```python
-example_binary = rule(
-    implementation = _example_binary_impl,
-    executable = True,
-    ...
-)
-
-example_test = rule(
-    implementation = _example_binary_impl,
-    test = True,
-    ...
-)
-```
-
-Test rules must have names that end in `_test`. (Test *target* names also often
-end in `_test` by convention, but this is not required.) Non-test rules must not
-have this suffix.
-
-Both kinds of rules must produce an executable output file (which may or may not
-be predeclared) that will be invoked by the `run` or `test` commands. To tell
-Bazel which of a rule's outputs to use as this executable, pass it as the
-`executable` argument of a returned [`DefaultInfo`](/rules/lib/DefaultInfo)
-provider. That `executable` is added to the default outputs of the rule (so you
-don't need to pass that to both `executable` and `files`). It's also implicitly
-added to the [runfiles](#runfiles):
-
-```python
-def _example_binary_impl(ctx):
-    executable = ctx.actions.declare_file(ctx.label.name)
-    ...
-    return [
-        DefaultInfo(executable = executable, ...),
-        ...
-    ]
-```
-
-The action that generates this file must set the executable bit on the file. For
-a [`ctx.actions.run`](/rules/lib/actions#run) or
-[`ctx.actions.run_shell`](/rules/lib/actions#run_shell) action this should be done
-by the underlying tool that is invoked by the action. For a
-[`ctx.actions.write`](/rules/lib/actions#write) action, pass `is_executable=True`.
-
-As [legacy behavior](#deprecated_predeclared_outputs), executable rules have a
-special `ctx.outputs.executable` predeclared output. This file serves as the
-default executable if you do not specify one using `DefaultInfo`; it must not be
-used otherwise. This output mechanism is deprecated because it does not support
-customizing the executable file's name at analysis time.
-
-See examples of an
-[executable rule](https://github.com/bazelbuild/examples/blob/main/rules/executable/fortune.bzl)
-and a
-[test rule](https://github.com/bazelbuild/examples/blob/main/rules/test_rule/line_length.bzl).
-
-[Executable rules](/reference/be/common-definitions#common-attributes-binaries) and
-[test rules](/reference/be/common-definitions#common-attributes-tests) have additional
-attributes implicitly defined, in addition to those added for
-[all rules](/reference/be/common-definitions#common-attributes). The defaults of
-implicitly-added attributes cannot be changed, though this can be worked around
-by wrapping a private rule in a [Starlark macro](/rules/macros) which alters the
-default:
-
-```python
-def example_test(size="small", **kwargs):
-  _example_test(size=size, **kwargs)
-
-_example_test = rule(
-    ...
-)
-```
-
-### Runfiles location
-
-When an executable target is run with `bazel run` (or `test`), the root of the
-runfiles directory is adjacent to the executable. The paths relate as follows:
-
-```python
-# Given launcher_path and runfile_file:
-runfiles_root = launcher_path.path + ".runfiles"
-workspace_name = ctx.workspace_name
-runfile_path = runfile_file.short_path
-execution_root_relative_path = "%s/%s/%s" % (
-    runfiles_root, workspace_name, runfile_path)
-```
-
-The path to a `File` under the runfiles directory corresponds to
-[`File.short_path`](/rules/lib/File#short_path).
-
-The binary executed directly by `bazel` is adjacent to the root of the
-`runfiles` directory. However, binaries called *from* the runfiles can't make
-the same assumption. To mitigate this, each binary should provide a way to
-accept its runfiles root as a parameter using an environment variable or
-command-line argument/flag.
This allows binaries to pass the correct canonical runfiles root
-to the binaries they call. If that's not set, a binary can guess that it was the
-first binary called and look for an adjacent runfiles directory.
-
-## Advanced topics
-
-### Requesting output files
-
-A single target can have several output files. When a `bazel build` command is
-run, some of the outputs of the targets given to the command are considered to
-be *requested*. Bazel only builds these requested files and the files that they
-directly or indirectly depend on. (In terms of the action graph, Bazel only
-executes the actions that are reachable as transitive dependencies of the
-requested files.)
-
-In addition to [default outputs](#default_outputs), any *predeclared output* can
-be explicitly requested on the command line. Rules can specify predeclared
-outputs via [output attributes](#output_attributes). In that case, the user
-explicitly chooses labels for outputs when they instantiate the rule. To obtain
-[`File`](/rules/lib/File) objects for output attributes, use the corresponding
-attribute of [`ctx.outputs`](/rules/lib/ctx#outputs). Rules can
-[implicitly define predeclared outputs](#deprecated_predeclared_outputs) based
-on the target name as well, but this feature is deprecated.
-
-In addition to default outputs, there are *output groups*, which are collections
-of output files that may be requested together. These can be requested with
-[`--output_groups`](/reference/command-line-reference#flag--output_groups). For
-example, if a target `//pkg:mytarget` is of a rule type that has a `debug_files`
-output group, these files can be built by running `bazel build //pkg:mytarget
---output_groups=debug_files`. Since non-predeclared outputs don't have labels,
-they can only be requested by appearing in the default outputs or an output
-group.
-
-Output groups can be specified with the
-[`OutputGroupInfo`](/rules/lib/OutputGroupInfo) provider. Note that unlike many
-built-in providers, `OutputGroupInfo` can take parameters with arbitrary names
-to define output groups with that name:
-
-```python
-def _example_library_impl(ctx):
-    ...
-    debug_file = ctx.actions.declare_file(ctx.label.name + ".pdb")
-    ...
-    return [
-        DefaultInfo(files = depset([output_file]), ...),
-        OutputGroupInfo(
-            debug_files = depset([debug_file]),
-            all_files = depset([output_file, debug_file]),
-        ),
-        ...
-    ]
-```
-
-Also unlike most providers, `OutputGroupInfo` can be returned by both an
-[aspect](/rules/aspects) and the rule target to which that aspect is applied, as
-long as they do not define the same output groups. In that case, the resulting
-providers are merged.
-
-Note that `OutputGroupInfo` generally shouldn't be used to convey specific sorts
-of files from a target to the actions of its consumers. Define
-[rule-specific providers](#custom_providers) for that instead.
-
-### Configurations
-
-Imagine that you want to build a C++ binary for a different architecture. The
-build can be complex and involve multiple steps. Some of the intermediate
-binaries, like compilers and code generators, have to run on
-[the execution platform](/docs/platforms#overview) (which could be your host,
-or a remote executor). Some binaries, like the final output, must be built for the
-target architecture.
-
-For this reason, Bazel has a concept of "configurations" and transitions.
The
-topmost targets (the ones requested on the command line) are built in the
-"target" configuration, while tools that should run on the execution platform
-are built in an "exec" configuration. Rules may generate different actions based
-on the configuration, for instance to change the CPU architecture that is passed
-to the compiler. In some cases, the same library may be needed for different
-configurations. If this happens, it will be analyzed and potentially built
-multiple times.
-
-By default, Bazel builds a target's dependencies in the same configuration as
-the target itself, in other words without transitions. When a dependency is a
-tool that's needed to help build the target, the corresponding attribute should
-specify a transition to an exec configuration. This causes the tool and all its
-dependencies to build for the execution platform.
-
-For each dependency attribute, you can use `cfg` to decide if dependencies
-should build in the same configuration or transition to an exec configuration.
-If a dependency attribute has the flag `executable=True`, `cfg` must be set
-explicitly. This is to guard against accidentally building a tool for the wrong
-configuration.
-[See example](https://github.com/bazelbuild/examples/blob/main/rules/actions_run/execute.bzl)
-
-In general, sources, dependent libraries, and executables that will be needed at
-runtime can use the same configuration.
-
-Tools that are executed as part of the build (such as compilers or code generators)
-should be built for an exec configuration. In this case, specify `cfg="exec"` in
-the attribute.
-
-Otherwise, executables that are used at runtime (such as part of a test) should
-be built for the target configuration. In this case, specify `cfg="target"` in
-the attribute.
-
-`cfg="target"` doesn't actually do anything: it's purely a convenience value to
-help rule designers be explicit about their intentions. When `executable=False`
-(which means `cfg` is optional), set it only when it truly helps readability.
-
-You can also use `cfg=my_transition` to use
-[user-defined transitions](/rules/config#user-defined-transitions), which allow
-rule authors a great deal of flexibility in changing configurations, with the
-drawback of
-[making the build graph larger and less comprehensible](/rules/config#memory-and-performance-considerations).
-
-**Note**: Historically, Bazel didn't have the concept of execution platforms,
-and instead all build actions were considered to run on the host machine.
-Because of this, there is a single "host" configuration, and a "host" transition
-that can be used to build a dependency in the host configuration. Many rules
-still use the "host" transition for their tools, but this is currently
-deprecated and being migrated to use "exec" transitions where possible.
-
-There are numerous differences between the "host" and "exec" configurations:
-
-* "host" is terminal, "exec" isn't: Once a dependency is in the "host"
-  configuration, no more transitions are allowed. You can keep making further
-  configuration transitions once you're in an "exec" configuration.
-* "host" is monolithic, "exec" isn't: There is only one "host" configuration,
-  but there can be a different "exec" configuration for each execution
-  platform.
-* "host" assumes you run tools on the same machine as Bazel, or on a
-  significantly similar machine.
This is no longer true: you can run build
-  actions on your local machine, or on a remote executor, and there's no
-  guarantee that the remote executor has the same CPU and OS as your local
-  machine.
-
-Both the "exec" and "host" configurations apply the same option changes (for example,
-set `--compilation_mode` from `--host_compilation_mode`, set `--cpu` from
-`--host_cpu`, etc.). The difference is that the "host" configuration starts with
-the **default** values of all other flags, whereas the "exec" configuration
-starts with the **current** values of flags, based on the target configuration.
-
-
-
-### Configuration fragments
-
-Rules may access
-[configuration fragments](/rules/lib/starlark-configuration-fragment) such as
-`cpp`, `java` and `jvm`. However, all required fragments must be declared in
-order to avoid access errors:
-
-```python
-def _impl(ctx):
-    # Using ctx.fragments.cpp leads to an error since it was not declared.
-    x = ctx.fragments.java
-    ...
-
-my_rule = rule(
-    implementation = _impl,
-    fragments = ["java"],      # Required fragments of the target configuration
-    host_fragments = ["java"], # Required fragments of the host configuration
-    ...
-)
-```
-
-`ctx.fragments` only provides configuration fragments for the target
-configuration. If you want to access fragments for the host configuration, use
-`ctx.host_fragments` instead.
-
-### Runfiles symlinks
-
-Normally, the relative path of a file in the runfiles tree is the same as the
-relative path of that file in the source tree or generated output tree. If these
-need to be different for some reason, you can specify the `root_symlinks` or
-`symlinks` arguments. `root_symlinks` is a dictionary mapping paths to
-files, where the paths are relative to the root of the runfiles directory. The
-`symlinks` dictionary is the same, but paths are implicitly prefixed with the
-name of the workspace.
-
-```python
-    ...
-    runfiles = ctx.runfiles(
-        root_symlinks = {"some/path/here.foo": ctx.file.some_data_file2},
-        symlinks = {"some/path/here.bar": ctx.file.some_data_file3},
-    )
-    # Creates something like:
-    # sometarget.runfiles/
-    #     some/
-    #         path/
-    #             here.foo -> some_data_file2
-    #     <workspace_name>/
-    #         some/
-    #             path/
-    #                 here.bar -> some_data_file3
-```
-
-If `symlinks` or `root_symlinks` is used, be careful not to map two different
-files to the same path in the runfiles tree. This will cause the build to fail
-with an error describing the conflict. To fix this, you will need to modify your
-`ctx.runfiles` arguments to remove the collision. This checking will be done for
-any targets using your rule, as well as targets of any kind that depend on those
-targets. This is especially risky if your tool is likely to be used transitively
-by another tool; symlink names must be unique across the runfiles of a tool and
-all of its dependencies.
-
-### Code coverage
-
-When the [`coverage`](/reference/command-line-reference#coverage) command is run,
-the build may need to add coverage instrumentation for certain targets. The
-build also gathers the list of source files that are instrumented. The subset of
-targets that are considered is controlled by the flag
-[`--instrumentation_filter`](/reference/command-line-reference#flag--instrumentation_filter).
-Test targets are excluded, unless
-[`--instrument_test_targets`](/reference/command-line-reference#flag--instrument_test_targets)
-is specified.
-
-If a rule implementation adds coverage instrumentation at build time, it needs
-to account for that in its implementation function.
-[ctx.coverage_instrumented](/rules/lib/ctx#coverage_instrumented) returns true in
-coverage mode if a target's sources should be instrumented:
-
-```python
-# Are this rule's sources instrumented?
-if ctx.coverage_instrumented():
-  # Do something to turn on coverage for this compile action
-```
-
-Logic that always needs to be on in coverage mode (whether a target's sources
-specifically are instrumented or not) can be conditioned on
-[ctx.configuration.coverage_enabled](/rules/lib/configuration#coverage_enabled).
-
-If the rule directly includes sources from its dependencies before compilation
-(such as header files), it may also need to turn on compile-time instrumentation if
-the dependencies' sources should be instrumented:
-
-```python
-# Are this rule's sources or any of the sources for its direct dependencies
-# in deps instrumented?
-if (ctx.configuration.coverage_enabled and
-    (ctx.coverage_instrumented() or
-     any([ctx.coverage_instrumented(dep) for dep in ctx.attr.deps]))):
-    # Do something to turn on coverage for this compile action
-```
-
-Rules should also provide information about which attributes are relevant for
-coverage with the `InstrumentedFilesInfo` provider, constructed using
-[`coverage_common.instrumented_files_info`](/rules/lib/coverage_common#instrumented_files_info).
-The `dependency_attributes` parameter of `instrumented_files_info` should list
-all runtime dependency attributes, including code dependencies like `deps` and
-data dependencies like `data`. The `source_attributes` parameter should list the
-rule's source files attributes if coverage instrumentation might be added:
-
-```python
-def _example_library_impl(ctx):
-    ...
-    return [
-        ...
-        coverage_common.instrumented_files_info(
-            ctx,
-            dependency_attributes = ["deps", "data"],
-            # Omitted if coverage is not supported for this rule:
-            source_attributes = ["srcs", "hdrs"],
-        )
-        ...
-    ]
-```
-
-If `InstrumentedFilesInfo` is not returned, a default one is created with each
-non-tool [dependency attribute](#dependency_attributes) (that doesn't set
-[`cfg`](#configuration) to `"host"` or `"exec"` in the attribute schema) in
-`dependency_attributes`. (This isn't ideal behavior, since it puts attributes
-like `srcs` in `dependency_attributes` instead of `source_attributes`, but it
-avoids the need for explicit coverage configuration for all rules in the
-dependency chain.)
-
-### Validation Actions
-
-Sometimes you need to validate something about the build, and the
-information required to do that validation is available only in artifacts
-(source files or generated files). Because this information is in artifacts,
-rules cannot do this validation at analysis time because rules cannot read
-files. Instead, actions must do this validation at execution time. When
-validation fails, the action will fail, and hence so will the build.
-
-Examples of validations that might be run are static analysis, linting,
-dependency and consistency checks, and style checks.
-
-Validation actions can also help to improve build performance by moving parts
-of actions that are not required for building artifacts into separate actions.
-For example, if a single action that does compilation and linting can be
-separated into a compilation action and a linting action, then the linting
-action can be run as a validation action and run in parallel with other actions.
-
-These "validation actions" often don't produce anything that is used elsewhere
-in the build, since they only need to assert things about their inputs.
This
-presents a problem though: If a validation action does not produce anything that
-is used elsewhere in the build, how does a rule get the action to run?
-Historically, the approach was to have the validation action output an empty
-file, and artificially add that output to the inputs of some other important
-action in the build.
-
-This works because Bazel will always run the validation action when the compile
-action is run, but this has significant drawbacks:
-
-1. The validation action is in the critical path of the build. Because Bazel
-thinks the empty output is required to run the compile action, it will run the
-validation action first, even though the compile action will ignore the input.
-This reduces parallelism and slows down builds.
-
-2. If other actions in the build might run instead of the
-compile action, then the empty outputs of validation actions need to be added to
-those actions as well (`java_library`'s source jar output, for example). This is
-also a problem if new actions that might run instead of the compile action are
-added later, and the empty validation output is accidentally left off.
-
-The solution to these problems is to use the Validations Output Group.
-
-#### Validations Output Group
-
-The Validations Output Group is an output group designed to hold the otherwise
-unused outputs of validation actions, so that they don't need to be artificially
-added to the inputs of other actions.
-
-This group is special in that its outputs are always requested, regardless of
-the value of the `--output_groups` flag, and regardless of how the target is
-depended upon (for example, on the command line, as a dependency, or through
-implicit outputs of the target). Note that normal caching and incrementality
-still apply: if the inputs to the validation action have not changed and the
-validation action previously succeeded, then the validation action will not be
-run.
-
-Using this output group still requires that validation actions output some file,
-even an empty one. This might require wrapping some tools that normally don't
-create outputs so that a file is created.
-
-A target's validation actions are not run in three cases:
-
-* When the target is depended upon as a tool
-* When the target is depended upon as an implicit dependency (for example, an
-  attribute that starts with "_")
-* When the target is built in the host or exec configuration.
-
-It is assumed that these targets have their own
-separate builds and tests that would uncover any validation failures.
- -#### Using the Validations Output Group - -The Validations Output Group is named `_validation` and is used like any other -output group: - -```python -def _rule_with_validation_impl(ctx): - - ctx.actions.write(ctx.outputs.main, "main output\n") - - ctx.actions.write(ctx.outputs.implicit, "implicit output\n") - - validation_output = ctx.actions.declare_file(ctx.attr.name + ".validation") - ctx.actions.run( - outputs = [validation_output], - executable = ctx.executable._validation_tool, - arguments = [validation_output.path]) - - return [ - DefaultInfo(files = depset([ctx.outputs.main])), - OutputGroupInfo(_validation = depset([validation_output])), - ] - - -rule_with_validation = rule( - implementation = _rule_with_validation_impl, - outputs = { - "main": "%{name}.main", - "implicit": "%{name}.implicit", - }, - attrs = { - "_validation_tool": attr.label( - default = Label("//validation_actions:validation_tool"), - executable = True, - cfg = "exec"), - } -) -``` - -Notice that the validation output file is not added to the `DefaultInfo` or the -inputs to any other action. The validation action for a target of this rule kind -will still run if the target is depended upon by label, or any of the target's -implicit outputs are directly or indirectly depended upon. - -It is usually important that the outputs of validation actions only go into the -validation output group, and are not added to the inputs of other actions, as -this could defeat parallelism gains. Note however that Bazel does not currently -have any special checking to enforce this. Therefore, you should test -that validation action outputs are not added to the inputs of any actions in the -tests for Starlark rules. For example: - -```python -load("@bazel_skylib//lib:unittest.bzl", "analysistest") - -def _validation_outputs_test_impl(ctx): - env = analysistest.begin(ctx) - - actions = analysistest.target_actions(env) - target = analysistest.target_under_test(env) - validation_outputs = target.output_groups._validation.to_list() - for action in actions: - for validation_output in validation_outputs: - if validation_output in action.inputs.to_list(): - analysistest.fail(env, - "%s is a validation action output, but is an input to action %s" % ( - validation_output, action)) - - return analysistest.end(env) - -validation_outputs_test = analysistest.make(_validation_outputs_test_impl) -``` - -#### Validation Actions Flag - -Running validation actions is controlled by the `--run_validations` command line -flag, which defaults to true. - -## Deprecated features - -### Deprecated predeclared outputs - -There are two **deprecated** ways of using predeclared outputs: - -* The [`outputs`](/rules/lib/globals#rule.outputs) parameter of `rule` specifies - a mapping between output attribute names and string templates for generating - predeclared output labels. Prefer using non-predeclared outputs and - explicitly adding outputs to `DefaultInfo.files`. Use the rule target's - label as input for rules which consume the output instead of a predeclared - output's label. - -* For [executable rules](#executable-rules), `ctx.outputs.executable` refers - to a predeclared executable output with the same name as the rule target. - Prefer declaring the output explicitly, for example with - `ctx.actions.declare_file(ctx.label.name)`, and ensure that the command that - generates the executable sets its permissions to allow execution. Explicitly - pass the executable output to the `executable` parameter of `DefaultInfo`. 
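-
-As a sketch of the preferred pattern from the last bullet (illustrative only,
-not tied to any particular rule set), an executable output can be declared and
-returned like this:
-
-```python
-def _my_binary_impl(ctx):
-    # Declare the executable explicitly instead of relying on the
-    # deprecated ctx.outputs.executable predeclared output.
-    executable = ctx.actions.declare_file(ctx.label.name)
-    ctx.actions.write(
-        output = executable,
-        content = "#!/bin/bash\necho hello\n",
-        is_executable = True,  # sets the executable bit
-    )
-    return [DefaultInfo(executable = executable)]
-```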
- -### Runfiles features to avoid - -[`ctx.runfiles`](/rules/lib/ctx#runfiles) and the [`runfiles`](/rules/lib/runfiles) -type have a complex set of features, many of which are kept for legacy reasons. -The following recommendations help reduce complexity: - -* **Avoid** use of the `collect_data` and `collect_default` modes of - [`ctx.runfiles`](/rules/lib/ctx#runfiles). These modes implicitly collect - runfiles across certain hardcoded dependency edges in confusing ways. - Instead, add files using the `files` or `transitive_files` parameters of - `ctx.runfiles`, or by merging in runfiles from dependencies with - `runfiles = runfiles.merge(dep[DefaultInfo].default_runfiles)`. - -* **Avoid** use of the `data_runfiles` and `default_runfiles` of the - `DefaultInfo` constructor. Specify `DefaultInfo(runfiles = ...)` instead. - The distinction between "default" and "data" runfiles is maintained for - legacy reasons. For example, some rules put their default outputs in - `data_runfiles`, but not `default_runfiles`. Instead of using - `data_runfiles`, rules should *both* include default outputs and merge in - `default_runfiles` from attributes which provide runfiles (often - [`data`](/reference/be/common-definitions#common-attributes.data)). - -* When retrieving `runfiles` from `DefaultInfo` (generally only for merging - runfiles between the current rule and its dependencies), use - `DefaultInfo.default_runfiles`, **not** `DefaultInfo.data_runfiles`. - -### Migrating from legacy providers - -Historically, Bazel providers were simple fields on the `Target` object. They -were accessed using the dot operator, and they were created by putting the field -in a struct returned by the rule's implementation function. - -*This style is deprecated and should not be used in new code;* see below for -information that may help you migrate. The new provider mechanism avoids name -clashes. It also supports data hiding, by requiring any code accessing a -provider instance to retrieve it using the provider symbol. - -For the moment, legacy providers are still supported. A rule can return both -legacy and modern providers as follows: - -```python -def _old_rule_impl(ctx): - ... - legacy_data = struct(x="foo", ...) - modern_data = MyInfo(y="bar", ...) - # When any legacy providers are returned, the top-level returned value is a - # struct. - return struct( - # One key = value entry for each legacy provider. - legacy_info = legacy_data, - ... - # Additional modern providers: - providers = [modern_data, ...]) -``` - -If `dep` is the resulting `Target` object for an instance of this rule, the -providers and their contents can be retrieved as `dep.legacy_info.x` and -`dep[MyInfo].y`. - -In addition to `providers`, the returned struct can also take several other -fields that have special meaning (and thus do not create a corresponding legacy -provider): - -* The fields `files`, `runfiles`, `data_runfiles`, `default_runfiles`, and - `executable` correspond to the same-named fields of - [`DefaultInfo`](/rules/lib/DefaultInfo). It is not allowed to specify any of - these fields while also returning a `DefaultInfo` provider. - -* The field `output_groups` takes a struct value and corresponds to an - [`OutputGroupInfo`](/rules/lib/OutputGroupInfo). - -In [`provides`](/rules/lib/globals#rule.provides) declarations of rules, and in -[`providers`](/rules/lib/attr#label_list.providers) declarations of dependency -attributes, legacy providers are passed in as strings and modern providers are -passed in by their `*Info` symbol. 
Be sure to change from strings to symbols -when migrating. For complex or large rule sets where it is difficult to update -all rules atomically, you may have an easier time if you follow this sequence of -steps: - -1. Modify the rules that produce the legacy provider to produce both the legacy - and modern providers, using the above syntax. For rules that declare they - return the legacy provider, update that declaration to include both the - legacy and modern providers. - -2. Modify the rules that consume the legacy provider to instead consume the - modern provider. If any attribute declarations require the legacy provider, - also update them to instead require the modern provider. Optionally, you can - interleave this work with step 1 by having consumers accept/require either - provider: Test for the presence of the legacy provider using - `hasattr(target, 'foo')`, or the new provider using `FooInfo in target`. - -3. Fully remove the legacy provider from all rules. diff --git a/6.5.0/extending/toolchains.mdx b/6.5.0/extending/toolchains.mdx deleted file mode 100644 index 8b19fbc..0000000 --- a/6.5.0/extending/toolchains.mdx +++ /dev/null @@ -1,564 +0,0 @@ ---- -title: 'Toolchains' ---- - - -This page describes the toolchain framework, which is a way for rule authors to -decouple their rule logic from platform-based selection of tools. It is -recommended to read the [rules](/rules/rules) and [platforms](/docs/platforms) -pages before continuing. This page covers why toolchains are needed, how to -define and use them, and how Bazel selects an appropriate toolchain based on -platform constraints. - -## Motivation - -Let's first look at the problem toolchains are designed to solve. Suppose you -are writing rules to support the "bar" programming language. Your `bar_binary` -rule would compile `*.bar` files using the `barc` compiler, a tool that itself -is built as another target in your workspace. Since users who write `bar_binary` -targets shouldn't have to specify a dependency on the compiler, you make it an -implicit dependency by adding it to the rule definition as a private attribute. - -```python -bar_binary = rule( - implementation = _bar_binary_impl, - attrs = { - "srcs": attr.label_list(allow_files = True), - ... - "_compiler": attr.label( - default = "//bar_tools:barc_linux", # the compiler running on linux - providers = [BarcInfo], - ), - }, -) -``` - -`//bar_tools:barc_linux` is now a dependency of every `bar_binary` target, so -it'll be built before any `bar_binary` target. It can be accessed by the rule's -implementation function just like any other attribute: - -```python -BarcInfo = provider( - doc = "Information about how to invoke the barc compiler.", - # In the real world, compiler_path and system_lib might hold File objects, - # but for simplicity they are strings for this example. arch_flags is a list - # of strings. - fields = ["compiler_path", "system_lib", "arch_flags"], -) - -def _bar_binary_impl(ctx): - ... - info = ctx.attr._compiler[BarcInfo] - command = "%s -l %s %s" % ( - info.compiler_path, - info.system_lib, - " ".join(info.arch_flags), - ) - ... -``` - -The issue here is that the compiler's label is hardcoded into `bar_binary`, yet -different targets may need different compilers depending on what platform they -are being built for and what platform they are being built on -- called the -*target platform* and *execution platform*, respectively. 
Furthermore, the rule -author does not necessarily even know all the available tools and platforms, so -it is not feasible to hardcode them in the rule's definition. - -A less-than-ideal solution would be to shift the burden onto users, by making -the `_compiler` attribute non-private. Then individual targets could be -hardcoded to build for one platform or another. - -```python -bar_binary( - name = "myprog_on_linux", - srcs = ["mysrc.bar"], - compiler = "//bar_tools:barc_linux", -) - -bar_binary( - name = "myprog_on_windows", - srcs = ["mysrc.bar"], - compiler = "//bar_tools:barc_windows", -) -``` - -You can improve on this solution by using `select` to choose the `compiler` -[based on the platform](/docs/configurable-attributes): - -```python -config_setting( - name = "on_linux", - constraint_values = [ - "@platforms//os:linux", - ], -) - -config_setting( - name = "on_windows", - constraint_values = [ - "@platforms//os:windows", - ], -) - -bar_binary( - name = "myprog", - srcs = ["mysrc.bar"], - compiler = select({ - ":on_linux": "//bar_tools:barc_linux", - ":on_windows": "//bar_tools:barc_windows", - }), -) -``` - -But this is tedious and a bit much to ask of every single `bar_binary` user. -If this style is not used consistently throughout the workspace, it leads to -builds that work fine on a single platform but fail when extended to -multi-platform scenarios. It also does not address the problem of adding support -for new platforms and compilers without modifying existing rules or targets. - -The toolchain framework solves this problem by adding an extra level of -indirection. Essentially, you declare that your rule has an abstract dependency -on *some* member of a family of targets (a toolchain type), and Bazel -automatically resolves this to a particular target (a toolchain) based on the -applicable platform constraints. Neither the rule author nor the target author -need know the complete set of available platforms and toolchains. - -## Writing rules that use toolchains - -Under the toolchain framework, instead of having rules depend directly on tools, -they instead depend on *toolchain types*. A toolchain type is a simple target -that represents a class of tools that serve the same role for different -platforms. For instance, you can declare a type that represents the bar -compiler: - -```python -# By convention, toolchain_type targets are named "toolchain_type" and -# distinguished by their package path. So the full path for this would be -# //bar_tools:toolchain_type. -toolchain_type(name = "toolchain_type") -``` - -The rule definition in the previous section is modified so that instead of -taking in the compiler as an attribute, it declares that it consumes a -`//bar_tools:toolchain_type` toolchain. - -```python -bar_binary = rule( - implementation = _bar_binary_impl, - attrs = { - "srcs": attr.label_list(allow_files = True), - ... - # No `_compiler` attribute anymore. - }, - toolchains = ["//bar_tools:toolchain_type"], -) -``` - -The implementation function now accesses this dependency under `ctx.toolchains` -instead of `ctx.attr`, using the toolchain type as the key. - -```python -def _bar_binary_impl(ctx): - ... - info = ctx.toolchains["//bar_tools:toolchain_type"].barcinfo - # The rest is unchanged. - command = "%s -l %s %s" % ( - info.compiler_path, - info.system_lib, - " ".join(info.arch_flags), - ) - ... 
-```
-
-`ctx.toolchains["//bar_tools:toolchain_type"]` returns the
-[`ToolchainInfo` provider](/rules/lib/platform_common#ToolchainInfo)
-of whatever target Bazel resolved the toolchain dependency to. The fields of the
-`ToolchainInfo` object are set by the underlying tool's rule; in the next
-section, this rule is defined such that there is a `barcinfo` field that wraps
-a `BarcInfo` object.
-
-Bazel's procedure for resolving toolchains to targets is described
-[below](#toolchain-resolution). Only the resolved toolchain target is actually
-made a dependency of the `bar_binary` target, not the whole space of candidate
-toolchains.
-
-### Mandatory and Optional Toolchains
-
-By default, when a rule expresses a toolchain type dependency using a bare label
-(as shown above), the toolchain type is considered to be **mandatory**. If Bazel
-is unable to find a matching toolchain (see
-[Toolchain resolution](#toolchain-resolution) below) for a mandatory toolchain
-type, this is an error and analysis halts.
-
-It is possible instead to declare an **optional** toolchain type dependency, as
-follows:
-
-```python
-bar_binary = rule(
-    ...
-    toolchains = [
-        config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = False),
-    ],
-)
-```
-
-When an optional toolchain type cannot be resolved, analysis continues, and the
-result of `ctx.toolchains["//bar_tools:toolchain_type"]` is `None`.
-
-The [`config_common.toolchain_type`](/rules/lib/config_common#toolchain_type)
-function defaults to mandatory.
-
-The following forms can be used:
-
-- Mandatory toolchain types:
-  - `toolchains = ["//bar_tools:toolchain_type"]`
-  - `toolchains = [config_common.toolchain_type("//bar_tools:toolchain_type")]`
-  - `toolchains = [config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = True)]`
-- Optional toolchain types:
-  - `toolchains = [config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = False)]`
-
-You can also mix and match forms in the same rule:
-
-```python
-bar_binary = rule(
-    ...
-    toolchains = [
-        "//foo_tools:toolchain_type",
-        config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = False),
-    ],
-)
-```
-
-However, if the same toolchain type is listed multiple times, Bazel takes the
-most strict form, where mandatory is stricter than optional.
-
-### Writing aspects that use toolchains
-
-Aspects have access to the same toolchain API as rules: you can define required
-toolchain types, access toolchains via the context, and use them to generate new
-actions using the toolchain.
-
-```python
-bar_aspect = aspect(
-    implementation = _bar_aspect_impl,
-    attrs = {},
-    toolchains = ['//bar_tools:toolchain_type'],
-)
-
-def _bar_aspect_impl(target, ctx):
-    toolchain = ctx.toolchains['//bar_tools:toolchain_type']
-    # Use the toolchain provider like in a rule.
-    return []
-```
-
-## Defining toolchains
-
-To define some toolchains for a given toolchain type, you need three things:
-
-1. A language-specific rule representing the kind of tool or tool suite. By
-   convention this rule's name is suffixed with "\_toolchain".
-
-   1. **Note:** The `\_toolchain` rule cannot create any build actions.
-      Rather, it collects artifacts from other rules and forwards them to the
-      rule that uses the toolchain. That rule is responsible for creating all
-      build actions.
-
-2. Several targets of this rule type, representing versions of the tool or tool
-   suite for different platforms.
-
-3. 
For each such target, an associated target of the generic
-   [`toolchain`](/reference/be/platform#toolchain)
-   rule, to provide metadata used by the toolchain framework. This `toolchain`
-   target also refers to the `toolchain_type` associated with this toolchain.
-   This means that a given `_toolchain` rule can be associated with any
-   `toolchain_type`; the rule only becomes associated with a `toolchain_type`
-   through a `toolchain` instance that uses it.
-
-For our running example, here's a definition for a `bar_toolchain` rule. Our
-example has only a compiler, but other tools such as a linker could also be
-grouped underneath it.
-
-```python
-def _bar_toolchain_impl(ctx):
-    toolchain_info = platform_common.ToolchainInfo(
-        barcinfo = BarcInfo(
-            compiler_path = ctx.attr.compiler_path,
-            system_lib = ctx.attr.system_lib,
-            arch_flags = ctx.attr.arch_flags,
-        ),
-    )
-    return [toolchain_info]
-
-bar_toolchain = rule(
-    implementation = _bar_toolchain_impl,
-    attrs = {
-        "compiler_path": attr.string(),
-        "system_lib": attr.string(),
-        "arch_flags": attr.string_list(),
-    },
-)
-```
-
-The rule must return a `ToolchainInfo` provider, which becomes the object that
-the consuming rule retrieves using `ctx.toolchains` and the label of the
-toolchain type. `ToolchainInfo`, like `struct`, can hold arbitrary field-value
-pairs. The specification of exactly what fields are added to the `ToolchainInfo`
-should be clearly documented at the toolchain type. In this example, the values
-are returned wrapped in a `BarcInfo` object to reuse the schema defined above;
-this style may be useful for validation and code reuse.
-
-Now you can define targets for specific `barc` compilers.
-
-```python
-bar_toolchain(
-    name = "barc_linux",
-    arch_flags = [
-        "--arch=Linux",
-        "--debug_everything",
-    ],
-    compiler_path = "/path/to/barc/on/linux",
-    system_lib = "/usr/lib/libbarc.so",
-)
-
-bar_toolchain(
-    name = "barc_windows",
-    arch_flags = [
-        "--arch=Windows",
-        # Different flags, no debug support on windows.
-    ],
-    compiler_path = "C:\\path\\on\\windows\\barc.exe",
-    system_lib = "C:\\path\\on\\windows\\barclib.dll",
-)
-```
-
-Finally, you create `toolchain` definitions for the two `bar_toolchain` targets.
-These definitions link the language-specific targets to the toolchain type and
-provide the constraint information that tells Bazel when the toolchain is
-appropriate for a given platform.
-
-```python
-toolchain(
-    name = "barc_linux_toolchain",
-    exec_compatible_with = [
-        "@platforms//os:linux",
-        "@platforms//cpu:x86_64",
-    ],
-    target_compatible_with = [
-        "@platforms//os:linux",
-        "@platforms//cpu:x86_64",
-    ],
-    toolchain = ":barc_linux",
-    toolchain_type = ":toolchain_type",
-)
-
-toolchain(
-    name = "barc_windows_toolchain",
-    exec_compatible_with = [
-        "@platforms//os:windows",
-        "@platforms//cpu:x86_64",
-    ],
-    target_compatible_with = [
-        "@platforms//os:windows",
-        "@platforms//cpu:x86_64",
-    ],
-    toolchain = ":barc_windows",
-    toolchain_type = ":toolchain_type",
-)
-```
-
-The use of relative path syntax above suggests these definitions are all in the
-same package, but there's no reason the toolchain type, language-specific
-toolchain targets, and `toolchain` definition targets can't all be in separate
-packages.
-
-See the [`go_toolchain`](https://github.com/bazelbuild/rules_go/blob/master/go/private/go_toolchain.bzl)
-for a real-world example.
-
-### Toolchains and configurations
-
-An important question for rule authors is, when a `bar_toolchain` target is
-analyzed, what [configuration](/reference/glossary#configuration) does it see, and what transitions
-should be used for dependencies? The example above uses string attributes, but
-what would happen for a more complicated toolchain that depends on other targets
-in the Bazel repository?
-
-Let's see a more complex version of `bar_toolchain`:
-
-```python
-def _bar_toolchain_impl(ctx):
-    # The implementation is mostly the same as above, so skipping.
-    pass
-
-bar_toolchain = rule(
-    implementation = _bar_toolchain_impl,
-    attrs = {
-        "compiler": attr.label(
-            executable = True,
-            mandatory = True,
-            cfg = "exec",
-        ),
-        "system_lib": attr.label(
-            mandatory = True,
-            cfg = "target",
-        ),
-        "arch_flags": attr.string_list(),
-    },
-)
-```
-
-The use of [`attr.label`](/rules/lib/attr#label) is the same as for a standard rule,
-but the meaning of the `cfg` parameter is slightly different.
-
-The dependency from a target (called the "parent") to a toolchain via toolchain
-resolution uses a special configuration transition called the "toolchain
-transition". The toolchain transition keeps the configuration the same, except
-that it forces the execution platform to be the same for the toolchain as for
-the parent (otherwise, toolchain resolution for the toolchain could pick any
-execution platform, and wouldn't necessarily be the same as for the parent). This
-allows any `exec` dependencies of the toolchain to also be executable for the
-parent's build actions. Any of the toolchain's dependencies which use `cfg =
-"target"` (or which don't specify `cfg`, since "target" is the default) are
-built for the same target platform as the parent. This allows toolchain rules to
-contribute both libraries (the `system_lib` attribute above) and tools (the
-`compiler` attribute) to the build rules which need them. The system libraries
-are linked into the final artifact, and so need to be built for the same
-platform, whereas the compiler is a tool invoked during the build, and needs to
-be able to run on the execution platform.
-
-## Registering and building with toolchains
-
-At this point all the building blocks are assembled, and you just need to make
-the toolchains available to Bazel's resolution procedure. This is done by
-registering the toolchain, either in a `WORKSPACE` file using
-`register_toolchains()`, or by passing the toolchains' labels on the command
-line using the `--extra_toolchains` flag.
-
-```python
-register_toolchains(
-    "//bar_tools:barc_linux_toolchain",
-    "//bar_tools:barc_windows_toolchain",
-    # Target patterns are also permitted, so you could have also written:
-    # "//bar_tools:all",
-)
-```
-
-Now when you build a target that depends on a toolchain type, an appropriate
-toolchain will be selected based on the target and execution platforms.
-
-```python
-# my_pkg/BUILD
-
-platform(
-    name = "my_target_platform",
-    constraint_values = [
-        "@platforms//os:linux",
-    ],
-)
-
-bar_binary(
-    name = "my_bar_binary",
-    ...
-)
-```
-
-```sh
-bazel build //my_pkg:my_bar_binary --platforms=//my_pkg:my_target_platform
-```
-
-Bazel will see that `//my_pkg:my_bar_binary` is being built with a platform that
-has `@platforms//os:linux` and therefore resolve the
-`//bar_tools:toolchain_type` reference to `//bar_tools:barc_linux_toolchain`.
-This will end up building `//bar_tools:barc_linux` but not
-`//bar_tools:barc_windows`.
-
-## Toolchain resolution
-
-Note: [Some Bazel rules](/concepts/platforms-intro#status) do not yet support
-toolchain resolution.
-
-For each target that uses toolchains, Bazel's toolchain resolution procedure
-determines the target's concrete toolchain dependencies. The procedure takes as
-input a set of required toolchain types, the target platform, the list of
-available execution platforms, and the list of available toolchains. Its outputs
-are a selected toolchain for each toolchain type as well as a selected execution
-platform for the current target.
-
-The available execution platforms and toolchains are gathered from the
-`WORKSPACE` file via
-[`register_execution_platforms`](/rules/lib/globals#register_execution_platforms)
-and
-[`register_toolchains`](/rules/lib/globals#register_toolchains).
-Additional execution platforms and toolchains may also be specified on the
-command line via
-[`--extra_execution_platforms`](/reference/command-line-reference#flag--extra_execution_platforms)
-and
-[`--extra_toolchains`](/reference/command-line-reference#flag--extra_toolchains).
-The host platform is automatically included as an available execution platform.
-Available platforms and toolchains are tracked as ordered lists for determinism,
-with preference given to earlier items in the list.
-
-The resolution steps are as follows.
-
-1. A `target_compatible_with` or `exec_compatible_with` clause *matches* a
-   platform if, for each `constraint_value` in its list, the platform also has
-   that `constraint_value` (either explicitly or as a default).
-
-   If the platform has `constraint_value`s from `constraint_setting`s not
-   referenced by the clause, these do not affect matching.
-
-1. If the target being built specifies the
-   [`exec_compatible_with` attribute](/reference/be/common-definitions#common.exec_compatible_with)
-   (or its rule definition specifies the
-   [`exec_compatible_with` argument](/rules/lib/globals#rule.exec_compatible_with)),
-   the list of available execution platforms is filtered to remove
-   any that do not match the execution constraints.
-
-1. For each available execution platform, you associate each toolchain type with
-   the first available toolchain, if any, that is compatible with this execution
-   platform and the target platform.
-
-1. Any execution platform that failed to find a compatible mandatory toolchain
-   for one of its toolchain types is ruled out. Of the remaining platforms, the
-   first one becomes the current target's execution platform, and its associated
-   toolchains (if any) become dependencies of the target.
-
-The chosen execution platform is used to run all actions that the target
-generates.
-
-In cases where the same target can be built in multiple configurations (such as
-for different CPUs) within the same build, the resolution procedure is applied
-independently to each version of the target.
-
-If the rule uses [execution groups](/reference/exec-groups), each execution
-group performs toolchain resolution separately, and each has its own execution
-platform and toolchains.
-
-## Debugging toolchains
-
-If you are adding toolchain support to an existing rule, use the
-`--toolchain_resolution_debug=regex` flag. During toolchain resolution, the flag
-provides verbose output for toolchain types or target names that match the
-regex. You can use `.*` to output all information. Bazel will output names of
-toolchains it checks and skips during the resolution process.
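-
-For example, the following invocation (reusing the running example's toolchain
-type; the target label is hypothetical) limits the debug output to a single
-toolchain type:
-
-```sh
-bazel build //my_pkg:my_bar_binary \
-    --toolchain_resolution_debug=//bar_tools:toolchain_type
-```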
-
-If you'd like to see which [`cquery`](/docs/cquery) dependencies are from toolchain
-resolution, use `cquery`'s [`--transitions`](/docs/cquery#transitions) flag:
-
-```
-# Find all direct dependencies of //cc:my_cc_lib. This includes explicitly
-# declared dependencies, implicit dependencies, and toolchain dependencies.
-$ bazel cquery 'deps(//cc:my_cc_lib, 1)'
-//cc:my_cc_lib (96d6638)
-@bazel_tools//tools/cpp:toolchain (96d6638)
-@bazel_tools//tools/def_parser:def_parser (HOST)
-//cc:my_cc_dep (96d6638)
-@local_config_platform//:host (96d6638)
-@bazel_tools//tools/cpp:toolchain_type (96d6638)
-//:default_host_platform (96d6638)
-@local_config_cc//:cc-compiler-k8 (HOST)
-//cc:my_cc_lib.cc (null)
-@bazel_tools//tools/cpp:grep-includes (HOST)
-
-# Which of these are from toolchain resolution?
-$ bazel cquery 'deps(//cc:my_cc_lib, 1)' --transitions=lite | grep "toolchain dependency"
-  [toolchain dependency]#@local_config_cc//:cc-compiler-k8#HostTransition -> b6df211
-```
diff --git a/6.5.0/external/advanced.mdx b/6.5.0/external/advanced.mdx
deleted file mode 100644
index d468d5d..0000000
--- a/6.5.0/external/advanced.mdx
+++ /dev/null
@@ -1,183 +0,0 @@
----
-title: 'Advanced topics on external dependencies'
----
-
-
-## Shadowing dependencies in WORKSPACE
-
-Note: This section applies to the [WORKSPACE
-system](/external/overview#workspace-system) only. For
-[Bzlmod](/external/overview#bzlmod), use a [multiple-version
-override](/external/module#multiple-version_override).
-
-Whenever possible, have a single version policy in your project. This is
-required for dependencies that you compile against and end up in your final
-binary. For other cases, you can shadow dependencies:
-
-myproject/WORKSPACE
-
-```python
-workspace(name = "myproject")
-
-local_repository(
-    name = "A",
-    path = "../A",
-)
-local_repository(
-    name = "B",
-    path = "../B",
-)
-```
-
-A/WORKSPACE
-
-```python
-workspace(name = "A")
-
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-http_archive(
-    name = "testrunner",
-    urls = ["https://github.com/testrunner/v1.zip"],
-    sha256 = "...",
-)
-```
-
-B/WORKSPACE
-
-```python
-workspace(name = "B")
-
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-http_archive(
-    name = "testrunner",
-    urls = ["https://github.com/testrunner/v2.zip"],
    sha256 = "..."
-)
-```
-
-Both dependencies `A` and `B` depend on different versions of `testrunner`.
-Include both in `myproject` without conflict by giving them distinct names in
-`myproject/WORKSPACE`:
-
-```python
-workspace(name = "myproject")
-
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-http_archive(
-    name = "testrunner-v1",
-    urls = ["https://github.com/testrunner/v1.zip"],
-    sha256 = "..."
-)
-http_archive(
-    name = "testrunner-v2",
-    urls = ["https://github.com/testrunner/v2.zip"],
-    sha256 = "..."
-)
-local_repository(
-    name = "A",
-    path = "../A",
-    repo_mapping = {"@testrunner" : "@testrunner-v1"}
-)
-local_repository(
-    name = "B",
-    path = "../B",
-    repo_mapping = {"@testrunner" : "@testrunner-v2"}
-)
-```
-
-You can also use this mechanism to join diamonds. For example, if `A` and `B`
-have the same dependency but call it by different names, join those dependencies
-in `myproject/WORKSPACE`.
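-
-For instance, here is a sketch of joining such a diamond, assuming `A` refers
-to the dependency as `@testrunner` while `B` calls it `@test_runner` (the
-second name is hypothetical):
-
-```python
-workspace(name = "myproject")
-
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-http_archive(
-    name = "testrunner",
-    urls = ["https://github.com/testrunner/v1.zip"],
-    sha256 = "...",
-)
-local_repository(
-    name = "A",
-    path = "../A",
-)
-local_repository(
-    name = "B",
-    path = "../B",
-    # B's BUILD files say "@test_runner", but both names now resolve to the
-    # single @testrunner repository declared above.
-    repo_mapping = {"@test_runner" : "@testrunner"},
-)
-```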
-
-## Overriding repositories from the command line
-
-To override a declared repository with a local repository from the command line,
-use the
-[`--override_repository`](/reference/command-line-reference#flag--override_repository)
-flag. Using this flag changes the contents of external repositories without
-changing your source code.
-
-For example, to override `@foo` to the local directory `/path/to/local/foo`,
-pass the `--override_repository=foo=/path/to/local/foo` flag.
-
-Use cases include:
-
-* Debugging issues. For example, to override an `http_archive` repository to a
-  local directory where you can make changes more easily.
-* Vendoring. If you are in an environment where you cannot make network calls,
-  override the network-based repository rules to point to local directories
-  instead.
-
-Note: With [Bzlmod](/external/overview#bzlmod), remember to use canonical repo
-names here. Alternatively, use the
-[`--override_module`](/reference/command-line-reference#flag--override_module)
-flag to override a module to a local directory, similar to the
-[`local_path_override`](/rules/lib/globals#local_path_override) directive in
-`MODULE.bazel`.
-
-## Using proxies
-
-Bazel picks up proxy addresses from the `HTTPS_PROXY` and `HTTP_PROXY`
-environment variables and uses these to download `HTTP` and `HTTPS` files (if
-specified).
-
-## Support for IPv6
-
-On IPv6-only machines, Bazel can download dependencies with no changes. However,
-on dual-stack IPv4/IPv6 machines Bazel follows the same convention as Java,
-preferring IPv4 if enabled. In some situations, for example when the IPv4
-network cannot resolve/reach external addresses, this can cause `Network
-unreachable` exceptions and build failures. In these cases, you can override
-Bazel's behavior to prefer IPv6 by using the
-[`java.net.preferIPv6Addresses=true` system
-property](https://docs.oracle.com/javase/8/docs/api/java/net/doc-files/net-properties.html).
-Specifically:
-
-* Use `--host_jvm_args=-Djava.net.preferIPv6Addresses=true` [startup
-  option](/docs/user-manual#startup-options), for example by adding the
-  following line in your [`.bazelrc` file](/run/bazelrc):
-
-  `startup --host_jvm_args=-Djava.net.preferIPv6Addresses=true`
-
-* When running Java build targets that need to connect to the internet (such
-  as for integration tests), use the
-  `--jvmopt=-Djava.net.preferIPv6Addresses=true` [tool
-  flag](/docs/user-manual#jvmopt). For example, include in your [`.bazelrc`
-  file](/run/bazelrc):
-
-  `build --jvmopt=-Djava.net.preferIPv6Addresses=true`
-
-* If you are using [`rules_jvm_external`](https://github.com/bazelbuild/rules_jvm_external)
-  for dependency version resolution, also add
-  `-Djava.net.preferIPv6Addresses=true` to the `COURSIER_OPTS` environment
-  variable to [provide JVM options for
-  Coursier](https://github.com/bazelbuild/rules_jvm_external#provide-jvm-options-for-coursier-with-coursier_opts).
-
-## Offline builds
-
-Sometimes you may wish to run a build offline, such as when traveling on an
-airplane. For such simple use cases, prefetch the needed repositories with
-`bazel fetch` or `bazel sync`. To disable fetching further repositories during
-the build, use the option `--nofetch`.
-
-For true offline builds, where a different entity supplies all needed files,
-Bazel supports the option `--distdir`.
-This flag tells Bazel to look first into
-the directories specified by that option when a repository rule asks Bazel to
-fetch a file with [`ctx.download`](/rules/lib/repository_ctx#download) or
-[`ctx.download_and_extract`](/rules/lib/repository_ctx#download_and_extract).
-When the hash of the needed file is provided, Bazel looks in those directories
-for a file matching the basename of the first URL and uses the local copy if
-its hash matches.
-
-Bazel itself uses this technique to bootstrap offline from the [distribution
-artifact](https://github.com/bazelbuild/bazel-website/blob/master/designs/_posts/2016-10-11-distribution-artifact.md).
-It does so by [collecting all the needed external
-dependencies](https://github.com/bazelbuild/bazel/blob/5cfa0303d6ac3b5bd031ff60272ce80a704af8c2/WORKSPACE#L116)
-in an internal
-[`distdir_tar`](https://github.com/bazelbuild/bazel/blob/5cfa0303d6ac3b5bd031ff60272ce80a704af8c2/distdir.bzl#L44).
-
-Bazel allows execution of arbitrary commands in repository rules without knowing
-if they call out to the network, and so cannot enforce fully offline builds. To
-test if a build works correctly offline, manually block off the network (as
-Bazel does in its [bootstrap
-test](https://cs.opensource.google/bazel/bazel/+/master:src/test/shell/bazel/BUILD;l=1073;drc=88c426e73cc0eb0a41c0d7995e36acd94e7c9a48)).
diff --git a/6.5.0/external/lockfile.mdx b/6.5.0/external/lockfile.mdx
deleted file mode 100644
index e478bce..0000000
--- a/6.5.0/external/lockfile.mdx
+++ /dev/null
@@ -1,190 +0,0 @@
----
-keywords: product:Bazel,lockfile,Bzlmod
-title: 'Bazel Lockfile'
----
-
-
-The lockfile feature in Bazel enables the recording of specific versions or
-dependencies of software libraries or packages required by a project. It
-achieves this by storing the result of module resolution and extension
-evaluation. The lockfile promotes reproducible builds, ensuring consistent
-development environments. Additionally, it enhances build efficiency by allowing
-Bazel to skip the resolution process when there are no changes in project
-dependencies. Furthermore, the lockfile improves stability by preventing
-unexpected updates or breaking changes in external libraries, thereby reducing
-the risk of introducing bugs.
-
-## Lockfile Generation
-
-The lockfile is generated under the workspace root with the name
-`MODULE.bazel.lock`. It is created or updated during the build process,
-specifically after module resolution and extension evaluation. The lockfile
-captures the current state of the project, including the MODULE file, flags,
-overrides, and other relevant information. Importantly, it only includes
-dependencies that are included in the current invocation of the build.
-
-When changes occur in the project that affect its dependencies, the lockfile is
-automatically updated to reflect the new state. This ensures that the lockfile
-remains focused on the specific set of dependencies required for the current
-build, providing an accurate representation of the project's resolved
-dependencies.
-
-## Lockfile Usage
-
-The lockfile can be controlled by the flag
-[`--lockfile_mode`](/reference/command-line-reference#flag--lockfile_mode) to
-customize the behavior of Bazel when the project state differs from the
-lockfile. The available modes are:
-
-* `update` (Default): If the project state matches the lockfile, the
-  resolution result is immediately returned from the lockfile. Otherwise,
-  resolution is executed, and the lockfile is updated to reflect the current
-  state.
-* `error`: If the project state matches the lockfile, the resolution result is
-  returned from the lockfile. Otherwise, Bazel throws an error indicating the
-  variations between the project and the lockfile. This mode is particularly
-  useful when you want to ensure that your project's dependencies remain
-  unchanged, and any differences are treated as errors.
-* `off`: The lockfile is not checked at all.
-
-## Lockfile Benefits
-
-The lockfile offers several benefits and can be utilized in various ways:
-
-- **Reproducible builds.** By capturing the specific versions or dependencies
-  of software libraries, the lockfile ensures that builds are reproducible
-  across different environments and over time. Developers can rely on
-  consistent and predictable results when building their projects.
-
-- **Efficient resolution skipping.** The lockfile enables Bazel to skip the
-  resolution process if there are no changes in the project dependencies since
-  the last build. This significantly improves build efficiency, especially in
-  scenarios where resolution can be time-consuming.
-
-- **Stability and risk reduction.** The lockfile helps maintain stability by
-  preventing unexpected updates or breaking changes in external libraries. By
-  locking the dependencies to specific versions, the risk of introducing bugs
-  due to incompatible or untested updates is reduced.
-
-## Lockfile Contents
-
-The lockfile contains all the necessary information to determine whether the
-project state has changed. It also includes the result of building the project
-in the current state. The lockfile consists of two main parts:
-
-1. Inputs of the module resolution, such as `moduleFileHash`, `flags` and
-   `localOverrideHashes`, as well as the output of the resolution, which is
-   `moduleDepGraph`.
-2. For each module extension, the lockfile includes inputs that affect it,
-   represented by `transitiveDigest`, and the output of running that extension,
-   referred to as `generatedRepoSpecs`.
-
-Here is an example that demonstrates the structure of the lockfile, along with
-explanations for each section:
-
-```json
-{
-  "lockFileVersion": 1,
-  "moduleFileHash": "b0f47b98a67ee15f9.......8dff8721c66b721e370",
-  "flags": {
-    "cmdRegistries": [
-      "https://bcr.bazel.build/"
-    ],
-    "cmdModuleOverrides": {},
-    "allowedYankedVersions": [],
-    "envVarAllowedYankedVersions": "",
-    "ignoreDevDependency": false,
-    "directDependenciesMode": "WARNING",
-    "compatibilityMode": "ERROR"
-  },
-  "localOverrideHashes": {
-    "bazel_tools": "b5ae1fa37632140aff8.......15c6fe84a1231d6af9"
-  },
-  "moduleDepGraph": {
-    "<root>": {
-      "name": "",
-      "version": "",
-      "executionPlatformsToRegister": [],
-      "toolchainsToRegister": [],
-      "extensionUsages": [
-        {
-          "extensionBzlFile": "extension.bzl",
-          "extensionName": "lockfile_ext"
-        }
-      ],
-      ...
-    }
-  },
-  "moduleExtensions": {
-    "//:extension.bzl%lockfile_ext": {
-      "transitiveDigest": "oWDzxG/aLnyY6Ubrfy....+Jp6maQvEPxn0pBM=",
-      "generatedRepoSpecs": {
-        "hello": {
-          "bzlFile": "@@//:extension.bzl",
-          ...
-        }
-      }
-    }
-  }
-}
-```
-
-### Module File Hash
-
-The `moduleFileHash` represents the hash of the `MODULE.bazel` file contents. If
-any changes occur in this file, the hash value differs.
-
-### Flags
-
-The `Flags` object stores all the flags that can affect the resolution result.
-
-### Local Override Hashes
-
-If the root module includes `local_path_overrides`, this section stores the hash
-of the `MODULE.bazel` file in the local repository. It allows tracking changes
-to this dependency.
-
-### Module Dependency Graph
-
-The `moduleDepGraph` represents the result of the resolution process using the
-inputs mentioned above. It forms the dependency graph of all the modules
-required to run the project.
-
-### Module Extensions
-
-The `moduleExtensions` section is a map that includes only the extensions used
-in the current invocation or previously invoked, while excluding any extensions
-that are no longer utilized. In other words, if an extension is not being used
-anymore across the dependency graph, it is removed from the `moduleExtensions`
-map.
-
-Each entry in this map corresponds to a used extension and is identified by its
-containing file and name. The corresponding value for each entry contains the
-relevant information associated with that extension:
-
-1. The `transitiveDigest`: the digest of the extension implementation and its
-   transitive `.bzl` files.
-2. The `generatedRepoSpecs`: the result of running that extension with the
-   current input.
-
-An additional factor that can affect the extension results is their _usages_.
-Although not stored in the lockfile, the usages are considered when comparing
-the current state of the extension with the one in the lockfile.
-
-## Best Practices
-
-To maximize the benefits of the lockfile feature, consider the following best
-practices:
-
-* Regularly update the lockfile to reflect changes in project dependencies or
-  configuration. This ensures that subsequent builds are based on the most
-  up-to-date and accurate set of dependencies.
-
-* Include the lockfile in version control to facilitate collaboration and
-  ensure that all team members have access to the same lockfile, promoting
-  consistent development environments across the project.
-
-By following these best practices, you can effectively utilize the lockfile
-feature in Bazel, leading to more efficient, reliable, and collaborative
-software development workflows.
diff --git a/6.5.0/external/module.mdx b/6.5.0/external/module.mdx
deleted file mode 100644
index 63e5e4c..0000000
--- a/6.5.0/external/module.mdx
+++ /dev/null
@@ -1,192 +0,0 @@
----
-title: 'Bazel modules'
----
-
-
-A Bazel **module** is a Bazel project that can have multiple versions, each of
-which publishes metadata about other modules that it depends on. This is
-analogous to familiar concepts in other dependency management systems, such as a
-Maven *artifact*, an npm *package*, a Go *module*, or a Cargo *crate*.
-
-A module must have a `MODULE.bazel` file at its repo root (next to the
-`WORKSPACE` file). This file is the module's manifest, declaring its name,
-version, list of direct dependencies, and other information. For a basic
-example:
-
-```python
-module(name = "my-module", version = "1.0")
-
-bazel_dep(name = "rules_cc", version = "0.0.1")
-bazel_dep(name = "protobuf", version = "3.19.0")
-```
-
-// TODO(wyv): add link to MODULE.bazel globals
-
-To perform module resolution, Bazel starts by reading the root module's
-`MODULE.bazel` file, and then repeatedly requests any dependency's
-`MODULE.bazel` file from a [Bazel registry](/external/registry) until it
-discovers the entire dependency graph.
-
-By default, Bazel then [selects](#version-selection) one version of each module
-to use. Bazel represents each module with a repo, and consults the registry
-again to learn how to define each of the repos.
-
-## Version format
-
-Bazel has a diverse ecosystem and projects use various versioning schemes. The
-most popular by far is [SemVer](https://semver.org), but there are
-also prominent projects using different schemes such as
-[Abseil](https://github.com/abseil/abseil-cpp/releases), whose
-versions are date-based (for example, `20210324.2`).
-
-For this reason, Bzlmod adopts a more relaxed version of the SemVer spec. The
-differences include:
-
-* SemVer prescribes that the "release" part of the version must consist of 3
-  segments: `MAJOR.MINOR.PATCH`. In Bazel, this requirement is loosened so
-  that any number of segments is allowed.
-* In SemVer, each of the segments in the "release" part must be digits only.
-  In Bazel, this is loosened to allow letters too, and the comparison
-  semantics match the "identifiers" in the "prerelease" part.
-* Additionally, the semantics of major, minor, and patch version increases are
-  not enforced. However, see [compatibility level](#compatibility_level) for
-  details on how we denote backwards compatibility.
-
-Any valid SemVer version is a valid Bazel module version. Additionally, two
-SemVer versions `a` and `b` compare `a < b` if and only if the same holds when
-they're compared as Bazel module versions.
-
-## Version selection
-
-Consider the diamond dependency problem, a staple in the versioned dependency
-management space. Suppose you have the dependency graph:
-
-```
-       A 1.0
-      /     \
-   B 1.0   C 1.1
-     |       |
-   D 1.0   D 1.1
-```
-
-Which version of `D` should be used? To resolve this question, Bzlmod uses the
-[Minimal Version Selection](https://research.swtch.com/vgo-mvs)
-(MVS) algorithm introduced in the Go module system. MVS assumes that all new
-versions of a module are backwards compatible, and so picks the highest version
-specified by any dependent (`D 1.1` in our example). It's called "minimal"
-because `D 1.1` is the earliest version that could satisfy our requirements —
-even if `D 1.2` or newer exists, we don't select them. Using MVS creates a
-version selection process that is *high-fidelity* and *reproducible*.
-
-### Yanked versions
-
-The registry can declare certain versions as *yanked* if they should be avoided
-(such as for security vulnerabilities). Bazel throws an error when selecting a
-yanked version of a module. To fix this error, either upgrade to a newer,
-non-yanked version, or use the
-[`--allow_yanked_versions`](/reference/command-line-reference#flag--allow_yanked_versions)
-flag to explicitly allow the yanked version.
-
-## Compatibility level
-
-In Go, MVS's assumption about backwards compatibility works because it treats
-backwards incompatible versions of a module as a separate module. In terms of
-SemVer, that means `A 1.x` and `A 2.x` are considered distinct modules, and can
-coexist in the resolved dependency graph. This is, in turn, made possible by
-encoding the major version in the package path in Go, so there aren't any
-compile-time or linking-time conflicts.
-
-Bazel, however, cannot provide such guarantees, so it needs the "major version"
-number in order to detect backwards incompatible versions. This number is called
-the *compatibility level*, and is specified by each module version in its
-`module()` directive. With this information, Bazel can throw an error when it
-detects that versions of the same module with different compatibility levels
-exist in the resolved dependency graph.
-
-## Overrides
-
-Specify overrides in the `MODULE.bazel` file to alter the behavior of Bazel
-module resolution.
-Only the root module's overrides take effect — if a module is
-used as a dependency, its overrides are ignored.
-
-Each override is specified for a certain module name, affecting all of its
-versions in the dependency graph. Although only the root module's overrides take
-effect, they can be for transitive dependencies that the root module does not
-directly depend on.
-
-### Single-version override
-
-The [`single_version_override`](/rules/lib/globals#single_version_override)
-serves multiple purposes:
-
-* With the `version` attribute, you can pin a dependency to a specific
-  version, regardless of which versions of the dependency are requested in the
-  dependency graph.
-* With the `registry` attribute, you can force this dependency to come from a
-  specific registry, instead of following the normal [registry
-  selection](/external/registry#selecting_registries) process.
-* With the `patch*` attributes, you can specify a set of patches to apply to
-  the downloaded module.
-
-These attributes are all optional and can be mixed and matched with each other.
-
-### Multiple-version override
-
-A [`multiple_version_override`](/rules/lib/globals#multiple_version_override)
-can be specified to allow multiple versions of the same module to coexist in the
-resolved dependency graph.
-
-You can specify an explicit list of allowed versions for the module, which must
-all be present in the dependency graph before resolution — there must exist
-*some* transitive dependency depending on each allowed version. After
-resolution, only the allowed versions of the module remain, while Bazel upgrades
-other versions of the module to the nearest higher allowed version at the same
-compatibility level. If no higher allowed version at the same compatibility
-level exists, Bazel throws an error.
-
-For example, if versions `1.1`, `1.3`, `1.5`, `1.7`, and `2.0` exist in the
-dependency graph before resolution and the major version is the compatibility
-level:
-
-* A multiple-version override allowing `1.3`, `1.7`, and `2.0` results in
-  `1.1` being upgraded to `1.3`, `1.5` being upgraded to `1.7`, and other
-  versions remaining the same.
-* A multiple-version override allowing `1.5` and `2.0` results in an error, as
-  `1.7` has no higher version at the same compatibility level to upgrade to.
-* A multiple-version override allowing `1.9` and `2.0` results in an error, as
-  `1.9` is not present in the dependency graph before resolution.
-
-Additionally, users can also override the registry using the `registry`
-attribute, similarly to single-version overrides.
-
-### Non-registry overrides
-
-Non-registry overrides completely remove a module from version resolution. Bazel
-does not request these `MODULE.bazel` files from a registry, but instead from
-the repo itself.
-
-Bazel supports the following non-registry overrides:
-
-* [`archive_override`](/rules/lib/globals#archive_override)
-* [`git_override`](/rules/lib/globals#git_override)
-* [`local_path_override`](/rules/lib/globals#local_path_override)
-
-## Repository names and strict deps
-
-The [canonical name](/external/overview#canonical-repo-name) of a repo backing a
-module is `module_name~version` (for example, `bazel_skylib~1.0.3`). For modules
-with a non-registry override, replace the `version` part with the string
-`override`. Note that the canonical name format is not an API you should depend
-on and is subject to change at any time.
- -The [apparent name](/external/overview#apparent-repo-name) of a repo backing a -module to its direct dependents defaults to its module name, unless the -`repo_name` attribute of the [`bazel_dep`](/rules/lib/globals#bazel_dep) -directive says otherwise. Note that this means a module can only find its direct -dependencies. This helps prevent accidental breakages due to changes in -transitive dependencies. - -[Module extensions](/external/extension) can also introduce additional repos -into the visible scope of a module. diff --git a/6.5.0/help.mdx b/6.5.0/help.mdx deleted file mode 100644 index 29a5fab..0000000 --- a/6.5.0/help.mdx +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: 'Getting Help' ---- - - -This page lists Bazel resources beyond the documentation and covers how to get -support from the Bazel team and community. - -## Search existing material - -In addition to the documentation, you can find helpful information by searching: - -* [Bazel user group](https://groups.google.com/g/bazel-discuss) -* [Bazel developer group](https://groups.google.com/g/bazel-dev) -* [Bazel blog](https://blog.bazel.build/) -* [Stack Overflow](https://stackoverflow.com/questions/tagged/bazel) -* [`awesome-bazel` resources](https://github.com/jin/awesome-bazel) - -## Watch videos - -There are recordings of Bazel talks at various conferences, such as: - -* Bazel’s annual conference, BazelCon: - * [BazelCon 2021](https://www.youtube.com/playlist?list=PLxNYxgaZ8Rsc3auKhtfIB4qXAYf7whEux) - * [BazelCon 2020](https://www.youtube.com/playlist?list=PLxNYxgaZ8RseRybXNbopHRv6-wGmFr04n) - * [BazelCon 2019](https://youtu.be/eymphDN7No4?t=PLxNYxgaZ8Rsf-7g43Z8LyXct9ax6egdSj) - * [BazelCon 2018](https://youtu.be/DVYRg6b2UBo?t=PLxNYxgaZ8Rsd3Nmvl1W1B4I6nK1674ezp) - * [BazelCon 2017](https://youtu.be/3eFllvz8_0k?t=PLxNYxgaZ8RseY0KmkXQSt0StE71E7yizG) -* Bazel day on [Google Open Source Live](https://opensourcelive.withgoogle.com/events/bazel) - - -## Ask the Bazel community - -If there are no existing answers, you can ask the community by: - -* Emailing the [Bazel user group](https://groups.google.com/g/bazel-discuss) -* Emailing the [Bazel developer group](https://groups.google.com/g/bazel-dev) -* Asking a question on [Stack Overflow](https://stackoverflow.com/questions/tagged/bazel) -* Chatting with other Bazel contributors on [Slack](https://slack.bazel.build/) -* Consulting a [Bazel community expert](/community/experts) - -## File a bug - -If you encounter a bug or want to request a feature, file a [GitHub -Issue](https://github.com/bazelbuild/bazel/issues). diff --git a/6.5.0/install/bazelisk.mdx b/6.5.0/install/bazelisk.mdx deleted file mode 100644 index a9ee8bd..0000000 --- a/6.5.0/install/bazelisk.mdx +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: 'Installing / Updating Bazel using Bazelisk' ---- - - -## Installing Bazel - -[Bazelisk](https://github.com/bazelbuild/bazelisk) is the -recommended way to install Bazel on Ubuntu, Windows, and macOS. It automatically -downloads and installs the appropriate version of Bazel. Use Bazelisk if you -need to switch between different versions of Bazel depending on the current -working directory, or to always keep Bazel updated to the latest release. - -For more details, see -[the official README](https://github.com/bazelbuild/bazelisk/blob/master/README.md). - -## Updating Bazel - -Bazel has a [backward compatibility policy](/release/backward-compatibility) -(see [guidance for rolling out incompatible -changes](/contribute/breaking-changes) if you -are the author of one). 
-That page summarizes best practices on how to test and
-migrate your project for upcoming incompatible changes and how to provide
-feedback to the incompatible change authors.
-
-### Managing Bazel versions with Bazelisk
-
-[Bazelisk](https://github.com/bazelbuild/bazelisk) helps you manage
-Bazel versions.
-
-Bazelisk can:
-
-* Auto-update Bazel to the latest LTS or rolling release.
-* Build the project with a Bazel version specified in the .bazelversion
-  file. Check that file into your version control to ensure reproducibility
-  of your builds.
-* Help migrate your project for incompatible changes (see above)
-* Easily try release candidates
-
-### Recommended migration process
-
-Within minor updates to any LTS release, any
-project can be prepared for the next release without breaking
-compatibility with the current release. However, there may be
-backward-incompatible changes between major LTS versions.
-
-Follow this process to migrate from one major version to another:
-
-1. Read the release notes to get advice on how to migrate to the next version.
-1. Major incompatible changes should have an associated `--incompatible_*` flag
-   and a corresponding GitHub issue:
-    * Migration guidance is available in the associated GitHub issue.
-    * Tooling is available to help migrate some incompatible changes. For
      example, [buildifier](https://github.com/bazelbuild/buildtools/releases).
-    * Report migration problems by commenting on the associated GitHub issue.
-
-After migration, you can continue to build your projects without worrying about
-backward-compatibility until the next major release.
diff --git a/6.5.0/install/compile-source.mdx b/6.5.0/install/compile-source.mdx
deleted file mode 100644
index 9ef417d..0000000
--- a/6.5.0/install/compile-source.mdx
+++ /dev/null
@@ -1,293 +0,0 @@
----
-title: 'Compiling Bazel from Source'
----
-
-
-This page describes how to install Bazel from source and provides
-troubleshooting tips for common issues.
-
-To build Bazel from source, you can do one of the following:
-
-* Build it [using an existing Bazel binary](#build-bazel-using-bazel)
-
-* Build it [without an existing Bazel binary](#bootstrap-bazel), which is known
-  as _bootstrapping_.
-
-## Build Bazel using Bazel
-
-### Summary
-
-1. Get the latest Bazel release from the
-   [GitHub release page](https://github.com/bazelbuild/bazel/releases) or with
-   [Bazelisk](https://github.com/bazelbuild/bazelisk).
-
-2. [Download Bazel's sources from GitHub](https://github.com/bazelbuild/bazel/archive/master.zip)
-   and extract somewhere.
-   Alternatively you can git clone the source tree from https://github.com/bazelbuild/bazel
-
-3. Install the same prerequisites as for bootstrapping (see
-   [for Unix-like systems](#bootstrap-unix-prereq) or
-   [for Windows](#bootstrap-windows-prereq)).
-
-4. Build a development build of Bazel using Bazel:
-   `bazel build //src:bazel-dev` (or `bazel build //src:bazel-dev.exe` on
-   Windows)
-
-5. The resulting binary is at `bazel-bin/src/bazel-dev`
-   (or `bazel-bin\src\bazel-dev.exe` on Windows). You can copy it wherever you
-   like and use it immediately without further installation.
-
-Detailed instructions follow below.
-
-### Step 1: Get the latest Bazel release
-
-**Goal**: Install or download a release version of Bazel. Make sure you can run
-it by typing `bazel` in a terminal.
-
-**Reason**: To build Bazel from a GitHub source tree, you need a pre-existing
-Bazel binary. You can install one from a package manager or download one from
-GitHub.
-See [Installing Bazel](/install). (Or you can [build from
-scratch (bootstrap)](#bootstrap-bazel).)
-
-**Troubleshooting**:
-
-* If you cannot run Bazel by typing `bazel` in a terminal:
-
-  * Maybe your Bazel binary's directory is not on the PATH.
-
-    This is not a big problem. Instead of typing `bazel`, you will need to
-    type the full path.
-
-  * Maybe the Bazel binary itself is not called `bazel` (on Unixes) or
-    `bazel.exe` (on Windows).
-
-    This is not a big problem. You can either rename the binary, or type the
-    binary's name instead of `bazel`.
-
-  * Maybe the binary is not executable (on Unixes).
-
-    You must make the binary executable by running `chmod +x /path/to/bazel`.
-
-### Step 2: Download Bazel's sources from GitHub
-
-If you are familiar with Git, then just `git clone https://github.com/bazelbuild/bazel`
-
-Otherwise:
-
-1. Download the
-   [latest sources as a zip file](https://github.com/bazelbuild/bazel/archive/master.zip).
-
-2. Extract the contents somewhere.
-
-   For example create a `bazel-src` directory under your home directory and
-   extract there.
-
-### Step 3: Install prerequisites
-
-Install the same prerequisites as for bootstrapping (see below) -- JDK, C++
-compiler, MSYS2 (if you are building on Windows), etc.
-
-### Step 4a: Build Bazel on Ubuntu Linux, macOS, and other Unix-like systems
-
-For instructions for Windows, see [Build Bazel on Windows](#build-bazel-on-windows).
-
-**Goal**: Run Bazel to build a custom Bazel binary (`bazel-bin/src/bazel-dev`).
-
-**Instructions**:
-
-1. Start a Bash terminal
-
-2. `cd` into the directory where you extracted (or cloned) Bazel's sources.
-
-   For example if you extracted the sources under your home directory, run:
-
-       cd ~/bazel-src
-
-3. Build Bazel from source:
-
-       bazel build //src:bazel-dev
-
-   Alternatively you can run `bazel build //src:bazel --compilation_mode=opt`
-   to yield a smaller binary but it's slower to build.
-
-4. The output will be at `bazel-bin/src/bazel-dev` (or `bazel-bin/src/bazel`).
-
-### Step 4b: Build Bazel on Windows
-
-For instructions for Unix-like systems, see
-[Ubuntu Linux, macOS, and other Unix-like systems](#build-bazel-on-unixes).
-
-**Goal**: Run Bazel to build a custom Bazel binary
-(`bazel-bin\src\bazel-dev.exe`).
-
-**Instructions**:
-
-1. Start Command Prompt (Start Menu > Run > "cmd.exe")
-
-2. `cd` into the directory where you extracted (or cloned) Bazel's sources.
-
-   For example if you extracted the sources under your home directory, run:
-
-       cd %USERPROFILE%\bazel-src
-
-3. Build Bazel from source:
-
-       bazel build //src:bazel-dev.exe
-
-   Alternatively you can run `bazel build //src:bazel.exe
-   --compilation_mode=opt` to yield a smaller binary but it's slower to build.
-
-4. The output will be at `bazel-bin\src\bazel-dev.exe` (or
-   `bazel-bin\src\bazel.exe`).
-
-### Step 5: Install the built binary
-
-Actually, there's nothing to install.
-
-The output of the previous step is a self-contained Bazel binary. You can copy
-it to any directory and use it immediately. (It's useful if that directory is on
-your PATH so that you can run "bazel" everywhere.)
-
----
-
-## Build Bazel from scratch (bootstrapping)
-
-You can also build Bazel from scratch, without using an existing Bazel binary.
-
-### Step 1: Download Bazel's sources (distribution archive)
-
-(This step is the same for all platforms.)
-
-1. Download `bazel-<version>-dist.zip` from
-   [GitHub](https://github.com/bazelbuild/bazel/releases), for example
-   `bazel-0.28.1-dist.zip`.
-
-   **Attention**:
-
-   - There is a **single, architecture-independent** distribution archive.
-     There are no architecture-specific or OS-specific distribution archives.
-   - These sources are **not the same as the GitHub source tree**. You
-     have to use the distribution archive to bootstrap Bazel. You cannot
-     use a source tree cloned from GitHub. (The distribution archive contains
-     generated source files that are required for bootstrapping and are not part
-     of the normal Git source tree.)
-
-2. Unpack the distribution archive somewhere on disk.
-
-   You should verify the signature made by Bazel's
-   [release key](https://bazel.build/bazel-release.pub.gpg) 3D5919B448457EE0.
-
-### Step 2a: Bootstrap Bazel on Ubuntu Linux, macOS, and other Unix-like systems
-
-For instructions for Windows, see [Bootstrap Bazel on Windows](#bootstrap-windows).
-
-#### 2.1. Install the prerequisites
-
-* **Bash**
-
-* **zip, unzip**
-
-* **C++ build toolchain**
-
-* **JDK.** Version 11 is required.
-
-* **Python**. Versions 2 and 3 are supported; installing one of them is
-  enough.
-
-For example on Ubuntu Linux you can install these requirements using the
-following command:
-
-```sh
-sudo apt-get install build-essential openjdk-11-jdk python zip unzip
-```
-
-#### 2.2. Bootstrap Bazel on Unix
-
-1. Open a shell or Terminal window.
-
-2. `cd` to the directory where you unpacked the distribution archive.
-
-3. Run the compilation script: `env EXTRA_BAZEL_ARGS="--tool_java_runtime_version=local_jdk" bash ./compile.sh`.
-
-The compiled output is placed into `output/bazel`. This is a self-contained
-Bazel binary, without an embedded JDK. You can copy it anywhere or use it
-in-place. For convenience, copy this binary to a directory that's on your
-`PATH` (such as `/usr/local/bin` on Linux).
-
-To build the `bazel` binary in a reproducible way, also set
-[`SOURCE_DATE_EPOCH`](https://reproducible-builds.org/specs/source-date-epoch/)
-in the "Run the compilation script" step.
-
-### Step 2b: Bootstrap Bazel on Windows
-
-For instructions for Unix-like systems, see
-[Bootstrap Bazel on Ubuntu Linux, macOS, and other Unix-like systems](#bootstrap-unix).
-
-#### 2.1. Install the prerequisites
-
-* [MSYS2 shell](https://msys2.github.io/)
-
-* **The MSYS2 packages for zip and unzip.** Run the following command in the MSYS2 shell:
-
-  ```
-  pacman -S zip unzip patch
-  ```
-
-* **The Visual C++ compiler.** Install the Visual C++ compiler either as part
-  of Visual Studio 2015 or newer, or by installing the latest [Build Tools
-  for Visual Studio 2017](https://aka.ms/BuildTools).
-
-* **JDK.** Version 11 is required.
-
-* **Python**. Versions 2 and 3 are supported; installing one of them is
-  enough. You need the Windows-native version (downloadable from
-  [https://www.python.org](https://www.python.org)). Versions installed via
-  pacman in MSYS2 will not work.
-
-#### 2.2. Bootstrap Bazel on Windows
-
-1. Open the MSYS2 shell.
-
-2. Set the following environment variables:
-   * Either `BAZEL_VS` or `BAZEL_VC` (they are *not* the same): Set to the
-     path to the Visual Studio directory (BAZEL\_VS) or to the Visual
-     C++ directory (BAZEL\_VC). Setting one of them is enough.
-   * `BAZEL_SH`: Path of the MSYS2 `bash.exe`. See the command in the
-     examples below.
-
-     Do not set this to `C:\Windows\System32\bash.exe`. (You have that file
-     if you installed Windows Subsystem for Linux.) Bazel does not support
-     this version of `bash.exe`.
-   * `PATH`: Add the Python directory.
-   * `JAVA_HOME`: Set to the JDK directory.
-
-### Step 2b: Bootstrap Bazel on Windows
-
-For instructions for Unix-like systems, see
-[Bootstrap Bazel on Ubuntu Linux, macOS, and other Unix-like systems](#bootstrap-unix).
-
-#### 2.1. Install the prerequisites
-
-* [MSYS2 shell](https://msys2.github.io/)
-
-* **The MSYS2 packages for zip and unzip.** Run the following command in the MSYS2 shell:
-
-  ```
-  pacman -S zip unzip patch
-  ```
-
-* **The Visual C++ compiler.** Install the Visual C++ compiler either as part
-  of Visual Studio 2015 or newer, or by installing the latest [Build Tools
-  for Visual Studio 2017](https://aka.ms/BuildTools).
-
-* **JDK.** Version 11 is required.
-
-* **Python**. Versions 2 and 3 are supported; installing one of them is
-  enough. You need the Windows-native version (downloadable from
-  [https://www.python.org](https://www.python.org)). Versions installed via
-  pacman in MSYS2 will not work.
-
-#### 2.2. Bootstrap Bazel on Windows
-
-1. Open the MSYS2 shell.
-
-2. Set the following environment variables:
-   * Either `BAZEL_VS` or `BAZEL_VC` (they are *not* the same): Set to the
-     path to the Visual Studio directory (BAZEL\_VS) or to the Visual
-     C++ directory (BAZEL\_VC). Setting one of them is enough.
-   * `BAZEL_SH`: Path of the MSYS2 `bash.exe`. See the command in the
-     examples below.
-
-     Do not set this to `C:\Windows\System32\bash.exe`. (You have that file
-     if you installed Windows Subsystem for Linux.) Bazel does not support
-     this version of `bash.exe`.
-   * `PATH`: Add the Python directory.
-   * `JAVA_HOME`: Set to the JDK directory.
-
-   **Example** (using BAZEL\_VS):
-
-       export BAZEL_VS="C:/Program Files (x86)/Microsoft Visual Studio/2017/BuildTools"
-       export BAZEL_SH="$(cygpath -m $(realpath $(which bash)))"
-       export PATH="/c/python27:$PATH"
-       export JAVA_HOME="C:/Program Files/Java/jdk-11.0.2"
-
-   or (using BAZEL\_VC):
-
-       export BAZEL_VC="C:/Program Files (x86)/Microsoft Visual Studio/2017/BuildTools/VC"
-       export BAZEL_SH="$(cygpath -m $(realpath $(which bash)))"
-       export PATH="/c/python27:$PATH"
-       export JAVA_HOME="C:/Program Files/Java/jdk-11.0.2"
-
-3. `cd` to the directory where you unpacked the distribution archive.
-
-4. Run the compilation script: `env EXTRA_BAZEL_ARGS="--tool_java_runtime_version=local_jdk" ./compile.sh`
-
-The compiled output is placed into `output/bazel.exe`. This is a self-contained
-Bazel binary, without an embedded JDK. You can copy it anywhere or use it
-in-place. For convenience, copy this binary to a directory that's on
-your `PATH`.
-
-To build the `bazel.exe` binary in a reproducible way, also set
-[`SOURCE_DATE_EPOCH`](https://reproducible-builds.org/specs/source-date-epoch/)
-in the "Run the compilation script" step.
-
-You don't need to run Bazel from the MSYS2 shell. You can run Bazel from the
-Command Prompt (`cmd.exe`) or PowerShell.
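-
-For example, a quick smoke test from `cmd.exe` (the unpack directory
-`%USERPROFILE%\bazel-dist` is an assumption; use wherever you unpacked the
-archive):
-
-```
-cd %USERPROFILE%\bazel-dist
-output\bazel.exe version
-```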
diff --git a/6.5.0/install/completion.mdx b/6.5.0/install/completion.mdx
deleted file mode 100644
index 727ed9a..0000000
--- a/6.5.0/install/completion.mdx
+++ /dev/null
@@ -1,128 +0,0 @@
----
-title: 'Command-Line Completion'
----
-
-
-You can enable command-line completion (also known as tab-completion) in Bash
-and Zsh. This lets you tab-complete command names, flag names, flag values,
-and target names.
-
-## Bash
-
-Bazel comes with a Bash completion script.
-
-If you installed Bazel:
-
-* From the APT repository, then you're done -- the Bash completion script is
-  already installed in `/etc/bash_completion.d`.
-
-* From Homebrew, then you're done -- the Bash completion script is
-  already installed in `$(brew --prefix)/etc/bash_completion.d`.
-
-* From the installer downloaded from GitHub, then:
-  1. Locate the absolute path of the completion file. The installer copied it
-     to the `bin` directory.
-
-     Example: if you ran the installer with `--user`, this will be
-     `$HOME/.bazel/bin`. If you ran the installer as root, this will be
-     `/usr/local/lib/bazel/bin`.
-  2. Do one of the following:
-     * Either copy this file to your completion directory (if you have
-       one).
-
-       Example: on Ubuntu this is the `/etc/bash_completion.d` directory.
-     * Or source the completion file from Bash's RC file.
-
-       Add a line similar to the one below to your `~/.bashrc` (on Ubuntu)
-       or `~/.bash_profile` (on macOS), using your completion file's
-       absolute path:
-
-       ```
-       source /path/to/bazel-complete.bash
-       ```
-
-* Via [bootstrapping](/install/compile-source), then:
-  1. Build the completion script:
-
-     ```
-     bazel build //scripts:bazel-complete.bash
-     ```
-  2. The completion file is built under
-     `bazel-bin/scripts/bazel-complete.bash`.
-
-     Do one of the following:
-     * Copy this file to your completion directory, if you have
-       one.
-
-       Example: on Ubuntu this is the `/etc/bash_completion.d` directory.
-     * Copy it somewhere on your local disk, such as to `$HOME`, and
-       source the completion file from Bash's RC file.
-
-       Add a line similar to the one below to your `~/.bashrc` (on Ubuntu)
-       or `~/.bash_profile` (on macOS), using your completion file's
-       absolute path:
-
-       ```
-       source /path/to/bazel-complete.bash
-       ```
-
-## Zsh
-
-Bazel comes with a Zsh completion script.
-
-If you installed Bazel:
-
-* From the APT repository, then you're done -- the Zsh completion script is
-  already installed in `/usr/share/zsh/vendor-completions`.
-
-  > If you have a heavily customized `.zshrc` and the autocomplete
-  > does not function, try one of the following solutions:
-  >
-  > Add the following to your `.zshrc`:
-  >
-  > ```
-  > zstyle :compinstall filename '/path/to/your/.zshrc'
-  >
-  > autoload -Uz compinit
-  > compinit
-  > ```
-  >
-  > or
-  >
-  > Follow the instructions
-  > [here](https://stackoverflow.com/questions/58331977/bazel-tab-auto-complete-in-zsh-not-working)
-  >
-  > If you are using `oh-my-zsh`, you may want to install and enable
-  > the `zsh-autocomplete` plugin. If you'd prefer not to, use one of the
-  > solutions described above.
-
-* From Homebrew, then you're done -- the Zsh completion script is
-  already installed in `$(brew --prefix)/share/zsh/site-functions`.
-
-* From the installer downloaded from GitHub, then:
-  1. Locate the absolute path of the completion file. The installer copied it
-     to the `bin` directory.
-
-     Example: if you ran the installer with `--user`, this will be
-     `$HOME/.bazel/bin`. If you ran the installer as root, this will be
-     `/usr/local/lib/bazel/bin`.
-
-  2. Add this script to a directory on your `$fpath`:
-
-     ```
-     fpath[1,0]=~/.zsh/completion/
-     mkdir -p ~/.zsh/completion/
-     cp /path/from/above/step/_bazel ~/.zsh/completion
-     ```
-
-     You may have to call `rm -f ~/.zcompdump; compinit`
-     the first time to make it work.
-
-  3. Optionally, add the following to your `.zshrc`.
-
-     ```
-     # This way the completion script does not have to parse Bazel's options
-     # repeatedly. The directory in cache-path must be created manually.
-     zstyle ':completion:*' use-cache on
-     zstyle ':completion:*' cache-path ~/.zsh/cache
-     ```
diff --git a/6.5.0/install/docker-container.mdx b/6.5.0/install/docker-container.mdx
deleted file mode 100644
index 53c8504..0000000
--- a/6.5.0/install/docker-container.mdx
+++ /dev/null
@@ -1,132 +0,0 @@
----
-title: 'Getting Started with Bazel Docker Container'
----
-
-
-This page provides details on the contents of the Bazel container, how to build
-the [abseil-cpp](https://github.com/abseil/abseil-cpp) project using Bazel
-inside the Bazel container, and how to build this project directly
-from the host machine using the Bazel container with directory mounting.
-
-## Build Abseil project from your host machine with directory mounting
-
-The instructions in this section allow you to build using the Bazel container
-with the sources checked out in your host environment. A container is started up
-for each build command you execute. Build results are cached in your host
-environment so they can be reused across builds.
-
-Clone the project to a directory in your host machine.
-
-```posix-terminal
-git clone https://github.com/abseil/abseil-cpp.git /src/workspace
-```
-
-Create a folder that will hold cached results shared across builds.
-
-```posix-terminal
-mkdir -p /tmp/build_output/
-```
-
-Use the Bazel container to build the project and make the build
-outputs available in the output folder on your host machine.
-
-```posix-terminal
-docker run \
-  -e USER="$(id -u)" \
-  -u="$(id -u)" \
-  -v /src/workspace:/src/workspace \
-  -v /tmp/build_output:/tmp/build_output \
-  -w /src/workspace \
-  l.gcr.io/google/bazel:latest \
-  --output_user_root=/tmp/build_output \
-  build //absl/...
-```
-
-Build the project with sanitizers by adding the `--config=asan|tsan|msan` build
-flag to select AddressSanitizer (asan), ThreadSanitizer (tsan) or
-MemorySanitizer (msan) accordingly.
-
-```posix-terminal
-docker run \
-  -e USER="$(id -u)" \
-  -u="$(id -u)" \
-  -v /src/workspace:/src/workspace \
-  -v /tmp/build_output:/tmp/build_output \
-  -w /src/workspace \
-  l.gcr.io/google/bazel:latest \
-  --output_user_root=/tmp/build_output \
-  build --config={asan | tsan | msan} -- //absl/... -//absl/types:variant_test
-```
-
-## Build Abseil project from inside the container
-
-The instructions in this section allow you to build using the Bazel container
-with the sources inside the container. By starting a container at the beginning
-of your development workflow and making changes in the workspace within the
-container, build results will be cached.
-
-Start a shell in the Bazel container:
-
-```posix-terminal
-docker run --interactive --entrypoint=/bin/bash l.gcr.io/google/bazel:latest
-```
-
-Each container ID is unique. In the instructions below, the container ID is
-5a99103747c6.
-
-Clone the project.
-
-```posix-terminal
-root@5a99103747c6:~# git clone https://github.com/abseil/abseil-cpp.git && cd abseil-cpp/
-```
-
-Do a regular build.
-
-```posix-terminal
-root@5a99103747c6:~/abseil-cpp# bazel build //absl/...
-```
-
-Build the project with sanitizers by adding the `--config=asan|tsan|msan`
-build flag to select AddressSanitizer (asan), ThreadSanitizer (tsan) or
-MemorySanitizer (msan) accordingly.
-
-```posix-terminal
-root@5a99103747c6:~/abseil-cpp# bazel build --config={asan | tsan | msan} -- //absl/... -//absl/types:variant_test
-```
-
-## Explore the Bazel container
-
-If you haven't already, start an interactive shell inside the Bazel container.
-
-```posix-terminal
-docker run -it --entrypoint=/bin/bash l.gcr.io/google/bazel:latest
-root@5a99103747c6:/#
-```
-
-Explore the container contents.
-
-```posix-terminal
-root@5a99103747c6:/# clang --version
-clang version 8.0.0 (trunk 340178)
-Target: x86_64-unknown-linux-gnu
-Thread model: posix
-InstalledDir: /usr/local/bin
-
-root@5a99103747c6:/# java -version
-openjdk version "1.8.0_181"
-OpenJDK Runtime Environment (build 1.8.0_181-8u181-b13-0ubuntu0.16.04.1-b13)
-OpenJDK 64-Bit Server VM (build 25.181-b13, mixed mode)
-
-root@5a99103747c6:/# python -V
-Python 2.7.12
-
-root@5a99103747c6:/# python3 -V
-Python 3.6.6
-
-root@5a99103747c6:/# bazel version
-Extracting Bazel installation...
-Build label: 0.17.1
-Build target: bazel-out/k8-opt/bin/src/main/java/com/google/devtools/build/lib/bazel/BazelServer_deploy.jar
-Build time: Fri Sep 14 10:39:25 2018 (1536921565)
-Build timestamp: 1536921565
-Build timestamp as int: 1536921565
-```
diff --git a/6.5.0/install/ide.mdx b/6.5.0/install/ide.mdx
deleted file mode 100644
index 6372689..0000000
--- a/6.5.0/install/ide.mdx
+++ /dev/null
@@ -1,114 +0,0 @@
----
-title: 'Integrating Bazel with IDEs'
----
-
-
-This page covers how to integrate Bazel with IDEs, such as IntelliJ, Android
-Studio, and CLion (or build your own IDE plugin). It also includes links to
-installation and plugin details.
-
-IDEs integrate with Bazel in a variety of ways, from features that allow Bazel
-executions from within the IDE, to awareness of Bazel structures such as syntax
-highlighting of the `BUILD` files.
-
-If you are interested in developing an editor or IDE plugin for Bazel, please
-join the `#ide` channel on the [Bazel Slack](https://slack.bazel.build) or email
-the [bazel-dev](https://groups.google.com/forum/#!forum/bazel-dev) mailing list.
-
-## IDEs and editors
-
-### IntelliJ, Android Studio, and CLion
-
-[Official plugin](http://ij.bazel.build) for IntelliJ, Android Studio, and
-CLion. The plugin is [open source](https://github.com/bazelbuild/intellij).
-
-This is the open source version of the plugin used internally at Google.
-
-Features:
-
-* Interop with language-specific plugins. Supported languages include Java,
-  Scala, and Python.
-* Import `BUILD` files into the IDE with semantic awareness of Bazel targets.
-* Make your IDE aware of Starlark, the language used for Bazel's `BUILD` and
-  `.bzl` files.
-* Build, test, and execute binaries directly from the IDE.
-* Create configurations for debugging and running binaries.
-
-To install, go to the IDE's plugin browser and search for `Bazel`.
-
-To manually install older versions, download the zip files from JetBrains'
-Plugin Repository and install the zip file from the IDE's plugin browser:
-
-* [Android Studio
-  plugin](https://plugins.jetbrains.com/plugin/9185-android-studio-with-bazel)
-* [IntelliJ
-  plugin](https://plugins.jetbrains.com/plugin/8609-intellij-with-bazel)
-* [CLion plugin](https://plugins.jetbrains.com/plugin/9554-clion-with-bazel)
-
-### Xcode
-
-[Tulsi](https://tulsi.bazel.build) and
-[XCHammer](https://github.com/pinterest/xchammer) generate Xcode projects from
-Bazel `BUILD` files.
-
-### Visual Studio Code
-
-Official plugin for VS Code.
-
-Features:
-
-* Bazel Build Targets tree
-* Starlark debugger for `.bzl` files during a build (set breakpoints, step
-  through code, inspect variables, and so on)
-
-Find [the plugin on the Visual Studio
-marketplace](https://marketplace.visualstudio.com/items?itemName=BazelBuild.vscode-bazel).
-The plugin is [open source](https://github.com/bazelbuild/vscode-bazel).
-
-See also: [Autocomplete for Source Code](#autocomplete-for-source-code)
-
-### Atom
-
-Find the [`language-bazel` package](https://atom.io/packages/language-bazel)
-on the Atom package manager.
-
-### Vim
-
-See [`bazelbuild/vim-bazel` on GitHub](https://github.com/bazelbuild/vim-bazel)
-
-### Emacs
-
-See [`bazelbuild/emacs-bazel-mode` on
-GitHub](https://github.com/bazelbuild/emacs-bazel-mode)
-
-### Visual Studio
-
-[Lavender](https://github.com/tmandry/lavender) is an experimental project for
-generating Visual Studio projects that use Bazel for building.
-
-### Eclipse
-
-[Bazel Eclipse Feature](https://github.com/salesforce/bazel-eclipse)
-is a set of plugins for importing Bazel packages into an Eclipse workspace as
-Eclipse projects.
-
-## Autocomplete for Source Code
-
-### C Language Family (C++, C, Objective-C, and Objective-C++)
-
-[`hedronvision/bazel-compile-commands-extractor`](https://github.com/hedronvision/bazel-compile-commands-extractor)
-enables autocomplete in a wide variety of extensible editors, including VSCode,
-Vim, Emacs, and Sublime. It lets language servers, like clangd and ccls, and
-other types of tooling, draw upon Bazel's understanding of how `cc` and `objc`
-code will be compiled, including how it configures cross-compilation for other
-platforms.
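-
-As a sketch of typical usage (the repository setup and the
-`refresh_compile_commands` target follow the pattern documented in the
-project's README; treat the exact names here as assumptions and check that
-README for current instructions):
-
-```
-# After adding the repository to your WORKSPACE and declaring a
-# refresh_compile_commands(...) target in a BUILD file, run:
-bazel run :refresh_compile_commands
-# This regenerates compile_commands.json at the workspace root,
-# which language servers like clangd and ccls then pick up.
-```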
- -### Java - -[`georgewfraser/java-language-server`](https://github.com/georgewfraser/java-language-server) - Java Language Server (LSP) with support for Bazel-built projects - -## Automatically run build and test on file change - -[Bazel watcher](https://github.com/bazelbuild/bazel-watcher) is a -tool for building Bazel targets when source files change. - -## Building your own IDE plugin - -Read the [**IDE support** blog -post](https://blog.bazel.build/2016/06/10/ide-support.html) to learn more about -the Bazel APIs to use when building an IDE plugin. diff --git a/6.5.0/install/index.mdx b/6.5.0/install/index.mdx deleted file mode 100644 index 7faedcb..0000000 --- a/6.5.0/install/index.mdx +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: 'Installing Bazel' ---- - - -This page describes the various platforms supported by Bazel and links -to the packages for more details. - -[Bazelisk](/install/bazelisk) is the recommended way to install Bazel on [Ubuntu Linux](/install/ubuntu), [macOS](/install/os-x), and [Windows](/install/windows). - -## Community-supported packages - -Bazel community members maintain these packages. The Bazel team doesn't -officially support them. Contact the package maintainers for support. - -* [Arch Linux](https://www.archlinux.org/packages/community/x86_64/bazel/) -* [Fedora 25, 26, 27, 28, and CentOS 7](/install/redhat) -* [CentOS 6](https://github.com/sub-mod/bazel-builds) -* [FreeBSD](https://www.freshports.org/devel/bazel) -* [Gentoo](https://packages.gentoo.org/packages/dev-util/bazel) -* [Linuxbrew](https://github.com/Linuxbrew/homebrew-core/blob/master/Formula/bazel.rb) -* [Nixpkgs](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/tools/build-managers/bazel) -* [openSUSE](/install/suse) -* [Parabola](https://www.parabola.nu/packages/?q=bazel) -* [Scoop](https://github.com/scoopinstaller/scoop-main/blob/master/bucket/bazel.json) -* [Raspberry Pi](https://github.com/koenvervloesem/bazel-on-arm/blob/master/README.md) - -## Community-supported architectures - -* [ppc64el](https://oplab9.parqtec.unicamp.br/pub/ppc64el/bazel) - -For other platforms, you can try to [compile from source](/install/compile-source). diff --git a/6.5.0/install/os-x.mdx b/6.5.0/install/os-x.mdx deleted file mode 100644 index 7293a84..0000000 --- a/6.5.0/install/os-x.mdx +++ /dev/null @@ -1,139 +0,0 @@ ---- -title: 'Installing Bazel on macOS' ---- - - -This page describes how to install Bazel on macOS and set up your environment. - -You can install Bazel on macOS using one of the following methods: - -* *Recommended*: [Use Bazelisk](/install/bazelisk) -* [Use the binary installer](#install-with-installer-mac-os-x) -* [Use Homebrew](#install-on-mac-os-x-homebrew) -* [Compile Bazel from source](/install/compile-source) - -Bazel comes with two completion scripts. After installing Bazel, you can: - -* Access the [bash completion script](/install/completion#bash) -* Install the [zsh completion script](/install/completion#zsh) - -
-<h2 id="install-with-installer-mac-os-x">Installing using the binary installer</h2>
-
-The binary installers are on Bazel's
-[GitHub releases page](https://github.com/bazelbuild/bazel/releases).
-
-The installer contains the Bazel binary. Some additional libraries
-must also be installed for Bazel to work.
-
-### Step 1: Install Xcode command line tools
-
-If you don't intend to use `ios_*` rules, it is sufficient to install the Xcode
-command line tools package by using `xcode-select`:
-
-```posix-terminal
-xcode-select --install
-```
-
-Otherwise, for `ios_*` rule support, you must have Xcode 6.1 or later with iOS
-SDK 8.1 installed on your system.
-
-Download Xcode from the
-[App Store](https://apps.apple.com/us/app/xcode/id497799835) or the
-[Apple Developer site](https://developer.apple.com/download/more/?=xcode).
-
-Once Xcode is installed, accept the license agreement for all users with the
-following command:
-
-```posix-terminal
-sudo xcodebuild -license accept
-```
-
-### Step 2: Download the Bazel installer
-
-Next, download the Bazel binary installer named
-`bazel-<version>-installer-darwin-x86_64.sh` from the
-[Bazel releases page on GitHub](https://github.com/bazelbuild/bazel/releases).
-
-**On macOS Catalina or newer (macOS >= 10.15)**, due to Apple's new app signing
-requirements, you need to download the installer from the terminal using `curl`,
-replacing the version variable with the Bazel version you want to download:
-
-```posix-terminal
-export BAZEL_VERSION=5.2.0
-
-curl -fLO "https://github.com/bazelbuild/bazel/releases/download/$BAZEL_VERSION/bazel-$BAZEL_VERSION-installer-darwin-x86_64.sh"
-```
-
-This is a temporary workaround until the macOS release flow supports
-signing ([#9304](https://github.com/bazelbuild/bazel/issues/9304)).
-
-### Step 3: Run the installer
-
-Run the Bazel installer as follows:
-
-```posix-terminal
-chmod +x "bazel-$BAZEL_VERSION-installer-darwin-x86_64.sh"
-./bazel-$BAZEL_VERSION-installer-darwin-x86_64.sh --user
-```
-
-The `--user` flag installs Bazel to the `$HOME/bin` directory on your system and
-sets the `.bazelrc` path to `$HOME/.bazelrc`. Use the `--help` flag to see
-additional installation options.
-
-If you are **on macOS Catalina or newer (macOS >= 10.15)** and get an error that
-_**"bazel-real" cannot be opened because the developer cannot be verified**_,
-you need to re-download the installer from the terminal using `curl` as a
-workaround; see Step 2 above.
-
-### Step 4: Set up your environment
-
-If you ran the Bazel installer with the `--user` flag as above, the Bazel
-executable is installed in your `$HOME/bin` directory.
-It's a good idea to add this directory to your default paths, as follows:
-
-```posix-terminal
-export PATH="$PATH:$HOME/bin"
-```
-
-You can also add this command to your `~/.bashrc`, `~/.zshrc`, or `~/.profile`
-file to make the change permanent (see the sketch at the end of this section).
-
-All set! You can confirm Bazel is installed successfully by running the
-following command:
-
-```posix-terminal
-bazel --version
-```
-
-To update to a newer release of Bazel, download and install the desired version.
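-
-For example, a minimal sketch of making the `PATH` change permanent (this
-assumes Zsh, the default shell on recent macOS; adapt the RC file to your
-shell):
-
-```posix-terminal
-echo 'export PATH="$PATH:$HOME/bin"' >> ~/.zshrc
-
-source ~/.zshrc
-
-bazel --version
-```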
-
-<h2 id="install-on-mac-os-x-homebrew">Installing using Homebrew</h2>
-
-### Step 1: Install Homebrew on macOS
-
-Install Homebrew (a one-time step):
-
-```posix-terminal
-/bin/bash -c "$(curl -fsSL \
-https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
-```
-
-### Step 2: Install Bazel via Homebrew
-
-Install the Bazel package via Homebrew as follows:
-
-```posix-terminal
-brew install bazel
-```
-
-All set! You can confirm Bazel is installed successfully by running the
-following command:
-
-```posix-terminal
-bazel --version
-```
-
-Once installed, you can upgrade to a newer version of Bazel using the
-following command:
-
-```posix-terminal
-brew upgrade bazel
-```
diff --git a/6.5.0/install/redhat.mdx b/6.5.0/install/redhat.mdx
deleted file mode 100644
index 9461d92..0000000
--- a/6.5.0/install/redhat.mdx
+++ /dev/null
@@ -1,49 +0,0 @@
----
-title: 'Installing Bazel on Fedora and CentOS'
----
-
-
-This page describes how to install Bazel on Fedora and CentOS.
-
-The Bazel team does not provide official packages for Fedora and CentOS.
-Vincent Batts ([@vbatts](https://github.com/vbatts)) generously maintains
-unofficial packages on
-[Fedora COPR](https://copr.fedorainfracloud.org/coprs/vbatts/bazel/).
-
-The commands below must be run either via `sudo` or while logged in as `root`.
-
-Add `--allowerasing` when installing an upgrade from a previous major
-version of the Bazel package.
-
-[The Bazelisk installer](/install/bazelisk) is an alternative to package installation.
-
-## Installing on Fedora 25+
-
-1. The [DNF](https://fedoraproject.org/wiki/DNF) package manager can
-   install Bazel from the [COPR](https://copr.fedorainfracloud.org/) repository.
-   Install the `copr` plugin for DNF if you have not already done so.
-
-   ```posix-terminal
-   dnf install dnf-plugins-core
-   ```
-
-2. Run the following commands to add the Bazel repository and install the
-   package:
-
-   ```posix-terminal
-   dnf copr enable vbatts/bazel
-
-   dnf install bazel4
-   ```
-
-## Installing on CentOS 7
-
-1. Download the corresponding `.repo` file from
-   [Fedora COPR](https://copr.fedorainfracloud.org/coprs/vbatts/bazel/repo/epel-7/vbatts-bazel-epel-7.repo)
-   and copy it to `/etc/yum.repos.d/`.
-
-2. Run the following command:
-
-   ```posix-terminal
-   yum install bazel4
-   ```
diff --git a/6.5.0/install/suse.mdx b/6.5.0/install/suse.mdx
deleted file mode 100644
index 0ac88e3..0000000
--- a/6.5.0/install/suse.mdx
+++ /dev/null
@@ -1,34 +0,0 @@
----
-title: 'Installing Bazel on openSUSE Tumbleweed & Leap'
----
-
-
-This page describes how to install Bazel on openSUSE Tumbleweed and Leap.
-
-Note: The Bazel team does not officially maintain openSUSE support. For issues
-using Bazel on openSUSE, please file a ticket at [bugzilla.opensuse.org](https://bugzilla.opensuse.org/).
-
-Packages are provided for openSUSE Tumbleweed and Leap. You can find all
-available Bazel versions via openSUSE's [software search](https://software.opensuse.org/search?utf8=%E2%9C%93&baseproject=ALL&q=bazel).
-
-The commands below must be run either via `sudo` or while logged in as `root`.
-
-## Installing Bazel on openSUSE
-
-Run the following commands to install the package. If you need a specific
-version, you can install it via the specific `bazelXXX` package; otherwise,
-just `bazel` is enough.
-
-To install the latest version of Bazel, run:
-
-```posix-terminal
-zypper install bazel
-```
-
-You can also install a specific version of Bazel by specifying the package
-version with `bazel<version>`. For example, to install
-Bazel 4.2, run:
-
-```posix-terminal
-zypper install bazel4.2
-```
diff --git a/6.5.0/install/ubuntu.mdx b/6.5.0/install/ubuntu.mdx
deleted file mode 100644
index 5703e30..0000000
--- a/6.5.0/install/ubuntu.mdx
+++ /dev/null
@@ -1,155 +0,0 @@
----
-title: 'Installing Bazel on Ubuntu'
----
-
-
-This page describes the options for installing Bazel on Ubuntu.
-It also provides links to the Bazel completion scripts and the binary installer,
-if needed as a backup option (for example, if you don't have admin access).
-
-Supported Ubuntu Linux platforms:
-
-* 20.04 (LTS)
-* 18.04 (LTS)
-
-Bazel should be compatible with other Ubuntu releases and Debian
-"stretch" and above, but this is untested and not guaranteed to work.
-
-Install Bazel on Ubuntu using one of the following methods:
-
-* *Recommended*: [Use Bazelisk](/install/bazelisk)
-* [Use our custom APT repository](#install-on-ubuntu)
-* [Use the binary installer](#binary-installer)
-* [Compile Bazel from source](/install/compile-source)
-
-**Note:** For Arm-based systems, the APT repository does not contain an `arm64`
-release, and there is no binary installer available. Either use Bazelisk or
-compile from source.
-
-Bazel comes with two completion scripts. After installing Bazel, you can:
-
-* Access the [bash completion script](/install/completion#bash)
-* Install the [zsh completion script](/install/completion#zsh)
-
-## Using Bazel's apt repository
-
-### Step 1: Add Bazel distribution URI as a package source
-
-**Note:** This is a one-time setup step.
-
-```posix-terminal
-sudo apt install apt-transport-https curl gnupg
-curl -fsSL https://bazel.build/bazel-release.pub.gpg | gpg --dearmor >bazel-archive-keyring.gpg
-sudo mv bazel-archive-keyring.gpg /usr/share/keyrings
-echo "deb [arch=amd64 signed-by=/usr/share/keyrings/bazel-archive-keyring.gpg] https://storage.googleapis.com/bazel-apt stable jdk1.8" | sudo tee /etc/apt/sources.list.d/bazel.list
-```
-
-The component name "jdk1.8" is kept only for legacy reasons and doesn't relate
-to supported or included JDK versions. Bazel releases are Java-version agnostic.
-Changing the "jdk1.8" component name would break existing users of the repo.
-
-### Step 2: Install and update Bazel
-
-```posix-terminal
-sudo apt update && sudo apt install bazel
-```
-
-Once installed, you can upgrade to a newer version of Bazel as part of your normal system updates:
-
-```posix-terminal
-sudo apt update && sudo apt full-upgrade
-```
-
-The `bazel` package always installs the latest stable version of Bazel. You
-can install specific, older versions of Bazel in addition to the latest one,
-like this:
-
-```posix-terminal
-sudo apt install bazel-1.0.0
-```
-
-This installs Bazel 1.0.0 as `/usr/bin/bazel-1.0.0` on your system. This
-can be useful if you need a specific version of Bazel to build a project, for
-example because it uses a `.bazelversion` file to explicitly state which
-Bazel version it should be built with (a sketch follows the next example).
-
-Optionally, you can set `bazel` to a specific version by creating a symlink:
-
-```posix-terminal
-sudo ln -s /usr/bin/bazel-1.0.0 /usr/bin/bazel
-bazel --version  # 1.0.0
-```
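-
-As a sketch of the `.bazelversion` workflow mentioned above (this assumes that
-the installed `bazel` wrapper, or Bazelisk, dispatches to a matching
-`bazel-<version>` binary; the version number is only an example):
-
-```posix-terminal
-echo "1.0.0" > .bazelversion
-
-bazel --version  # resolves to bazel-1.0.0 for this workspace
-```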
-
-### Step 3: Install a JDK (optional)
-
-Bazel includes a private, bundled JRE as its runtime and doesn't require you to
-install any specific version of Java.
-
-However, if you want to build Java code using Bazel, you have to install a JDK.
-
-```posix-terminal
-# Ubuntu 16.04 (LTS) uses OpenJDK 8 by default:
-sudo apt install openjdk-8-jdk
-
-# Ubuntu 18.04 (LTS) uses OpenJDK 11 by default:
-sudo apt install openjdk-11-jdk
-```
-
-## Using the binary installer
-
-Generally, you should use the apt repository, but the binary installer
-can be useful if you don't have admin permissions on your machine or
-can't add custom repositories.
-
-The binary installers can be downloaded from Bazel's [GitHub releases page](https://github.com/bazelbuild/bazel/releases).
-
-The installer contains the Bazel binary and extracts it into your `$HOME/bin`
-folder. Some additional libraries must be installed manually for Bazel to work.
-
-### Step 1: Install required packages
-
-Bazel needs a C++ compiler and unzip / zip in order to work:
-
-```posix-terminal
-sudo apt install g++ unzip zip
-```
-
-If you want to build Java code using Bazel, install a JDK:
-
-```posix-terminal
-# Ubuntu 16.04 (LTS) uses OpenJDK 8 by default:
-sudo apt-get install openjdk-8-jdk
-
-# Ubuntu 18.04 (LTS) uses OpenJDK 11 by default:
-sudo apt-get install openjdk-11-jdk
-```
-
-### Step 2: Run the installer
-
-Next, download the Bazel binary installer named `bazel-<version>-installer-linux-x86_64.sh`
-from the [Bazel releases page on GitHub](https://github.com/bazelbuild/bazel/releases).
-
-Run it as follows:
-
-```posix-terminal
-chmod +x bazel-<version>-installer-linux-x86_64.sh
-
-./bazel-<version>-installer-linux-x86_64.sh --user
-```
-
-The `--user` flag installs Bazel to the `$HOME/bin` directory on your system and
-sets the `.bazelrc` path to `$HOME/.bazelrc`. Use the `--help` flag to see
-additional installation options.
-
-### Step 3: Set up your environment
-
-If you ran the Bazel installer with the `--user` flag as above, the Bazel
-executable is installed in your `$HOME/bin` directory.
-It's a good idea to add this directory to your default paths, as follows:
-
-```posix-terminal
-export PATH="$PATH:$HOME/bin"
-```
-
-You can also add this command to your `~/.bashrc` or `~/.zshrc` file to make it
-permanent.
diff --git a/6.5.0/migrate/index.mdx b/6.5.0/migrate/index.mdx
deleted file mode 100644
index 215de29..0000000
--- a/6.5.0/migrate/index.mdx
+++ /dev/null
@@ -1,10 +0,0 @@
----
-title: 'Migrating to Bazel'
----
-
-
-This page links to migration guides for Bazel.
-
-* [Maven](/migrate/maven)
-* [Xcode](/migrate/xcode)
-* [CocoaPods](/migrate/cocoapods)
diff --git a/6.5.0/migrate/maven.mdx b/6.5.0/migrate/maven.mdx
deleted file mode 100644
index 17ecd33..0000000
--- a/6.5.0/migrate/maven.mdx
+++ /dev/null
@@ -1,256 +0,0 @@
----
-title: 'Migrating from Maven to Bazel'
----
-
-
-This page describes how to migrate from Maven to Bazel, including the
-prerequisites and installation steps. It describes the differences
-between Maven and Bazel, and provides a migration example using the
-Guava project.
-
-When migrating from any build tool to Bazel, it's best to have both build
-tools running in parallel until you have fully migrated your development team,
-CI system, and any other relevant systems. You can run Maven and Bazel in the
-same repository.
-
-Note: While Bazel supports downloading and publishing Maven artifacts with
-[rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external),
-it does not directly support Maven-based plugins. Maven plugins can't be
-directly run by Bazel since there's no Maven compatibility layer.
-
-## Before you begin
-
-* [Install Bazel](/install) if it's not yet installed.
-* If you're new to Bazel, go through the tutorial - [Introduction to Bazel: Build Java](/tutorials/java) before you start - migrating. The tutorial explains Bazel's concepts, structure, and label - syntax. - -## Differences between Maven and Bazel - -* Maven uses top-level `pom.xml` file(s). Bazel supports multiple build - files and multiple targets per `BUILD` file, allowing for builds that - are more incremental than Maven's. -* Maven takes charge of steps for the deployment process. Bazel does - not automate deployment. -* Bazel enables you to express dependencies between languages. -* As you add new sections to the project, with Bazel you may need to add new - `BUILD` files. Best practice is to add a `BUILD` file to each new Java package. - -## Migrate from Maven to Bazel - -The steps below describe how to migrate your project to Bazel: - -1. [Create the WORKSPACE file](#1-build) -2. [Create one BUILD file](#2-build) -3. [Create more BUILD files](#3-build) -4. [Build using Bazel](#4-build) - -Examples below come from a migration of the -[Guava project](https://github.com/google/guava) from Maven to Bazel. -The Guava project used is release `v31.1`. The examples using Guava do not walk through -each step in the migration, but they do show the files and contents that are -generated or added manually for the migration. - -``` -$ git clone https://github.com/google/guava.git && cd guava -$ git checkout v31.1 -``` - -### 1. Create the WORKSPACE file - -Create a file named `WORKSPACE` at the root of your project. If your project -has no external dependencies, the workspace file can be empty. - -If your project depends on files or packages that are not in one of the -project's directories, specify these external dependencies in the workspace -file. To automate the listing of external dependencies for the workspace file, -use `rules_jvm_external`. For instructions about using this ruleset, see -[the README](https://github.com/bazelbuild/rules_jvm_external/#rules_jvm_external). - -Note: The previously recommended tool, `generate_workspace`, is no longer -maintained by the Bazel team. - -#### Guava project example: external dependencies - -You can list the external dependencies of the -[Guava project](https://github.com/google/guava) with the -[`rules_jvm_external`](https://github.com/bazelbuild/rules_jvm_external) -ruleset. - -Add the following snippet to the `WORKSPACE` file: - -```python -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") - -RULES_JVM_EXTERNAL_TAG = "4.3" -RULES_JVM_EXTERNAL_SHA = "6274687f6fc5783b589f56a2f1ed60de3ce1f99bc4e8f9edef3de43bdf7c6e74" - -http_archive( - name = "rules_jvm_external", - sha256 = RULES_JVM_EXTERNAL_SHA, - strip_prefix = "rules_jvm_external-%s" % RULES_JVM_EXTERNAL_TAG, - url = "https://github.com/bazelbuild/rules_jvm_external/archive/%s.zip" % RULES_JVM_EXTERNAL_TAG, -) - -load("@rules_jvm_external//:defs.bzl", "maven_install") - -maven_install( - artifacts = [ - "com.google.code.findbugs:jsr305:3.0.2", - "com.google.errorprone:error_prone_annotations:2.11.0", - "com.google.j2objc:j2objc-annotations:1.3", - "org.codehaus.mojo:animal-sniffer-annotations:1.20", - "org.checkerframework:checker-qual:3.12.0", - ], - repositories = [ - "https://repo1.maven.org/maven2", - ], -) -``` - -### 2. Create one BUILD file - -Now that you have your workspace defined and external dependencies (if -applicable) listed, you need to create `BUILD` files to describe how your project -should be built. 
-Unlike Maven with its one `pom.xml` file, Bazel can use many
-`BUILD` files to build a project. These files specify multiple build targets,
-which allow Bazel to produce incremental builds.
-
-Add `BUILD` files in stages. Start with adding one `BUILD` file
-at the root of your project and using it to do an initial build using Bazel.
-Then, you refine your build by adding more `BUILD` files with more granular
-targets.
-
-1. In the same directory as your `WORKSPACE` file, create a text file and
-   name it `BUILD`.
-
-2. In this `BUILD` file, use the appropriate rule to create one target to
-   build your project. Here are some tips:
-
-   * Use the appropriate rule:
-     * To build projects with a single Maven module, use the
-       `java_library` rule as follows:
-
-       ```python
-       java_library(
-           name = "everything",
-           srcs = glob(["src/main/java/**/*.java"]),
-           resources = glob(["src/main/resources/**"]),
-           deps = ["//:all-external-targets"],
-       )
-       ```
-     * To build projects with multiple Maven modules, use the
-       `java_library` rule as follows:
-
-       ```python
-       java_library(
-           name = "everything",
-           srcs = glob([
-               "Module1/src/main/java/**/*.java",
-               "Module2/src/main/java/**/*.java",
-               ...
-           ]),
-           resources = glob([
-               "Module1/src/main/resources/**",
-               "Module2/src/main/resources/**",
-               ...
-           ]),
-           deps = ["//:all-external-targets"],
-       )
-       ```
-     * To build binaries, use the `java_binary` rule:
-
-       ```python
-       java_binary(
-           name = "everything",
-           srcs = glob(["src/main/java/**/*.java"]),
-           resources = glob(["src/main/resources/**"]),
-           deps = ["//:all-external-targets"],
-           main_class = "com.example.Main"
-       )
-       ```
-   * Specify the attributes:
-     * `name`: Give the target a meaningful name. In the examples above,
-       the target is called "everything."
-     * `srcs`: Use globbing to list all .java files in your project.
-     * `resources`: Use globbing to list all resources in your project.
-     * `deps`: You need to determine which external dependencies your
-       project needs. For example, if you generated a list of external
-       dependencies using the tool `generate_workspace`, the dependencies
-       for `java_library` are the libraries listed in the
-       `generated_java_libraries` macro.
-   * Take a look at the
-     [example below of this top-level BUILD file](#guava-2) from
-     the migration of the Guava project.
-
-3. Now that you have a `BUILD` file at the root of your project, build
-   your project to ensure that it works. On the command line, from your
-   workspace directory, use `bazel build //:everything` to build your
-   project with Bazel.
-
-   The project has now been successfully built with Bazel. You will need
-   to add more `BUILD` files to allow incremental builds of the project.
-
-#### Guava project example: start with one BUILD file
-
-When migrating the Guava project to Bazel, initially one `BUILD` file is used
-to build the entire project. Here are the contents of this initial `BUILD`
-file in the workspace directory:
-
-```python
-java_library(
-    name = "everything",
-    srcs = glob([
-        "guava/src/**/*.java",
-        "futures/failureaccess/src/**/*.java",
-    ]),
-    deps = [
-        "@maven//:com_google_code_findbugs_jsr305",
-        "@maven//:com_google_errorprone_error_prone_annotations",
-        "@maven//:com_google_j2objc_j2objc_annotations",
-        "@maven//:org_checkerframework_checker_qual",
-        "@maven//:org_codehaus_mojo_animal_sniffer_annotations",
-    ],
-)
-```
-
-### 3. Create more BUILD files (optional)
-
-Bazel does work with just one `BUILD` file, as you saw after completing your first
-build.
-You should still consider breaking the build into smaller chunks by
-adding more `BUILD` files with granular targets.
-
-Multiple `BUILD` files with multiple targets will give the build increased
-granularity, allowing:
-
-* increased incremental builds of the project,
-* increased parallel execution of the build,
-* better maintainability of the build for future users, and
-* control over visibility of targets between packages, which can prevent
-  issues such as libraries containing implementation details leaking into
-  public APIs.
-
-Tips for adding more `BUILD` files:
-
-* You can start by adding a `BUILD` file to each Java package. Start with
-  Java packages that have the fewest dependencies and work your way up
-  to packages with the most dependencies.
-* As you add `BUILD` files and specify targets, add these new targets to the
-  `deps` sections of targets that depend on them. Note that the `glob()`
-  function does not cross package boundaries, so as the number
-  of packages grows the files matched by `glob()` will shrink.
-* Any time you add a `BUILD` file to a `main` directory, ensure that you add
-  a `BUILD` file to the corresponding `test` directory.
-* Take care to limit visibility properly between packages.
-* To simplify troubleshooting errors in your setup of `BUILD` files, ensure
-  that the project continues to build with Bazel as you add each build
-  file. Run `bazel build //...` to ensure all of your targets still build.
-
-### 4. Build using Bazel
-
-You've been building using Bazel as you add `BUILD` files to validate the setup
-of the build.
-
-When you have `BUILD` files at the desired granularity, you can use Bazel
-to produce all of your builds.
diff --git a/6.5.0/migrate/xcode.mdx b/6.5.0/migrate/xcode.mdx
deleted file mode 100644
index d867963..0000000
--- a/6.5.0/migrate/xcode.mdx
+++ /dev/null
@@ -1,271 +0,0 @@
----
-title: 'Migrating from Xcode to Bazel'
----
-
-
-This page describes how to build or test an Xcode project with Bazel. It
-describes the differences between Xcode and Bazel, and provides the steps
-for converting an Xcode project to a Bazel project. It also provides
-troubleshooting solutions to address common errors.
-
-## Differences between Xcode and Bazel
-
-* Bazel requires you to explicitly specify every build target and its
-  dependencies, plus the corresponding build settings via build rules.
-
-* Bazel requires all files on which the project depends to be present
-  within the workspace directory or specified as imports in the `WORKSPACE`
-  file.
-
-* When building Xcode projects with Bazel, the `BUILD` file(s) become the
-  source of truth. If you work on the project in Xcode, you must generate a
-  new version of the Xcode project that matches the `BUILD` files using
-  [rules_xcodeproj](https://github.com/buildbuddy-io/rules_xcodeproj/)
-  whenever you update the `BUILD` files. Certain changes to the `BUILD` files,
-  such as adding dependencies to a target, don't require regenerating the
-  project, which can speed up development. If you're not using Xcode, the
-  `bazel build` and `bazel test` commands provide build and test capabilities
-  with certain limitations described later in this guide.
-
-## Before you begin
-
-Before you begin, do the following:
-
-1. [Install Bazel](/install) if you have not already done so.
-
-2. If you're not familiar with Bazel and its concepts, complete the
-   [iOS app tutorial](/tutorials/ios-app).
-   You should understand the Bazel
-   workspace, including the `WORKSPACE` and `BUILD` files, as well as the
-   concepts of targets, build rules, and Bazel packages.
-
-3. Analyze and understand the project's dependencies.
-
-### Analyze project dependencies
-
-Unlike Xcode, Bazel requires you to explicitly declare all dependencies for
-every target in the `BUILD` file.
-
-For more information on external dependencies, see
-[Working with external dependencies](/docs/external).
-
-## Build or test an Xcode project with Bazel
-
-To build or test an Xcode project with Bazel, do the following:
-
-1. [Create the `WORKSPACE` file](#create-workspace)
-
-2. [(Experimental) Integrate SwiftPM dependencies](#integrate-swiftpm)
-
-3. [Create a `BUILD` file:](#create-build-file)
-
-   a. [Add the application target](#add-app-target)
-
-   b. [(Optional) Add the test target(s)](#add-test-target)
-
-   c. [Add the library target(s)](#add-library-target)
-
-4. [(Optional) Granularize the build](#granularize-build)
-
-5. [Run the build](#run-build)
-
-6. [Generate the Xcode project with rules_xcodeproj](#generate-the-xcode-project-with-rules_xcodeproj)
-
-### Step 1: Create the `WORKSPACE` file
-
-Create a `WORKSPACE` file in a new directory. This directory becomes the Bazel
-workspace root. If the project uses no external dependencies, this file can be
-empty. If the project depends on files or packages that are not in one of the
-project's directories, specify these external dependencies in the `WORKSPACE`
-file.
-
-Note: Place the project source code within the directory tree containing the
-`WORKSPACE` file.
-
-### Step 2: (Experimental) Integrate SwiftPM dependencies
-
-To integrate SwiftPM dependencies into the Bazel workspace with [swift_bazel](https://github.com/cgrindel/swift_bazel),
-you must convert them into Bazel packages as described in the [following tutorial](https://chuckgrindel.com/swift-packages-in-bazel-using-swift_bazel/).
-
-Note: SwiftPM support is a manual process with many variables.
-SwiftPM integration with Bazel has not been fully verified and is not
-officially supported.
-
-### Step 3: Create a `BUILD` file
-
-Once you have defined the workspace and external dependencies, you need to
-create a `BUILD` file that tells Bazel how the project is structured. Create
-the `BUILD` file at the root of the Bazel workspace and configure it to do an
-initial build of the project as follows:
-
-* [Step 3a: Add the application target](#step-3a-add-the-application-target)
-* [Step 3b: (Optional) Add the test target(s)](#step-3b-optional-add-the-test-target-s)
-* [Step 3c: Add the library target(s)](#step-3c-add-the-library-target-s)
-
-**Tip:** To learn more about packages and other Bazel concepts, see
-[Workspaces, packages, and targets](/concepts/build-ref).
-
-#### Step 3a: Add the application target
-
-Add a [`macos_application`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-macos.md#macos_application)
-or an [`ios_application`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-ios.md#ios_application)
-rule target. This target builds a macOS or iOS application bundle, respectively.
-In the target, specify the following at the minimum:
-
-* `bundle_id` - the bundle ID (reverse-DNS path followed by app name) of the
-  binary.
-
-* `provisioning_profile` - provisioning profile from your Apple Developer
-  account (if building for an iOS device).
-
-* `families` (iOS only) - whether to build the application for iPhone, iPad,
-  or both.
-
-* `infoplists` - list of .plist files to merge into the final Info.plist file.
-
-* `minimum_os_version` - the minimum version of macOS or iOS that the
-  application supports. This ensures Bazel builds the application with the
-  correct API levels.
-
-#### Step 3b: (Optional) Add the test target(s)
-
-Bazel's [Apple build rules](https://github.com/bazelbuild/rules_apple) support
-running library-based unit tests on iOS and macOS, as well as application-based
-tests on macOS. For application-based tests on iOS or UI tests on either
-platform, Bazel will build the test outputs but the tests must run within Xcode
-through a project generated with rules_xcodeproj. Add test targets as follows:
-
-* [`macos_unit_test`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-macos.md#macos_unit_test) to
-  run library-based and application-based unit tests on macOS.
-
-* [`ios_unit_test`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-ios.md#ios_unit_test)
-  to run library-based unit tests on iOS. For tests requiring the iOS
-  simulator, Bazel will build the test outputs but not run the tests. You must
-  [generate an Xcode project with rules_xcodeproj](#generate-the-xcode-project-with-rules_xcodeproj)
-  and run the tests from within Xcode.
-
-* [`ios_ui_test`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-ios.md#ios_ui_test)
-  to build outputs required to run user interface tests in the iOS simulator
-  using Xcode. You must [generate an Xcode project with rules_xcodeproj](#generate-the-xcode-project-with-rules_xcodeproj)
-  and run the tests from within Xcode. Bazel cannot natively run UI tests.
-
-At the minimum, specify a value for the `minimum_os_version` attribute. While
-other packaging attributes, such as `bundle_identifier` and `infoplists`,
-default to the most commonly used values, ensure that those defaults are
-compatible with the project and adjust them as necessary. For tests that
-require the iOS simulator, also specify the `ios_application` target name as
-the value of the `test_host` attribute.
-
-#### Step 3c: Add the library target(s)
-
-Add an [`objc_library`](/reference/be/objective-c#objc_library)
-target for each Objective-C library and a [`swift_library`](https://github.com/bazelbuild/rules_swift/blob/master/doc/rules.md#swift_library)
-target for each Swift library on which the application and/or tests depend.
-
-Add the library targets as follows:
-
-* Add the application library targets as dependencies to the application
-  targets.
-
-* Add the test library targets as dependencies to the test targets.
-
-* List the implementation sources in the `srcs` attribute.
-
-* List the headers in the `hdrs` attribute.
-
-Note: You can use the [`glob`](/reference/be/functions#glob)
-function to include all sources and/or headers of a certain type. Use it
-carefully as it might include files you do not want Bazel to build.
-
-You can browse existing examples for various types of applications directly in the
-[rules_apple examples directory](https://github.com/bazelbuild/rules_apple/tree/master/examples/).
-For example:
-
-* [macOS application targets](https://github.com/bazelbuild/rules_apple/tree/master/examples/macos)
-
-* [iOS application targets](https://github.com/bazelbuild/rules_apple/tree/master/examples/ios)
-
-* [Multi platform applications (macOS, iOS, watchOS, tvOS)](https://github.com/bazelbuild/rules_apple/tree/master/examples/multi_platform)
-
-For more information on build rules, see [Apple Rules for Bazel](https://github.com/bazelbuild/rules_apple).
-
-At this point, it is a good idea to test the build:
-
-`bazel build //:<application-target-name>`
-
-### Step 4: (Optional) Granularize the build
-
-If the project is large, or as it grows, consider chunking it into multiple
-Bazel packages. This increased granularity provides:
-
-* Increased incrementality of builds,
-
-* Increased parallelization of build tasks,
-
-* Better maintainability for future users,
-
-* Better control over source code visibility across targets and packages. This
-  prevents issues such as libraries containing implementation details leaking
-  into public APIs.
-
-Tips for granularizing the project:
-
-* Put each library in its own Bazel package. Start with those requiring the
-  fewest dependencies and work your way up the dependency tree.
-
-* As you add `BUILD` files and specify targets, add these new targets to the
-  `deps` attributes of targets that depend on them.
-
-* The `glob()` function does not cross package boundaries, so as the number
-  of packages grows the files matched by `glob()` will shrink.
-
-* When adding a `BUILD` file to a `main` directory, also add a `BUILD` file to
-  the corresponding `test` directory.
-
-* Enforce healthy visibility limits across packages.
-
-* Build the project after each major change to the `BUILD` files and fix
-  build errors as you encounter them.
-
-### Step 5: Run the build
-
-Run the fully migrated build to ensure it completes with no errors or warnings.
-Run every application and test target individually to more easily find sources
-of any errors that occur.
-
-For example:
-
-```posix-terminal
-bazel build //:my-target
-```
-
-### Step 6: Generate the Xcode project with rules_xcodeproj
-
-When building with Bazel, the `WORKSPACE` and `BUILD` files become the source
-of truth about the build. To make Xcode aware of this, you must generate a
-Bazel-compatible Xcode project using [rules_xcodeproj](https://github.com/buildbuddy-io/rules_xcodeproj#features).
-
-### Troubleshooting
-
-Bazel errors can arise when it gets out of sync with the selected Xcode version,
-like when you apply an update. Here are some things to try if you're
-experiencing errors with Xcode, for example "Xcode version must be specified to
-use an Apple CROSSTOOL".
-
-* Manually run Xcode and accept any terms and conditions.
-
-* Use `xcode-select` to indicate the correct version, accept the license, and
-  clear Bazel's state:
-
-```posix-terminal
-  sudo xcode-select -s /Applications/Xcode.app/Contents/Developer
-
-  sudo xcodebuild -license
-
-  bazel sync --configure
-```
-
-* If this does not work, you may also try running `bazel clean --expunge`.
-
-Note: If you've saved your Xcode to a different path, you can use `xcode-select -s`
-to point to that path.
diff --git a/6.5.0/query/aquery.mdx b/6.5.0/query/aquery.mdx
deleted file mode 100644
index 0164a9a..0000000
--- a/6.5.0/query/aquery.mdx
+++ /dev/null
@@ -1,384 +0,0 @@
----
-title: 'Action Graph Query (aquery)'
----
-
-
-The `aquery` command allows you to query for actions in your build graph.
-It operates on the post-analysis Configured Target Graph and exposes
-information about **Actions, Artifacts and their relationships.**
-
-`aquery` is useful when you are interested in the properties of the
-Actions/Artifacts generated from the Configured Target Graph, such as the
-actual commands run and their inputs/outputs/mnemonics.
-
-The tool accepts several command-line [options](#command-options).
-Notably, the aquery command runs on top of a regular Bazel build and inherits
-the set of options available during a build.
-
-It supports the same set of functions that is also available to traditional
-`query`, except `siblings`, `buildfiles` and `tests`.
-
-An example `aquery` output (without specific details):
-
-```
-$ bazel aquery 'deps(//some:label)'
-action 'Writing file some_file_name'
-  Mnemonic: ...
-  Target: ...
-  Configuration: ...
-  ActionKey: ...
-  Inputs: [...]
-  Outputs: [...]
-```
-
-## Basic syntax
-
-A simple example of the syntax for `aquery` is as follows:
-
-`bazel aquery "aquery_function(function(//target))"`
-
-The query expression (in quotes) consists of the following:
-
-* `aquery_function(...)`: functions specific to `aquery`.
-  More details [below](#using-aquery-functions).
-* `function(...)`: the standard [functions](/reference/query#functions),
-  as in traditional `query`.
-* `//target` is the label of the target of interest.
-
-```
-# aquery examples:
-# Get the action graph generated while building //src/target_a
-$ bazel aquery '//src/target_a'
-
-# Get the action graph generated while building all dependencies of //src/target_a
-$ bazel aquery 'deps(//src/target_a)'
-
-# Get the action graph generated while building all dependencies of //src/target_a
-# whose input filenames match the regex ".*cpp".
-$ bazel aquery 'inputs(".*cpp", deps(//src/target_a))'
-```
-
-## Using aquery functions
-
-There are three `aquery` functions:
-
-* `inputs`: filter actions by inputs.
-* `outputs`: filter actions by outputs.
-* `mnemonic`: filter actions by mnemonic.
-
-`expr ::= inputs(word, expr)`
-
-  The `inputs` operator returns the actions generated from building `expr`,
-  whose input filenames match the regex provided by `word`.
-
-`$ bazel aquery 'inputs(".*cpp", deps(//src/target_a))'`
-
-`outputs` and `mnemonic` functions share a similar syntax.
-
-You can also combine functions to achieve the AND operation. For example:
-
-```
-  $ bazel aquery 'mnemonic("Cpp.*", (inputs(".*cpp", inputs("foo.*", //src/target_a))))'
-```
-
-  The above command would find all actions involved in building `//src/target_a`,
-  whose mnemonics match `"Cpp.*"` and inputs match the patterns
-  `".*cpp"` and `"foo.*"`.
-
-Important: aquery functions can't be nested inside non-aquery functions.
-Conceptually, this makes sense since the output of aquery functions is Actions,
-not Configured Targets.
-
-An example of the syntax error produced:
-
-```
-  $ bazel aquery 'deps(inputs(".*cpp", //src/target_a))'
-  ERROR: aquery filter functions (inputs, outputs, mnemonic) produce actions,
-  and therefore can't be the input of other function types: deps
-  deps(inputs(".*cpp", //src/target_a))
-```
-
-## Options
-
-### Build options
-
-`aquery` runs on top of a regular Bazel build and thus inherits the set of
-[options](/reference/command-line-reference#build-options)
-available during a build.
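-
-For example, a sketch of combining a build option with an aquery expression
-(the target label is hypothetical):
-
-```
-# Build options such as --compilation_mode change the configured actions,
-# and therefore what aquery reports.
-$ bazel aquery --compilation_mode=opt 'mnemonic("Cpp.*", deps(//src/target_a))'
-```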
-
-### Aquery options
-
-#### `--output=(text|summary|proto|jsonproto|textproto), default=text`
-
-The default output format (`text`) is human-readable;
-use `proto`, `textproto`, or `jsonproto` for a machine-readable format.
-The proto message is `analysis.ActionGraphContainer`.
-
-#### `--include_commandline, default=true`
-
-Includes the content of the action command lines in the output (potentially large).
-
-#### `--include_artifacts, default=true`
-
-Includes names of the action inputs and outputs in the output (potentially large).
-
-#### `--include_aspects, default=true`
-
-Whether to include Aspect-generated actions in the output.
-
-#### `--include_param_files, default=false`
-
-Include the content of the param files used in the command (potentially large).
-
-Warning: Enabling this flag will automatically enable the `--include_commandline` flag.
-
-#### `--include_file_write_contents, default=false`
-
-Include file contents for the `actions.write()` action and the contents of the
-manifest file for the `SourceSymlinkManifest` action. The file contents are
-returned in the `file_contents` field with the proto-based output formats
-(`--output=proto`, `--output=textproto`, or `--output=jsonproto`).
-With `--output=text`, the output has a
-```
-FileWriteContents: [<base64-encoded file contents>]
-```
-line.
-
-#### `--skyframe_state, default=false`
-
-Without performing extra analysis, dump the Action Graph from Skyframe.
-
-Note: Specifying a target with `--skyframe_state` is currently not supported.
-This flag is only available with `--output=proto` or `--output=textproto`.
-
-## Other tools and features
-
-### Querying against the state of Skyframe
-
-[Skyframe](/reference/skyframe) is the evaluation and
-incrementality model of Bazel. On each instance of the Bazel server, Skyframe
-stores the dependency graph constructed from the previous runs of the
-[Analysis phase](/docs/build#analysis).
-
-In some cases, it is useful to query the Action Graph on Skyframe.
-An example use case would be:
-
-1. Run `bazel build //target_a`
-2. Run `bazel build //target_b`
-3. File `foo.out` was generated.
-
-_As a Bazel user, I want to determine if `foo.out` was generated from building
-`//target_a` or `//target_b`_.
-
-One could run `bazel aquery 'outputs("foo.out", //target_a)'` and
-`bazel aquery 'outputs("foo.out", //target_b)'` to figure out the action responsible
-for creating `foo.out`, and in turn the target. However, the number of different
-targets previously built can be larger than 2, which makes running multiple `aquery`
-commands a hassle.
-
-As an alternative, the `--skyframe_state` flag can be used:
-
-```
-  # List all actions on Skyframe's action graph
-  $ bazel aquery --output=proto --skyframe_state
-
-  # or
-
-  # List all actions on Skyframe's action graph, whose output matches "foo.out"
-  $ bazel aquery --output=proto --skyframe_state 'outputs("foo.out")'
-```
-
-With `--skyframe_state` mode, `aquery` takes the content of the Action Graph
-that Skyframe keeps on the Bazel instance, (optionally) performs filtering on
-it, and outputs the content without re-running the analysis phase.
-
-#### Special considerations
-
-##### Output format
-
-`--skyframe_state` is currently only available for `--output=proto`
-and `--output=textproto`.
-
-##### Non-inclusion of target labels in the query expression
-
-Currently, `--skyframe_state` queries the whole action graph that exists on Skyframe,
-regardless of the targets.
-Having the target label specified in the query together with
-`--skyframe_state` is considered a syntax error:
-
-```
-  # WRONG: Target Included
-  $ bazel aquery --output=proto --skyframe_state //target_a
-  ERROR: Error while parsing '//target_a)': Specifying build target(s) [//target_a] with --skyframe_state is currently not supported.
-
-  # WRONG: Target Included
-  $ bazel aquery --output=proto --skyframe_state 'inputs(".*.java", //target_a)'
-  ERROR: Error while parsing '//target_a)': Specifying build target(s) [//target_a] with --skyframe_state is currently not supported.
-
-  # CORRECT: Without Target
-  $ bazel aquery --output=proto --skyframe_state
-  $ bazel aquery --output=proto --skyframe_state 'inputs(".*.java")'
-```
-
-### Comparing aquery outputs
-
-You can compare the outputs of two different aquery invocations using the
-`aquery_differ` tool. For instance, when you make some changes to your rule
-definition and want to verify that the command lines being run did not change,
-`aquery_differ` is the tool for that.
-
-The tool is available in the [bazelbuild/bazel](https://github.com/bazelbuild/bazel/tree/master/tools/aquery_differ) repository.
-To use it, clone the repository to your local machine. An example usage:
-
-```
-  $ bazel run //tools/aquery_differ -- \
-  --before=/path/to/before.proto \
-  --after=/path/to/after.proto \
-  --input_type=proto \
-  --attrs=cmdline \
-  --attrs=inputs
-```
-
-The above command returns the difference between the `before` and `after`
-aquery outputs: which actions were present in one but not the other, which
-actions have different command lines or inputs in each aquery output, and so
-on. The result of running the above command would be:
-
-```
-  Aquery output 'after' change contains an action that generates the following outputs that aquery output 'before' change doesn't:
-  ...
-  /list of output files/
-  ...
-
-  [cmdline]
-  Difference in the action that generates the following output(s):
-    /path/to/abc.out
-  --- /path/to/before.proto
-  +++ /path/to/after.proto
-  @@ -1,3 +1,3 @@
-    ...
-    /cmdline diff, in unified diff format/
-    ...
-```
-
-#### Command options
-
-`--before, --after`: The aquery output files to be compared
-
-`--input_type=(proto|text_proto), default=proto`: the format of the input
-files. Support is provided for `proto` and `textproto` aquery output.
-
-`--attrs=(cmdline|inputs), default=cmdline`: the attributes of actions
-to be compared.
-
-### Aspect-on-aspect
-
-It is possible for [Aspects](/rules/aspects)
-to be applied on top of each other. The aquery output of the action generated by
-these Aspects would then include the _Aspect path_, which is the sequence of
-Aspects applied to the target which generated the action.
-
-An example of Aspect-on-Aspect:
-
-```
-  t0
-  ^
-  | <- a1
-  t1
-  ^
-  | <- a2
-  t2
-```
-
-Let t_i be a target of rule r_i, which applies an Aspect a_i
-to its dependencies.
-
-Assume that a2 generates an action X when applied to target t0. The text output of
-`bazel aquery --include_aspects 'deps(//t2)'` for action X would be:
-
-```
-  action ...
-    Mnemonic: ...
-    Target: //my_pkg:t0
-    Configuration: ...
-    AspectDescriptors: [//my_pkg:rule.bzl%a2(foo=...)
-      -> //my_pkg:rule.bzl%a1(bar=...)]
-    ...
-```
-
-This means that action `X` was generated by Aspect `a2` applied onto
-`a1(t0)`, where `a1(t0)` is the result of Aspect `a1` applied
-onto target `t0`.
-
-Each `AspectDescriptor` has the following format:
-
-```
-  AspectClass([param=value,...])
-```
-
-`AspectClass` could be the name of the Aspect class (for native Aspects) or
-`bzl_file%aspect_name` (for Starlark Aspects). `AspectDescriptor`s are
-sorted in topological order of the
-[dependency graph](/rules/aspects#aspect_basics).
-
-### Linking with the JSON profile
-
-While aquery provides information about the actions being run in a build (why
-they're being run, their inputs/outputs), the
-[JSON profile](/rules/performance#performance-profiling)
-tells us the timing and duration of their execution.
-It is possible to combine these two sets of information via a common
-denominator: an action's primary output.
-
-To include actions' outputs in the JSON profile, generate the profile with
-`--experimental_include_primary_output --noexperimental_slim_json_profile`.
-Slim profiles are incompatible with the inclusion of primary outputs. An
-action's primary output is included by default by aquery.
-
-We don't currently provide a canonical tool to combine these two data sources,
-but you should be able to build your own script with the above information.
-
-## Known issues
-
-### Handling shared actions
-
-Sometimes actions are
-[shared](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/actions/Actions.java;l=59;drc=146d51aa1ec9dcb721a7483479ef0b1ac21d39f1)
-between configured targets.
-
-In the execution phase, those shared actions are
-[simply considered as one](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/actions/Actions.java;l=241;drc=003b8734036a07b496012730964ac220f486b61f) and only executed once.
-However, aquery operates on the pre-execution, post-analysis action graph, and
-hence treats these like separate actions whose output Artifacts have the exact
-same `execPath`. As a result, equivalent Artifacts appear duplicated.
-
-The list of aquery issues/planned features can be found on
-[GitHub](https://github.com/bazelbuild/bazel/labels/team-Performance).
-
-## FAQs
-
-### The ActionKey remains the same even though the content of an input file changed.
-
-In the context of aquery, the `ActionKey` refers to the string returned by
-[ActionAnalysisMetadata#getKey](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/actions/ActionAnalysisMetadata.java;l=89;drc=8b856f5484f0117b2aebc302f849c2a15f273310):
-
-```
-  Returns a string encoding all of the significant behaviour of this Action that might affect the
-  output. The general contract of `getKey` is this: if the work to be performed by the
-  execution of this action changes, the key must change.
-
-  ...
-
-  Examples of changes that should affect the key are:
-
-  - Changes to the BUILD file that materially affect the rule which gave rise to this Action.
-  - Changes to the command-line options, environment, or other global configuration resources
-    which affect the behaviour of this kind of Action (other than changes to the names of the
-    input/output files, which are handled externally).
-  - An upgrade to the build tools which changes the program logic of this kind of Action
-    (typically this is achieved by incorporating a UUID into the key, which is changed each
-    time the program logic of this action changes).
-  Note the following exception: for actions that discover inputs, the key must change if any
-  input names change or else action validation may falsely validate.
-```
-
-This excludes changes to the content of the input files, and is not to be
-confused with
-[RemoteCacheClient#ActionKey](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/remote/common/RemoteCacheClient.java;l=38;drc=21577f202eb90ce94a337ebd2ede824d609537b6).
-
-## Updates
-
-For any issues/feature requests, please file an issue [here](https://github.com/bazelbuild/bazel/issues/new).
diff --git a/6.5.0/query/cquery.mdx b/6.5.0/query/cquery.mdx
deleted file mode 100644
index 17a924b..0000000
--- a/6.5.0/query/cquery.mdx
+++ /dev/null
@@ -1,646 +0,0 @@
----
-title: 'Configurable Query (cquery)'
----
-
-
-`cquery` is a variant of [`query`](/reference/query) that correctly handles
-[`select()`](/docs/configurable-attributes) and build options' effects on the build
-graph.
-
-It achieves this by running over the results of Bazel's [analysis
-phase](/rules/concepts#evaluation-model),
-which integrates these effects. `query`, by contrast, runs over the results of
-Bazel's loading phase, before options are evaluated.
-
-For example:
-
-```
-$ cat > tree/BUILD <<EOF
-sh_library(
-    name = "ash",
-    deps = select({
-        ":excelsior": [":manna-ash"],
-        ":americana": [":white-ash"],
-        "//conditions:default": [":common-ash"],
-    }),
-)
-sh_library(name = "manna-ash")
-sh_library(name = "white-ash")
-sh_library(name = "common-ash")
-config_setting(
-    name = "excelsior",
-    values = {"define": "species=excelsior"},
-)
-config_setting(
-    name = "americana",
-    values = {"define": "species=americana"},
-)
-EOF
-```
-
-```
-# Traditional query: query doesn't know which select() branch you will choose,
-# so it conservatively lists all possible choices, including all used config_settings.
-$ bazel query "deps(//tree:ash)" --noimplicit_deps
-//tree:americana
-//tree:ash
-//tree:common-ash
-//tree:excelsior
-//tree:manna-ash
-//tree:white-ash
-
-# cquery: cquery lets you set build options at the command line and chooses
-# the exact dependencies those options imply (and also the config_setting targets).
-$ bazel cquery "deps(//tree:ash)" --define species=excelsior --noimplicit_deps
-//tree:ash (9f87702)
-//tree:manna-ash (9f87702)
-//tree:americana (9f87702)
-//tree:excelsior (9f87702)
-```
-
-Each result includes a [unique identifier](#configurations) `(9f87702)` of
-the [configuration](/reference/glossary#configuration) the
-target is built with.
-
-Since `cquery` runs over the configured target graph, it doesn't have insight
-into artifacts like build actions nor access to
-[`test_suite`](/reference/be/general#test_suite)
-rules as they are not configured targets. For the former, see
-[`aquery`](/docs/aquery).
-
-## Basic syntax
-
-A simple `cquery` call looks like:
-
-`bazel cquery "function(//target)"`
-
-The query expression `"function(//target)"` consists of the following:
-
-* **`function(...)`** is the function to run on the target. `cquery`
-  supports most
-  of `query`'s [functions](/reference/query#functions), plus a
-  few new ones.
-* **`//target`** is the expression fed to the function. In this example, the
-  expression is a simple target. But the query language also allows nesting of functions.
-  See the [Query How-To](/query/quickstart) for examples.
-
-
-`cquery` requires a target to run through the [loading and analysis](/rules/concepts#evaluation-model)
-phases. Unless otherwise specified, `cquery` parses the target(s) listed in the
-query expression. See [`--universe_scope`](#universe-scope)
-for querying dependencies of top-level build targets.
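-
-As a concrete sketch of such nesting (the `//foo:app` label is hypothetical),
-standard `query` functions compose inside `cquery` just as they do in
-traditional `query`:
-
-```
-# Filter the dependencies of //foo:app down to cc_library rules.
-$ bazel cquery "kind(cc_library, deps(//foo:app))"
-```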
-
-## Configurations
-
-The line:
-
-```
-//tree:ash (9f87702)
-```
-
-means `//tree:ash` was built in a configuration with ID `9f87702`. For most
-targets, this is an opaque hash of the build option values defining the
-configuration.
-
-To see the configuration's complete contents, run:
-
-```
-$ bazel config 9f87702
-```
-
-The host configuration uses the special ID `(HOST)`. Non-generated source files, like
-those commonly found in `srcs`, use the special ID `(null)` (because they
-don't need to be configured).
-
-`9f87702` is a prefix of the complete ID. This is because complete IDs are
-SHA-256 hashes, which are long and hard to follow. `cquery` understands any valid
-prefix of a complete ID, similar to
-[Git short hashes](https://git-scm.com/book/en/v2/Git-Tools-Revision-Selection#_revision_selection).
-To see complete IDs, run `$ bazel config`.
-
-## Target pattern evaluation
-
-`//foo` has a different meaning for `cquery` than for `query`. This is because
-`cquery` evaluates _configured_ targets and the build graph may have multiple
-configured versions of `//foo`.
-
-For `cquery`, a target pattern in the query expression evaluates
-to every configured target with a label that matches that pattern. Output is
-deterministic, but `cquery` makes no ordering guarantee beyond the
-[core query ordering contract](/reference/query#graph-order).
-
-This produces subtler results for query expressions than with `query`.
-For example, the following can produce multiple results:
-
-```
-# Analyzes //foo in the target configuration, but also analyzes
-# //genrule_with_foo_as_tool which depends on a host-configured
-# //foo. So there are two configured target instances of //foo in
-# the build graph.
-$ bazel cquery //foo --universe_scope=//foo,//genrule_with_foo_as_tool
-//foo (9f87702)
-//foo (HOST)
-```
-
-If you want to precisely declare which instance to query over, use
-the [`config`](#config) function.
-
-See `query`'s [target pattern
-documentation](/reference/query#target-patterns) for more information on target patterns.
-
-## Functions
-
-Of the [set of functions](/reference/query#functions "list of query functions")
-supported by `query`, `cquery` supports all but [`visible`](/reference/query#visible),
-[`siblings`](/reference/query#siblings), [`buildfiles`](/reference/query#buildfiles),
-and [`tests`](/reference/query#tests).
-
-`cquery` also introduces the following new functions:
-
-### config
-
-`expr ::= config(expr, word)`
-
-The `config` operator attempts to find the configured target for
-the label denoted by the first argument and configuration specified by the
-second argument.
-
-Valid values for the second argument are `target`, `host`, `null`, or a
-[custom configuration hash](#configurations). Hashes can be retrieved from `$
-bazel config` or a previous `cquery`'s output.
-
-Examples:
-
-```
-$ bazel cquery "config(//bar, host)" --universe_scope=//foo
-```
-
-```
-$ bazel cquery "deps(//foo)"
-//bar (HOST)
-//baz (3732cc8)
-
-$ bazel cquery "config(//baz, 3732cc8)"
-```
-
-If not all results of the first argument can be found in the specified
-configuration, only those that can be found are returned. If no results
-can be found in the specified configuration, the query fails.
-
-## Options
-
-### Build options
-
-`cquery` runs over a regular Bazel build and thus inherits the set of
-[options](/reference/command-line-reference#build-options) available during a build.
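-
-Continuing the `//tree:ash` example from earlier, changing an inherited build
-option changes which `select()` branch appears in the results (the
-configuration ID shown here is illustrative):
-
-```
-# With a different --define value, the ":americana" branch is chosen instead
-# of ":excelsior", so //tree:white-ash replaces //tree:manna-ash.
-$ bazel cquery "deps(//tree:ash)" --define species=americana --noimplicit_deps
-//tree:ash (d1c4c9b)
-//tree:white-ash (d1c4c9b)
-//tree:americana (d1c4c9b)
-//tree:excelsior (d1c4c9b)
-```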
-
-### Using cquery options
-
-#### `--universe_scope` (comma-separated list)
-
-Often, the dependencies of configured targets go through
-[transitions](/rules/rules#configurations),
-which causes their configuration to differ from that of their dependents. This
-flag allows you to query a target as if it were built as a dependency or a
-transitive dependency of another target. For example:
-
-```
-# x/BUILD
-genrule(
-    name = "my_gen",
-    srcs = ["x.in"],
-    outs = ["x.cc"],
-    cmd = "$(locations :tool) $< >$@",
-    tools = [":tool"],
-)
-cc_library(
-    name = "tool",
-)
-```
-
-Genrules configure their tools in the
-[host configuration](/rules/rules#configurations)
-so the following queries would produce the following outputs:
-
-Query | Target Built | Output
------ | ------------ | ------
-`bazel cquery "//x:tool"` | `//x:tool` | `//x:tool (target config)`
-`bazel cquery "//x:tool" --universe_scope="//x:my_gen"` | `//x:my_gen` | `//x:tool (host config)`
-
-If this flag is set, its contents are built. _If it's not set, all targets
-mentioned in the query expression are built_ instead. The transitive closure of
-the built targets is used as the universe of the query. Either way, the targets
-to be built must be buildable at the top level (that is, compatible with
-top-level options). `cquery` returns results in the transitive closure of these
-top-level targets.
-
-Even if it's possible to build all targets in a query expression at the top
-level, it may be beneficial to not do so. For example, explicitly setting
-`--universe_scope` could prevent building targets multiple times in
-configurations you don't care about. It could also help specify which configuration version of a
-target you're looking for (since it's not currently possible
-to fully specify this any other way). You should set this flag
-if your query expression is more complex than `deps(//foo)`.
-
-#### `--implicit_deps` (boolean, default=True)
-
-Setting this flag to false filters out all results that aren't explicitly set in
-the BUILD file and are instead set elsewhere by Bazel. This includes filtering resolved
-toolchains.
-
-#### `--tool_deps` (boolean, default=True)
-
-Setting this flag to false filters out all configured targets for which the
-path from the queried target to them crosses a transition between the target
-configuration and the
-[non-target configurations](/rules/rules#configurations).
-If the queried target is in the target configuration, setting `--notool_deps` will
-only return targets that are also in the target configuration. If the queried
-target is in a non-target configuration, setting `--notool_deps` will only return
-targets also in non-target configurations. This setting generally does not affect filtering
-of resolved toolchains.
-
-#### `--include_aspects` (boolean, default=True)
-
-[Aspects](/rules/aspects) can add
-additional dependencies to a build. By default, `cquery` doesn't follow aspects because
-they make the queryable graph bigger, which uses more memory. But following them produces more
-accurate results.
-
-If you're not worried about the memory impact of large queries, enable this flag by default in
-your bazelrc.
-
-If you query with aspects disabled, you can experience a problem where target X fails while
-building target Y but `cquery somepath(Y, X)` and `cquery deps(Y) | grep 'X'`
-return no results because the dependency occurs through an aspect.
-
-## Output formats
-
-By default, cquery outputs results in a dependency-ordered list of label and configuration pairs.
-There are other options for exposing the results as well.
-
-### Transitions
-
-```
---transitions=lite
---transitions=full
-```
-
-Configuration [transitions](/rules/rules#configurations)
-are used to build targets underneath the top-level targets in different
-configurations than the top-level targets.
-
-For example, a target might impose a transition to the host configuration on all
-dependencies in its `tools` attribute. These are known as attribute
-transitions. Rules can also impose transitions on their own configurations,
-known as rule class transitions. This output format outputs information about
-these transitions, such as what type they are and the effect they have on build
-options.
-
-This output format is triggered by the `--transitions` flag, which by default is
-set to `NONE`. It can be set to `FULL` or `LITE` mode.
-`FULL` mode outputs
-information about rule class transitions and attribute transitions, including a
-detailed diff of the options before and after the transition. `LITE` mode
-outputs the same information without the options diff.
-
-### Protocol message output
-
-```
---output=proto
-```
-
-This option causes the resulting targets to be printed in binary protocol
-buffer form. The definition of the protocol buffer can be found at
-[src/main/protobuf/analysis.proto](https://github.com/bazelbuild/bazel/blob/master/src/main/protobuf/analysis_v2.proto).
-
-`CqueryResult` is the top level message containing the results of the cquery. It
-has a list of `ConfiguredTarget` messages and a list of `Configuration`
-messages. Each `ConfiguredTarget` has a `configuration_id` whose value is equal
-to that of the `id` field from the corresponding `Configuration` message.
-
-#### `--[no]proto:include_configurations`
-
-By default, cquery results return configuration information as part of each
-configured target. If you'd like to omit this information and get proto output
-that is formatted exactly like query's proto output, set this flag to false.
-
-See [query's proto output documentation](/reference/query#output-formats)
-for more proto output-related options.
-
-Note: While selects are resolved both at the top level of returned
-targets and within attributes, all possible inputs for selects are still
-included as `rule_input` fields.
-
-### Graph output
-
-```
---output=graph
-```
-
-This option generates output as a Graphviz-compatible .dot file. See `query`'s
-[graph output documentation](/reference/query#display-result-graph) for details. `cquery`
-also supports [`--graph:node_limit`](/reference/query#graph-nodelimit) and
-[`--graph:factored`](/reference/query#graph-factored).
-
-### Files output
-
-```
---output=files
-```
-
-This option prints a list of the output files produced by each target matched
-by the query, similar to the list printed at the end of a `bazel build`
-invocation. The output contains only the files advertised in the requested
-output groups as determined by the
-[`--output_groups`](/reference/command-line-reference#flag--output_groups) flag.
-It does include source files.
-
-Note: The output of `bazel cquery --output=files //pkg:foo` contains the output
-files of `//pkg:foo` in *all* configurations that occur in the build (also see
-the [section on target pattern evaluation](#target-pattern-evaluation)). If that
-is not desired, wrap your query in [`config(..., target)`](#config).
-
-### Defining the output format using Starlark
-
-```
---output=starlark
-```
-
-This output format calls a [Starlark](/rules/language)
-function for each configured target in the query result, and prints the value
-returned by the call. The `--starlark:file` flag specifies the location of a
-Starlark file that defines a function named `format` with a single parameter,
-`target`. This function is called for each [Target](/rules/lib/Target)
-in the query result. Alternatively, for convenience, you may specify just the
-body of a function declared as `def format(target): return expr` by using the
-`--starlark:expr` flag.
-
-#### 'cquery' Starlark dialect
-
-The cquery Starlark environment differs from a BUILD or .bzl file.
-It includes
-all core Starlark
-[built-in constants and functions](https://github.com/bazelbuild/starlark/blob/master/spec.md#built-in-constants-and-functions),
-plus a few cquery-specific ones described below, but not (for example) `glob`,
-`native`, or `rule`, and it does not support load statements.
-
-##### build_options(target)
-
-`build_options(target)` returns a map whose keys are build option identifiers (see
-[Configurations](/rules/config))
-and whose values are their Starlark values. Build options whose values are not legal Starlark
-values are omitted from this map.
-
-If the target is an input file, `build_options(target)` returns `None`, as input file
-targets have a null configuration.
-
-##### providers(target)
-
-`providers(target)` returns a map whose keys are names of
-[providers](/rules/rules#providers)
-(for example, `"DefaultInfo"`) and whose values are their Starlark values. Providers
-whose values are not legal Starlark values are omitted from this map.
-
-#### Examples
-
-Print a space-separated list of the base names of all files produced by `//foo`:
-
-```
-  bazel cquery //foo --output=starlark \
-    --starlark:expr="' '.join([f.basename for f in target.files.to_list()])"
-```
-
-Print a space-separated list of the paths of all files produced by **rule** targets in
-`//bar` and its subpackages:
-
-```
-  bazel cquery 'kind(rule, //bar/...)' --output=starlark \
-    --starlark:expr="' '.join([f.path for f in target.files.to_list()])"
-```
-
-Print a list of the mnemonics of all actions registered by `//foo`.
-
-```
-  bazel cquery //foo --output=starlark \
-    --starlark:expr="[a.mnemonic for a in target.actions]"
-```
-
-Print a list of compilation outputs registered by a `cc_library` `//baz`.
-
-```
-  bazel cquery //baz --output=starlark \
-    --starlark:expr="[f.path for f in target.output_groups.compilation_outputs.to_list()]"
-```
-
-Print the value of the command line option `--javacopt` when building `//foo`.
-
-```
-  bazel cquery //foo --output=starlark \
-    --starlark:expr="build_options(target)['//command_line_option:javacopt']"
-```
-
-Print the label of each target with exactly one output. This example uses
-Starlark functions defined in a file.
-
-```
-  $ cat example.cquery
-
-  def has_one_output(target):
-    return len(target.files.to_list()) == 1
-
-  def format(target):
-    if has_one_output(target):
-      return target.label
-    else:
-      return ""
-
-  $ bazel cquery //baz --output=starlark --starlark:file=example.cquery
-```
-
-Print the label of each target which is strictly Python 3. This example uses
-Starlark functions defined in a file.
-
-```
-  $ cat example.cquery
-
-  def format(target):
-    p = providers(target)
-    py_info = p.get("PyInfo")
-    if py_info and py_info.has_py3_only_sources:
-      return target.label
-    else:
-      return ""
-
-  $ bazel cquery //baz --output=starlark --starlark:file=example.cquery
-```
-
-Extract a value from a user-defined provider.
-
-```
-  $ cat some_package/my_rule.bzl
-
-  MyRuleInfo = provider(fields={"color": "the name of a color"})
-
-  def _my_rule_impl(ctx):
-      ...
-      return [MyRuleInfo(color="red")]
-
-  my_rule = rule(
-      implementation = _my_rule_impl,
-      attrs = {...},
-  )
-
-  $ cat example.cquery
-
-  def format(target):
-    p = providers(target)
-    my_rule_info = p.get("//some_package:my_rule.bzl%MyRuleInfo")
-    if my_rule_info:
-      return my_rule_info.color
-    return ""
-
-  $ bazel cquery //baz --output=starlark --starlark:file=example.cquery
-```
-
-## cquery vs. query
-
-`cquery` and `query` complement each other and excel in
-different niches. Consider the following to decide which is right for you:
-
-* `cquery` follows specific `select()` branches to
-  model the exact graph you build. `query` doesn't know which
-  branch the build chooses, so overapproximates by including all branches.
-* `cquery`'s precision requires building more of the graph than
-  `query` does. Specifically, `cquery`
-  evaluates _configured targets_ while `query` only
-  evaluates _targets_. This takes more time and uses more memory.
-* `cquery`'s interpretation of
-  the [query language](/reference/query) introduces ambiguity
-  that `query` avoids. For example,
-  if `"//foo"` exists in two configurations, which one
-  should `cquery "deps(//foo)"` use?
-  The [`config`](#config) function can help with this.
-* As a newer tool, `cquery` lacks support for certain use
-  cases. See [Known issues](#known-issues) for details.
-
-## Known issues
-
-**All targets that `cquery` "builds" must have the same configuration.**
-
-Before evaluating queries, `cquery` triggers a build up to just
-before the point where build actions would execute. The targets it
-"builds" are by default selected from all labels that appear in the query
-expression (this can be overridden
-with [`--universe_scope`](#universe-scope)). These
-must have the same configuration.
-
-While these generally share the top-level "target" configuration,
-rules can change their own configuration with
-[incoming edge transitions](/rules/config#incoming-edge-transitions).
-This is where `cquery` falls short.
-
-Workaround: If possible, set `--universe_scope` to a stricter
-scope. For example:
-
-```
-# This command attempts to build the transitive closures of both //foo and
-# //bar. //bar uses an incoming edge transition to change its --cpu flag.
-$ bazel cquery 'somepath(//foo, //bar)'
-ERROR: Error doing post analysis query: Top-level targets //foo and //bar
-have different configurations (top-level targets with different
-configurations is not supported)
-
-# This command only builds the transitive closure of //foo, under which
-# //bar should exist in the correct configuration.
-$ bazel cquery 'somepath(//foo, //bar)' --universe_scope=//foo
-```
-
-**No support for [`--output=xml`](/reference/query#xml).**
-
-**Non-deterministic output.**
-
-`cquery` does not automatically wipe the build graph from
-previous commands and is therefore prone to picking up results from past
-queries. For example, `genrule` exerts a host transition on
-its `tools` attribute - that is, it configures its tools in the
-[host configuration](/rules/rules#configurations).
-
-You can see the lingering effects of that transition below.
-
-```
-$ cat > foo/BUILD <<EOF
-genrule(
-    name = "my_gen",
-    srcs = ["x.in"],
-    outs = ["x.cc"],
-    cmd = "$(locations :tool) $< >$@",
-    tools = [":tool"],
-)
-cc_library(
-    name = "tool",
-)
-EOF
-
-$ bazel cquery "//foo:tool"
-tool(target_config)
-
-$ bazel cquery "deps(//foo:my_gen)"
-my_gen (target_config)
-tool (host_config)
-...
-
-$ bazel cquery "//foo:tool"
-tool(host_config)
-```
-
-Workaround: change any startup option to force re-analysis of configured targets.
-For example, add `--test_arg=<whatever>` to your build command.
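-
-As a sketch of the workaround (reusing the hypothetical `//foo` package from
-above), any otherwise-irrelevant option change discards the stale analysis
-results:
-
-```
-# Changing any option forces re-analysis, so the query once again reflects
-# the configuration implied by the query expression itself.
-$ bazel cquery "//foo:tool" --test_arg=force_reanalysis
-tool(target_config)
-```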
-
-## Troubleshooting
-
-### Recursive target patterns (`/...`)
-
-If you encounter:
-
-```
-$ bazel cquery --universe_scope=//foo:app "somepath(//foo:app, //foo/...)"
-ERROR: Error doing post analysis query: Evaluation failed: Unable to load package '[foo]'
-because package is not in scope. Check that all target patterns in query expression are within the
---universe_scope of this query.
-```
-
-this incorrectly suggests package `//foo` isn't in scope even though
-`--universe_scope=//foo:app` includes it. This is due to design limitations in
-`cquery`. As a workaround, explicitly include `//foo/...` in the universe
-scope:
-
-```
-$ bazel cquery --universe_scope=//foo:app,//foo/... "somepath(//foo:app, //foo/...)"
-```
-
-If that doesn't work (for example, because some target in `//foo/...` can't
-build with the chosen build flags), manually unwrap the pattern into its
-constituent packages with a pre-processing query:
-
-```
-# Replace "//foo/..." with a subshell query call (not cquery!) outputting each package, piped into
-# a sed call converting "<pkg>" to "//<pkg>:*", piped into a "+"-delimited line merge.
-# Output looks like "//foo:*+//foo/bar:*+//foo/baz".
-#
-$ bazel cquery --universe_scope=//foo:app "somepath(//foo:app, $(bazel query //foo/...
---output=package | sed -e 's/^/\/\//' -e 's/$/:*/' | paste -sd "+" -))"
-```
diff --git a/6.5.0/reference/glossary.mdx b/6.5.0/reference/glossary.mdx
deleted file mode 100644
index 5c549cb..0000000
--- a/6.5.0/reference/glossary.mdx
+++ /dev/null
@@ -1,627 +0,0 @@
----
-title: 'Bazel Glossary'
----
-
-
-### Action
-
-A command to run during the build, for example, a call to a compiler that takes
-[artifacts](#artifact) as inputs and produces other artifacts as outputs.
-Includes metadata like the command line arguments, action key, environment
-variables, and declared input/output artifacts.
-
-**See also:** [Rules documentation](/rules/rules#actions)
-
-### Action cache
-
-An on-disk cache that stores a mapping of executed [actions](#action) to the
-outputs they created. The cache key is known as the [action key](#action-key). A
-core component of Bazel's incrementality model. The cache is stored in the
-output base directory and thus survives Bazel server restarts.
-
-### Action graph
-
-An in-memory graph of [actions](#action) and the [artifacts](#artifact) that
-these actions read and generate. The graph might include artifacts that exist as
-source files (for example, in the file system) as well as generated
-intermediate/final artifacts that are not mentioned in `BUILD` files. Produced
-during the [analysis phase](#analysis-phase) and used during the [execution
-phase](#execution-phase).
-
-### Action graph query (aquery)
-
-A [query](#query-concept) tool that can query over build [actions](#action).
-This provides the ability to analyze how [build rules](#rule) translate into the
-actual work builds do.
-
-### Action key
-
-The cache key of an [action](#action). Computed based on action metadata, which
-might include the command to be executed in the action, compiler flags, library
-locations, or system headers, depending on the action. Enables Bazel to cache or
-invalidate individual actions deterministically.
-
-### Analysis phase
-
-The second phase of a build. Processes the [target graph](#target-graph)
-specified in [`BUILD` files](#build-file) to produce an in-memory [action
-graph](#action-graph) that determines the order of actions to run during the
-[execution phase](#execution-phase).
This is the phase in which rule -implementations are evaluated. - -### Artifact - -A source file or a generated file. Can also be a directory of files, known as -[tree artifacts](#tree-artifact). - -An artifact may be an input to multiple actions, but must only be generated by -at most one action. - -An artifact that corresponds to a [file target](#target) can be addressed by a -label. - -### Aspect - -A mechanism for rules to create additional [actions](#action) in their -dependencies. For example, if target A depends on B, one can apply an aspect on -A that traverses *up* a dependency edge to B, and runs additional actions in B -to generate and collect additional output files. These additional actions are -cached and reused between targets requiring the same aspect. Created with the -`aspect()` Starlark Build API function. Can be used, for example, to generate -metadata for IDEs, and create actions for linting. - -**See also:** [Aspects documentation](/rules/aspects) - -### Aspect-on-aspect - -A composition mechanism whereby aspects can be applied to the results -of other aspects. For example, an aspect that generates information for use by -IDEs can be applied on top of an aspect that generates `.java` files from a -proto. - -For an aspect `A` to apply on top of aspect `B`, the [providers](#provider) that -`B` advertises in its [`provides`](/rules/lib/globals#aspect.provides) attribute -must match what `A` declares it wants in its [`required_aspect_providers`](/rules/lib/globals#aspect.required_aspect_providers) -attribute. - -### Attribute - -A parameter to a [rule](#rule), used to express per-target build information. -Examples include `srcs`, `deps`, and `copts`, which respectively declare a -target's source files, dependencies, and custom compiler options. The particular -attributes available for a given target depend on its rule type. - -### .bazelrc - -Bazel’s configuration file used to change the default values for [startup -flags](#startup-flags) and [command flags](#command-flags), and to define common -groups of options that can then be set together on the Bazel command line using -a `--config` flag. Bazel can combine settings from multiple bazelrc files -(systemwide, per-workspace, per-user, or from a custom location), and a -`bazelrc` file may also import settings from other `bazelrc` files. - -### Blaze - -The Google-internal version of Bazel. Google’s main build system for its -mono-repository. - -### BUILD File - -A `BUILD` file is the main configuration file that tells Bazel what software -outputs to build, what their dependencies are, and how to build them. Bazel -takes a `BUILD` file as input and uses the file to create a graph of dependencies -and to derive the actions that must be completed to build intermediate and final -software outputs. A `BUILD` file marks a directory and any sub-directories not -containing a `BUILD` file as a [package](#package), and can contain -[targets](#target) created by [rules](#rule). The file can also be named -`BUILD.bazel`. - -### BUILD.bazel File - -See [`BUILD` File](#build-file). Takes precedence over a `BUILD` file in the same -directory. - -### .bzl File - -A file that defines rules, [macros](#macro), and constants written in -[Starlark](#starlark). These can then be imported into [`BUILD` -files](#build-file) using the `load()` function. - -// TODO: ### Build event protocol - -// TODO: ### Build flag - -### Build graph - -The dependency graph that Bazel constructs and traverses to perform a build. 
-Includes nodes like [targets](#target), [configured
-targets](#configured-target), [actions](#action), and [artifacts](#artifact). A
-build is considered complete when all [artifacts](#artifact) on which a set of
-requested targets depend are verified as up-to-date.
-
-### Build setting
-
-A Starlark-defined piece of [configuration](#configuration).
-[Transitions](#transition) can set build settings to change a subgraph's
-configuration. If exposed to the user as a [command-line flag](#command-flags),
-also known as a build flag.
-
-### Clean build
-
-A build that doesn't use the results of earlier builds. This is generally slower
-than an [incremental build](#incremental-build) but commonly considered to be
-more [correct](#correctness). Bazel guarantees both clean and incremental builds
-are always correct.
-
-### Client-server model
-
-The `bazel` command-line client automatically starts a background server on the
-local machine to execute Bazel [commands](#command). The server persists across
-commands but automatically stops after a period of inactivity (or explicitly via
-`bazel shutdown`). Splitting Bazel into a server and client helps amortize JVM
-startup time and supports faster [incremental builds](#incremental-build)
-because the [action graph](#action-graph) remains in memory across commands.
-
-### Command
-
-Used on the command line to invoke different Bazel functions, like `bazel
-build`, `bazel test`, `bazel run`, and `bazel query`.
-
-### Command flags
-
-A set of flags specific to a [command](#command). Command flags are specified
-*after* the command (`bazel build <command flags>`). Flags can be applicable to
-one or more commands. For example, `--configure` is a flag exclusively for the
-`bazel sync` command, but `--keep_going` is applicable to `sync`, `build`,
-`test` and more. Flags are often used for [configuration](#configuration)
-purposes, so changes in flag values can cause Bazel to invalidate in-memory
-graphs and restart the [analysis phase](#analysis-phase).
-
-### Configuration
-
-Information outside of [rule](#rule) definitions that impacts how rules generate
-[actions](#action). Every build has at least one configuration specifying the
-target platform, action environment variables, and command-line [build
-flags](#command-flags). [Transitions](#transition) may create additional
-configurations, such as for host tools or cross-compilation.
-
-**See also:** [Configurations](/rules/rules#configurations)
-
-// TODO: ### Configuration fragment
-
-### Configuration trimming
-
-The process of only including the pieces of [configuration](#configuration) a
-target actually needs. For example, if you build Java binary `//:j` with C++
-dependency `//:c`, it's wasteful to include the value of `--javacopt` in the
-configuration of `//:c` because changing `--javacopt` unnecessarily breaks C++
-build cacheability.
-
-### Configured query (cquery)
-
-A [query](#query-concept) tool that queries over [configured
-targets](#configured-target) (after the [analysis phase](#analysis-phase)
-completes). This means `select()` and [build flags](#command-flags) (such as
-`--platforms`) are accurately reflected in the results.
-
-**See also:** [cquery documentation](/docs/cquery)
-
-### Configured target
-
-The result of evaluating a [target](#target) with a
-[configuration](#configuration). The [analysis phase](#analysis-phase) produces
-this by combining the build's options with the targets that need to be built.
-For example, if `//:foo` builds for two different architectures in the same
-build, it has two configured targets: `<//:foo, x86>` and `<//:foo, arm>`.
-
-### Correctness
-
-A build is correct when its output faithfully reflects the state of its
-transitive inputs. To achieve correct builds, Bazel strives to be
-[hermetic](#hermeticity) and reproducible, and to make [build
-analysis](#analysis-phase) and [action execution](#execution-phase)
-deterministic.
-
-### Dependency
-
-A directed edge between two [targets](#target). A target `//:foo` has a *target
-dependency* on target `//:bar` if `//:foo`'s attribute values contain a
-reference to `//:bar`. `//:foo` has an *action dependency* on `//:bar` if an
-action in `//:foo` depends on an input [artifact](#artifact) created by an
-action in `//:bar`.
-
-### Depset
-
-A data structure for collecting data on transitive dependencies. Optimized so
-that merging depsets is time and space efficient, because it’s common to have
-very large depsets (hundreds of thousands of files). Implemented to
-recursively refer to other depsets for space efficiency reasons. [Rule](#rule)
-implementations should not "flatten" depsets by converting them to lists unless
-the rule is at the top level of the build graph. Flattening large depsets incurs
-huge memory consumption. Also known as *nested sets* in Bazel's internal
-implementation.
-
-**See also:** [Depset documentation](/rules/depsets)
-
-### Disk cache
-
-A local on-disk blob store for the remote caching feature. Can be used in
-conjunction with an actual remote blob store.
-
-### Distdir
-
-A read-only directory containing files that Bazel would otherwise fetch from the
-internet using repository rules. Enables builds to run fully offline.
-
-### Dynamic execution
-
-An execution strategy that selects between local and remote execution based on
-various heuristics, and uses the execution results of the faster successful
-method. Certain [actions](#action) are executed faster locally (for example,
-linking) and others are faster remotely (for example, highly parallelizable
-compilation). A dynamic execution strategy can provide the best possible
-incremental and clean build times.
-
-### Execution phase
-
-The third phase of a build. Executes the [actions](#action) in the [action
-graph](#action-graph) created during the [analysis phase](#analysis-phase).
-These actions invoke executables (compilers, scripts) to read and write
-[artifacts](#artifact). *Spawn strategies* control how these actions are
-executed: locally, remotely, dynamically, sandboxed, docker, and so on.
-
-### Execution root
-
-A directory in the [workspace](#workspace)’s [output base](#output-base)
-directory where local [actions](#action) are executed in
-non-[sandboxed](#sandboxing) builds. The directory contents are mostly symlinks
-of input [artifacts](#artifact) from the workspace. The execution root also
-contains symlinks to external repositories as other inputs and the `bazel-out`
-directory to store outputs. Prepared during the [loading phase](#loading-phase)
-by creating a *symlink forest* of the directories that represent the transitive
-closure of packages on which a build depends. Accessible with `bazel info
-execution_root` on the command line.
-
-### File
-
-See [Artifact](#artifact).
-
-### Hermeticity
-
-A build is hermetic if there are no external influences on its build and test
-operations, which helps to make sure that results are deterministic and
-[correct](#correctness).
-For example, hermetic builds typically disallow network
-access to actions, restrict access to declared inputs, use fixed timestamps and
-timezones, restrict access to environment variables, and use fixed seeds for
-random number generators.
-
-### Incremental build
-
-An incremental build reuses the results of earlier builds to reduce build time
-and resource usage. Dependency checking and caching aim to produce correct
-results for this type of build. An incremental build is the opposite of a clean
-build.
-
-// TODO: ### Install base
-
-### Label
-
-An identifier for a [target](#target). A fully-qualified label such as
-`//path/to/package:target` consists of `//` to mark the workspace root
-directory, `path/to/package` as the directory that contains the [`BUILD`
-file](#build-file) declaring the target, and `:target` as the name of the target
-declared in the aforementioned `BUILD` file. May also be prefixed with
-`@my_repository//<..>` to indicate that the target is declared in an external
-repository named `my_repository`.
-
-### Loading phase
-
-The first phase of a build where Bazel parses `WORKSPACE`, `BUILD`, and [`.bzl`
-files](#bzl-file) to create [packages](#package). [Macros](#macro) and certain
-functions like `glob()` are evaluated in this phase. Interleaved with the second
-phase of the build, the [analysis phase](#analysis-phase), to build up a [target
-graph](#target-graph).
-
-### Macro
-
-A mechanism to compose multiple [rule](#rule) target declarations together under
-a single [Starlark](#starlark) function. Enables reusing common rule declaration
-patterns across `BUILD` files. Expanded to the underlying rule target declarations
-during the [loading phase](#loading-phase).
-
-**See also:** [Macro documentation](/rules/macros)
-
-### Mnemonic
-
-A short, human-readable string selected by a rule author to quickly understand
-what an [action](#action) in the rule is doing. Mnemonics can be used as
-identifiers for *spawn strategy* selections. Some examples of action mnemonics
-are `Javac` from Java rules, `CppCompile` from C++ rules, and
-`AndroidManifestMerger` from Android rules.
-
-### Native rules
-
-[Rules](#rule) that are built into Bazel and implemented in Java. Such rules
-appear in [`.bzl` files](#bzl-file) as functions in the native module (for
-example, `native.cc_library` or `native.java_library`). User-defined rules
-(non-native) are created using [Starlark](#starlark).
-
-### Output base
-
-A [workspace](#workspace)-specific directory to store Bazel output files. Used
-to separate outputs from the *workspace*'s source tree. Located in the [output
-user root](#output-user-root).
-
-### Output groups
-
-A group of files that is expected to be built when Bazel finishes building a
-target. [Rules](#rule) put their usual outputs in the "default output group"
-(e.g. the `.jar` file of a `java_library`, `.a` and `.so` for `cc_library`
-targets). The default output group is the output group whose
-[artifacts](#artifact) are built when a target is requested on the command line.
-Rules can define more named output groups that can be explicitly specified in
-[`BUILD` files](#build-file) (`filegroup` rule) or the command line
-(`--output_groups` flag).
-
-### Output user root
-
-A user-specific directory to store Bazel's outputs. The directory name is
-derived from the user's system username. Prevents output file collisions if
-multiple users are building the same project on the system at the same time.
-Contains subdirectories corresponding to build outputs of individual workspaces, -also known as [output bases](#output-base). - -### Package - -The set of [targets](#target) defined by a [`BUILD` file](#build-file). A -package's name is the `BUILD` file's path relative to the workspace root. A -package can contain subpackages, or subdirectories containing `BUILD` files, -thus forming a package hierarchy. - -### Package group - -A [target](#target) representing a set of packages. Often used in `visibility` -attribute values. - -### Platform - -A "machine type" involved in a build. This includes the machine Bazel runs on -(the "host" platform), the machines build tools execute on ("exec" platforms), -and the machines targets are built for ("target platforms"). - -### Provider - -A schema describing a unit of information to pass between -[rule targets](#rule-target) along dependency relationships. Typically this -contains information like compiler options, transitive source or output files, -and build metadata. Frequently used in conjunction with [depsets](#depset) to -efficiently store accumulated transitive data. An example of a built-in provider -is `DefaultInfo`. - -Note: The object holding specific data for a given rule target is -referred to as a "provider instance", although sometimes this is conflated with -"provider". - -**See also:** [Provider documentation](/rules/rules#providers) - -### Query (concept) - -The process of analyzing a [build graph](#build-graph) to understand -[target](#target) properties and dependency structures. Bazel supports three -query variants: [query](#query-command), [cquery](#configured-query), and -[aquery](#action-graph-query). - -### query (command) - -A [query](#query-concept) tool that operates over the build's post-[loading -phase](#loading-phase) [target graph](#target-graph). This is relatively fast, -but can't analyze the effects of `select()`, [build flags](#command-flags), -[artifacts](#artifact), or build [actions](#action). - -**See also:** [Query how-to](/docs/query-how-to), [Query reference](/reference/query) - -### Repository cache - -A shared content-addressable cache of files downloaded by Bazel for builds, -shareable across [workspaces](#workspace). Enables offline builds after the -initial download. Commonly used to cache files downloaded through repository -rules like `http_archive` and repository rule APIs like -`repository_ctx.download`. Files are cached only if their SHA-256 checksums are -specified for the download. - -// TODO: ### Repository rule - -### Reproducibility - -The property of a build or test that a set of inputs to the build or test will -always produce the same set of outputs every time, regardless of time, method, -or environment. Note that this does not necessarily imply that the outputs are -[correct](#correctness) or the desired outputs. - -### Rule - -A schema for defining [rule targets](#rule-target) in a `BUILD` file, such as -`cc_library`. From the perspective of a `BUILD` file author, a rule consists of -a set of [attributes](#attributes) and black box logic. The logic tells the -rule target how to produce output [artifacts](#artifact) and pass information to -other rule targets. From the perspective of `.bzl` authors, rules are the -primary way to extend Bazel to support new programming languages and -environments. - -Rules are instantiated to produce rule targets in the -[loading phase](#loading-phase). 
-In the [analysis phase](#analysis-phase), rule
-targets communicate information to their downstream dependencies in the form of
-[providers](#provider), and register [actions](#action) describing how to
-generate their output artifacts. These actions are run in the [execution
-phase](#execution-phase).
-
-Note: Historically the term "rule" has been used to refer to a rule target.
-This usage was inherited from tools like Make, but causes confusion and should
-be avoided for Bazel.
-
-**See also:** [Rules documentation](/rules/rules)
-
-### Rule target
-
-A [target](#target) that is an instance of a rule. Contrasts with file targets
-and package groups. Not to be confused with [rule](#rule).
-
-### Runfiles
-
-The runtime dependencies of an executable [target](#target). Most commonly, the
-executable is the executable output of a test rule, and the runfiles are runtime
-data dependencies of the test. Before the invocation of the executable (during
-`bazel test`), Bazel prepares the tree of runfiles alongside the test executable
-according to their source directory structure.
-
-**See also:** [Runfiles documentation](/rules/rules#runfiles)
-
-### Sandboxing
-
-A technique to isolate a running [action](#action) inside a restricted and
-temporary [execution root](#execution-root), helping to ensure that it doesn’t
-read undeclared inputs or write undeclared outputs. Sandboxing greatly improves
-[hermeticity](#hermeticity), but usually has a performance cost, and requires
-support from the operating system. The performance cost depends on the platform.
-On Linux, it's not significant, but on macOS it can make sandboxing unusable.
-
-### Skyframe
-
-[Skyframe](/reference/skyframe) is the core parallel, functional, and incremental evaluation framework of Bazel.
-
-// TODO: ### Spawn strategy
-
-### Stamping
-
-A feature to embed additional information into Bazel-built
-[artifacts](#artifact). For example, this can be used for source control, build
-time and other workspace or environment-related information for release builds.
-Enabled through the `--workspace_status_command` flag and [rules](/rules/rules) that
-support the stamp attribute.
-
-### Starlark
-
-The extension language for writing [rules](/rules/rules) and [macros](#macro). A
-restricted subset of Python (syntactically and grammatically) aimed for the
-purpose of configuration, and for better performance. Uses the [`.bzl`
-file](#bzl-file) extension. [`BUILD` files](#build-file) use an even more
-restricted version of Starlark (such as no `def` function definitions).
-Formerly known as Skylark.
-
-**See also:** [Starlark language documentation](/rules/language)
-
-// TODO: ### Starlark rules
-
-// TODO: ### Starlark rule sandwich
-
-### Startup flags
-
-The set of flags specified between `bazel` and the [command](#command),
-for example, `bazel --host_jvm_debug build`. These flags modify the
-[configuration](#configuration) of the Bazel server, so any modification to
-startup flags causes a server restart. Startup flags are not specific to any
-command.
-
-### Target
-
-An object that is defined in a [`BUILD` file](#build-file) and identified by a
-[label](#label). Targets represent the buildable units of a workspace from
-the perspective of the end user.
-
-A target that is declared by instantiating a [rule](#rule) is called a [rule
-target](#rule-target). Depending on the rule, these may be runnable (like
-`cc_binary`) or testable (like `cc_test`).
-Rule targets typically depend on
-other targets via their [attributes](#attribute) (such as `deps`); these
-dependencies form the basis of the [target graph](#target-graph).
-
-Aside from rule targets, there are also file targets and [package group](#package-group)
-targets. File targets correspond to [artifacts](#artifact) that are referenced
-within a `BUILD` file. As a special case, the `BUILD` file of any package is
-always considered a source file target in that package.
-
-Targets are discovered during the [loading phase](#loading-phase). During the
-[analysis phase](#analysis-phase), targets are associated with [build
-configurations](#configuration) to form [configured
-targets](#configured-target).
-
-### Target graph
-
-An in-memory graph of [targets](#target) and their dependencies. Produced during
-the [loading phase](#loading-phase) and used as an input to the [analysis
-phase](#analysis-phase).
-
-### Target pattern
-
-A way to specify a group of [targets](#target) on the command line. Commonly
-used patterns are `:all` (all rule targets), `:*` (all rule + file targets),
-`...` (current [package](#package) and all subpackages recursively). Can be used
-in combination, for example, `//...:*` means all rule and file targets in all
-packages recursively from the root of the [workspace](#workspace).
-
-### Tests
-
-Rule [targets](#target) instantiated from test rules, and therefore containing a
-test executable. A return code of zero from the completion of the executable
-indicates test success. The exact contract between Bazel and tests (such as test
-environment variables, test result collection methods) is specified in the [Test
-Encyclopedia](/reference/test-encyclopedia).
-
-### Toolchain
-
-A set of tools to build outputs for a language. Typically, a toolchain includes
-compilers, linkers, interpreters, and/or linters. A toolchain can also vary by
-platform, that is, a Unix compiler toolchain's components may differ for the
-Windows variant, even though the toolchain is for the same language. Selecting
-the right toolchain for the platform is known as toolchain resolution.
-
-### Top-level target
-
-A build [target](#target) is top-level if it’s requested on the Bazel command
-line. For example, if `//:foo` depends on `//:bar`, and `bazel build //:foo` is
-called, then for this build, `//:foo` is top-level, and `//:bar` isn’t
-top-level, although both targets will need to be built. An important difference
-between top-level and non-top-level targets is that [command
-flags](#command-flags) set on the Bazel command line (or via
-[.bazelrc](#bazelrc)) will set the [configuration](#configuration) for top-level
-targets, but might be modified by a [transition](#transition) for non-top-level
-targets.
-
-### Transition
-
-A mapping of [configuration](#configuration) state from one value to another.
-Enables [targets](#target) in the [build graph](#build-graph) to have different
-configurations, even if they were instantiated from the same [rule](#rule). A
-common usage of transitions is with *split* transitions, where certain parts of
-the [target graph](#target-graph) are forked with distinct configurations for
-each fork. For example, one can build an Android APK with native binaries
-compiled for ARM and x86 using split transitions in a single build.
-
-**See also:** [User-defined transitions](/rules/config#user-defined-transitions)
-
-### Tree artifact
-
-An [artifact](#artifact) that represents a collection of files.
-Since these
-files are not themselves artifacts, an [action](#action) operating on them must
-instead register the tree artifact as its input or output.
-
-### Visibility
-
-One of two mechanisms for preventing unwanted dependencies in the build system:
-*target visibility* for controlling whether a [target](#target) can be depended
-upon by other targets; and *load visibility* for controlling whether a `BUILD`
-or `.bzl` file may load a given `.bzl` file. Without context, usually
-"visibility" refers to target visibility.
-
-**See also:** [Visibility documentation](/concepts/visibility)
-
-### Workspace
-
-A directory containing a `WORKSPACE` file and source code for the software you
-want to build. Labels that start with `//` are relative to the workspace
-directory.
-
-### WORKSPACE file
-
-Defines a directory to be a [workspace](#workspace). The file can be empty,
-although it usually contains external repository declarations to fetch
-additional dependencies from the network or local filesystem.
diff --git a/6.5.0/reference/skyframe.mdx b/6.5.0/reference/skyframe.mdx
deleted file mode 100644
index 724e526..0000000
--- a/6.5.0/reference/skyframe.mdx
+++ /dev/null
@@ -1,84 +0,0 @@
----
-title: 'Skyframe'
----
-
-
-The parallel evaluation and incrementality model of Bazel.
-
-## Data model
-
-The data model consists of the following items:
-
-* `SkyValue`. Also called nodes. `SkyValues` are immutable objects that
-  contain all the data built over the course of the build and the inputs of
-  the build. Examples are: input files, output files, targets and configured
-  targets.
-* `SkyKey`. A short immutable name to reference a `SkyValue`, for example,
-  `FILECONTENTS:/tmp/foo` or `PACKAGE://foo`.
-* `SkyFunction`. Builds nodes based on their keys and dependent nodes.
-* Node graph. A data structure containing the dependency relationship between
-  nodes.
-* `Skyframe`. Code name for the incremental evaluation framework Bazel is
-  based on.
-
-## Evaluation
-
-A build consists of evaluating the node that represents the build request (this is the state we are striving for, but there is a lot of legacy code in the way). First its `SkyFunction` is found and called with the key of the top-level `SkyKey`. The function then requests the evaluation of the nodes it needs to evaluate the top-level node, which in turn results in other function invocations, and so on, until the leaf nodes are reached (which are usually nodes representing input files in the file system). Finally, we end up with the value of the top-level `SkyValue`, some side effects (such as output files in the file system) and a directed acyclic graph of the dependencies between the nodes that were involved in the build.
-
-A `SkyFunction` can request `SkyKeys` in multiple passes if it cannot tell in advance all of the nodes it needs to do its job. A simple example is evaluating an input file node that turns out to be a symlink: the function tries to read the file, realizes that it’s a symlink, and thus fetches the file system node representing the target of the symlink. But that itself can be a symlink, in which case the original function will need to fetch its target, too.
-
-The functions are represented in the code by the interface `SkyFunction` and the services provided to it by an interface called `SkyFunction.Environment`. These are the things functions can do:
-
-* Request the evaluation of another node by way of calling `env.getValue`.
If the node is available, its value is returned; otherwise, `null` is returned and the function itself is expected to return `null`. In the latter case, the dependent node is evaluated, and then the original node builder is invoked again, but this time the same `env.getValue` call will return a non-`null` value.
-* Request the evaluation of multiple other nodes by calling `env.getValues()`. This does essentially the same thing, except that the dependent nodes are evaluated in parallel.
-* Do computation during their invocation.
-* Have side effects, for example, writing files to the file system. Care needs to be taken that two different functions do not step on each other’s toes. In general, write side effects (where data flows outwards from Bazel) are okay, read side effects (where data flows inwards into Bazel without a registered dependency) are not, because they are an unregistered dependency and as such, can cause incorrect incremental builds.
-
-`SkyFunction` implementations should not access data in any way other than by requesting dependencies; directly reading the file system, for example, results in Bazel not registering the data dependency on the file that was read, and thus in incorrect incremental builds.
-
-Once a function has enough data to do its job, it should return a non-`null` value indicating completion.
-
-This evaluation strategy has a number of benefits:
-
-* Hermeticity. If functions only request input data by way of depending on other nodes, Bazel can guarantee that if the input state is the same, the same data is returned. If all sky functions are deterministic, this means that the whole build will also be deterministic.
-* Correct and perfect incrementality. If all the input data of all functions is recorded, Bazel can invalidate only the exact set of nodes that need to be invalidated when the input data changes.
-* Parallelism. Since functions can only interact with each other by way of requesting dependencies, functions that do not depend on each other can be run in parallel and Bazel can guarantee that the result is the same as if they were run sequentially.
-
-## Incrementality
-
-Since functions can only access input data by depending on other nodes, Bazel can build up a complete data flow graph from the input files to the output files, and use this information to only rebuild those nodes that actually need to be rebuilt: the reverse transitive closure of the set of changed input files.
-
-In particular, two possible incrementality strategies exist: the bottom-up one and the top-down one. Which one is optimal depends on what the dependency graph looks like.
-
-* During bottom-up invalidation, after a graph is built and the set of changed inputs is known, all nodes that transitively depend on changed files are invalidated. This is optimal if we know that the same top-level node will be built again. Note that bottom-up invalidation requires running `stat()` on all input files of the previous build to determine if they were changed. This can be improved by using `inotify` or a similar mechanism to learn about changed files.
-
-* During top-down invalidation, the transitive closure of the top-level node is checked and only those nodes are kept whose transitive closure is clean. This is better if we know that the current node graph is large, but we only need a small subset of it in the next build: bottom-up invalidation would invalidate the larger graph of the first build, unlike top-down invalidation, which just walks the small graph of the second build.
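To make the bottom-up strategy concrete, here is a toy sketch (ours, not Bazel's actual implementation; the dependency map and node names are made up for illustration) that computes the reverse transitive closure of a set of changed files:

```python
from collections import defaultdict, deque

def bottom_up_invalidate(deps, changed):
    """Returns the reverse transitive closure of `changed`.

    `deps` maps each node to the set of nodes it directly depends on.
    """
    rdeps = defaultdict(set)  # node -> nodes that depend on it
    for node, inputs in deps.items():
        for inp in inputs:
            rdeps[inp].add(node)
    dirty = set(changed)
    queue = deque(changed)
    while queue:
        for parent in rdeps[queue.popleft()]:
            if parent not in dirty:
                dirty.add(parent)
                queue.append(parent)
    return dirty

deps = {"//:bin": {"a.o", "b.o"}, "a.o": {"a.cc"}, "b.o": {"b.cc"}}
assert bottom_up_invalidate(deps, {"a.cc"}) == {"a.cc", "a.o", "//:bin"}
```

Change pruning, described next, would additionally stop this flood at any node whose recomputed value turns out to be unchanged.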
-
-We currently only do bottom-up invalidation.
-
-To get further incrementality, we use _change pruning_: if a node is invalidated, but upon rebuild, it is discovered that its new value is the same as its old value, the nodes that were invalidated due to a change in this node are “resurrected”.
-
-This is useful, for example, if one changes a comment in a C++ file: then the `.o` file generated from it will be the same, thus, we don’t need to call the linker again.
-
-## Incremental Linking / Compilation
-
-The main limitation of this model is that the invalidation of a node is an all-or-nothing affair: when a dependency changes, the dependent node is always rebuilt from scratch, even if a better algorithm exists that could mutate the old value of the node based on the changes. A few examples where this would be useful:
-
-* Incremental linking
-* When a single `.class` file changes in a `.jar`, we could theoretically modify the `.jar` file instead of building it from scratch again.
-
-The reason why Bazel currently does not support these things in a principled way (we have some measure of support for incremental linking, but it’s not implemented within Skyframe) is twofold: we saw only limited performance gains, and it was hard to guarantee that the result of the mutation would be the same as that of a clean rebuild, which matters because Google values builds that are bit-for-bit repeatable.
-
-Until now, we could always achieve good enough performance by simply decomposing an expensive build step and achieving partial re-evaluation that way: for example, Android dexing splits all the classes in an app into multiple groups and dexes them separately. This way, if classes in a group do not change, the dexing does not have to be redone.
-
-## Mapping to Bazel concepts
-
-This is a rough overview of some of the `SkyFunction` implementations Bazel uses to perform a build:
-
-* **FileStateValue**. The result of an `lstat()`. For existing files, we also compute additional information in order to detect changes to the file. This is the lowest level node in the Skyframe graph and has no dependencies.
-* **FileValue**. Used by anything that cares about the actual contents and/or resolved path of a file. Depends on the corresponding `FileStateValue` and any symlinks that need to be resolved (for example, the `FileValue` for `a/b` needs the resolved path of `a` and the resolved path of `a/b`). The distinction between `FileStateValue` and `FileValue` is important because in some cases (for example, when evaluating file system globs such as `srcs=glob(["*/*.java"])`) the contents of the file are not actually needed.
-* **DirectoryListingValue**. Essentially the result of `readdir()`. Depends on the `FileValue` associated with the directory.
-* **PackageValue**. Represents the parsed version of a `BUILD` file (the data structure internally representing the contents of a `BUILD` file). Depends on the `FileValue` of the associated `BUILD` file, and also transitively on any `DirectoryListingValue` that is used to resolve the globs in the package.
-* **ConfiguredTargetValue**. Represents a configured target, which is a tuple of the set of actions generated during the analysis of a target and information provided to configured targets that depend on this one. Depends on the `PackageValue` the corresponding target is in, the `ConfiguredTargetValues` of direct dependencies, and a special node representing the build configuration.
-* **ArtifactValue**. Represents a file in the build, be it a source or an output artifact (artifacts are almost equivalent to files, and are used to refer to files during the actual execution of build steps). For source files, it depends on the `FileValue` of the associated node; for output artifacts, it depends on the `ActionExecutionValue` of whatever action generates the artifact.
-* **ActionExecutionValue**. Represents the execution of an action. Depends on the `ArtifactValues` of its input files. The action it executes is currently contained within its sky key, which is contrary to the concept that sky keys should be small. We are working on solving this discrepancy (note that `ActionExecutionValue` and `ArtifactValue` are unused if we do not run the execution phase on Skyframe).
diff --git a/6.5.0/release/backward-compatibility.mdx b/6.5.0/release/backward-compatibility.mdx
deleted file mode 100644
index 6b52ee8..0000000
--- a/6.5.0/release/backward-compatibility.mdx
+++ /dev/null
@@ -1,64 +0,0 @@
----
-title: 'Backward Compatibility'
----
-
-
-This page provides information on how to handle backward compatibility,
-including how to migrate from one release to another and how to communicate
-incompatible changes.
-
-Bazel is evolving. Minor versions released as part of an
-[LTS major version](/release/versioning#lts-releases) are fully backward-compatible.
-Changes between major LTS releases may contain incompatible changes that require
-some migration effort. For more information on how the Bazel release cadence
-works, see
-[Announcing Bazel Long Term Support (LTS) releases](https://blog.bazel.build/2020/11/10/long-term-support-release.html).
-
-## Summary
-
-1. It is recommended to use `--incompatible_*` flags for breaking changes.
-1. For every `--incompatible_*` flag, a GitHub issue explains
-   the change in behavior and aims to provide a migration recipe.
-1. APIs and behavior guarded by an `--experimental_*` flag can change at any time.
-1. Never run production builds with `--experimental_*` or `--incompatible_*` flags.
-
-## How to follow this policy
-
-* [For Bazel users - how to update Bazel](/versions/updating-bazel)
-* [For contributors - best practices for incompatible changes](/contribute/breaking-changes)
-* [For release managers - how to update issue labels and release](https://github.com/bazelbuild/continuous-integration/tree/master/docs/release-playbook.md)
-
-## What is stable functionality?
-
-In general, APIs or behaviors without `--experimental_...` flags are considered
-stable, supported features in Bazel.
-
-This includes:
-
-* Starlark language and APIs
-* Rules bundled with Bazel
-* Bazel APIs such as Remote Execution APIs or Build Event Protocol
-* Flags and their semantics
-
-## Incompatible changes and migration recipes
-
-For every incompatible change in a new release, the Bazel team aims to provide a
-_migration recipe_ that helps you update your code
-(`BUILD` and `.bzl` files, as well as any Bazel usage in scripts,
-usage of Bazel API, and so on).
-
-Incompatible changes should have an associated `--incompatible_*` flag and a
-corresponding GitHub issue.
-
-## Communicating incompatible changes
-
-The primary source of information about incompatible changes is the set of
-GitHub issues marked with an
-["incompatible-change" label](https://github.com/bazelbuild/bazel/issues?q=label%3Aincompatible-change).
-
-For every incompatible change, the issue specifies the following:
-
-* Name of the flag controlling the incompatible change
-* Description of the changed functionality
-* Migration recipe
-
-When an incompatible change is ready for migration with Bazel at HEAD (therefore, also with the next Bazel rolling release), it should be marked with the `migration-ready` label. The incompatible change issue is closed when the incompatible flag is flipped at HEAD.
diff --git a/6.5.0/release/index.mdx b/6.5.0/release/index.mdx
deleted file mode 100644
index f9aec49..0000000
--- a/6.5.0/release/index.mdx
+++ /dev/null
@@ -1,61 +0,0 @@
----
-title: 'Release Policy'
----
-
-
-Bazel maintains a
-[Long Term Support (LTS)](/release/versioning)
-release model, where a major version is released every nine months and minor
-versions are released monthly. This page covers the Bazel release policy,
-including the release candidates, timelines, announcements, and testing.
-
-Bazel releases can be found on
-[GitHub](https://github.com/bazelbuild/bazel/releases).
-
-## Release candidates
-
-A release candidate for a new version of Bazel is usually created at the
-beginning of every month. The work is tracked by a
-[release bug on GitHub](https://github.com/bazelbuild/bazel/issues?q=is%3Aissue+is%3Aopen+label%3Arelease)
-indicating a target release date, and is assigned to the current release manager.
-Release candidates should pass all Bazel unit tests, and show no unwanted
-regressions in the projects tested on [Buildkite](https://buildkite.com/bazel).
-
-Release candidates are announced on
-[bazel-discuss](https://groups.google.com/g/bazel-discuss).
-Over the following days, the Bazel team monitors community bug reports for any
-regressions in the candidates.
-
-## Releasing
-
-If no regressions are discovered, the candidate is officially released after
-one week. However, regressions can delay the release of a release candidate. If
-regressions are found, the Bazel team applies corresponding cherry-picks to the
-release candidate to fix those regressions. If no further regressions are found
-for two consecutive business days beginning one week after the first
-release candidate, the candidate is released.
-
-New features are not cherry-picked into a release candidate after it is cut.
-Moreover, if a new feature is buggy, the feature may be rolled back from a
-release candidate. Only bugs that have the potential to highly impact or break
-the release build are fixed in a release candidate after it is cut.
-
-A release only goes out on a day when the next day is a business day.
-
-If a critical issue is found in the latest release, the Bazel team creates a
-patch release by applying the fix to the release. Because this patch updates an
-existing release instead of creating a new one, the patch release candidate can
-be released after two business days.
-
-## Testing
-
-A nightly build of all projects running on
-[ci.bazel.build](https://github.com/bazelbuild/continuous-integration/blob/master/buildkite/README.md) is run, using both Bazel
-binaries built at head and release binaries. Projects that will be impacted by a
-breaking change are notified.
-
-When a release candidate is issued, other Google projects like
-[TensorFlow](https://tensorflow.org) are tested on their complete
-test suites using the release candidate binaries. If you have a critical project
-using Bazel, we recommend that you establish an automated testing process that
-tracks the current release candidate and reports any regressions.
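As one illustration of such a tracking process (an editorial sketch, not an official tool: it assumes GitHub's public releases API, which lists the newest releases first, and the third-party `requests` package):

```python
import requests

def latest_release_candidate():
    """Returns the tag of the newest Bazel prerelease, or None if there is none."""
    releases = requests.get(
        "https://api.github.com/repos/bazelbuild/bazel/releases", timeout=30
    ).json()
    for release in releases:  # newest first
        if release["prerelease"]:
            return release["tag_name"]
    return None

if __name__ == "__main__":
    print(latest_release_candidate() or "no release candidate published")
```

A CI job could run this periodically, build your project with the reported version, and file a report when a regression appears.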
diff --git a/6.5.0/release/versioning.mdx b/6.5.0/release/versioning.mdx deleted file mode 100644 index 5c48b13..0000000 --- a/6.5.0/release/versioning.mdx +++ /dev/null @@ -1,94 +0,0 @@ ---- -title: 'Release Versioning' ---- - - -Bazel 4.0 and higher provides support for two release tracks: long term support -(LTS) releases and rolling releases. This page covers versioning in Bazel, the -types of releases, and the benefits of those releases for Bazel users and -contributors. - -## Understanding versioning on Bazel - -Bazel uses a _major.minor.patch_ semantic versioning scheme. - -* A _major release_ contains features that are not backward compatible with the - previous release. -* A _minor release_ contains new backward-compatible features. -* A _patch release_ contains minor changes and bug fixes. - -Using version 3.5.1 as an example, a new release of each type would result in -these version numbers: - -* Major: 4.0 -* Minor: 3.6 -* Patch: 3.5.2 - -## Bazel's release cycle - -Bazel continually publishes rolling releases. Every major version is an LTS -release. You can choose to follow either release cadence - updating from one -LTS release to the next, or updating with each minor version release. - -The image shows both rolling and LTS releases, and the expected support for -each. - -![Roadmap](/docs/images/roadmap.png "Roadmap") - -**Figure 1.** Rolling and LTS releases. - -## Release branches - -Each major version becomes a separate development branch on release. You can -receive fixes to critical bugs on that branch without having to update to the -Bazel release at head. Additional features on your major version branch become -minor releases and the highest version on the branch is the supported version. - -Each Bazel release is paired with a list of recommended rule versions that work -together and there is strict backwards compatibility within each branch. - -## LTS releases - -An LTS release is a major version (such as, 4.0) that is supported for 3 years -after its release. -A major version is released approximately every nine months. - -Ongoing development on a release branch results in minor versions. - -You can choose to pin your project to a major release and update to a newer -version in your own time. This gives you time to preview upcoming changes and -adapt to them in advance. - -## Rolling releases - -Rolling releases are periodically cut from Bazel's main branch. -This release cadence involves a continuous delivery of preview releases of the -next major Bazel version, which are in sync with Google’s internal Blaze -releases. - -Note that a new rolling release can contain breaking changes that are -incompatible with previous releases. - -Rolling releases are tested on Bazel's test suite on Bazel CI and -Google’s internal test suite. Incompatible flags may be -used to ease the burden of migrating to new functionality, but default behaviors -may change with any rolling release. (You can also use rolling releases to -preview the next LTS version. For example, `5.0.0-pre.20210604.6` is based on a -candidate cut on 2021-06-04 and represents a milestone towards the 5.0 LTS -release.) - -You can download the latest rolling release from -[GitHub](https://github.com/bazelbuild/bazel/releases). -Alternatively, you can set up -[Bazelisk v1.9.0](https://github.com/bazelbuild/bazelisk/releases/tag/v1.9.0) -(or later) to use a specific version name or the -“rolling” identifier, which uses the most recent rolling release. 
For more -details, see the -[Bazelisk documentation](https://github.com/bazelbuild/bazelisk#how-does-bazelisk-know-which-bazel-version-to-run). - -## Updating versions - -* For more information on updating your Bazel version, see - [Updating Bazel](/versions/updating-bazel). -* For more information on contributing updates to new Bazel releases, see - [Contributing to Bazel](/contribute). diff --git a/6.5.0/remote/bep-examples.mdx b/6.5.0/remote/bep-examples.mdx deleted file mode 100644 index 8476241..0000000 --- a/6.5.0/remote/bep-examples.mdx +++ /dev/null @@ -1,199 +0,0 @@ ---- -title: 'Build Event Protocol Examples' ---- - - -The full specification of the Build Event Protocol can be found in its protocol -buffer definition. However, it might be helpful to build up some intuition -before looking at the specification. - -Consider a simple Bazel workspace that consists of two empty shell scripts -`foo.sh` and `foo_test.sh` and the following `BUILD` file: - -```bash -sh_library( - name = "foo_lib", - srcs = ["foo.sh"], -) - -sh_test( - name = "foo_test", - srcs = ["foo_test.sh"], - deps = [":foo_lib"], -) -``` - -When running `bazel test ...` on this project the build graph of the generated -build events will resemble the graph below. The arrows indicate the -aforementioned parent and child relationship. Note that some build events and -most fields have been omitted for brevity. - -![bep-graph](/docs/images/bep-graph.png "BEP graph") - -**Figure 1.** BEP graph. - -Initially, a `BuildStarted` event is published. The event informs us that the -build was invoked through the `bazel test` command and announces child events: - -* `OptionsParsed` -* `WorkspaceStatus` -* `CommandLine` -* `UnstructuredCommandLine` -* `BuildMetadata` -* `BuildFinished` -* `PatternExpanded` -* `Progress` - -The first three events provide information about how Bazel was invoked. - -The `PatternExpanded` build event provides insight -into which specific targets the `...` pattern expanded to: -`//foo:foo_lib` and `//foo:foo_test`. It does so by declaring two -`TargetConfigured` events as children. Note that the `TargetConfigured` event -declares the `Configuration` event as a child event, even though `Configuration` -has been posted before the `TargetConfigured` event. - -Besides the parent and child relationship, events may also refer to each other -using their build event identifiers. For example, in the above graph the -`TargetComplete` event refers to the `NamedSetOfFiles` event in its `fileSets` -field. - -Build events that refer to files don’t usually embed the file -names and paths in the event. Instead, they contain the build event identifier -of a `NamedSetOfFiles` event, which will then contain the actual file names and -paths. The `NamedSetOfFiles` event allows a set of files to be reported once and -referred to by many targets. This structure is necessary because otherwise in -some cases the Build Event Protocol output size would grow quadratically with -the number of files. A `NamedSetOfFiles` event may also not have all its files -embedded, but instead refer to other `NamedSetOfFiles` events through their -build event identifiers. - -Below is an instance of the `TargetComplete` event for the `//foo:foo_lib` -target from the above graph, printed in protocol buffer’s JSON representation. -The build event identifier contains the target as an opaque string and refers to -the `Configuration` event using its build event identifier. The event does not -announce any child events. 
The payload contains information about whether the
-target was built successfully, the set of output files, and the kind of target
-built.
-
-```json
-{
-  "id": {
-    "targetCompleted": {
-      "label": "//foo:foo_lib",
-      "configuration": {
-        "id": "544e39a7f0abdb3efdd29d675a48bc6a"
-      }
-    }
-  },
-  "completed": {
-    "success": true,
-    "outputGroup": [{
-      "name": "default",
-      "fileSets": [{
-        "id": "0"
-      }]
-    }],
-    "targetKind": "sh_library rule"
-  }
-}
-```
-
-## Aspect Results in BEP
-
-Ordinary builds evaluate actions associated with `(target, configuration)`
-pairs. When building with [aspects](/rules/aspects) enabled, Bazel
-additionally evaluates targets associated with `(target, configuration,
-aspect)` triples, for each target affected by a given enabled aspect.
-
-Evaluation results for aspects are available in BEP despite the absence of
-aspect-specific event types. For each `(target, configuration)` pair with an
-applicable aspect, Bazel publishes an additional `TargetConfigured` and
-`TargetComplete` event bearing the result from applying the aspect to the
-target. For example, if `//:foo_lib` is built with
-`--aspects=aspects/myaspect.bzl%custom_aspect`, this event would also appear in
-the BEP:
-
-```json
-{
-  "id": {
-    "targetCompleted": {
-      "label": "//foo:foo_lib",
-      "configuration": {
-        "id": "544e39a7f0abdb3efdd29d675a48bc6a"
-      },
-      "aspect": "aspects/myaspect.bzl%custom_aspect"
-    }
-  },
-  "completed": {
-    "success": true,
-    "outputGroup": [{
-      "name": "default",
-      "fileSets": [{
-        "id": "1"
-      }]
-    }]
-  }
-}
-```
-
-Note: The only difference between the IDs is the presence of the `aspect`
-field. A tool that does not check the `aspect` ID field and accumulates output
-files by target may conflate target outputs with aspect outputs.
-
-## Consuming `NamedSetOfFiles`
-
-Determining the artifacts produced by a given target (or aspect) is a common
-BEP use-case that can be done efficiently with some preparation. This section
-discusses the recursive, shared structure offered by the `NamedSetOfFiles`
-event, which matches the structure of a Starlark [Depset](/rules/depsets).
-
-Consumers must take care to avoid quadratic algorithms when processing
-`NamedSetOfFiles` events because large builds can contain tens of thousands of
-such events, requiring hundreds of millions of operations in a traversal with
-quadratic complexity.
-
-![namedsetoffiles-bep-graph](/docs/images/namedsetoffiles-bep-graph.png "NamedSetOfFiles BEP graph")
-
-**Figure 2.** `NamedSetOfFiles` BEP graph.
-
-A `NamedSetOfFiles` event always appears in the BEP stream *before* a
-`TargetComplete` or `NamedSetOfFiles` event that references it. This is the
-inverse of the "parent-child" event relationship, where all but the first event
-appears after at least one event announcing it. A `NamedSetOfFiles` event is
-announced by a `Progress` event with no semantics.
-
-Given these ordering and sharing constraints, a typical consumer must buffer all
-`NamedSetOfFiles` events until the BEP stream is exhausted. The following Python
-code demonstrates how to populate a map from target/aspect to built artifacts in
-the "default" output group, and how to process the outputs for a subset of built
-targets/aspects:
-
-```python
-named_sets = {}  # type: dict[str, NamedSetOfFiles]
-outputs = {}  # type: dict[tuple[str, str, str], dict[str, set[str]]]
-
-for event in stream:
-    kind = event.id.WhichOneof("id")
-    if kind == "named_set":
-        named_sets[event.id.named_set.id] = event.named_set_of_files
-    elif kind == "target_completed":
-        tc = event.id.target_completed
-        target_id = (tc.label, tc.configuration.id, tc.aspect)
-        outputs[target_id] = {}
-        for group in event.completed.output_group:
-            outputs[target_id][group.name] = {fs.id for fs in group.file_sets}
-
-# `relevant_subset` and `process_file` are supplied by the consumer.
-for result_id in relevant_subset(outputs.keys()):
-    # Copy the roots so the traversal neither mutates `outputs` nor calls
-    # .add() on a list when the "default" output group is absent.
-    visit = set(outputs[result_id].get("default", ()))
-    seen_sets = set(visit)
-    while visit:
-        set_name = visit.pop()
-        s = named_sets[set_name]
-        for f in s.files:
-            process_file(result_id, f)
-        for fs in s.file_sets:
-            if fs.id not in seen_sets:
-                visit.add(fs.id)
-                seen_sets.add(fs.id)
-```
diff --git a/6.5.0/remote/bep-glossary.mdx b/6.5.0/remote/bep-glossary.mdx
deleted file mode 100644
index 75c0f3d..0000000
--- a/6.5.0/remote/bep-glossary.mdx
+++ /dev/null
@@ -1,417 +0,0 @@
----
-title: 'Build Event Protocol Glossary'
----
-
-
-
-Each BEP event type has its own semantics, minimally documented in
-[build\_event\_stream.proto](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto).
-The following glossary describes each event type.
-
-## Aborted
-
-Unlike other events, `Aborted` does not have a corresponding ID type, because
-the `Aborted` event *replaces* events of other types. This event indicates that
-the build terminated early and the event ID it appears under was not produced
-normally. `Aborted` contains an enum and human-friendly description to explain
-why the build did not complete.
-
-For example, if a build is evaluating a target when the user interrupts Bazel,
-BEP contains an event like the following:
-
-```json
-{
-  "id": {
-    "targetCompleted": {
-      "label": "//:foo",
-      "configuration": {
-        "id": "544e39a7f0abdb3efdd29d675a48bc6a"
-      }
-    }
-  },
-  "aborted": {
-    "reason": "USER_INTERRUPTED"
-  }
-}
-```
-
-## ActionExecuted
-
-Provides details about the execution of a specific
-[Action](/rules/lib/actions) in a build. By default, this event is
-included in the BEP only for failed actions, to support identifying the root cause
-of build failures. Users may set the `--build_event_publish_all_actions` flag
-to include all `ActionExecuted` events.
-
-## BuildFinished
-
-A single `BuildFinished` event is sent after the command is complete and
-includes the exit code for the command. This event provides authoritative
-success/failure information.
-
-## BuildMetadata
-
-Contains the parsed contents of the `--build_metadata` flag. This event exists
-to support Bazel integration with other tooling by plumbing external data (such as
-identifiers).
-
-## BuildMetrics
-
-A single `BuildMetrics` event is sent at the end of every command and includes
-counters/gauges useful for quantifying the build tool's behavior during the
-command. These metrics indicate work actually done and do not count cached
-work that is reused.
-
-Note that `memory_metrics` may not be populated if there was no Java garbage
-collection during the command's execution. Users may set the
-`--memory_profile=/dev/null` option, which forces the garbage
-collector to run at the end of the command to populate `memory_metrics`.
-
-```json
-{
-  "id": {
-    "buildMetrics": {}
-  },
-  "buildMetrics": {
-    "actionSummary": {
-      "actionsExecuted": "1"
-    },
-    "memoryMetrics": {},
-    "targetMetrics": {
-      "targetsLoaded": "9",
-      "targetsConfigured": "19"
-    },
-    "packageMetrics": {
-      "packagesLoaded": "5"
-    },
-    "timingMetrics": {
-      "cpuTimeInMs": "1590",
-      "wallTimeInMs": "359"
-    }
-  }
-}
-```
-
-## BuildStarted
-
-The first event in a BEP stream, `BuildStarted` includes metadata describing the
-command before any meaningful work begins.
-
-## BuildToolLogs
-
-A single `BuildToolLogs` event is sent at the end of a command, including URIs
-of files generated by the build tool that may aid in understanding or debugging
-build tool behavior. Some information may be included inline.
-
-```json
-{
-  "id": {
-    "buildToolLogs": {}
-  },
-  "lastMessage": true,
-  "buildToolLogs": {
-    "log": [
-      {
-        "name": "elapsed time",
-        "contents": "MC4xMjEwMDA="
-      },
-      {
-        "name": "process stats",
-        "contents": "MSBwcm9jZXNzOiAxIGludGVybmFsLg=="
-      },
-      {
-        "name": "command.profile.gz",
-        "uri": "file:///tmp/.cache/bazel/_bazel_foo/cde87985ad0bfef34eacae575224b8d1/command.profile.gz"
-      }
-    ]
-  }
-}
-```
-
-## CommandLine
-
-The BEP contains multiple `CommandLine` events containing representations of all
-command-line arguments (including options and uninterpreted arguments).
-Each `CommandLine` event has a label in its `StructuredCommandLineId` that
-indicates which representation it conveys; three such events appear in the BEP:
-
-* `"original"`: Reconstructed commandline as Bazel received it from the Bazel
-  client, without startup options sourced from .rc files.
-* `"canonical"`: The effective commandline with .rc files expanded and
-  invocation policy applied.
-* `"tool"`: Populated from the `--experimental_tool_command_line` option. This
-  is useful to convey the command-line of a tool wrapping Bazel through the BEP.
-  This could be a base64-encoded `CommandLine` binary protocol buffer message
-  which is used directly, or a string which is parsed but not interpreted (as
-  the tool's options may differ from Bazel's).
-
-## Configuration
-
-A `Configuration` event is sent for every [`configuration`](/rules/config)
-used in the top-level targets in a build. At least one configuration event is
-always present. The `id` is reused by the `TargetConfigured` and
-`TargetComplete` event IDs and is necessary to disambiguate those events in
-multi-configuration builds.
-
-```json
-{
-  "id": {
-    "configuration": {
-      "id": "a5d130b0966b4a9ca2d32725aa5baf40e215bcfc4d5cdcdc60f5cc5b4918903b"
-    }
-  },
-  "configuration": {
-    "mnemonic": "k8-fastbuild",
-    "platformName": "k8",
-    "cpu": "k8",
-    "makeVariable": {
-      "COMPILATION_MODE": "fastbuild",
-      "TARGET_CPU": "k8",
-      "GENDIR": "bazel-out/k8-fastbuild/bin",
-      "BINDIR": "bazel-out/k8-fastbuild/bin"
-    }
-  }
-}
-```
-
-## ConvenienceSymlinksIdentified
-
-**Experimental.** If the `--experimental_convenience_symlinks_bep_event`
-option is set, a single `ConvenienceSymlinksIdentified` event is produced by
-`build` commands to indicate how symlinks in the workspace should be managed.
-This enables building tools that invoke Bazel remotely and then arrange the local
-workspace as if Bazel had been run locally.
-
-```json
-{
-  "id": {
-    "convenienceSymlinksIdentified":{}
-  },
-  "convenienceSymlinksIdentified": {
-    "convenienceSymlinks": [
-      {
-        "path": "bazel-bin",
-        "action": "CREATE",
-        "target": "execroot/google3/bazel-out/k8-fastbuild/bin"
-      },
-      {
-        "path": "bazel-genfiles",
-        "action": "CREATE",
-        "target": "execroot/google3/bazel-out/k8-fastbuild/genfiles"
-      },
-      {
-        "path": "bazel-out",
-        "action": "CREATE",
-        "target": "execroot/google3/bazel-out"
-      }
-    ]
-  }
-}
-```
-
-## Fetch
-
-Indicates that a Fetch operation occurred as a part of the command execution.
-Unlike other events, if a cached fetch result is re-used, this event does not
-appear in the BEP stream.
-
-## NamedSetOfFiles
-
-`NamedSetOfFiles` events report a structure matching a
-[`depset`](/rules/depsets) of files produced during command evaluation.
-Transitively included depsets are identified by `NamedSetOfFilesId`.
-
-For more information on interpreting a stream's `NamedSetOfFiles` events, see the
-[BEP examples page](/docs/bep-examples#consuming-namedsetoffiles).
-
-## OptionsParsed
-
-A single `OptionsParsed` event lists all options applied to the command,
-separating startup options from command options. It also includes the
-[InvocationPolicy](/reference/command-line-reference#flag--invocation_policy), if any.
-
-```json
-{
-  "id": {
-    "optionsParsed": {}
-  },
-  "optionsParsed": {
-    "startupOptions": [
-      "--max_idle_secs=10800",
-      "--noshutdown_on_low_sys_mem",
-      "--connect_timeout_secs=30",
-      "--output_user_root=/tmp/.cache/bazel/_bazel_foo",
-      "--output_base=/tmp/.cache/bazel/_bazel_foo/a61fd0fbee3f9d6c1e30d54b68655d35",
-      "--deep_execroot",
-      "--expand_configs_in_place",
-      "--idle_server_tasks",
-      "--write_command_log",
-      "--nowatchfs",
-      "--nofatal_event_bus_exceptions",
-      "--nowindows_enable_symlinks",
-      "--noclient_debug"
-    ],
-    "cmdLine": [
-      "--enable_platform_specific_config",
-      "--build_event_json_file=/tmp/bep.json"
-    ],
-    "explicitCmdLine": [
-      "--build_event_json_file=/tmp/bep.json"
-    ],
-    "invocationPolicy": {}
-  }
-}
-```
-
-## PatternExpanded
-
-`PatternExpanded` events indicate the set of all targets that match the patterns
-supplied on the commandline. For successful commands, a single event is present
-with all patterns in the `PatternExpandedId` and all targets in the
-`PatternExpanded` event's *children*. If the pattern expands to any
-`test_suite`s, the children also include the test targets contained in the
-`test_suite`. For each pattern that fails to resolve, BEP contains an
-additional [`Aborted`](#aborted) event with a `PatternExpandedId` identifying
-the pattern.
-
-```json
-{
-  "id": {
-    "pattern": {
-      "pattern":["//base:all"]
-    }
-  },
-  "children": [
-    {"targetConfigured":{"label":"//base:foo"}},
-    {"targetConfigured":{"label":"//base:foobar"}}
-  ],
-  "expanded": {
-    "testSuiteExpansions": {
-      "suiteLabel": "//base:suite",
-      "testLabels": "//base:foo_test"
-    }
-  }
-}
-```
-
-## Progress
-
-Progress events contain the standard output and standard error produced by Bazel
-during command execution. These events are also auto-generated as needed to
-announce events that have not been announced by a logical "parent" event (in
-particular, [NamedSetOfFiles](#namedsetoffiles)).
-
-## TargetComplete
-
-For each `(target, configuration, aspect)` combination that completes the
-execution phase, a `TargetComplete` event is included in BEP. The event contains
-the target's success/failure and the target's requested output groups.
- -```json -{ - "id": { - "targetCompleted": { - "label": "//examples/py:bep", - "configuration": { - "id": "a5d130b0966b4a9ca2d32725aa5baf40e215bcfc4d5cdcdc60f5cc5b4918903b" - } - } - }, - "completed": { - "success": true, - "outputGroup": [ - { - "name": "default", - "fileSets": [ - { - "id": "0" - } - ] - } - ] - } -} -``` - -## TargetConfigured - -For each Target that completes the analysis phase, a `TargetConfigured` event is -included in BEP. This is the authoritative source for a target's "rule kind" -attribute. The configuration(s) applied to the target appear in the announced -*children* of the event. - -For example, building with the `--experimental_multi_cpu` options may produce -the following `TargetConfigured` event for a single target with two -configurations: - -```json -{ - "id": { - "targetConfigured": { - "label": "//starlark_configurations/multi_arch_binary:foo" - } - }, - "children": [ - { - "targetCompleted": { - "label": "//starlark_configurations/multi_arch_binary:foo", - "configuration": { - "id": "c62b30c8ab7b9fc51a05848af9276529842a11a7655c71327ade26d7c894c818" - } - } - }, - { - "targetCompleted": { - "label": "//starlark_configurations/multi_arch_binary:foo", - "configuration": { - "id": "eae0379b65abce68d54e0924c0ebcbf3d3df26c6e84ef7b2be51e8dc5b513c99" - } - } - } - ], - "configured": { - "targetKind": "foo_binary rule" - } -} -``` - -## TargetSummary - -For each `(target, configuration)` pair that is executed, a `TargetSummary` -event is included with an aggregate success result encompassing the configured -target's execution and all aspects applied to that configured target. - -## TestResult - -If testing is requested, a `TestResult` event is sent for each test attempt, -shard, and run per test. This allows BEP consumers to identify precisely which -test actions failed their tests and identify the test outputs (such as logs, -test.xml files) for each test action. - -## TestSummary - -If testing is requested, a `TestSummary` event is sent for each test `(target, -configuration)`, containing information necessary to interpret the test's -results. The number of attempts, shards and runs per test are included to enable -BEP consumers to differentiate artifacts across these dimensions. The attempts -and runs per test are considered while producing the aggregate `TestStatus` to -differentiate `FLAKY` tests from `FAILED` tests. - -## UnstructuredCommandLine - -Unlike [CommandLine](#commandline), this event carries the unparsed commandline -flags in string form as encountered by the build tool after expanding all -[`.bazelrc`](/docs/bazelrc) files and -considering the `--config` flag. - -The `UnstructuredCommandLine` event may be relied upon to precisely reproduce a -given command execution. - -## WorkspaceConfig - -A single `WorkspaceConfig` event contains configuration information regarding the -workspace, such as the execution root. - -## WorkspaceStatus - -A single `WorkspaceStatus` event contains the result of the [workspace status -command](/docs/user-manual#workspace-status). diff --git a/6.5.0/remote/bep.mdx b/6.5.0/remote/bep.mdx deleted file mode 100644 index 9beccca..0000000 --- a/6.5.0/remote/bep.mdx +++ /dev/null @@ -1,149 +0,0 @@ ---- -title: 'Build Event Protocol' ---- - - - -The [Build Event -Protocol](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto) -(BEP) allows third-party programs to gain insight into a Bazel invocation. 
For -example, you could use the BEP to gather information for an IDE -plugin or a dashboard that displays build results. - -The protocol is a set of [protocol -buffer](https://developers.google.com/protocol-buffers/) messages with some -semantics defined on top of it. It includes information about build and test -results, build progress, the build configuration and much more. The BEP is -intended to be consumed programmatically and makes parsing Bazel’s -command line output a thing of the past. - -The Build Event Protocol represents information about a build as events. A -build event is a protocol buffer message consisting of a build event identifier, -a set of child event identifiers, and a payload. - -* __Build Event Identifier:__ Depending on the kind of build event, it might be -an [opaque -string](https://github.com/bazelbuild/bazel/blob/16a107d/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto#L91) -or [structured -information](https://github.com/bazelbuild/bazel/blob/16a107d/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto#L123) -revealing more about the build event. A build event identifier is unique within -a build. - -* __Children:__ A build event may announce other build events, by including -their build event identifiers in its [children -field](https://github.com/bazelbuild/bazel/blob/16a107d/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto#L469). -For example, the `PatternExpanded` build event announces the targets it expands -to as children. The protocol guarantees that all events, except for the first -event, are announced by a previous event. - -* __Payload:__ The payload contains structured information about a build event, -encoded as a protocol buffer message specific to that event. Note that the -payload might not be the expected type, but could be an `Aborted` message -if the build aborted prematurely. - -### Build event graph - -All build events form a directed acyclic graph through their parent and child -relationship. Every build event except for the initial build event has one or -more parent events. Please note that not all parent events of a child event must -necessarily be posted before it. When a build is complete (succeeded or failed) -all announced events will have been posted. In case of a Bazel crash or a failed -network transport, some announced build events may never be posted. - -The event graph's structure reflects the lifecycle of a command. Every BEP -graph has the following characteristic shape: - -1. The root event is always a [`BuildStarted`](/docs/bep-glossary#buildstarted) - event. All other events are its descendants. -1. Immediate children of the BuildStarted event contain metadata about the - command. -1. Events containing data produced by the command, such as files built and test - results, appear before the [`BuildFinished`](/docs/bep-glossary#buildfinished) - event. -1. The [`BuildFinished`](/docs/bep-glossary#buildfinished) event *may* be followed - by events containing summary information about the build (for example, metric - or profiling data). - -## Consuming Build Event Protocol - -### Consume in binary format - -To consume the BEP in a binary format: - -1. Have Bazel serialize the protocol buffer messages to a file by specifying the - option `--build_event_binary_file=/path/to/file`. The file will contain - serialized protocol buffer messages with each message being length delimited. 
- Each message is prefixed with its length encoded as a variable length integer. - This format can be read using the protocol buffer library’s - [`parseDelimitedFrom(InputStream)`](https://developers.google.com/protocol-buffers/docs/reference/java/com/google/protobuf/AbstractParser#parseDelimitedFrom-java.io.InputStream-) - method. - -2. Then, write a program that extracts the relevant information from the - serialized protocol buffer message. - -### Consume in text or JSON formats - -The following Bazel command line flags will output the BEP in -human-readable formats, such as text and JSON: - -``` ---build_event_text_file ---build_event_json_file -``` - -## Build Event Service - -The [Build Event -Service](https://github.com/googleapis/googleapis/blob/master/google/devtools/build/v1/publish_build_event.proto) -Protocol is a generic [gRPC](https://www.grpc.io) service for publishing build events. The Build Event -Service protocol is independent of the BEP and treats BEP events as opaque bytes. -Bazel ships with a gRPC client implementation of the Build Event Service protocol that -publishes Build Event Protocol events. One can specify the endpoint to send the -events to using the `--bes_backend=HOST:PORT` flag. If your backend uses gRPC, -you must prefix the address with the appropriate scheme: `grpc://` for plaintext -gRPC and `grpcs://` for gRPC with TLS enabled. - -### Build Event Service flags - -Bazel has several flags related to the Build Event Service protocol, including: - -* `--bes_backend` -* `--[no]bes_best_effort` -* `--[no]bes_lifecycle_events` -* `--bes_results_url` -* `--bes_timeout` -* `--project_id` - -For a description of each of these flags, see the -[Command-Line Reference](/reference/command-line-reference). - -### Authentication and security - -Bazel’s Build Event Service implementation also supports authentication and TLS. -These settings can be controlled using the below flags. Please note that these -flags are also used for Bazel’s Remote Execution. This implies that the Build -Event Service and Remote Execution Endpoints need to share the same -authentication and TLS infrastructure. - -* `--[no]google_default_credentials` -* `--google_credentials` -* `--google_auth_scopes` -* `--tls_certificate` -* `--[no]tls_enabled` - -For a description of each of these flags, see the -[Command-Line Reference](/reference/command-line-reference). - -### Build Event Service and remote caching - -The BEP typically contains many references to log files (test.log, test.xml, -etc. ) stored on the machine where Bazel is running. A remote BES server -typically can't access these files as they are on different machines. A way to -work around this issue is to use Bazel with [remote -caching](/docs/remote-caching). -Bazel will upload all output files to the remote cache (including files -referenced in the BEP) and the BES server can then fetch the referenced files -from the cache. - -See [GitHub issue 3689](https://github.com/bazelbuild/bazel/issues/3689) for -more details. diff --git a/6.5.0/remote/cache-local.mdx b/6.5.0/remote/cache-local.mdx deleted file mode 100644 index e022f6d..0000000 --- a/6.5.0/remote/cache-local.mdx +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: 'Debugging Remote Cache Hits for Local Execution' ---- - - -This page describes how to investigate cache misses in the context of local -execution. 
- -This page assumes that you have a build and/or test that successfully builds -locally and is set up to utilize remote caching, and that you want to ensure -that the remote cache is being effectively utilized. - -For tips on how to check your cache hit rate and how to compare the execution -logs between two Bazel invocations, see -[Debugging Remote Cache Hits for Remote Execution](/docs/remote-execution-caching-debug). -Everything presented in that guide also applies to remote caching with local -execution. However, local execution presents some additional challenges. - -## Checking your cache hit rate - -Successful remote cache hits will show up in the status line, similar to -[Cache Hits rate with Remote -Execution](/docs/remote-execution-caching-debug#check-cache-hits). - -In the standard output of your Bazel run, you will see something like the -following: - -```none {:.devsite-disable-click-to-copy} - INFO: 7 processes: 3 remote cache hit, 4 linux-sandbox. -``` - -This means that out of 7 attempted actions, 3 got a remote cache hit and 4 -actions did not have cache hits and were executed locally using `linux-sandbox` -strategy. Local cache hits are not included in this summary. If you are getting -0 processes (or a number lower than expected), run `bazel clean` followed by -your build/test command. - -## Troubleshooting cache hits - -If you are not getting the cache hit rate you are expecting, do the following: - -### Ensure successful communication with the remote endpoint - -To ensure your build is successfully communicating with the remote cache, follow -the steps in this section. - -1. Check your output for warnings - - With remote execution, a failure to talk to the remote endpoint would fail - your build. On the other hand, a cacheable local build would not fail if it - cannot cache. Check the output of your Bazel invocation for warnings, such - as: - - ```none {:.devsite-disable-click-to-copy} - WARNING: Error reading from the remote cache: - ``` - - - or - - ```none {:.devsite-disable-click-to-copy} - WARNING: Error writing to the remote cache: - ``` - - - Such warnings will be followed by the error message detailing the connection - problem that should help you debug: for example, mistyped endpoint name or - incorrectly set credentials. Find and address any such errors. If the error - message you see does not give you enough information, try adding - `--verbose_failures`. - -2. Follow the steps from [Troubleshooting cache hits for remote - execution](/docs/remote-execution-caching-debug#troubleshooting_cache_hits) to - ensure that your cache-writing Bazel invocations are able to get cache hits - on the same machine and across machines. - -3. Ensure your cache-reading Bazel invocations can get cache hits. - - a. Since cache-reading Bazel invocations will have a different command-line set - up, take additional care to ensure that they are properly set up to - communicate with the remote cache. Ensure the `--remote_cache` flag is set - and there are no warnings in the output. - - b. Ensure your cache-reading Bazel invocations build the same targets as the - cache-writing Bazel invocations. - - c. Follow the same steps as to [ensure caching across - machines](/docs/remote-execution-caching-debug#caching-across-machines), - to ensure caching from your cache-writing Bazel invocation to your - cache-reading Bazel invocation. 
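When scripting the checks above, the hit counts can be read straight off the status line quoted earlier. A small helper along these lines (an editorial sketch; the parsing is ours, not a Bazel API) computes the remote hit rate:

```python
import re

def remote_cache_hit_rate(info_line):
    """Parses a line like 'INFO: 7 processes: 3 remote cache hit, 4 linux-sandbox.'"""
    total = int(re.search(r"(\d+) processes", info_line).group(1))
    hit = re.search(r"(\d+) remote cache hit", info_line)
    return (int(hit.group(1)) if hit else 0) / total

line = "INFO: 7 processes: 3 remote cache hit, 4 linux-sandbox."
assert round(remote_cache_hit_rate(line), 2) == 0.43
```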
diff --git a/6.5.0/remote/cache-remote.mdx b/6.5.0/remote/cache-remote.mdx
deleted file mode 100644
index fb40ece..0000000
--- a/6.5.0/remote/cache-remote.mdx
+++ /dev/null
@@ -1,178 +0,0 @@
----
-title: 'Debugging Remote Cache Hits for Remote Execution'
----
-
-
-This page describes how to check your cache hit rate and how to investigate
-cache misses in the context of remote execution.
-
-This page assumes that you have a build and/or test that successfully
-utilizes remote execution, and you want to ensure that you are effectively
-utilizing the remote cache.
-
-## Checking your cache hit rate
-
-In the standard output of your Bazel run, look at the `INFO` line that lists
-processes, which roughly correspond to Bazel actions. That line details
-where the action was run. Look for the `remote` label, which indicates an action
-executed remotely, `linux-sandbox` for actions executed in a local sandbox,
-and other values for other execution strategies. An action whose result came
-from a remote cache is displayed as `remote cache hit`.
-
-For example:
-
-```none {:.devsite-disable-click-to-copy}
-INFO: 11 processes: 6 remote cache hit, 3 internal, 2 remote.
-```
-
-In this example there were 6 remote cache hits, and 2 actions did not have
-cache hits and were executed remotely. The 3 internal actions can be ignored;
-these are typically tiny internal actions, such as creating symbolic links. Local
-cache hits are not included in this summary. If you are getting 0 processes
-(or a number lower than expected), run `bazel clean` followed by your build/test
-command.
-
-## Troubleshooting cache hits
-
-If you are not getting the cache hit rate you are expecting, do the following:
-
-### Ensure re-running the same build/test command produces cache hits
-
-1. Run the build(s) and/or test(s) that you expect to populate the cache. The
-   first time a new build is run on a particular stack, you can expect no remote
-   cache hits. As part of remote execution, action results are stored in the
-   cache and a subsequent run should pick them up.
-
-2. Run `bazel clean`. This command cleans your local cache, which allows
-   you to investigate remote cache hits without the results being masked by
-   local cache hits.
-
-3. Run the build(s) and test(s) that you are investigating again (on the same
-   machine).
-
-4. Check the `INFO` line for cache hit rate. If you see no processes except
-   `remote cache hit` and `internal`, then your cache is being correctly populated and
-   accessed. In that case, skip to the next section.
-
-5. A likely source of discrepancy is something non-hermetic in the build causing
-   the actions to receive different action keys across the two runs. To find
-   those actions, do the following:
-
-   a. Re-run the build(s) or test(s) in question to obtain execution logs:
-
-      ```posix-terminal
-      bazel clean
-
-      bazel --optional-flags build //your:target --execution_log_binary_file=/tmp/exec1.log
-      ```
-
-   b. [Compare the execution logs](#comparing-the-execution-logs) between the
-      two runs. Ensure that the actions are identical across the two log files.
-      Discrepancies provide a clue about the changes that occurred between the
-      runs. Update your build to eliminate those discrepancies.
-
-   If you are able to resolve the caching problems and now the repeated run
-   produces all cache hits, skip to the next section.
-
-   If your action IDs are identical but there are no cache hits, then something
-   in your configuration is preventing caching. Continue with this section to
-   check for common problems.
-
-   If you do not need to diff execution logs, you can use the
-   human-readable `--execution_log_json_file` flag instead. It cannot be
-   used for stable diffing since it contains execution times and doesn't
-   guarantee ordering.
-
-6. Check that all actions in the execution log have `cacheable` set to true. If
-   `cacheable` does not appear in the execution log for a given action, that
-   means that the corresponding rule may have a `no-cache` tag in its
-   definition in the `BUILD` file. Look at the human-readable `progress_message`
-   field in the execution log to help determine where the action is coming from.
-
-7. If the actions are identical and `cacheable` but there are no cache hits, it
-   is possible that your command line includes `--noremote_accept_cached` which
-   would disable cache lookups for a build.
-
-   If figuring out the actual command line is difficult, use the canonical
-   command line from the
-   [Build Event Protocol](/remote/bep)
-   as follows:
-
-   a. Add `--build_event_text_file=/tmp/bep.txt` to your Bazel command to get
-      the text version of the log.
-
-   b. Open the text version of the log and search for the
-      `structured_command_line` message with `command_line_label: "canonical"`.
-      It will list all the options after expansion.
-
-   c. Search for `remote_accept_cached` and check whether it's set to `false`.
-
-   d. If `remote_accept_cached` is `false`, determine where it is being
-      set to `false`: either at the command line or in a
-      [bazelrc](/docs/bazelrc#bazelrc-file-locations) file.
-
-### Ensure caching across machines
-
-After cache hits are happening as expected on the same machine, run the
-same build(s)/test(s) on a different machine. If you suspect that caching is
-not happening across machines, do the following:
-
-1. Make a small modification to your build to avoid hitting existing caches.
-
-2. Run the build on the first machine:
-
-   ```posix-terminal
-   bazel clean
-
-   bazel ... build ... --execution_log_binary_file=/tmp/exec1.log
-   ```
-
-3. Run the build on the second machine, ensuring the modification from step 1
-   is included:
-
-   ```posix-terminal
-   bazel clean
-
-   bazel ... build ... --execution_log_binary_file=/tmp/exec2.log
-   ```
-
-4. [Compare the execution logs](#comparing-the-execution-logs) for the two
-   runs. If the logs are not identical, investigate your build configurations
-   for discrepancies as well as properties from the host environment leaking
-   into either of the builds.
-
-## Comparing the execution logs
-
-Execution logs contain records of all actions executed during the build. For
-each action there is a
-[SpawnExec](https://github.com/bazelbuild/bazel/blob/42389d9468a954f3793a19f8e026b022b39aefca/src/main/protobuf/spawn.proto#L67)
-element containing all of the information from the action key. Thus, if the
-logs are identical, then so are the action cache keys.
-
-To compare logs for two builds that are not sharing cache hits as expected,
-do the following:
-
-1. Get the execution logs from each build and store them as `/tmp/exec1.log` and
-   `/tmp/exec2.log`.
-
-2. Download the Bazel source code and navigate to the Bazel folder by using
-   the command below. You need the source code to parse the
-   execution logs with the
-   [execlog parser](https://source.bazel.build/bazel/+/master:src/tools/execlog/).
-
-       git clone https://github.com/bazelbuild/bazel.git
-       cd bazel
-
-3. Use the execution log parser to convert the logs to text. The following
-   invocation also sorts the actions in the second log to match the action order
-   in the first log for ease of comparison.
-
-       bazel build src/tools/execlog:parser
-       bazel-bin/src/tools/execlog/parser \
-         --log_path=/tmp/exec1.log \
-         --log_path=/tmp/exec2.log \
-         --output_path=/tmp/exec1.log.txt \
-         --output_path=/tmp/exec2.log.txt
-
-4. Use your favorite text differ to diff `/tmp/exec1.log.txt` and
-   `/tmp/exec2.log.txt`.
diff --git a/6.5.0/remote/caching.mdx b/6.5.0/remote/caching.mdx
deleted file mode 100644
index 0773e6d..0000000
--- a/6.5.0/remote/caching.mdx
+++ /dev/null
@@ -1,367 +0,0 @@
----
-title: 'Remote Caching'
----
-
-
-This page covers remote caching, setting up a server to host the cache, and
-running builds using the remote cache.
-
-A remote cache is used by a team of developers and/or a continuous integration
-(CI) system to share build outputs. If your build is reproducible, the
-outputs from one machine can be safely reused on another machine, which can
-make builds significantly faster.
-
-## Overview
-
-Bazel breaks a build into discrete steps, which are called actions. Each action
-has inputs, output names, a command line, and environment variables. Required
-inputs and expected outputs are declared explicitly for each action.
-
-You can set up a server to be a remote cache for build outputs, which are these
-action outputs. These outputs consist of a list of output file names and the
-hashes of their contents. With a remote cache, you can reuse build outputs
-from another user's build rather than building each new output locally.
-
-To use remote caching:
-
-* Set up a server as the cache's backend
-* Configure the Bazel build to use the remote cache
-* Use Bazel version 0.10.0 or later
-
-The remote cache stores two types of data:
-
-* The action cache, which is a map of action hashes to action result metadata.
-* A content-addressable store (CAS) of output files.
-
-Note that the remote cache additionally stores the stdout and stderr for every
-action. Inspecting Bazel's stdout/stderr is thus not a good signal for
-[estimating cache hits](/docs/remote-caching-debug).
-
-### How a build uses remote caching
-
-Once a server is set up as the remote cache, you use the cache in multiple
-ways:
-
-* Read and write to the remote cache
-* Read and/or write to the remote cache except for specific targets
-* Only read from the remote cache
-* Not use the remote cache at all
-
-When you run a Bazel build that can read and write to the remote cache,
-the build follows these steps:
-
-1. Bazel creates the graph of targets that need to be built, and then creates
-a list of required actions. Each of these actions has declared inputs
-and output filenames.
-2. Bazel checks your local machine for existing build outputs and reuses any
-that it finds.
-3. Bazel checks the cache for existing build outputs. If the output is found,
-Bazel retrieves the output. This is a cache hit.
-4. For required actions where the outputs were not found, Bazel executes the
-actions locally and creates the required build outputs.
-5. New build outputs are uploaded to the remote cache.
-
-## Setting up a server as the cache's backend
-
-You need to set up a server to act as the cache's backend. An HTTP/1.1
-server can treat Bazel's data as opaque bytes and so many existing servers
-can be used as a remote caching backend. Bazel's
-[HTTP Caching Protocol](#http-caching) is what supports remote
-caching.
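To see how little the backend has to understand, the toy server below (an editorial sketch for local experimentation only: in-memory, no authentication, no eviction) satisfies the protocol by answering `PUT` and `GET` for opaque blobs keyed by request path:

```python
from http.server import BaseHTTPRequestHandler, HTTPServer

BLOBS = {}  # request path -> blob bytes

class CacheHandler(BaseHTTPRequestHandler):
    def do_PUT(self):
        length = int(self.headers.get("Content-Length", 0))
        BLOBS[self.path] = self.rfile.read(length)
        self.send_response(200)
        self.end_headers()

    def do_GET(self):
        blob = BLOBS.get(self.path)
        if blob is None:
            self.send_response(404)  # cache miss
            self.end_headers()
            return
        self.send_response(200)
        self.send_header("Content-Length", str(len(blob)))
        self.end_headers()
        self.wfile.write(blob)

if __name__ == "__main__":
    HTTPServer(("localhost", 8080), CacheHandler).serve_forever()
```

Any of the production-ready options discussed below is preferable for real use.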
- -You are responsible for choosing, setting up, and maintaining the backend -server that will store the cached outputs. When choosing a server, consider: - -* Networking speed. For example, if your team is in the same office, you may -want to run your own local server. -* Security. The remote cache will have your binaries and so needs to be secure. -* Ease of management. For example, Google Cloud Storage is a fully managed service. - -There are many backends that can be used for a remote cache. Some options -include: - -* [nginx](#nginx) -* [bazel-remote](#bazel-remote) -* [Google Cloud Storage](#cloud-storage) - -### nginx - -nginx is an open source web server. With its [WebDAV module], it can be -used as a remote cache for Bazel. On Debian and Ubuntu you can install the -`nginx-extras` package. On macOS nginx is available via Homebrew: - -```posix-terminal -brew tap denji/nginx - -brew install nginx-full --with-webdav -``` - -Below is an example configuration for nginx. Note that you will need to -change `/path/to/cache/dir` to a valid directory where nginx has permission -to write and read. You may need to change `client_max_body_size` option to a -larger value if you have larger output files. The server will require other -configuration such as authentication. - - -Example configuration for `server` section in `nginx.conf`: - -```nginx -location /cache/ { - # The path to the directory where nginx should store the cache contents. - root /path/to/cache/dir; - # Allow PUT - dav_methods PUT; - # Allow nginx to create the /ac and /cas subdirectories. - create_full_put_path on; - # The maximum size of a single file. - client_max_body_size 1G; - allow all; -} -``` - -### bazel-remote - -bazel-remote is an open source remote build cache that you can use on -your infrastructure. It has been successfully used in production at -several companies since early 2018. Note that the Bazel project does -not provide technical support for bazel-remote. - -This cache stores contents on disk and also provides garbage collection -to enforce an upper storage limit and clean unused artifacts. The cache is -available as a [docker image] and its code is available on -[GitHub](https://github.com/buchgr/bazel-remote/). -Both the REST and gRPC remote cache APIs are supported. - -Refer to the [GitHub](https://github.com/buchgr/bazel-remote/) -page for instructions on how to use it. - -### Google Cloud Storage - -[Google Cloud Storage] is a fully managed object store which provides an -HTTP API that is compatible with Bazel's remote caching protocol. It requires -that you have a Google Cloud account with billing enabled. - -To use Cloud Storage as the cache: - -1. [Create a storage bucket](https://cloud.google.com/storage/docs/creating-buckets). -Ensure that you select a bucket location that's closest to you, as network bandwidth -is important for the remote cache. - -2. Create a service account for Bazel to authenticate to Cloud Storage. See -[Creating a service account](https://cloud.google.com/iam/docs/creating-managing-service-accounts#creating_a_service_account). - -3. Generate a secret JSON key and then pass it to Bazel for authentication. Store -the key securely, as anyone with the key can read and write arbitrary data -to/from your GCS bucket. - -4. 
Connect to Cloud Storage by adding the following flags to your Bazel command:
-   * Pass the following URL to Bazel by using the flag: `--remote_cache=https://storage.googleapis.com/bucket-name` where `bucket-name` is the name of your storage bucket.
-   * Pass the authentication key using the flag `--google_credentials=/path/to/your/secret-key.json`, or `--google_default_credentials` to use [Application Authentication](https://cloud.google.com/docs/authentication/production).
-
-5. You can configure Cloud Storage to automatically delete old files. To do so, see [Managing Object Lifecycles](https://cloud.google.com/storage/docs/managing-lifecycles).
-
-### Other servers
-
-You can set up any HTTP/1.1 server that supports PUT and GET as the cache's backend. Users have reported success with caching backends such as [Hazelcast](https://hazelcast.com), [Apache httpd](http://httpd.apache.org), and [AWS S3](https://aws.amazon.com/s3).
-
-## Authentication
-
-Support for HTTP Basic Authentication was added in Bazel 0.11.0. You can pass a username and password to Bazel via the remote cache URL. The syntax is `https://username:password@hostname.com:port/path`. Note that HTTP Basic Authentication transmits the username and password in plaintext over the network, so it's critical to always use it with HTTPS.
-
-## HTTP caching protocol
-
-Bazel supports remote caching via HTTP/1.1. The protocol is conceptually simple: binary data (a BLOB) is uploaded via PUT requests and downloaded via GET requests. Action result metadata is stored under the path `/ac/` and output files are stored under the path `/cas/`.
-
-For example, consider a remote cache running under `http://localhost:8080/cache`. A Bazel request to download action result metadata for an action with the SHA256 hash `01ba4719...` will look as follows:
-
-```http
-GET /cache/ac/01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b HTTP/1.1
-Host: localhost:8080
-Accept: */*
-Connection: Keep-Alive
-```
-
-A Bazel request to upload an output file with the SHA256 hash `15e2b0d3...` to the CAS will look as follows:
-
-```http
-PUT /cache/cas/15e2b0d3c33891ebb0f1ef609ec419420c20e320ce94c65fbc8c3312448eb225 HTTP/1.1
-Host: localhost:8080
-Accept: */*
-Content-Length: 9
-Connection: Keep-Alive
-
-0x310x320x330x340x350x360x370x380x39
-```
-
-## Run Bazel using the remote cache
-
-Once a server is set up as the remote cache, to use the remote cache you need to add flags to your Bazel command. See the list of configurations and their flags below.
-
-You may also need to configure authentication, which is specific to your chosen server.
-
-You may want to add these flags in a `.bazelrc` file so that you don't need to specify them every time you run Bazel. Depending on your project and team dynamics, you can add flags to a `.bazelrc` file that is:
-
-* On your local machine
-* In your project's workspace, shared with the team
-* On the CI system
-
-### Read from and write to the remote cache
-
-Take care in deciding who has the ability to write to the remote cache. You may want only your CI system to be able to write to the remote cache.
-
-Use the following flag to read from and write to the remote cache:
-
-```posix-terminal
-build --remote_cache=http://your.host:port
-```
-
-Besides `HTTP`, the following protocols are also supported: `HTTPS`, `grpc`, `grpcs`.
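If you want to smoke-test a cache endpoint before adding these flags to your `.bazelrc`, the protocol described above is easy to exercise by hand. The following hypothetical Python snippet round-trips a blob through the CAS; the nine-byte payload is the same `123456789` body as the `15e2b0d3...` example above.

```python
# Round-trip smoke test for an HTTP remote cache endpoint (hypothetical
# helper script, not part of Bazel). Uploads a blob to /cas/ under its
# SHA256 digest and reads it back.
import hashlib
import urllib.request

BASE = "http://localhost:8080"  # assumed cache endpoint
blob = b"123456789"
digest = hashlib.sha256(blob).hexdigest()  # 15e2b0d3..., per the example above

put = urllib.request.Request(f"{BASE}/cas/{digest}", data=blob, method="PUT")
urllib.request.urlopen(put)

with urllib.request.urlopen(f"{BASE}/cas/{digest}") as resp:
    assert resp.read() == blob, "cache returned different bytes"
print("OK:", digest)
```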
-
-Use the following flag in addition to `--remote_cache` above to only read from the remote cache:
-
-```posix-terminal
-build --remote_upload_local_results=false
-```
-
-### Exclude specific targets from using the remote cache
-
-To exclude specific targets from using the remote cache, tag the target with `no-cache`. For example:
-
-```starlark
-java_library(
-    name = "target",
-    tags = ["no-cache"],
-)
-```
-
-### Delete content from the remote cache
-
-Deleting content from the remote cache is part of managing your server. How you delete content from the remote cache depends on the server you have set up as the cache. When deleting outputs, either delete the entire cache, or delete old outputs.
-
-The cached outputs are stored as a set of names and hashes. When deleting content, there's no way to distinguish which output belongs to a specific build.
-
-You may want to delete content from the cache to:
-
-* Create a clean cache after a cache was poisoned
-* Reduce the amount of storage used by deleting old outputs
-
-### Unix sockets
-
-The remote HTTP cache supports connecting over unix domain sockets. The behavior is similar to curl's `--unix-socket` flag. Use the following to configure a unix domain socket:
-
-```posix-terminal
-   build --remote_cache=http://your.host:port
-   build --remote_cache_proxy=unix:/path/to/socket
-```
-
-This feature is unsupported on Windows.
-
-## Disk cache
-
-Bazel can use a directory on the file system as a remote cache. This is useful for sharing build artifacts when switching branches and/or working on multiple workspaces of the same project, such as multiple checkouts. Since Bazel does not garbage-collect the directory, you might want to automate a periodic cleanup of this directory. Enable the disk cache as follows:
-
-```posix-terminal
-build --disk_cache=path/to/build/cache
-```
-
-You can pass a user-specific path to the `--disk_cache` flag using the `~` alias (Bazel will substitute the current user's home directory). This comes in handy when enabling the disk cache for all developers of a project via the project's checked-in `.bazelrc` file.
-
-## Known issues
-
-**Input file modification during a build**
-
-When an input file is modified during a build, Bazel might upload invalid results to the remote cache. You can enable change detection with the `--experimental_guard_against_concurrent_changes` flag. There are no known issues with it, and it will be enabled by default in a future release. See [issue #3360] for updates. Generally, avoid modifying source files during a build.
-
-**Environment variables leaking into an action**
-
-An action definition contains environment variables. This can be a problem for sharing remote cache hits across machines. For example, environments with different `$PATH` variables won't share cache hits. Only environment variables explicitly whitelisted via `--action_env` are included in an action definition. Bazel's Debian/Ubuntu package used to install `/etc/bazel.bazelrc` with a whitelist of environment variables including `$PATH`. If you are getting fewer cache hits than expected, check that your environment doesn't have an old `/etc/bazel.bazelrc` file.
-
-**Bazel does not track tools outside a workspace**
-
-Bazel currently does not track tools outside a workspace. This can be a problem if, for example, an action uses a compiler from `/usr/bin/`.
Then, two users with different compilers installed will wrongly share cache hits because the outputs are different but they have the same action hash. See [issue #4558](https://github.com/bazelbuild/bazel/issues/4558) for updates.
-
-**Incremental in-memory state is lost when running builds inside Docker containers**
-
-Bazel uses a server/client architecture even when running in a single Docker container. On the server side, Bazel maintains an in-memory state which speeds up builds. When running builds inside Docker containers, such as in CI, the in-memory state is lost and Bazel must rebuild it before using the remote cache.
-
-## External links
-
-* **Your Build in a Datacenter:** The Bazel team gave a [talk](https://fosdem.org/2018/schedule/event/datacenter_build/) about remote caching and execution at FOSDEM 2018.
-
-* **Faster Bazel builds with remote caching: a benchmark:** Nicolò Valigi wrote a [blog post](https://nicolovaligi.com/faster-bazel-remote-caching-benchmark.html) in which he benchmarks remote caching in Bazel.
-
-* [Adapting Rules for Remote Execution](/docs/remote-execution-rules)
-* [Troubleshooting Remote Execution](/docs/remote-execution-sandbox)
-* [WebDAV module](https://nginx.org/en/docs/http/ngx_http_dav_module.html)
-* [Docker image](https://hub.docker.com/r/buchgr/bazel-remote-cache/)
-* [bazel-remote](https://github.com/buchgr/bazel-remote/)
-* [Google Cloud Storage](https://cloud.google.com/storage)
-* [Google Cloud Console](https://cloud.google.com/console)
-* [Bucket locations](https://cloud.google.com/storage/docs/bucket-locations)
-* [Hazelcast](https://hazelcast.com)
-* [Apache httpd](http://httpd.apache.org)
-* [AWS S3](https://aws.amazon.com/s3)
-* [issue #3360](https://github.com/bazelbuild/bazel/issues/3360)
-* [gRPC](https://grpc.io/)
-* [gRPC protocol](https://github.com/bazelbuild/remote-apis/blob/main/build/bazel/remote/execution/v2/remote_execution.proto)
-* [Buildbarn](https://github.com/buildbarn)
-* [Buildfarm](https://github.com/bazelbuild/bazel-buildfarm)
-* [BuildGrid](https://gitlab.com/BuildGrid/buildgrid)
-* [issue #4558](https://github.com/bazelbuild/bazel/issues/4558)
-* [Application Authentication](https://cloud.google.com/docs/authentication/production)
diff --git a/6.5.0/remote/creating.mdx b/6.5.0/remote/creating.mdx
deleted file mode 100644
index fec164a..0000000
--- a/6.5.0/remote/creating.mdx
+++ /dev/null
@@ -1,259 +0,0 @@
----
-title: 'Creating Persistent Workers'
----
-
-
-[Persistent workers](/docs/persistent-workers) can make your build faster. If you have repeated actions in your build that have a high startup cost or would benefit from cross-action caching, you may want to implement your own persistent worker to perform these actions.
-
-The Bazel server communicates with the worker using `stdin`/`stdout`. It supports the use of protocol buffers or JSON strings.
-
-The worker implementation has two parts:
-
-* The [worker](#making-worker).
-* The [rule that uses the worker](#rule-uses-worker).
-
-## Making the worker
-
-A persistent worker upholds a few requirements:
-
-* It reads [WorkRequests](https://github.com/bazelbuild/bazel/blob/54a547f30fd582933889b961df1d6e37a3e33d85/src/main/protobuf/worker_protocol.proto#L36) from its `stdin`.
-* It writes [WorkResponses](https://github.com/bazelbuild/bazel/blob/54a547f30fd582933889b961df1d6e37a3e33d85/src/main/protobuf/worker_protocol.proto#L77) (and only `WorkResponse`s) to its `stdout`.
-* It accepts the `--persistent_worker` flag.
  The wrapper must recognize the `--persistent_worker` command-line flag and only make itself persistent if that flag is passed; otherwise it must do a one-shot compilation and exit.
-
-If your program upholds these requirements, it can be used as a persistent worker!
-
-### Work requests
-
-A `WorkRequest` contains a list of arguments to the worker, a list of path-digest pairs representing the inputs the worker can access (this isn't enforced, but you can use this info for caching), and a request id, which is 0 for singleplex workers.
-
-NOTE: While the protocol buffer specification uses "snake case" (`request_id`), the JSON protocol uses "camel case" (`requestId`). This document uses camel case in the JSON examples, but snake case when talking about the field regardless of protocol.
-
-```json
-{
-  "arguments" : ["--some_argument"],
-  "inputs" : [
-    { "path": "/path/to/my/file/1", "digest": "fdk3e2ml23d" },
-    { "path": "/path/to/my/file/2", "digest": "1fwqd4qdd" }
-  ],
-  "requestId" : 12
-}
-```
-
-The optional `verbosity` field can be used to request extra debugging output from the worker. It is entirely up to the worker what and how to output. Higher values indicate more verbose output. Passing the `--worker_verbose` flag to Bazel sets the `verbosity` field to 10, but smaller or larger values can be used manually for different amounts of output.
-
-The optional `sandbox_dir` field is used only by workers that support [multiplex sandboxing](/docs/multiplex-worker).
-
-### Work responses
-
-A `WorkResponse` contains a request id, a zero or nonzero exit code, and an output string that describes any errors encountered in processing or executing the request. The `output` field contains a short description; complete logs may be written to the worker's `stderr`. Because workers may only write `WorkResponses` to `stdout`, it's common for the worker to redirect the `stdout` of any tools it uses to `stderr`.
-
-```json
-{
-  "exitCode" : 1,
-  "output" : "Action failed with the following message:\nCould not find input file \"/path/to/my/file/1\"",
-  "requestId" : 12
-}
-```
-
-As per the norm for protobufs, all fields are optional. However, Bazel requires the `WorkRequest` and the corresponding `WorkResponse` to have the same request id, so the request id must be specified if it is nonzero. This is a valid `WorkResponse`:
-
-```json
-{
-  "requestId" : 12
-}
-```
-
-A `request_id` of 0 indicates a "singleplex" request, used when this request cannot be processed in parallel with other requests. The server guarantees that a given worker receives requests with either only `request_id` 0 or only `request_id` greater than zero. Singleplex requests are sent serially: the server doesn't send another request until it has received a response (except for cancel requests, see below).
-
-**Notes**
-
-* Each protocol buffer is preceded by its length in `varint` format (see [`MessageLite.writeDelimitedTo()`](https://developers.google.com/protocol-buffers/docs/reference/java/com/google/protobuf/MessageLite.html#writeDelimitedTo-java.io.OutputStream-)).
-* JSON requests and responses are not preceded by a size indicator.
-* JSON requests uphold the same structure as the protobuf, but use standard JSON and use camel case for all field names.
-* In order to maintain the same backward and forward compatibility properties as protobuf, JSON workers must tolerate unknown fields in these messages, and use the protobuf defaults for missing values.
-* Bazel stores requests as protobufs and converts them to JSON using - [protobuf's JSON format](https://cs.opensource.google/protobuf/protobuf/+/master:java/util/src/main/java/com/google/protobuf/util/JsonFormat.java) - -### Cancellation - -Workers can optionally allow work requests to be cancelled before they finish. -This is particularly useful in connection with dynamic execution, where local -execution can regularly be interrupted by a faster remote execution. To allow -cancellation, add `supports-worker-cancellation: 1` to the -`execution-requirements` field (see below) and set the -`--experimental_worker_cancellation` flag. - -A **cancel request** is a `WorkRequest` with the `cancel` field set (and -similarly a **cancel response** is a `WorkResponse` with the `was_cancelled` -field set). The only other field that must be in a cancel request or cancel -response is `request_id`, indicating which request to cancel. The `request_id` -field will be 0 for singleplex workers or the non-0 `request_id` of a previously -sent `WorkRequest` for multiplex workers. The server may send cancel requests -for requests that the worker has already responded to, in which case the cancel -request must be ignored. - -Each non-cancel `WorkRequest` message must be answered exactly once, whether or -not it was cancelled. Once the server has sent a cancel request, the worker may -respond with a `WorkResponse` with the `request_id` set and the `was_cancelled` -field set to true. Sending a regular `WorkResponse` is also accepted, but the -`output` and `exit_code` fields will be ignored. - -Once a response has been sent for a `WorkRequest`, the worker must not touch the -files in its working directory. The server is free to clean up the files, -including temporary files. - -## Making the rule that uses the worker - -You'll also need to create a rule that generates actions to be performed by the -worker. Making a Starlark rule that uses a worker is just like -[creating any other rule](https://github.com/bazelbuild/examples/tree/master/rules). - -In addition, the rule needs to contain a reference to the worker itself, and -there are some requirements for the actions it produces. - -### Referring to the worker - -The rule that uses the worker needs to contain a field that refers to the worker -itself, so you'll need to create an instance of a `\*\_binary` rule to define -your worker. If your worker is called `MyWorker.Java`, this might be the -associated rule: - -```python -java_binary( - name = "worker", - srcs = ["MyWorker.Java"], -) -``` - -This creates the "worker" label, which refers to the worker binary. You'll then -define a rule that *uses* the worker. This rule should define an attribute that -refers to the worker binary. - -If the worker binary you built is in a package named "work", which is at the top -level of the build, this might be the attribute definition: - -```python -"worker": attr.label( - default = Label("//work:worker"), - executable = True, - cfg = "exec", -) -``` - -`cfg = "exec"` indicates that the worker should be built to run on your -execution platform rather than on the target platform (i.e., the worker is used -as tool during the build). - -### Work action requirements - -The rule that uses the worker creates actions for the worker to perform. These -actions have a couple of requirements. - -* The *"arguments"* field. This takes a list of strings, all but the last of - which are arguments passed to the worker upon startup. 
The last element in - the "arguments" list is a `flag-file` (@-preceded) argument. Workers read - the arguments from the specified flagfile on a per-WorkRequest basis. Your - rule can write non-startup arguments for the worker to this flagfile. - -* The *"execution-requirements"* field, which takes a dictionary containing - `"supports-workers" : "1"`, `"supports-multiplex-workers" : "1"`, or both. - - The "arguments" and "execution-requirements" fields are required for all - actions sent to workers. Additionally, actions that should be executed by - JSON workers need to include `"requires-worker-protocol" : "json"` in the - execution requirements field. `"requires-worker-protocol" : "proto"` is also - a valid execution requirement, though it’s not required for proto workers, - since they are the default. - - You can also set a `worker-key-mnemonic` in the execution requirements. This - may be useful if you're reusing the executable for multiple action types and - want to distinguish actions by this worker. - -* Temporary files generated in the course of the action should be saved to the - worker's directory. This enables sandboxing. - -Note: To pass an argument starting with a literal `@`, start the argument with -`@@` instead. If an argument is also an external repository label, it will not -be considered a flagfile argument. - -Assuming a rule definition with "worker" attribute described above, in addition -to a "srcs" attribute representing the inputs, an "output" attribute -representing the outputs, and an "args" attribute representing the worker -startup args, the call to `ctx.actions.run` might be: - -```python -ctx.actions.run( - inputs=ctx.files.srcs, - outputs=[ctx.outputs.output], - executable=ctx.executable.worker, - mnemonic="someMnemonic", - execution_requirements={ - "supports-workers" : "1", - "requires-worker-protocol" : "json"}, - arguments=ctx.attr.args + ["@flagfile"] - ) -``` - -For another example, see -[Implementing persistent workers](/docs/persistent-workers#implementation). - -## Examples - -The Bazel code base uses -[Java compiler workers](https://github.com/bazelbuild/bazel/blob/a4251eab6988d6cf4f5e35681fbe2c1b0abe48ef/src/java_tools/buildjar/java/com/google/devtools/build/buildjar/BazelJavaBuilder.java), -in addition to an -[example JSON worker](https://github.com/bazelbuild/bazel/blob/c65f768fec9889bbf1ee934c61d0dc061ea54ca2/src/test/java/com/google/devtools/build/lib/worker/ExampleWorker.java) -that is used in our integration tests. - -You can use their -[scaffolding](https://github.com/bazelbuild/bazel/blob/a4251eab6988d6cf4f5e35681fbe2c1b0abe48ef/src/main/java/com/google/devtools/build/lib/worker/WorkRequestHandler.java) -to make any Java-based tool into a worker by passing in the correct callback. - -For an example of a rule that uses a worker, take a look at Bazel's -[worker integration test](https://github.com/bazelbuild/bazel/blob/22b4dbcaf05756d506de346728db3846da56b775/src/test/shell/integration/bazel_worker_test.sh#L106). - -External contributors have implemented workers in a variety of languages; take a -look at -[Polyglot implementations of Bazel persistent workers](https://github.com/Ubehebe/bazel-worker-examples). -You can -[find many more examples on GitHub](https://github.com/search?q=bazel+workrequest&type=Code)! 
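To tie the pieces above together, here is a minimal sketch of a JSON singleplex worker in Python. It is illustrative only: `run_one_shot` and `do_work` are hypothetical stand-ins for the actual tool, and a real wrapper would add flagfile parsing and error handling.

```python
# Minimal sketch of a JSON persistent worker. Reads newline-delimited
# WorkRequests from stdin and writes one WorkResponse per request to
# stdout. Tool output must go to stderr, never stdout.
import json
import sys

def run_one_shot(argv):
    # Fallback when --persistent_worker is not passed: do a single
    # compilation using argv and exit.
    return 0

def do_work(arguments):
    # Hypothetical stand-in for the actual tool invocation.
    return 0, ""

def main():
    if "--persistent_worker" not in sys.argv:
        sys.exit(run_one_shot(sys.argv[1:]))
    for line in sys.stdin:
        if not line.strip():
            continue
        request = json.loads(line)
        exit_code, output = do_work(request.get("arguments", []))
        response = {
            "exitCode": exit_code,
            "output": output,
            # Bazel requires the response to echo the request id.
            "requestId": request.get("requestId", 0),
        }
        sys.stdout.write(json.dumps(response) + "\n")
        sys.stdout.flush()

if __name__ == "__main__":
    main()
```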
diff --git a/6.5.0/remote/multiplex.mdx b/6.5.0/remote/multiplex.mdx
deleted file mode 100644
index d352848..0000000
--- a/6.5.0/remote/multiplex.mdx
+++ /dev/null
@@ -1,112 +0,0 @@
----
-title: 'Multiplex Workers (Experimental Feature)'
----
-
-
-This page describes multiplex workers, how to write multiplex-compatible rules, and workarounds for certain limitations.
-
-Caution: Experimental features are subject to change at any time.
-
-_Multiplex workers_ allow Bazel to handle multiple requests with a single worker process. For multi-threaded workers, Bazel can use fewer resources to achieve the same or better performance. For example, instead of having one worker process per worker, Bazel can have four multiplexed workers talking to the same worker process, which can then handle requests in parallel. For languages like Java and Scala, this saves JVM warm-up time and JIT compilation time, and in general it allows using one shared cache between all workers of the same type.
-
-## Overview
-
-There are two layers between the Bazel server and the worker process. For certain mnemonics that can run processes in parallel, Bazel gets a `WorkerProxy` from the worker pool. The `WorkerProxy` forwards requests to the worker process sequentially along with a `request_id`; the worker process processes the request and sends responses to the `WorkerMultiplexer`. When the `WorkerMultiplexer` receives a response, it parses the `request_id` and then forwards the response back to the correct `WorkerProxy`. Just as with non-multiplexed workers, all communication is done over standard in/out, but the tool cannot just use `stderr` for user-visible output ([see below](#output)).
-
-Each worker has a key. Bazel uses the key's hash code (composed of environment variables, the execution root, and the mnemonic) to determine which `WorkerMultiplexer` to use. `WorkerProxy`s communicate with the same `WorkerMultiplexer` if they have the same hash code. Therefore, assuming environment variables and the execution root are the same in a single Bazel invocation, each unique mnemonic can only have one `WorkerMultiplexer` and one worker process. The total number of workers, including regular workers and `WorkerProxy`s, is still limited by `--worker_max_instances`.
-
-## Writing multiplex-compatible rules
-
-The rule's worker process should be multi-threaded to take advantage of multiplex workers. Protobuf allows a ruleset to parse a single request even though there might be multiple requests piling up in the stream. Whenever the worker process parses a request from the stream, it should handle the request in a new thread. Because different threads could complete and write to the stream at the same time, the worker process needs to make sure the responses are written atomically (messages don't overlap). Responses must contain the `request_id` of the request they're handling.
-
-### Handling multiplex output
-
-Multiplex workers need to be more careful about handling their output than singleplex workers. Anything sent to `stderr` will go into a single log file shared among all `WorkerProxy`s of the same type, randomly interleaved between concurrent requests. While redirecting `stdout` into `stderr` is a good idea, do not collect that output into the `output` field of `WorkResponse`, as that could show the user mangled pieces of output. If your tool only sends user-oriented output to `stdout` or `stderr`, you will need to change that behaviour before you can enable multiplex workers.
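Putting the two requirements above together (one thread per request, atomic response writes), the core request loop of a multiplex-compatible worker might look like the following sketch, assuming the JSON protocol; `do_work` is a hypothetical stand-in for the tool.

```python
# Sketch of a multiplex-compatible request loop (JSON protocol).
# Each request is handled on its own thread; a lock ensures responses
# are written atomically and each carries the request's requestId.
import json
import sys
import threading

write_lock = threading.Lock()

def do_work(arguments):
    return 0, ""  # hypothetical tool invocation

def handle(request):
    exit_code, output = do_work(request.get("arguments", []))
    response = {
        "exitCode": exit_code,
        "output": output,
        "requestId": request.get("requestId", 0),
    }
    with write_lock:
        # Write the whole response in one critical section so that
        # concurrent responses never interleave on stdout.
        sys.stdout.write(json.dumps(response) + "\n")
        sys.stdout.flush()

def main():
    for line in sys.stdin:
        if not line.strip():
            continue
        request = json.loads(line)
        threading.Thread(target=handle, args=(request,)).start()

if __name__ == "__main__":
    main()
```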
- -## Enabling multiplex workers - -Multiplex workers are not enabled by default. A ruleset can turn on multiplex -workers by using the `supports-multiplex-workers` tag in the -`execution_requirements` of an action (just like the `supports-workers` tag -enables regular workers). As is the case when using regular workers, a worker -strategy needs to be specified, either at the ruleset level (for example, -`--strategy=[some_mnemonic]=worker`) or generally at the strategy level (for -example, `--dynamic_local_strategy=worker,standalone`.) No additional flags are -necessary, and `supports-multiplex-workers` takes precedence over -`supports-workers`, if both are set. You can turn off multiplex workers -globally by passing `--noexperimental_worker_multiplex`. - -A ruleset is encouraged to use multiplex workers if possible, to reduce memory -pressure and improve performance. However, multiplex workers are not currently -compatible with [dynamic execution](/docs/dynamic-execution) unless they -implement multiplex sandboxing. Attempting to run non-sandboxed multiplex -workers with dynamic execution will silently use sandboxed -singleplex workers instead. - -## Multiplex sandboxing - -Multiplex workers can be sandboxed by adding explicit support for it in the -worker implementations. While singleplex worker sandboxing can be done by -running each worker process in its own sandbox, multiplex workers share the -process working directory between multiple parallel requests. To allow -sandboxing of multiplex workers, the worker must support reading from and -writing to a subdirectory specified in each request, instead of directly in -its working directory. - -To support multiplex sandboxing, the worker must use the `sandbox_dir` field -from the `WorkRequest` and use that as a prefix for all file reads and writes. -While the `arguments` and `inputs` fields remain unchanged from an unsandboxed -request, the actual inputs are relative to the `sandbox_dir`. The worker must -translate file paths found in `arguments` and `inputs` to read from this -modified path, and must also write all outputs relative to the `sandbox_dir`. -This includes paths such as '.', as well as paths found in files specified -in the arguments (such as ["argfile"](https://docs.oracle.com/javase/7/docs/technotes/tools/windows/javac.html#commandlineargfile) arguments). - -Once a worker supports multiplex sandboxing, the ruleset can declare this -support by adding `supports-multiplex-sandboxing` to the -`execution_requirements` of an action. Bazel will then use multiplex sandboxing -if the `--experimental_worker_multiplex_sandboxing` flag is passed, or if -the worker is used with dynamic execution. - -The worker files of a sandboxed multiplex worker are still relative to the -working directory of the worker process. Thus, if a file is -used both for running the worker and as an input, it must be specified both as -an input in the flagfile argument as well as in `tools`, `executable`, or -`runfiles`. diff --git a/6.5.0/remote/output-directories.mdx b/6.5.0/remote/output-directories.mdx deleted file mode 100644 index 558daee..0000000 --- a/6.5.0/remote/output-directories.mdx +++ /dev/null @@ -1,140 +0,0 @@ ---- -title: 'Output Directory Layout' ---- - - -This page covers requirements and layout for output directories. - -## Requirements - -Requirements for an output directory layout: - -* Doesn't collide if multiple users are building on the same box. -* Supports building in multiple workspaces at the same time. 
-* Supports building for multiple target configurations in the same workspace. -* Doesn't collide with any other tools. -* Is easy to access. -* Is easy to clean, even selectively. -* Is unambiguous, even if the user relies on symbolic links when changing into - their client directory. -* All the build state per user should be underneath one directory ("I'd like to - clean all the .o files from all my clients.") - -## Current layout - -The solution that's currently implemented: - -* Bazel must be invoked from a directory containing a WORKSPACE file (the - "_workspace directory_"), or a subdirectory thereof. It reports an error if it - is not. -* The _outputRoot_ directory defaults to `~/.cache/bazel` on Linux, - `/private/var/tmp` on macOS, and on Windows it defaults to `%HOME%` if set, - else `%USERPROFILE%` if set, else the result of calling - `SHGetKnownFolderPath()` with the `FOLDERID_Profile` flag set. If the - environment variable `$TEST_TMPDIR` is set, as in a test of Bazel itself, - then that value overrides the default. -* The Bazel user's build state is located beneath `outputRoot/_bazel_$USER`. - This is called the _outputUserRoot_ directory. -* Beneath the `outputUserRoot` directory there is an `install` directory, and in - it is an `installBase` directory whose name is the MD5 hash of the Bazel - installation manifest. -* Beneath the `outputUserRoot` directory, an `outputBase` directory - is also created whose name is the MD5 hash of the path name of the workspace - directory. So, for example, if Bazel is running in the workspace directory - `/home/user/src/my-project` (or in a directory symlinked to that one), then - an output base directory is created called: - `/home/user/.cache/bazel/_bazel_user/7ffd56a6e4cb724ea575aba15733d113`. -* You can use Bazel's `--output_base` startup option to override the default - output base directory. For example, - `bazel --output_base=/tmp/bazel/output build x/y:z`. -* You can also use Bazel's `--output_user_root` startup option to override the - default install base and output base directories. For example: - `bazel --output_user_root=/tmp/bazel build x/y:z`. - -The symlinks for "bazel-<workspace-name>", "bazel-out", "bazel-testlogs", -and "bazel-bin" are put in the workspace directory; these symlinks point to some -directories inside a target-specific directory inside the output directory. -These symlinks are only for the user's convenience, as Bazel itself does not -use them. Also, this is done only if the workspace directory is writable. 
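The outputBase name in the example above is straightforward to reproduce; a short sketch, assuming the same workspace path:

```python
# Reproduces the outputBase directory name described above: the MD5
# hex digest of the workspace directory's path name.
import hashlib

workspace_dir = "/home/user/src/my-project"
output_base_name = hashlib.md5(workspace_dir.encode("utf-8")).hexdigest()
# Per the example above, this prints 7ffd56a6e4cb724ea575aba15733d113.
print(output_base_name)
```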
-
-## Layout diagram
-
-The directories are laid out as follows:
-
-```
-<workspace-name>/                         <== The workspace directory
-  bazel-my-project => <...my-project>     <== Symlink to execRoot
-  bazel-out => <...bin>                   <== Convenience symlink to outputPath
-  bazel-bin => <...bin>                   <== Convenience symlink to most recent written bin dir $(BINDIR)
-  bazel-testlogs => <...testlogs>         <== Convenience symlink to the test logs directory
-
-/home/user/.cache/bazel/                  <== Root for all Bazel output on a machine: outputRoot
-  _bazel_$USER/                           <== Top level directory for a given user depends on the user
-                                              name: outputUserRoot
-    install/
-      fba9a2c87ee9589d72889caf082f1029/   <== Hash of the Bazel install manifest: installBase
-        _embedded_binaries/               <== Contains binaries and scripts unpacked from the data section
-                                              of the bazel executable on first run (such as helper scripts
-                                              and the main Java file BazelServer_deploy.jar)
-    7ffd56a6e4cb724ea575aba15733d113/     <== Hash of the client's workspace directory (such as
-                                              /home/some-user/src/my-project): outputBase
-      action_cache/                       <== Action cache directory hierarchy. This contains the
-                                              persistent record of the file metadata (timestamps, and
-                                              perhaps eventually also MD5 sums) used by the
-                                              FilesystemValueChecker.
-      action_outs/                        <== Action output directory. This contains a file with the
-                                              stdout/stderr for every action from the most recent bazel
-                                              run that produced output.
-      command.log                         <== A copy of the stdout/stderr output from the most recent
-                                              bazel command.
-      external/                           <== The directory that remote repositories are
-                                              downloaded/symlinked into.
-      server/                             <== The Bazel server puts all server-related files (such as
-                                              socket file, logs, etc) here.
-        jvm.out                           <== The debugging output for the server.
-      execroot/                           <== The working directory for all actions. For special cases
-                                              such as sandboxing and remote execution, the actions run in
-                                              a directory that mimics execroot. Implementation details,
-                                              such as where the directories are created, are intentionally
-                                              hidden from the action. All actions can access their inputs
-                                              and outputs relative to the execroot directory.
-        <workspace-name>/                 <== Working tree for the Bazel build & root of symlink forest:
-                                              execRoot
-          _bin/                           <== Helper tools are linked from or copied to here.
-
-          bazel-out/                      <== All actual output of the build is under here: outputPath
-            local_linux-fastbuild/        <== one subdirectory per unique target BuildConfiguration
-                                              instance; this is currently encoded
-              bin/                        <== Bazel outputs binaries for target configuration here:
-                                              $(BINDIR)
-                foo/bar/_objs/baz/        <== Object files for a cc_* rule named //foo/bar:baz
-                  foo/bar/baz1.o          <== Object files from source //foo/bar:baz1.cc
-                  other_package/other.o   <== Object files from source //other_package:other.cc
-                foo/bar/baz               <== foo/bar/baz might be the artifact generated by a cc_binary
-                                              named //foo/bar:baz
-                foo/bar/baz.runfiles/     <== The runfiles symlink farm for the //foo/bar:baz executable.
-                  MANIFEST
-                  <workspace-name>/
-                    ...
-              genfiles/                   <== Bazel puts generated source for the target configuration
-                                              here: $(GENDIR)
-                foo/bar.h                 <== foo/bar.h might be a header file generated by //foo:bargen
-              testlogs/                   <== Bazel internal test runner puts test log files here;
-                foo/bartest.log               foo/bartest.log might be an output of the //foo:bartest
-                foo/bartest.status            test, with foo/bartest.status containing the exit status
-                                              of the test (such as PASSED or FAILED (Exit 1))
-              include/                    <== a tree with include symlinks, generated as needed. The
-                                              bazel-include symlinks point to here.
                                              This is used for linkstamp stuff, etc.
-              host/                       <== BuildConfiguration for build host (user's workstation), for
-                                              building prerequisite tools, that will be used in later
-                                              stages of the build (ex: Protocol Compiler)
-        <packages>/                       <== Packages referenced in the build appear as if under a
-                                              regular workspace
-```
-
-The layout of the \*.runfiles directories is documented in more detail in the places pointed to by RunfilesSupport.
-
-## `bazel clean`
-
-`bazel clean` does an `rm -rf` on the `outputPath` and the `action_cache` directory. It also removes the workspace symlinks. The `--expunge` option will clean the entire outputBase.
diff --git a/6.5.0/remote/persistent.mdx b/6.5.0/remote/persistent.mdx
deleted file mode 100644
index 6c21ed4..0000000
--- a/6.5.0/remote/persistent.mdx
+++ /dev/null
@@ -1,271 +0,0 @@
----
-title: 'Persistent Workers'
----
-
-
-This page covers how to use persistent workers, the benefits, requirements, and how workers affect sandboxing.
-
-A persistent worker is a long-running process started by the Bazel server, which functions as a *wrapper* around the actual *tool* (typically a compiler), or is the *tool* itself. In order to benefit from persistent workers, the tool must support doing a sequence of compilations, and the wrapper needs to translate between the tool's API and the request/response format described below. The same worker might be called with and without the `--persistent_worker` flag in the same build, and is responsible for appropriately starting and talking to the tool, as well as shutting down workers on exit. Each worker instance is assigned (but not chrooted to) a separate working directory under `<outputBase>/bazel-workers`.
-
-Using persistent workers is an [execution strategy](/docs/user-manual#execution-strategy) that decreases start-up overhead, allows more JIT compilation, and enables caching of, for example, the abstract syntax trees in the action execution. This strategy achieves these improvements by sending multiple requests to a long-running process.
-
-Persistent workers are implemented for multiple languages, including Java, [Scala](https://github.com/bazelbuild/rules_scala), [Kotlin](https://github.com/bazelbuild/rules_kotlin), and more.
-
-Programs using a NodeJS runtime can use the [@bazel/worker](https://www.npmjs.com/package/@bazel/worker) helper library to implement the worker protocol.
-
-## Using persistent workers
-
-[Bazel 0.27 and higher](https://blog.bazel.build/2019/06/19/list-strategy.html) uses persistent workers by default when executing builds, though remote execution takes precedence. For actions that do not support persistent workers, Bazel falls back to starting a tool instance for each action. You can explicitly set your build to use persistent workers by setting the `worker` [strategy](/docs/user-manual#execution-strategy) for the applicable tool mnemonics. As a best practice, this example includes specifying `local` as a fallback to the `worker` strategy:
-
-```posix-terminal
-bazel build //my:target --strategy=Javac=worker,local
-```
-
-Using the workers strategy instead of the local strategy can boost compilation speed significantly, depending on implementation. For Java, builds can be 2–4 times faster, sometimes more for incremental compilation. Compiling Bazel is about 2.5 times as fast with workers. For more details, see the "[Choosing number of workers](#number-of-workers)" section.
-
-If you also have a remote build environment that matches your local build environment, you can use the experimental [*dynamic* strategy](https://blog.bazel.build/2019/02/01/dynamic-spawn-scheduler.html), which races a remote execution and a worker execution. To enable the dynamic strategy, pass the [--experimental_spawn_scheduler](/reference/command-line-reference#flag--experimental_spawn_scheduler) flag. This strategy automatically enables workers, so there is no need to specify the `worker` strategy, but you can still use `local` or `sandboxed` as fallbacks.
-
-## Choosing number of workers
-
-The default number of worker instances per mnemonic is 4, but it can be adjusted with the [`worker_max_instances`](/reference/command-line-reference#flag--worker_max_instances) flag. There is a trade-off between making good use of the available CPUs and the amount of JIT compilation and cache hits you get. With more workers, more targets will pay start-up costs of running non-JITted code and hitting cold caches. If you have a small number of targets to build, a single worker may give the best trade-off between compilation speed and resource usage (for example, see [issue #8586](https://github.com/bazelbuild/bazel/issues/8586)). The `worker_max_instances` flag sets the maximum number of worker instances per mnemonic and flag set (see below), so in a mixed system you could end up using quite a lot of memory if you keep the default value. For incremental builds the benefit of multiple worker instances is even smaller.
-
-This graph shows the from-scratch compilation times for Bazel (target `//src:bazel`) on a 6-core hyper-threaded Intel Xeon 3.5 GHz Linux workstation with 64 GB of RAM. For each worker configuration, five clean builds are run and the average of the last four is taken.
-
-![Graph of performance improvements of clean builds](/docs/images/workers-clean-chart.png "Performance improvements of clean builds")
-
-**Figure 1.** Graph of performance improvements of clean builds.
-
-For this configuration, two workers give the fastest compile, though at only 14% improvement compared to one worker. One worker is a good option if you want to use less memory.
-
-Incremental compilation typically benefits even more. Clean builds are relatively rare, but changing a single file between compiles is common, in particular in test-driven development. The above example also includes some non-Java packaging actions that can overshadow the incremental compile time.
-
-Recompiling the Java sources only (`//src/main/java/com/google/devtools/build/lib/bazel:BazelServer_deploy.jar`) after changing an internal string constant in [AbstractContainerizingSandboxedSpawn.java](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/sandbox/AbstractContainerizingSandboxedSpawn.java) gives a 3x speed-up (average of 20 incremental builds with one warmup build discarded):
-
-![Graph of performance improvements of incremental builds](/docs/images/workers-incremental-chart.png "Performance improvements of incremental builds")
-
-**Figure 2.** Graph of performance improvements of incremental builds.
-
-The speed-up depends on the change being made. A speed-up of a factor of 6 is measured in the above situation when a commonly used constant is changed.
-
-## Modifying persistent workers
-
-You can pass the [`--worker_extra_flag`](/reference/command-line-reference#flag--worker_extra_flag) flag to specify start-up flags to workers, keyed by mnemonic.
For instance, passing `--worker_extra_flag=javac=--debug` turns on debugging for Javac only. Only one worker flag can be set per use of this flag, and only for one mnemonic. Workers are not just created separately for each mnemonic, but also for variations in their start-up flags. Each combination of mnemonic and start-up flags is combined into a `WorkerKey`, and for each `WorkerKey` up to `worker_max_instances` workers may be created. See the next section for how the action configuration can also specify set-up flags.
-
-You can use the [`--high_priority_workers`](/reference/command-line-reference#flag--high_priority_workers) flag to specify a mnemonic that should be run in preference to normal-priority mnemonics. This can help prioritize actions that are always in the critical path. If there are two or more high priority workers executing requests, all other workers are prevented from running. This flag can be used multiple times.
-
-Passing the [`--worker_sandboxing`](/reference/command-line-reference#flag--worker_sandboxing) flag makes each worker request use a separate sandbox directory for all its inputs. Setting up the [sandbox](/docs/sandboxing) takes some extra time, especially on macOS, but gives a better correctness guarantee.
-
-The [`--worker_quit_after_build`](/reference/command-line-reference#flag--worker_quit_after_build) flag is mainly useful for debugging and profiling. This flag forces all workers to quit once a build is done. You can also pass [`--worker_verbose`](/reference/command-line-reference#flag--worker_verbose) to get more output about what the workers are doing. This flag is reflected in the `verbosity` field in `WorkRequest`, allowing worker implementations to also be more verbose.
-
-Workers store their logs in the `<outputBase>/bazel-workers` directory, for example `/tmp/_bazel_larsrc/191013354bebe14fdddae77f2679c3ef/bazel-workers/worker-1-Javac.log`. The file name includes the worker id and the mnemonic. Since there can be more than one `WorkerKey` per mnemonic, you may see more than `worker_max_instances` log files for a given mnemonic.
-
-For Android builds, see details at the [Android Build Performance page](/docs/android-build-performance).
-
-## Implementing persistent workers
-
-See the [creating persistent workers](/docs/creating-workers) page for more information on how to make a worker.
-
-This example shows a Starlark configuration for a worker that uses JSON:
-
-```python
-args_file = ctx.actions.declare_file(ctx.label.name + "_args_file")
-ctx.actions.write(
-    output = args_file,
-    content = "\n".join(["-g", "-source", "1.5"] + ctx.files.srcs),
-)
-ctx.actions.run(
-    mnemonic = "SomeCompiler",
-    executable = "bin/some_compiler_wrapper",
-    inputs = inputs,
-    outputs = outputs,
-    arguments = ["-max_mem=4G", "@%s" % args_file.path],
-    execution_requirements = {
-        "supports-workers" : "1",
-        "requires-worker-protocol" : "json",
-    },
-)
-```
-
-With this definition, the first use of this action would start with executing the command line `/bin/some_compiler -max_mem=4G --persistent_worker`. A request to compile `Foo.java` would then look like:
-
-NOTE: While the protocol buffer specification uses "snake case" (`request_id`), the JSON protocol uses "camel case" (`requestId`). In this document, we will use camel case in the JSON examples, but snake case when talking about the field regardless of protocol.
-
-```json
-{
-  "arguments": [ "-g", "-source", "1.5", "Foo.java" ],
-  "inputs": [
-    { "path": "symlinkfarm/input1", "digest": "d49a..." },
-    { "path": "symlinkfarm/input2", "digest": "093d..." }
-  ]
-}
-```
-
-The worker receives this on `stdin` in newline-delimited JSON format (because `requires-worker-protocol` is set to JSON). The worker then performs the action, and sends a JSON-formatted `WorkResponse` to Bazel on its stdout. Bazel then parses this response and manually converts it to a `WorkResponse` proto. To communicate with the associated worker using binary-encoded protobuf instead of JSON, `requires-worker-protocol` would be set to `proto`, like this:
-
-```
-  execution_requirements = {
-    "supports-workers" : "1",
-    "requires-worker-protocol" : "proto"
-  }
-```
-
-If you do not include `requires-worker-protocol` in the execution requirements, Bazel will default the worker communication to use protobuf.
-
-Bazel derives the `WorkerKey` from the mnemonic and the shared flags, so if this configuration allowed changing the `max_mem` parameter, a separate worker would be spawned for each value used. This can lead to excessive memory consumption if too many variations are used.
-
-Each worker can currently only process one request at a time. The experimental [multiplex workers](/docs/multiplex-worker) feature allows using multiple threads, if the underlying tool is multithreaded and the wrapper is set up to understand this.
-
-In [this GitHub repo](https://github.com/Ubehebe/bazel-worker-examples), you can see example worker wrappers written in Java as well as in Python. If you are working in JavaScript or TypeScript, the [@bazel/worker package](https://www.npmjs.com/package/@bazel/worker) and [nodejs worker example](https://github.com/bazelbuild/rules_nodejs/tree/stable/examples/worker) might be helpful.
-
-## How do workers affect sandboxing?
-
-Using the `worker` strategy by default does not run the action in a [sandbox](/docs/sandboxing), similar to the `local` strategy. You can set the `--worker_sandboxing` flag to run all workers inside sandboxes, making sure each execution of the tool only sees the input files it's supposed to have. The tool may still leak information between requests internally, for instance through a cache. Using the `dynamic` strategy [requires workers to be sandboxed](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/exec/SpawnStrategyRegistry.java).
-
-To allow correct use of compiler caches with workers, a digest is passed along with each input file. Thus the compiler or the wrapper can check if the input is still valid without having to read the file.
-
-Even when using the input digests to guard against unwanted caching, sandboxed workers offer less strict sandboxing than a pure sandbox, because the tool may keep other internal state that has been affected by previous requests.
-
-Multiplex workers can only be sandboxed if the worker implementation supports it, and this sandboxing must be separately enabled with the `--experimental_worker_multiplex_sandboxing` flag. See more details in [the design doc](https://docs.google.com/document/d/1ncLW0hz6uDhNvci1dpzfEoifwTiNTqiBEm1vi-bIIRM/edit).
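As an illustration of how a wrapper can use those digests, the following hypothetical sketch keys a result cache on the request's arguments and input digests, so unchanged inputs are detected without re-reading any files:

```python
# Sketch of digest-based caching inside a worker wrapper. The digests
# carried by each WorkRequest identify input contents, so the wrapper
# can reuse an earlier result when nothing relevant has changed.
results_by_key = {}  # maps an (arguments, input-digests) key to a result

def input_key(request):
    # The inputs field carries {"path": ..., "digest": ...} pairs.
    return tuple(sorted((i["path"], i["digest"]) for i in request.get("inputs", [])))

def compile_with_cache(request, compile_fn):
    # compile_fn is a hypothetical stand-in for the real tool call.
    key = (tuple(request.get("arguments", [])), input_key(request))
    if key not in results_by_key:
        results_by_key[key] = compile_fn(request)
    return results_by_key[key]
```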
- -## Further reading - -For more information on persistent workers, see: - -* [Original persistent workers blog post](https://blog.bazel.build/2015/12/10/java-workers.html) -* [Haskell implementation description](https://www.tweag.io/blog/2019-09-25-bazel-ghc-persistent-worker-internship/) -* [Blog post by Mike Morearty](https://medium.com/@mmorearty/how-to-create-a-persistent-worker-for-bazel-7738bba2cabb) -* [Front End Development with Bazel: Angular/TypeScript and Persistent Workers - w/ Asana](https://www.youtube.com/watch?v=0pgERydGyqo) -* [Bazel strategies explained](https://jmmv.dev/2019/12/bazel-strategies.html) -* [Informative worker strategy discussion on the bazel-discuss mailing list](https://groups.google.com/forum/#!msg/bazel-discuss/oAEnuhYOPm8/ol7hf4KWJgAJ) diff --git a/6.5.0/remote/rbe.mdx b/6.5.0/remote/rbe.mdx deleted file mode 100644 index 27a41bc..0000000 --- a/6.5.0/remote/rbe.mdx +++ /dev/null @@ -1,32 +0,0 @@ ---- -title: 'Remote Execution Overview' ---- - - -This page covers the benefits, requirements, and options for running Bazel -with remote execution. - -By default, Bazel executes builds and tests on your local machine. Remote -execution of a Bazel build allows you to distribute build and test actions -across multiple machines, such as a datacenter. - -Remote execution provides the following benefits: - -* Faster build and test execution through scaling of nodes available - for parallel actions -* A consistent execution environment for a development team -* Reuse of build outputs across a development team - -Bazel uses an open-source -[gRPC protocol](https://github.com/bazelbuild/remote-apis) -to allow for remote execution and remote caching. - -For a list of commercially supported remote execution services as well as -self-service tools, see -[Remote Execution Services](https://www.bazel.build/remote-execution-services.html) - -## Requirements - -Remote execution of Bazel builds imposes a set of mandatory configuration -constraints on the build. For more information, see -[Adapting Bazel Rules for Remote Execution](/docs/remote-execution-rules). diff --git a/6.5.0/remote/rules.mdx b/6.5.0/remote/rules.mdx deleted file mode 100644 index 6cecf6a..0000000 --- a/6.5.0/remote/rules.mdx +++ /dev/null @@ -1,179 +0,0 @@ ---- -title: 'Adapting Bazel Rules for Remote Execution' ---- - - -This page is intended for Bazel users writing custom build and test rules -who want to understand the requirements for Bazel rules in the context of -remote execution. - -Remote execution allows Bazel to execute actions on a separate platform, such as -a datacenter. Bazel uses a -[gRPC protocol](https://github.com/bazelbuild/remote-apis/blob/main/build/bazel/remote/execution/v2/remote_execution.proto) -for its remote execution. You can try remote execution with -[bazel-buildfarm](https://github.com/bazelbuild/bazel-buildfarm), -an open-source project that aims to provide a distributed remote execution -platform. - -This page uses the following terminology when referring to different -environment types or *platforms*: - -* **Host platform** - where Bazel runs. -* **Execution platform** - where Bazel actions run. -* **Target platform** - where the build outputs (and some actions) run. - -## Overview - -When configuring a Bazel build for remote execution, you must follow the -guidelines described in this page to ensure the build executes remotely -error-free. 
This is due to the nature of remote execution, namely:
-
-* **Isolated build actions.** Build tools do not retain state and dependencies cannot leak between them.
-
-* **Diverse execution environments.** Local build configuration is not always suitable for remote execution environments.
-
-This page describes the issues that can arise when implementing custom Bazel build and test rules for remote execution and how to avoid them. It covers the following topics:
-
-* [Invoking build tools through toolchain rules](#toolchain-rules)
-* [Managing implicit dependencies](#manage-dependencies)
-* [Managing platform-dependent binaries](#manage-binaries)
-* [Managing configure-style WORKSPACE rules](#manage-workspace-rules)
-
-## Invoking build tools through toolchain rules
-
-A Bazel toolchain rule is a configuration provider that tells a build rule what build tools, such as compilers and linkers, to use and how to configure them using parameters defined by the rule's creator. A toolchain rule allows build and test rules to invoke build tools in a predictable, preconfigured manner that's compatible with remote execution. For example, use a toolchain rule instead of invoking build tools via the `PATH`, `JAVA_HOME`, or other local variables that may not be set to equivalent values (or at all) in the remote execution environment.
-
-Toolchain rules currently exist for Bazel build and test rules for [Scala](https://github.com/bazelbuild/rules_scala/blob/master/scala/scala_toolchain.bzl), [Rust](https://github.com/bazelbuild/rules_rust/blob/main/rust/toolchain.bzl), and [Go](https://github.com/bazelbuild/rules_go/blob/master/go/toolchains.rst), and new toolchain rules are under way for other languages and tools such as [bash](https://docs.google.com/document/d/e/2PACX-1vRCSB_n3vctL6bKiPkIa_RN_ybzoAccSe0ic8mxdFNZGNBJ3QGhcKjsL7YKf-ngVyjRZwCmhi_5KhcX/pub). If a toolchain rule does not exist for the tool your rule uses, consider [creating a toolchain rule](/docs/toolchains#creating-a-toolchain-rule); a minimal sketch of such a rule appears at the end of the next section.
-
-## Managing implicit dependencies
-
-If a build tool can access dependencies across build actions, those actions will fail when remotely executed because each remote build action is executed separately from others. Some build tools retain state across build actions and access dependencies that have not been explicitly included in the tool invocation, which will cause remotely executed build actions to fail.
-
-For example, when Bazel instructs a stateful compiler to locally build _foo_, the compiler retains references to foo's build outputs. When Bazel then instructs the compiler to build _bar_, which depends on _foo_, without explicitly stating that dependency in the BUILD file for inclusion in the compiler invocation, the action executes successfully as long as the same compiler instance executes for both actions (as is typical for local execution). However, since in a remote execution scenario each build action executes a separate compiler instance, compiler state and _bar_'s implicit dependency on _foo_ will be lost and the build will fail.
-
-To help detect and eliminate these dependency problems, Bazel 0.14.1 offers the local Docker sandbox, which has the same restrictions for dependencies as remote execution. Use the sandbox to prepare your build for remote execution by identifying and resolving dependency-related build errors. See [Troubleshooting Bazel Remote Execution with Docker Sandbox](/docs/remote-execution-sandbox) for more information.
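Here is the toolchain-rule sketch promised above. It is deliberately minimal and the names (`my_compiler_toolchain`, `compiler_path`, `default_flags`) are illustrative, but it shows the pattern: the compiler is an explicit, declared label rather than something found on the `PATH`.

```python
# Sketch of a toolchain rule that hands a build rule its compiler
# explicitly, instead of relying on PATH or other host state.
def _my_compiler_toolchain_impl(ctx):
    toolchain_info = platform_common.ToolchainInfo(
        # The compiler is a declared input, so remote executors see it.
        compiler_path = ctx.file.compiler,
        default_flags = ctx.attr.default_flags,
    )
    return [toolchain_info]

my_compiler_toolchain = rule(
    implementation = _my_compiler_toolchain_impl,
    attrs = {
        "compiler": attr.label(allow_single_file = True, mandatory = True),
        "default_flags": attr.string_list(),
    },
)
```

A `toolchain` target wrapping an instance of this rule is then registered and resolved by Bazel, and consuming rules read it via `ctx.toolchains`; see the toolchain-rule link above for the full wiring.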
-
-## Managing platform-dependent binaries
-
-Typically, a binary built on the host platform cannot safely execute on an
-arbitrary remote execution platform due to potentially mismatched dependencies.
-For example, the SingleJar binary supplied with Bazel targets the host platform.
-However, for remote execution, SingleJar must be compiled as part of the process
-of building your code so that it targets the remote execution platform. (See the
-[target selection logic](https://github.com/bazelbuild/bazel/blob/130aeadfd660336572c3da397f1f107f0c89aa8d/tools/jdk/BUILD#L115).)
-
-Do not ship binaries of build tools required by your build with your source code
-unless you are sure they will safely run on your execution platform. Instead, do
-one of the following:
-
-* Ship or externally reference the source code for the tool so that it can be
-  built for the remote execution platform.
-
-* Pre-install the tool into the remote execution environment (for example, a
-  toolchain container) if it's stable enough and use toolchain rules to run it
-  in your build.
-
-## Managing configure-style WORKSPACE rules
-
-Bazel's `WORKSPACE` rules can be used for probing the host platform for tools
-and libraries required by the build, which, for local builds, is also Bazel's
-execution platform. If the build explicitly depends on local build tools and
-artifacts, it will fail during remote execution if the remote execution platform
-is not identical to the host platform.
-
-The following actions performed by `WORKSPACE` rules are not compatible with
-remote execution:
-
-* **Building binaries.** Executing compilation actions in `WORKSPACE` rules
-  results in binaries that are incompatible with the remote execution platform
-  if different from the host platform.
-
-* **Installing `pip` packages.** `pip` packages installed via `WORKSPACE`
-  rules require that their dependencies be pre-installed on the host platform.
-  Such packages, built specifically for the host platform, will be
-  incompatible with the remote execution platform if different from the host
-  platform.
-
-* **Symlinking to local tools or artifacts.** Symlinks to tools or libraries
-  installed on the host platform created via `WORKSPACE` rules will cause the
-  build to fail on the remote execution platform as Bazel will not be able to
-  locate them. Instead, create symlinks using standard build actions so that
-  the symlinked tools and libraries are accessible from Bazel's `runfiles`
-  tree. Do not use [`repository_ctx.symlink`](/rules/lib/repository_ctx#symlink)
-  to symlink target files outside of the external repo directory.
-
-* **Mutating the host platform.** Avoid creating files outside of the Bazel
-  `runfiles` tree, creating environment variables, and similar actions, as
-  they may behave unexpectedly on the remote execution platform.
-
-To help find potential non-hermetic behavior, you can use
-[Workspace rules log](/docs/workspace-log).
-
-If an external dependency executes specific operations dependent on the host
-platform, you should split those operations between `WORKSPACE` and build
-rules as follows:
-
-* **Platform inspection and dependency enumeration.** These operations are
-  safe to execute locally via `WORKSPACE` rules, which can check which
-  libraries are installed, download packages that must be built, and prepare
-  required artifacts for compilation.
-  For remote execution, these rules must
-  also support using pre-checked artifacts to provide the information that
-  would normally be obtained during host platform inspection. Pre-checked
-  artifacts allow Bazel to describe dependencies as if they were local. Use
-  conditional statements or the `--override_repository` flag for this.
-
-* **Generating or compiling target-specific artifacts and platform mutation.**
-  These operations must be executed via regular build rules. Actions that
-  produce target-specific artifacts for external dependencies must execute
-  during the build.
-
-To more easily generate pre-checked artifacts for remote execution, you can use
-`WORKSPACE` rules to emit generated files. You can run those rules on each new
-execution environment, such as inside each toolchain container, and check the
-outputs of your remote execution build into your source repo to reference.
-
-For example, for TensorFlow's rules for [`cuda`](https://github.com/tensorflow/tensorflow/blob/master/third_party/gpus/cuda_configure.bzl)
-and [`python`](https://github.com/tensorflow/tensorflow/blob/master/third_party/py/python_configure.bzl),
-the `WORKSPACE` rules produce the following [`BUILD` files](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/third_party/toolchains/cpus/py).
-For local execution, files produced by checking the host environment are used.
-For remote execution, a [conditional statement](https://github.com/tensorflow/tensorflow/blob/master/third_party/py/python_configure.bzl#L304)
-on an environment variable allows the rule to use files that are checked into
-the repo.
-
-The `BUILD` files declare [`genrules`](https://github.com/tensorflow/tensorflow/blob/master/third_party/py/python_configure.bzl#L84)
-that can run both locally and remotely, and perform the necessary processing
-that was previously done via `repository_ctx.symlink` as shown [here](https://github.com/tensorflow/tensorflow/blob/d1ba01f81d8fa1d0171ba9ce871599063d5c7eb9/third_party/gpus/cuda_configure.bzl#L730).
diff --git a/6.5.0/remote/sandbox.mdx b/6.5.0/remote/sandbox.mdx
deleted file mode 100644
index ee9d79f..0000000
--- a/6.5.0/remote/sandbox.mdx
+++ /dev/null
@@ -1,259 +0,0 @@
----
-title: 'Troubleshooting Bazel Remote Execution with Docker Sandbox'
----
-
-
-Bazel builds that succeed locally may fail when executed remotely due to
-restrictions and requirements that do not affect local builds. The most common
-causes of such failures are described in
-[Adapting Bazel Rules for Remote Execution](/docs/remote-execution-rules).
-
-This page describes how to identify and resolve the most common issues that
-arise with remote execution using the Docker sandbox feature, which imposes
-restrictions upon the build equal to those of remote execution. This allows you
-to troubleshoot your build without the need for a remote execution service.
-
-The Docker sandbox feature mimics the restrictions of remote execution as
-follows:
-
-* **Build actions execute in toolchain containers.** You can use the same
-  toolchain containers to run your build locally and remotely via a service
-  supporting containerized remote execution.
-
-* **No extraneous data crosses the container boundary.** Only explicitly
-  declared inputs and outputs enter and leave the container, and only after
-  the associated build action successfully completes.
-
-* **Each action executes in a fresh container.** A new, unique container is
-  created for each spawned build action.
-
-Note: Builds take noticeably more time to complete when the Docker sandbox
-feature is enabled. This is normal.
-
-You can troubleshoot these issues using one of the following methods:
-
-* **[Troubleshooting natively.](#troubleshooting-natively)** With this method,
-  Bazel and its build actions run natively on your local machine. The Docker
-  sandbox feature imposes restrictions upon the build equal to those of remote
-  execution. However, this method will not detect local tools, states, and
-  data leaking into your build, which will cause problems with remote execution.
-
-* **[Troubleshooting in a Docker container.](#troubleshooting-docker-container)**
-  With this method, Bazel and its build actions run inside a Docker container,
-  which allows you to detect tools, states, and data leaking from the local
-  machine into the build in addition to imposing restrictions
-  equal to those of remote execution. This method provides insight into your
-  build even if portions of the build are failing. This method is experimental
-  and not officially supported.
-
-## Prerequisites
-
-Before you begin troubleshooting, do the following if you have not already done so:
-
-* Install Docker and configure the permissions required to run it.
-* Install Bazel 0.14.1 or later. Earlier versions do not support the Docker
-  sandbox feature.
-* Add the [bazel-toolchains](https://releases.bazel.build/bazel-toolchains.html)
-  repo, pinned to the latest release version, to your build's `WORKSPACE` file
-  as described [here](https://releases.bazel.build/bazel-toolchains.html).
-* Add flags to your `.bazelrc` file to enable the feature. Create the file in
-  the root directory of your Bazel project if it does not exist. The flags below
-  are a reference sample. Please see the latest
-  [`.bazelrc`](https://github.com/bazelbuild/bazel-toolchains/tree/master/bazelrc)
-  file in the bazel-toolchains repo and copy the values of the flags defined
-  there for config `docker-sandbox`.
-
-```
-# Docker Sandbox Mode
-build:docker-sandbox --host_javabase=<...>
-build:docker-sandbox --javabase=<...>
-build:docker-sandbox --crosstool_top=<...>
-build:docker-sandbox --experimental_docker_image=<...>
-build:docker-sandbox --spawn_strategy=docker --strategy=Javac=docker --genrule_strategy=docker
-build:docker-sandbox --define=EXECUTOR=remote
-build:docker-sandbox --experimental_docker_verbose
-build:docker-sandbox --experimental_enable_docker_sandbox
-```
-
-Note: The flags referenced in the `.bazelrc` file shown above are configured
-to run within the [`rbe-ubuntu16-04`](https://console.cloud.google.com/launcher/details/google/rbe-ubuntu16-04)
-container.
-
-If your rules require additional tools, do the following:
-
-1. Create a custom Docker container by installing tools using a [Dockerfile](https://docs.docker.com/engine/reference/builder/)
-   and [building](https://docs.docker.com/engine/reference/commandline/build/)
-   the image locally.
-
-2. Replace the value of the `--experimental_docker_image` flag above with the
-   name of your custom container image.
-
-
-## Troubleshooting natively
-
-This method executes Bazel and all of its build actions directly on the local
-machine and is a reliable way to confirm whether your build will succeed when
-executed remotely.
-
-However, with this method, locally installed tools, binaries, and data may leak
-into your build, especially if it uses [configure-style WORKSPACE rules](/docs/remote-execution-rules#manage-workspace-rules).
-Such leaks will cause problems with remote execution; to detect them,
-[troubleshoot in a Docker container](#troubleshooting-docker-container)
-in addition to troubleshooting natively.
-
-### Step 1: Run the build
-
-1. Add the `--config=docker-sandbox` flag to the Bazel command that executes
-   your build. For example:
-
-   ```posix-terminal
-   bazel --bazelrc=.bazelrc build --config=docker-sandbox <target>
-   ```
-
-2. Run the build and wait for it to complete. The build will run up to four
-   times slower than normal due to the Docker sandbox feature.
-
-You may encounter the following error:
-
-```none
-ERROR: 'docker' is an invalid value for docker spawn strategy.
-```
-
-If you do, run the build again with the `--experimental_docker_verbose` flag.
-This flag enables verbose error messages. This error is typically caused by a
-faulty Docker installation or lack of permissions to execute it under the
-current user account. See the [Docker documentation](https://docs.docker.com/install/linux/linux-postinstall/)
-for more information. If problems persist, skip ahead to
-[Troubleshooting in a Docker container](#troubleshooting-docker-container).
-
-### Step 2: Resolve detected issues
-
-The following are the most commonly encountered issues and their workarounds.
-
-* **A file, tool, binary, or resource referenced by the Bazel runfiles tree is
-  missing.** Confirm that all dependencies of the affected targets have been
-  [explicitly declared](/concepts/dependencies). See
-  [Managing implicit dependencies](/docs/remote-execution-rules#manage-dependencies)
-  for more information.
-
-* **A file, tool, binary, or resource referenced by an absolute path or the `PATH`
-  variable is missing.** Confirm that all required tools are installed within
-  the toolchain container and use [toolchain rules](/docs/toolchains) to properly
-  declare dependencies pointing to the missing resource. See
-  [Invoking build tools through toolchain rules](/docs/remote-execution-rules#invoking-build-tools-through-toolchain-rules)
-  for more information.
-
-* **A binary execution fails.** One of the build rules is referencing a binary
-  incompatible with the execution environment (the Docker container). See
-  [Managing platform-dependent binaries](/docs/remote-execution-rules#manage-binaries)
-  for more information. If you cannot resolve the issue, contact
-  [bazel-discuss@google.com](mailto:bazel-discuss@google.com) for help.
-
-* **A file from `@local_jdk` is missing or causing errors.** The Java binaries
-  on your local machine are leaking into the build while being incompatible with
-  it. Use [`java_toolchain`](/reference/be/java#java_toolchain)
-  in your rules and targets instead of `@local_jdk`. Contact
-  [bazel-discuss@google.com](mailto:bazel-discuss@google.com) if you need further help.
-
-* **Other errors.** Contact [bazel-discuss@google.com](mailto:bazel-discuss@google.com) for help.
-
-## Troubleshooting in a Docker container
-
-With this method, Bazel runs inside a host Docker container, and Bazel's build
-actions execute inside individual toolchain containers spawned by the Docker
-sandbox feature. The sandbox spawns a brand new toolchain container for each
-build action and only one action executes in each toolchain container.
-
-This method provides more granular control of tools installed in the host
-environment.
-By separating the execution of the build from the execution of its
-build actions and keeping the installed tooling to a minimum, you can verify
-whether your build has any dependencies on the local execution environment.
-
-### Step 1: Build the container
-
-Note: The commands below are tailored specifically for a `debian:stretch` base.
-For other bases, modify them as necessary.
-
-1. Create a `Dockerfile` that creates the Docker container and installs Bazel
-   with a minimal set of build tools:
-
-   ```
-   FROM debian:stretch
-
-   RUN apt-get update && apt-get install -y apt-transport-https curl software-properties-common git gcc gnupg2 g++ openjdk-8-jdk-headless python-dev zip wget vim
-
-   RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add -
-
-   RUN add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian $(lsb_release -cs) stable"
-
-   RUN apt-get update && apt-get install -y docker-ce
-
-   RUN wget https://releases.bazel.build/<bazel-version>/release/bazel-<bazel-version>-installer-linux-x86_64.sh -O ./bazel-installer.sh && chmod 755 ./bazel-installer.sh
-
-   RUN ./bazel-installer.sh
-   ```
-
-2. Build the container as `bazel_container`:
-
-   ```posix-terminal
-   docker build -t bazel_container - < Dockerfile
-   ```
-
-### Step 2: Start the container
-
-Start the Docker container using the command shown below. In the command,
-substitute the path to the source code on your host that you want to build.
-
-```posix-terminal
-docker run -it \
-  -v /var/run/docker.sock:/var/run/docker.sock \
-  -v /tmp:/tmp \
-  -v <your source code directory>:/src \
-  -w /src \
-  bazel_container \
-  /bin/bash
-```
-
-This command runs the container as root, mapping the docker socket, and mounting
-the `/tmp` directory. This allows Bazel to spawn other Docker containers and to
-use directories under `/tmp` to share files with those containers. Your source
-code is available at `/src` inside the container.
-
-The command intentionally starts from a `debian:stretch` base container that
-includes binaries incompatible with the `rbe-ubuntu16-04` container used as a
-toolchain container. If binaries from the local environment are leaking into the
-toolchain container, they will cause build errors.
-
-### Step 3: Test the container
-
-Run the following commands from inside the Docker container to test it:
-
-```posix-terminal
-docker ps
-
-bazel version
-```
-
-### Step 4: Run the build
-
-Run the build as shown below. The output user is root so that it corresponds to
-a directory that is accessible with the same absolute path from inside the host
-container in which Bazel runs, from the toolchain containers spawned by the Docker
-sandbox feature in which Bazel's build actions are running, and from the local
-machine on which the host and action containers run.
-
-```posix-terminal
-bazel --output_user_root=/tmp/bazel_docker_root --bazelrc=.bazelrc \
-  build --config=docker-sandbox <target>
-```
-
-### Step 5: Resolve detected issues
-
-You can resolve build failures as follows:
-
-* If the build fails with an "out of disk space" error, you can increase this
-  limit by starting the host container with the flag `--memory=XX` where `XX`
-  is the allocated disk space in gigabytes. This is experimental and may
-  result in unpredictable behavior.
-
-* If the build fails during the analysis or loading phases, one or more of
-  your build rules declared in the WORKSPACE file are not compatible with
-  remote execution.
-  See [Adapting Bazel Rules for Remote Execution](/docs/remote-execution-rules)
-  for possible causes and workarounds.
-
-* If the build fails for any other reason, see the troubleshooting steps in
-  [Step 2: Resolve detected issues](#start-container).
diff --git a/6.5.0/remote/workspace.mdx b/6.5.0/remote/workspace.mdx
deleted file mode 100644
index 521f078..0000000
--- a/6.5.0/remote/workspace.mdx
+++ /dev/null
@@ -1,126 +0,0 @@
----
-title: 'Finding Non-Hermetic Behavior in WORKSPACE Rules'
----
-
-
-In the following, a host machine is the machine where Bazel runs.
-
-When using remote execution, the actual build and/or test steps are not
-happening on the host machine, but are instead sent off to the remote execution
-system. However, the steps involved in resolving workspace rules are happening
-on the host machine. If your workspace rules access information about the
-host machine for use during execution, your build is likely to break due to
-incompatibilities between the environments.
-
-As part of [adapting Bazel rules for remote
-execution](/docs/remote-execution-rules), you need to find such workspace rules
-and fix them. This page describes how to find potentially problematic workspace
-rules using the workspace log.
-
-
-## Finding non-hermetic rules
-
-[Workspace rules](/reference/be/workspace) allow the developer to add dependencies to
-external workspaces, but they are rich enough to allow arbitrary processing to
-happen in the process. All related commands run locally and can be a
-potential source of non-hermeticity. Usually non-hermetic behavior is
-introduced through
-[`repository_ctx`](/rules/lib/repository_ctx), which allows interacting
-with the host machine.
-
-Starting with Bazel 0.18, you can get a log of some potentially non-hermetic
-actions by adding the flag `--experimental_workspace_rules_log_file=[PATH]` to
-your Bazel command. Here `[PATH]` is a filename under which the log will be
-created.
-
-Things to note:
-
-* The log captures the events as they are executed. If some steps are
-  cached, they will not show up in the log, so to get a full result, don't
-  forget to run `bazel clean --expunge` beforehand.
-
-* Sometimes functions might be re-executed, in which case the related
-  events will show up in the log multiple times.
-
-* Workspace rules currently only log Starlark events.
-
-  Note: These particular rules do not cause hermeticity concerns as long
-  as a hash is specified.
-
-To find what was executed during workspace initialization:
-
-1. Run `bazel clean --expunge`. This command will clean your local cache and
-   any cached repositories, ensuring that all initialization will be re-run.
-
-2. Add `--experimental_workspace_rules_log_file=/tmp/workspacelog` to your
-   Bazel command and run the build.
-
-   This produces a binary proto file listing messages of type
-   [WorkspaceEvent](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/bazel/debug/workspace_log.proto?q=WorkspaceEvent)
-
-3. Download the Bazel source code and navigate to the Bazel folder by using
-   the command below. You need the source code to be able to parse the
-   workspace log with the
-   [workspacelog parser](https://source.bazel.build/bazel/+/master:src/tools/workspacelog/).
-
-   ```posix-terminal
-   git clone https://github.com/bazelbuild/bazel.git
-
-   cd bazel
-   ```
-
-4. In the Bazel source code repo, convert the whole workspace log to text.
-
-   ```posix-terminal
-   bazel build src/tools/workspacelog:parser
-
-   bazel-bin/src/tools/workspacelog/parser --log_path=/tmp/workspacelog > /tmp/workspacelog.txt
-   ```
-
-5. The output may be quite verbose and include output from built-in Bazel
-   rules.
-
-   To exclude specific rules from the output, use the `--exclude_rule` option.
-   For example:
-
-   ```posix-terminal
-   bazel build src/tools/workspacelog:parser
-
-   bazel-bin/src/tools/workspacelog/parser --log_path=/tmp/workspacelog \
-       --exclude_rule "//external:local_config_cc" \
-       --exclude_rule "//external:dep" > /tmp/workspacelog.txt
-   ```
-
-6. Open `/tmp/workspacelog.txt` and check for unsafe operations.
-
-The log consists of
-[WorkspaceEvent](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/bazel/debug/workspace_log.proto?q=WorkspaceEvent)
-messages outlining certain potentially non-hermetic actions performed on a
-[`repository_ctx`](/rules/lib/repository_ctx).
-
-The actions that have been highlighted as potentially non-hermetic are as follows:
-
-* `execute`: executes an arbitrary command on the host environment. Check if
-  these may introduce any dependencies on the host environment.
-
-* `download`, `download_and_extract`: to ensure hermetic builds, make sure
-  that sha256 is specified.
-
-* `file`, `template`: this is not non-hermetic in itself, but may be a mechanism
-  for introducing dependencies on the host environment into the repository.
-  Ensure that you understand where the input comes from, and that it does not
-  depend on the host environment.
-
-* `os`: this is not non-hermetic in itself, but an easy way to get dependencies
-  on the host environment. A hermetic build would generally not call this.
-  In evaluating whether your usage is hermetic, keep in mind that this is
-  running on the host and not on the workers. Getting environment specifics
-  from the host is generally not a good idea for remote builds.
-
-* `symlink`: this is normally safe, but look for red flags. Any symlinks to
-  outside the repository or to an absolute path would cause problems on the
-  remote worker. If the symlink is created based on host machine properties
-  it would probably be problematic as well.
-
-* `which`: checking for programs installed on the host is usually problematic
-  since the workers may have different configurations.
diff --git a/6.5.0/rules/bzl-style.mdx b/6.5.0/rules/bzl-style.mdx
deleted file mode 100644
index 576842e..0000000
--- a/6.5.0/rules/bzl-style.mdx
+++ /dev/null
@@ -1,210 +0,0 @@
----
-title: '.bzl style guide'
----
-
-
-This page covers basic style guidelines for Starlark and also includes
-information on macros and rules.
-
-[Starlark](/rules/language) is a
-language that defines how software is built, and as such it is both a
-programming and a configuration language.
-
-You will use Starlark to write `BUILD` files, macros, and build rules. Macros and
-rules are essentially meta-languages - they define how `BUILD` files are written.
-`BUILD` files are intended to be simple and repetitive.
-
-All software is read more often than it is written. This is especially true for
-Starlark, as engineers read `BUILD` files to understand dependencies of their
-targets and details of their builds. This reading will often happen in passing,
-in a hurry, or in parallel to accomplishing some other task. Consequently,
-simplicity and readability are very important so that users can parse and
-comprehend `BUILD` files quickly.
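-
-As a concrete (and hypothetical) illustration of this spirit, a `BUILD` file
-that spells everything out is trivial to scan, even though it repeats itself;
-the targets below are invented for the example:
-
-```python
-cc_library(
-    name = "auth",
-    srcs = ["auth.cc"],
-    hdrs = ["auth.h"],
-    deps = ["//net:http"],
-)
-
-cc_test(
-    name = "auth_test",
-    srcs = ["auth_test.cc"],
-    deps = [":auth"],
-)
-```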
-
-When a user opens a `BUILD` file, they quickly want to know the list of targets in
-the file; or review the list of sources of that C++ library; or remove a
-dependency from that Java binary. Each time you add a layer of abstraction, you
-make it harder for a user to do these tasks.
-
-`BUILD` files are also analyzed and updated by many different tools. Tools may not
-be able to edit your `BUILD` file if it uses abstractions. Keeping your `BUILD`
-files simple will allow you to get better tooling. As a code base grows, it
-becomes more and more common to make changes across many `BUILD` files in order to
-update a library or do a cleanup.
-
-Important: Do not create a variable or macro just to avoid some amount of
-repetition in `BUILD` files. Your `BUILD` file should be easily readable both by
-developers and tools. The
-[DRY](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself) principle doesn't
-really apply here.
-
-## General advice
-
-* Use [Buildifier](https://github.com/bazelbuild/buildtools/tree/master/buildifier#linter)
-  as a formatter and linter.
-* Follow [testing guidelines](/rules/testing).
-
-## Style
-
-### Python style
-
-When in doubt, follow the
-[PEP 8 style guide](https://www.python.org/dev/peps/pep-0008/) where possible.
-In particular, use four rather than two spaces for indentation to follow the
-Python convention.
-
-Since
-[Starlark is not Python](/rules/language#differences-with-python),
-some aspects of Python style do not apply. For example, PEP 8 advises that
-comparisons to singletons be done with `is`, which is not an operator in
-Starlark.
-
-
-### Docstring
-
-Document files and functions using [docstrings](https://github.com/bazelbuild/buildtools/blob/master/WARNINGS.md#function-docstring).
-Use a docstring at the top of each `.bzl` file, and a docstring for each public
-function.
-
-### Document rules and aspects
-
-Rules and aspects, along with their attributes, as well as providers and their
-fields, should be documented using the `doc` argument.
-
-### Naming convention
-
-* Variables and function names use lowercase with words separated by
-  underscores (`[a-z][a-z0-9_]*`), such as `cc_library`.
-* Top-level private values start with one underscore. Bazel enforces that
-  private values cannot be used from other files. Local variables should not
-  use the underscore prefix.
-
-### Line length
-
-As in `BUILD` files, there is no strict line length limit as labels can be long.
-When possible, try to use at most 79 characters per line (following Python's
-style guide, [PEP 8](https://www.python.org/dev/peps/pep-0008/)). This guideline
-should not be enforced strictly: editors should display more than 80 columns,
-automated changes will frequently introduce longer lines, and humans shouldn't
-spend time splitting lines that are already readable.
-
-### Keyword arguments
-
-In keyword arguments, spaces around the equal sign are preferred:
-
-```python
-def fct(name, srcs):
-    filtered_srcs = my_filter(source = srcs)
-    native.cc_library(
-        name = name,
-        srcs = filtered_srcs,
-        testonly = True,
-    )
-```
-
-### Boolean values
-
-Prefer values `True` and `False` (rather than `1` and `0`) for boolean values
-(such as when using a boolean attribute in a rule).
-
-### Use print only for debugging
-
-Do not use the `print()` function in production code; it is only intended for
-debugging, and will spam all direct and indirect users of your `.bzl` file.
-The only exception is that you may submit code that uses `print()` if it is
-disabled by default and can only be enabled by editing the source -- for
-example, if all uses of `print()` are guarded by `if DEBUG:` where `DEBUG` is
-hardcoded to `False`. Be mindful of whether these statements are useful enough
-to justify their impact on readability.
-
-## Macros
-
-A macro is a function which instantiates one or more rules during the loading
-phase. In general, use rules whenever possible instead of macros. The build
-graph seen by the user is not the same as the one used by Bazel during the
-build - macros are expanded *before Bazel does any build graph analysis.*
-
-Because of this, when something goes wrong, the user will need to understand
-your macro's implementation to troubleshoot build problems. Additionally, `bazel
-query` results can be hard to interpret because targets shown in the results
-come from macro expansion. Finally, aspects are not aware of macros, so tooling
-depending on aspects (IDEs and others) might fail.
-
-A safe use for macros is for defining additional targets intended to be
-referenced directly at the Bazel CLI or in BUILD files: In that case, only the
-*end users* of those targets need to know about them, and any build problems
-introduced by macros are never far from their usage.
-
-For macros that define generated targets (implementation details of the macro
-which are not supposed to be referred to at the CLI or depended on by targets
-not instantiated by that macro), follow these best practices:
-
-* A macro should take a `name` argument and define a target with that name.
-  That target becomes that macro's *main target*.
-* Generated targets, that is all other targets defined by a macro, should:
-  * Have their names prefixed by `<name>` or `_<name>`. For example, using
-    `name = '%s_bar' % (name)`.
-  * Have restricted visibility (`//visibility:private`), and
-  * Have a `manual` tag to avoid expansion in wildcard targets (`:all`,
-    `...`, `:*`, etc).
-* The `name` should only be used to derive names of targets defined by the
-  macro, and not for anything else. For example, don't use the name to derive
-  a dependency or input file that is not generated by the macro itself.
-* All the targets created in the macro should be coupled in some way to the
-  main target.
-* Keep the parameter names in the macro consistent. If a parameter is passed
-  as an attribute value to the main target, keep its name the same. If a macro
-  parameter serves the same purpose as a common rule attribute, such as
-  `deps`, name it as you would the attribute (see below).
-* When calling a macro, use only keyword arguments. This is consistent with
-  rules, and greatly improves readability.
-
-Engineers often write macros when the Starlark API of relevant rules is
-insufficient for their specific use case, regardless of whether the rule is
-defined within Bazel in native code, or in Starlark. If you're facing this
-problem, ask the rule author if they can extend the API to accomplish your
-goals.
-
-As a rule of thumb, the more macros resemble the rules, the better.
-
-See also [macros](/rules/macros#conventions).
-
-## Rules
-
-* Rules, aspects, and their attributes should use lower_case names ("snake
-  case").
-* Rule names are nouns that describe the main kind of artifact produced by the
-  rule, from the point of view of its dependencies (or for leaf rules, the
-  user). This is not necessarily a file suffix.
-  For instance, a rule that
-  produces C++ artifacts meant to be used as Python extensions might be called
-  `py_extension`. For most languages, typical rules include:
-  * `*_library` - a compilation unit or "module".
-  * `*_binary` - a target producing an executable or a deployment unit.
-  * `*_test` - a test target. This can include multiple tests. Expect all
-    tests in a `*_test` target to be variations on the same theme, for
-    example, testing a single library.
-  * `*_import` - a target encapsulating a pre-compiled artifact, such as a
-    `.jar`, or a `.dll` that is used during compilation.
-* Use consistent names and types for attributes. Some generally applicable
-  attributes include:
-  * `srcs`: `label_list`, allowing files: source files, typically
-    human-authored.
-  * `deps`: `label_list`, typically *not* allowing files: compilation
-    dependencies.
-  * `data`: `label_list`, allowing files: data files, such as test data.
-  * `runtime_deps`: `label_list`: runtime dependencies that are not needed
-    for compilation.
-* For any attributes with non-obvious behavior (for example, string templates
-  with special substitutions, or tools that are invoked with specific
-  requirements), provide documentation using the `doc` keyword argument to the
-  attribute's declaration (`attr.label_list()` or similar).
-* Rule implementation functions should almost always be private functions
-  (named with a leading underscore). A common style is to give the
-  implementation function for `myrule` the name `_myrule_impl`.
-* Pass information between your rules using a well-defined
-  [provider](/rules/rules#providers) interface. Declare and document provider
-  fields.
-* Design your rule with extensibility in mind. Consider that other rules might
-  want to interact with your rule, access your providers, and reuse the
-  actions you create.
-* Follow [performance guidelines](/rules/performance) in your rules.
diff --git a/6.5.0/rules/challenges.mdx b/6.5.0/rules/challenges.mdx
deleted file mode 100644
index fa7c68e..0000000
--- a/6.5.0/rules/challenges.mdx
+++ /dev/null
@@ -1,222 +0,0 @@
----
-title: 'Challenges of Writing Rules'
----
-
-
-This page gives a high-level overview of the specific issues and challenges
-of writing efficient Bazel rules.
-
-## Summary Requirements
-
-* Assumption: Aim for Correctness, Throughput, Ease of Use & Latency
-* Assumption: Large Scale Repositories
-* Assumption: BUILD-like Description Language
-* Historic: Hard Separation between Loading, Analysis, and Execution is
-  Outdated, but still affects the API
-* Intrinsic: Remote Execution and Caching are Hard
-* Intrinsic: Using Change Information for Correct and Fast Incremental Builds
-  requires Unusual Coding Patterns
-* Intrinsic: Avoiding Quadratic Time and Memory Consumption is Hard
-
-## Assumptions
-
-Here are some assumptions made about the build system, such as the need for
-correctness, ease of use, throughput, and large scale repositories. The
-following sections address these assumptions and offer guidelines to ensure
-rules are written in an effective manner.
-
-### Aim for correctness, throughput, ease of use & latency
-
-We assume that the build system needs to be first and foremost correct with
-respect to incremental builds. For a given source tree, the output of the
-same build should always be the same, regardless of what the output tree looks
-like.
-To a first approximation, this means Bazel needs to know every single
-input that goes into a given build step, such that it can rerun that step if any
-of the inputs change. There are limits to how correct Bazel can get, as it leaks
-some information such as date / time of the build, and ignores certain types of
-changes such as changes to file attributes. [Sandboxing](/docs/sandboxing)
-helps ensure correctness by preventing reads to undeclared input files. Besides
-the intrinsic limits of the system, there are a few known correctness issues,
-most of which are related to Fileset or the C++ rules, which are both hard
-problems. We have long-term efforts to fix these.
-
-The second goal of the build system is to have high throughput; we are
-permanently pushing the boundaries of what can be done within the current
-machine allocation for a remote execution service. If the remote execution
-service gets overloaded, nobody can get work done.
-
-Ease of use comes next. Of multiple correct approaches with the same (or
-similar) footprint of the remote execution service, we choose the one that is
-easier to use.
-
-Latency denotes the time it takes from starting a build to getting the intended
-result, whether that is a test log from a passing or failing test, or an error
-message that a `BUILD` file has a typo.
-
-Note that these goals often overlap; latency is as much a function of throughput
-of the remote execution service as correctness is relevant for ease of use.
-
-### Large scale repositories
-
-The build system needs to operate at the scale of large repositories where large
-scale means that it does not fit on a single hard drive, so it is impossible to
-do a full checkout on virtually all developer machines. A medium-sized build
-will need to read and parse tens of thousands of `BUILD` files, and evaluate
-hundreds of thousands of globs. While it is theoretically possible to read all
-`BUILD` files on a single machine, we have not yet been able to do so within a
-reasonable amount of time and memory. As such, it is critical that `BUILD` files
-can be loaded and parsed independently.
-
-### BUILD-like description language
-
-In this context, we assume a configuration language that is
-roughly similar to `BUILD` files in declaration of library and binary rules
-and their interdependencies. `BUILD` files can be read and parsed independently,
-and we avoid even looking at source files whenever we can (except for
-existence).
-
-## Historic
-
-There are differences between Bazel versions that cause challenges and some
-of these are outlined in the following sections.
-
-### Hard separation between loading, analysis, and execution is outdated but still affects the API
-
-Technically, it is sufficient for a rule to know the input and output files of
-an action just before the action is sent to remote execution. However, the
-original Bazel code base had a strict separation of loading packages, then
-analyzing rules using a configuration (command-line flags, essentially), and
-only then running any actions. This distinction is still part of the rules API
-today, even though the core of Bazel no longer requires it (more details below).
-
-That means that the rules API requires a declarative description of the rule
-interface (what attributes it has, types of attributes). There are some
-exceptions where the API allows custom code to run during the loading phase to
-compute implicit names of output files and implicit values of attributes.
-For
-example, a `java_library` rule named 'foo' implicitly generates an output named
-'libfoo.jar', which can be referenced from other rules in the build graph.
-
-Furthermore, the analysis of a rule cannot read any source files or inspect the
-output of an action; instead, it needs to generate a partial directed bipartite
-graph of build steps and output file names that is only determined from the rule
-itself and its dependencies.
-
-## Intrinsic
-
-There are some intrinsic properties that make writing rules challenging and
-some of the most common ones are described in the following sections.
-
-### Remote execution and caching are hard
-
-Remote execution and caching improve build times in large repositories by
-roughly two orders of magnitude compared to running the build on a single
-machine. However, the scale at which it needs to perform is staggering: Google's
-remote execution service is designed to handle a huge number of requests per
-second, and the protocol carefully avoids unnecessary roundtrips as well as
-unnecessary work on the service side.
-
-At this time, the protocol requires that the build system knows all inputs to a
-given action ahead of time; the build system then computes a unique action
-fingerprint, and asks the scheduler for a cache hit. If a cache hit is found,
-the scheduler replies with the digests of the output files; the files themselves
-are addressed by digest later on. However, this imposes restrictions on the
-Bazel rules, which need to declare all input files ahead of time.
-
-### Using change information for correct and fast incremental builds requires unusual coding patterns
-
-Above, we argued that in order to be correct, Bazel needs to know all the input
-files that go into a build step in order to detect whether that build step is
-still up-to-date. The same is true for package loading and rule analysis, and we
-have designed [Skyframe](/reference/skyframe) to handle this
-in general. Skyframe is a graph library and evaluation framework that takes a
-goal node (such as 'build //foo with these options'), and breaks it down into
-its constituent parts, which are then evaluated and combined to yield this
-result. As part of this process, Skyframe reads packages, analyzes rules, and
-executes actions.
-
-At each node, Skyframe tracks exactly which nodes any given node used to compute
-its own output, all the way from the goal node down to the input files (which
-are also Skyframe nodes). Having this graph explicitly represented in memory
-allows the build system to identify exactly which nodes are affected by a given
-change to an input file (including creation or deletion of an input file), doing
-the minimal amount of work to restore the output tree to its intended state.
-
-As part of this, each node performs a dependency discovery process. Each
-node can declare dependencies, and then use the contents of those dependencies
-to declare even further dependencies. In principle, this maps well to a
-thread-per-node model. However, medium-sized builds contain hundreds of
-thousands of Skyframe nodes, which isn't easily possible with current Java
-technology (and for historical reasons, we're currently tied to using Java, so
-no lightweight threads and no continuations).
-
-Instead, Bazel uses a fixed-size thread pool. However, that means that if a node
-declares a dependency that isn't available yet, we may have to abort that
-evaluation and restart it (possibly in another thread) when the dependency is
-available.
-This, in turn, means that nodes should not do this excessively; a
-node that declares N dependencies serially can potentially be restarted N times,
-costing O(N^2) time. Instead, we aim for up-front bulk declaration of
-dependencies, which sometimes requires reorganizing the code, or even splitting
-a node into multiple nodes to limit the number of restarts.
-
-Note that this technology isn't currently available in the rules API; instead,
-the rules API is still defined using the legacy concepts of loading, analysis,
-and execution phases. However, a fundamental restriction is that all accesses to
-other nodes have to go through the framework so that it can track the
-corresponding dependencies. Regardless of the language in which the build system
-is implemented or in which the rules are written (they don't have to be the
-same), rule authors must not use standard libraries or patterns that bypass
-Skyframe. For Java, that means avoiding `java.io.File` as well as any form of
-reflection, and any library that does either. Libraries that support dependency
-injection of these low-level interfaces still need to be set up correctly for
-Skyframe.
-
-This strongly suggests avoiding exposing rule authors to a full language runtime
-in the first place. The danger of accidental use of such APIs is just too big -
-several Bazel bugs in the past were caused by rules using unsafe APIs, even
-though the rules were written by the Bazel team or other domain experts.
-
-### Avoiding quadratic time and memory consumption is hard
-
-To make matters worse, apart from the requirements imposed by Skyframe, the
-historical constraints of using Java, and the outdatedness of the rules API,
-accidentally introducing quadratic time or memory consumption is a fundamental
-problem in any build system based on library and binary rules. There are two
-very common patterns that introduce quadratic memory consumption (and therefore
-quadratic time consumption).
-
-1. Chains of Library Rules
-
-Consider the case of a chain of library rules: A depends on B, B depends on C,
-and so on. Then, we want to compute some property over the transitive closure of
-these rules, such as the Java runtime classpath, or the C++ linker command for
-each library. Naively, we might take a standard list implementation; however,
-this already introduces quadratic memory consumption: the first library
-contains one entry on the classpath, the second two, the third three, and so
-on, for a total of 1+2+3+...+N = O(N^2) entries.
-
-2. Binary Rules Depending on the Same Library Rules
-
-Consider the case of a set of binaries that depend on the same library
-rules — such as if you have a number of test rules that test the same
-library code. Let's say out of N rules, half the rules are binary rules, and
-the other half library rules. Now consider that each binary makes a copy of
-some property computed over the transitive closure of library rules, such as
-the Java runtime classpath, or the C++ linker command line. For example, it
-could expand the command line string representation of the C++ link action. N/2
-copies of N/2 elements is O(N^2) memory.
-
-#### Custom collections classes to avoid quadratic complexity
-
-Bazel is heavily affected by both of these scenarios, so we introduced a set of
-custom collection classes that effectively compress the information in memory by
-avoiding the copy at each step.
-Almost all of these data structures have set
-semantics, so we called the resulting data structure a
-[depset](/rules/lib/depset)
-(also known as `NestedSet` in the internal implementation). The majority of
-changes to reduce Bazel's memory consumption over the past several years were
-changes to use depsets instead of whatever was previously used.
-
-Unfortunately, usage of depsets does not automatically solve all the issues;
-in particular, even just iterating over a depset in each rule re-introduces
-quadratic time consumption. Internally, `NestedSet` also has some helper methods
-to facilitate interoperability with normal collections classes; unfortunately,
-accidentally passing a `NestedSet` to one of these methods leads to copying
-behavior, and reintroduces quadratic memory consumption.
diff --git a/6.5.0/rules/deploying.mdx b/6.5.0/rules/deploying.mdx
deleted file mode 100644
index d21593a..0000000
--- a/6.5.0/rules/deploying.mdx
+++ /dev/null
@@ -1,284 +0,0 @@
----
-title: 'Deploying Rules'
----
-
-
-This page is for rule writers who are planning to make their rules available
-to others.
-
-## Hosting and naming rules
-
-New rules should go into their own GitHub repository under your organization.
-Contact the [bazel-dev mailing list](https://groups.google.com/forum/#!forum/bazel-dev)
-if you feel like your rules belong in the [bazelbuild](https://github.com/bazelbuild)
-organization.
-
-Repository names for Bazel rules are standardized on the following format:
-`$ORGANIZATION/rules_$NAME`.
-See [examples on GitHub](https://github.com/search?q=rules+bazel&type=Repositories).
-For consistency, you must follow this same format when publishing your Bazel rules.
-
-Make sure to use a descriptive GitHub repository description and `README.md`
-title, for example:
-
-* Repository name: `bazelbuild/rules_go`
-* Repository description: *Go rules for Bazel*
-* Repository tags: `golang`, `bazel`
-* `README.md` header: *Go rules for [Bazel](https://bazel.build)*
-  (note the link to https://bazel.build which will guide users who are unfamiliar
-  with Bazel to the right place)
-
-Rules can be grouped either by language (such as Scala) or platform
-(such as Android).
-
-## Repository content
-
-Every rule repository should have a certain layout so that users can quickly
-understand new rules.
-
-For example, when writing new rules for the (make-believe)
-`mockascript` language, the rule repository would have the following structure:
-
-```
-/
-  LICENSE
-  README
-  WORKSPACE
-  mockascript/
-    constraints/
-      BUILD
-    runfiles/
-      BUILD
-      runfiles.mocs
-    BUILD
-    defs.bzl
-  tests/
-    BUILD
-    some_test.sh
-    another_test.py
-  examples/
-    BUILD
-    bin.mocs
-    lib.mocs
-    test.mocs
-```
-
-### WORKSPACE
-
-In the project's `WORKSPACE`, you should define the name that users will use
-to reference your rules. If your rules belong to the
-[bazelbuild](https://github.com/bazelbuild) organization, you must use
-`rules_<lang>` (such as `rules_mockascript`). Otherwise, you should name your
-repository `<org>_rules_<lang>` (such as `build_stack_rules_proto`). Please contact
-the [bazel-dev mailing list](https://groups.google.com/forum/#!forum/bazel-dev)
-if you feel like your rules should follow the convention for rules in the
-[bazelbuild](https://github.com/bazelbuild) organization.
-
-In the following sections, assume the repository belongs to the
-[bazelbuild](https://github.com/bazelbuild) organization.
-
-```
-workspace(name = "rules_mockascript")
-```
-
-### README
-
-At the top level, there should be a `README` that contains (at least) what
-users will need to copy-paste into their `WORKSPACE` file to use your rule.
-In general, this will be a `http_archive` pointing to your GitHub release and
-a macro call that downloads/configures any tools your rule needs. For example,
-for the [Go
-rules](https://github.com/bazelbuild/rules_go#setup), this
-looks like:
-
-```
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-http_archive(
-    name = "rules_go",
-    urls = ["https://github.com/bazelbuild/rules_go/releases/download/0.18.5/rules_go-0.18.5.tar.gz"],
-    sha256 = "a82a352bffae6bee4e95f68a8d80a70e87f42c4741e6a448bec11998fcc82329",
-)
-load("@rules_go//go:deps.bzl", "go_rules_dependencies", "go_register_toolchains")
-go_rules_dependencies()
-go_register_toolchains()
-```
-
-If your rules depend on another repository's rules, specify that in the
-rules documentation (for example, see the
-[Skydoc rules](https://skydoc.bazel.build/docs/getting_started_stardoc.html),
-which depend on the Sass rules), and provide a `WORKSPACE`
-macro that will download all dependencies (see `rules_go` above).
-
-### Rules
-
-Often there will be multiple rules provided by your repository. Create a
-directory named by the language and provide an entry point - a `defs.bzl` file
-exporting all rules (also include a `BUILD` file so the directory is a package).
-For `rules_mockascript` that means there will be a directory named
-`mockascript`, and a `BUILD` file and a `defs.bzl` file inside:
-
-```
-/
-  mockascript/
-    BUILD
-    defs.bzl
-```
-
-### Constraints
-
-If your rule defines
-[toolchain](/docs/toolchains) rules,
-it's possible that you'll need to define custom `constraint_setting`s and/or
-`constraint_value`s. Put these into a `//<lang>/constraints` package. Your
-directory structure will look like this:
-
-```
-/
-  mockascript/
-    constraints/
-      BUILD
-    BUILD
-    defs.bzl
-```
-
-Please read
-[github.com/bazelbuild/platforms](https://github.com/bazelbuild/platforms)
-for best practices, and to see what constraints are already present, and
-consider contributing your constraints there if they are language independent.
-Be mindful of introducing custom constraints, as all users of your rules will
-use them to perform platform specific logic in their `BUILD` files (for example,
-using [selects](/reference/be/functions#select)).
-With custom constraints, you define a language that the whole Bazel ecosystem
-will speak.
-
-### Runfiles library
-
-If your rule provides a standard library for accessing runfiles, it should be
-in the form of a library target located at `//<lang>/runfiles` (an abbreviation
-of `//<lang>/runfiles:runfiles`). User targets that need to access their data
-dependencies will typically add this target to their `deps` attribute.
-
-### Repository rules
-
-#### Dependencies
-
-Your rules might have external dependencies. To make depending on your rules
-simpler, please provide a `WORKSPACE` macro that will declare dependencies on
-those external dependencies. Do not declare dependencies of tests there, only
-dependencies that rules require to work. Put development dependencies into the
-`WORKSPACE` file.
-
-Create a file named `<lang>/repositories.bzl` and provide a single entry point
-macro named `rules_<lang>_dependencies`.
-Our directory will look as follows:
-
-```
-/
-  mockascript/
-    constraints/
-      BUILD
-    BUILD
-    defs.bzl
-  repositories.bzl
-```
-
-
-#### Registering toolchains
-
-Your rules might also register toolchains. Please provide a separate `WORKSPACE`
-macro that registers these toolchains. This way users can decide to omit the
-previous macro and control dependencies manually, while still being allowed to
-register toolchains.
-
-Therefore add a `WORKSPACE` macro named `rules_<lang>_toolchains` into the
-`<lang>/repositories.bzl` file.
-
-Note that in order to resolve toolchains in the analysis phase Bazel needs to
-analyze all `toolchain` targets that are registered. Bazel will not need to
-analyze all targets referenced by the `toolchain.toolchain` attribute. If in order
-to register toolchains you need to perform complex computation in the
-repository, consider splitting the repository with `toolchain` targets from the
-repository with `<lang>_toolchain` targets. The former will always be fetched, and
-the latter will only be fetched when the user actually needs to build `<lang>` code.
-
-
-#### Release snippet
-
-In your release announcement provide a snippet that your users can copy-paste
-into their `WORKSPACE` file. This snippet in general will look as follows:
-
-```
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-http_archive(
-    name = "rules_<lang>",
-    urls = ["<url_to_the_release.zip>"],
-    sha256 = "<checksum>",
-)
-load("@rules_<lang>//<lang>:repositories.bzl", "rules_<lang>_dependencies", "rules_<lang>_toolchains")
-rules_<lang>_dependencies()
-rules_<lang>_toolchains()
-```
-
-
-### Tests
-
-There should be tests that verify that the rules are working as expected. This
-can either be in the standard location for the language the rules are for or a
-`tests/` directory at the top level.
-
-### Examples (optional)
-
-It is useful to users to have an `examples/` directory that shows users a couple
-of basic ways that the rules can be used.
-
-## Testing
-
-Set up Travis as described in their [getting started
-docs](https://docs.travis-ci.com/user/getting-started/). Then add a
-`.travis.yml` file to your repository with the following content:
-
-```
-dist: xenial  # Ubuntu 16.04
-
-# On trusty (or later) images, the Bazel apt repository can be used.
-addons:
-  apt:
-    sources:
-    - sourceline: 'deb [arch=amd64] http://storage.googleapis.com/bazel-apt stable jdk1.8'
-      key_url: 'https://bazel.build/bazel-release.pub.gpg'
-    packages:
-    - bazel
-
-script:
-  - bazel build //...
-  - bazel test //...
-```
-
-If your repository is under the [bazelbuild organization](https://github.com/bazelbuild),
-you can [ask to add](https://github.com/bazelbuild/continuous-integration/issues/new?template=adding-your-project-to-bazel-ci.md&title=Request+to+add+new+project+%5BPROJECT_NAME%5D&labels=new-project)
-it to [ci.bazel.build](http://ci.bazel.build).
-
-## Documentation
-
-See the [Stardoc documentation](https://github.com/bazelbuild/stardoc) for
-instructions on how to comment your rules so that documentation can be generated
-automatically.
-
-## FAQs
-
-### Why can't we add our rule to the main Bazel GitHub repository?
-
-We want to decouple rules from Bazel releases as much as possible. It's clearer
-who owns individual rules, reducing the load on Bazel developers. For our users,
-decoupling makes it easier to modify, upgrade, downgrade, and replace rules.
-Contributing to rules can be lighter weight than contributing to Bazel
-(depending on the rules), including full submit access to the corresponding
-GitHub repository. Getting submit access to Bazel itself is a much more involved
-process.
-
-The downside is a more complicated one-time installation process for our users:
-they have to copy-paste a rule into their `WORKSPACE` file, as shown in the
-`README.md` section above.
-
-We used to have all of the rules in the Bazel repository (under
-`//tools/build_rules` or `//tools/build_defs`). We still have a couple rules
-there, but we are working on moving the remaining rules out.
diff --git a/6.5.0/rules/errors/read-only-variable.mdx b/6.5.0/rules/errors/read-only-variable.mdx
deleted file mode 100644
index 3d5ffeb..0000000
--- a/6.5.0/rules/errors/read-only-variable.mdx
+++ /dev/null
@@ -1,29 +0,0 @@
----
-title: 'Error: Variable x is read only'
----
-
-
-A global variable cannot be reassigned. It will always point to the same object.
-However, its content might change if the value is mutable (for example, the
-content of a list). Local variables don't have this restriction.
-
-```python
-a = [1, 2]
-
-a[1] = 3
-
-b = 3
-
-b = 4  # forbidden
-```
-
-`ERROR: /path/ext.bzl:7:1: Variable b is read only`
-
-You will get a similar error if you try to redefine a function (function
-overloading is not supported), for example:
-
-```python
-def foo(x): return x + 1
-
-def foo(x, y): return x + y  # forbidden
-```
diff --git a/6.5.0/rules/faq.mdx b/6.5.0/rules/faq.mdx
deleted file mode 100644
index 8c8e01b..0000000
--- a/6.5.0/rules/faq.mdx
+++ /dev/null
@@ -1,79 +0,0 @@
----
-title: 'Frequently Asked Questions'
----
-
-
-These are some common issues and questions with writing extensions.
-
-## Why is my file not produced / my action never executed?
-
-Bazel only executes the actions needed to produce the *requested* output files.
-
-* If the file you want has a label, you can request it directly:
-  `bazel build //pkg:myfile.txt`
-
-* If the file is in an output group of the target, you may need to specify that
-  output group on the command line:
-  `bazel build //pkg:mytarget --output_groups=foo`
-
-* If you want the file to be built automatically whenever your target is
-  mentioned on the command line, add it to your rule's default outputs by
-  returning a [`DefaultInfo`](lib/globals#DefaultInfo) provider.
-
-See the [Rules page](/rules/rules#requesting-output-files) for more information.
-
-## Why is my implementation function not executed?
-
-Bazel analyzes only the targets that are requested for the build. You should
-either name the target on the command line, or name something that depends on
-the target.
-
-## A file is missing when my action or binary is executed
-
-Make sure that 1) the file has been registered as an input to the action or
-binary, and 2) the script or tool being executed is accessing the file using the
-correct path.
-
-For actions, you declare inputs by passing them to the `ctx.actions.*` function
-that creates the action. The proper path for the file can be obtained using
-[`File.path`](lib/File#path).
-
-For binaries (the executable outputs run by a `bazel run` or `bazel test`
-command), you declare inputs by including them in the
-[runfiles](/rules/rules#runfiles). Instead of using the `path` field, use
-[`File.short_path`](lib/File#short_path), which is the file's path relative to
-the runfiles directory in which the binary executes.
-
-## How can I control which files are built by `bazel build //pkg:mytarget`?
-
-Use the [`DefaultInfo`](lib/globals#DefaultInfo) provider to
-[set the default outputs](/rules/rules#requesting-output-files).
-
-## How can I run a program or do file I/O as part of my build?
- -A tool can be declared as a target, just like any other part of your build, and -run during the execution phase to help build other targets. To create an action -that runs a tool, use [`ctx.actions.run`](lib/actions#run) and pass in the -tool as the `executable` parameter. - -During the loading and analysis phases, a tool *cannot* run, nor can you perform -file I/O. This means that tools and file contents (except the contents of BUILD -and .bzl files) cannot affect how the target and action graphs get created. - -## What if I need to access the same structured data both before and during the execution phase? - -You can format the structured data as a .bzl file. You can `load()` the file to -access it during the loading and analysis phases. You can pass it as an input or -runfile to actions and executables that need it during the execution phase. - -## How should I document Starlark code? - -For rules and rule attributes, you can pass a docstring literal (possibly -triple-quoted) to the `doc` parameter of `rule` or `attr.*()`. For helper -functions and macros, use a triple-quoted docstring literal following the format -given [here](https://github.com/bazelbuild/buildtools/blob/master/WARNINGS.md#function-docstring). -Rule implementation functions generally do not need their own docstring. - -Using string literals in the expected places makes it easier for automated -tooling to extract documentation. Feel free to use standard non-string comments -wherever it may help the reader of your code. diff --git a/6.5.0/rules/index.mdx b/6.5.0/rules/index.mdx deleted file mode 100644 index 94a8560..0000000 --- a/6.5.0/rules/index.mdx +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: 'Rules' ---- - - -The Bazel ecosystem has a growing and evolving set of rules to support popular -languages and packages. Much of Bazel's strength comes from the ability to -[define new rules](/rules/concepts) that can be used by others. - -This page describes the recommended, native, and non-native Bazel rules. - -## Recommended rules - -Here is a selection of recommended rules: - -* [Android](/docs/bazel-and-android) -* [Boost](https://github.com/nelhage/rules_boost) -* [C / C++](/docs/bazel-and-cpp) -* [Docker](https://github.com/bazelbuild/rules_docker) -* [Go](https://github.com/bazelbuild/rules_go) -* [Haskell](https://github.com/tweag/rules_haskell) -* [Java](/docs/bazel-and-java) -* [JavaScript / NodeJS](https://github.com/bazelbuild/rules_nodejs) -* [Kubernetes](https://github.com/bazelbuild/rules_k8s) -* [Maven dependency management](https://github.com/bazelbuild/rules_jvm_external) -* [Objective C](/docs/bazel-and-apple) -* [Package building](https://github.com/bazelbuild/rules_pkg) -* [Protocol Buffers](https://github.com/bazelbuild/rules_proto#protobuf-rules-for-bazel) -* [Python](https://github.com/bazelbuild/rules_python) -* [Scala](https://github.com/bazelbuild/rules_scala) -* [Shell](/reference/be/shell) -* [Webtesting](https://github.com/bazelbuild/rules_webtesting) (Webdriver) - -The repository [Skylib](https://github.com/bazelbuild/bazel-skylib) contains -additional functions that can be useful when writing new rules and new -macros. - -The rules above were reviewed and follow our -[requirements for recommended rules](/contribute/recommended-rules). -Contact the respective rule set's maintainers regarding issues and feature -requests. 
-
-To find more Bazel rules, use a search engine, take a look at
-[awesomebazel.com](https://awesomebazel.com/), or search on
-[GitHub](https://github.com/search?o=desc&q=bazel+rules&s=stars&type=Repositories).
-
-## Native rules that do not apply to a specific programming language
-
-Native rules are shipped with the Bazel binary and are always available in
-BUILD files without a `load` statement.
-
-* Extra actions
-  - [`extra_action`](/reference/be/extra-actions#extra_action)
-  - [`action_listener`](/reference/be/extra-actions#action_listener)
-* General
-  - [`filegroup`](/reference/be/general#filegroup)
-  - [`genquery`](/reference/be/general#genquery)
-  - [`test_suite`](/reference/be/general#test_suite)
-  - [`alias`](/reference/be/general#alias)
-  - [`config_setting`](/reference/be/general#config_setting)
-  - [`genrule`](/reference/be/general#genrule)
-* Platform
-  - [`constraint_setting`](/reference/be/platform#constraint_setting)
-  - [`constraint_value`](/reference/be/platform#constraint_value)
-  - [`platform`](/reference/be/platform#platform)
-  - [`toolchain`](/reference/be/platform#toolchain)
-  - [`toolchain_type`](/reference/be/platform#toolchain_type)
-* Workspace
-  - [`bind`](/reference/be/workspace#bind)
-  - [`local_repository`](/reference/be/workspace#local_repository)
-  - [`new_local_repository`](/reference/be/workspace#new_local_repository)
-  - [`xcode_config`](/reference/be/objective-c#xcode_config)
-  - [`xcode_version`](/reference/be/objective-c#xcode_version)
-
-## Embedded non-native rules
-
-Bazel also embeds additional rules written in [Starlark](/rules/language). Those
-can be loaded from the `@bazel_tools` built-in external repository.
-
-* Repository rules
-  - [`git_repository`](/rules/lib/repo/git#git_repository)
-  - [`new_git_repository`](/rules/lib/repo/git#new_git_repository)
-  - [`http_archive`](/rules/lib/repo/http#http_archive)
-  - [`http_file`](/rules/lib/repo/http#http_file)
-  - [`http_jar`](/rules/lib/repo/http#http_jar)
-  - [Utility functions on patching](/rules/lib/repo/utils)
diff --git a/6.5.0/rules/macro-tutorial.mdx b/6.5.0/rules/macro-tutorial.mdx
deleted file mode 100644
index b1a84d6..0000000
--- a/6.5.0/rules/macro-tutorial.mdx
+++ /dev/null
@@ -1,78 +0,0 @@
----
-title: 'Creating a Macro'
----
-
-
-Imagine that you need to run a tool as part of your build. For example, you
-may want to generate or preprocess a source file, or compress a binary. In this
-tutorial, you are going to create a macro that resizes an image.
-
-Macros are suitable for simple tasks. If you want to do anything more
-complicated, for example add support for a new programming language, consider
-creating a [rule](/rules/rules). Rules give you more control and flexibility.
-
-The easiest way to create a macro that resizes an image is to use a `genrule`:
-
-``` python
-genrule(
    name = "logo_miniature",
    srcs = ["logo.png"],
    outs = ["small_logo.png"],
    cmd = "convert $< -resize 100x100 $@",
)
-
-cc_binary(
-    name = "my_app",
-    srcs = ["my_app.cc"],
-    data = [":logo_miniature"],
-)
-```
-
-If you need to resize more images, you may want to reuse the code. To do that,
-define a function in a separate `.bzl` file, and call the file `miniature.bzl`:
-
-``` python
-def miniature(name, src, size="100x100", **kwargs):
-  """Create a miniature of the src image.
-
-  The generated file is prefixed with 'small_'. 
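-
-  Args:
-    name: name of the resulting genrule target.
-    src: label of the image to resize.
-    size: size passed to ImageMagick's convert, for example "100x100".
-    **kwargs: extra arguments, such as visibility, forwarded to the
-      underlying genrule.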
-  """
-  native.genrule(
-    name = name,
-    srcs = [src],
-    outs = ["small_" + src],
-    cmd = "convert $< -resize " + size + " $@",
-    **kwargs
-  )
-```
-
-A few remarks:
-
-* By convention, macros have a `name` argument, just like rules.
-
-* To document the behavior of a macro, use a
-  [docstring](https://www.python.org/dev/peps/pep-0257/) as in Python.
-
-* To call a `genrule`, or any other native rule, use `native.`.
-
-* Use `**kwargs` to forward the extra arguments to the underlying `genrule`
-  (it works just like in [Python](https://docs.python.org/3/tutorial/controlflow.html#keyword-arguments)).
-  This is useful so that a user can set standard attributes like `visibility`
-  or `tags`.
-
-Now, use the macro from the `BUILD` file:
-
-``` python
-load("//path/to:miniature.bzl", "miniature")
-
-miniature(
-    name = "logo_miniature",
-    src = "image.png",
-)
-
-cc_binary(
-    name = "my_app",
-    srcs = ["my_app.cc"],
-    data = [":logo_miniature"],
-)
-```
diff --git a/6.5.0/rules/performance.mdx b/6.5.0/rules/performance.mdx
deleted file mode 100644
index 1fedfba..0000000
--- a/6.5.0/rules/performance.mdx
+++ /dev/null
@@ -1,443 +0,0 @@
----
-title: 'Optimizing Performance'
----
-
-
-When writing rules, the most common performance pitfall is to traverse or copy
-data that is accumulated from dependencies. When aggregated over the whole
-build, these operations can easily take O(N^2) time or space. To avoid this, it
-is crucial to understand how to use depsets effectively.
-
-This can be hard to get right, so Bazel also provides a memory profiler that
-assists you in finding spots where you might have made a mistake. Be warned:
-The cost of writing an inefficient rule may not be evident until it is in
-widespread use.
-
-## Use depsets
-
-Whenever you are rolling up information from rule dependencies you should use
-[depsets](lib/depset). Only use plain lists or dicts to publish information
-local to the current rule.
-
-A depset represents information as a nested graph, which enables sharing.
-
-Consider the following graph:
-
-```
-C -> B -> A
-D ---^
-```
-
-Each node publishes a single string. With depsets the data looks like this:
-
-```
-a = depset(direct=['a'])
-b = depset(direct=['b'], transitive=[a])
-c = depset(direct=['c'], transitive=[b])
-d = depset(direct=['d'], transitive=[b])
-```
-
-Note that each item is only mentioned once. With lists you would get this:
-
-```
-a = ['a']
-b = ['b', 'a']
-c = ['c', 'b', 'a']
-d = ['d', 'b', 'a']
-```
-
-Note that in this case `'a'` is mentioned four times! With larger graphs this
-problem will only get worse.
-
-Here is an example of a rule implementation that uses depsets correctly to
-publish transitive information. Note that it is OK to publish rule-local
-information using lists if you want, since this is not O(N^2).
-
-```
-MyProvider = provider()
-
-def _impl(ctx):
-  my_things = ctx.attr.things
-  all_things = depset(
-      direct=my_things,
-      transitive=[dep[MyProvider].all_things for dep in ctx.attr.deps]
-  )
-  ...
-  return [MyProvider(
-    my_things=my_things,  # OK, a flat list of rule-local things only
-    all_things=all_things,  # OK, a depset containing dependencies
-  )]
-```
-
-See the [depset overview](/rules/depsets) page for more information.
-
-### Avoid calling `depset.to_list()`
-
-You can coerce a depset to a flat list using
-[`to_list()`](lib/depset#to_list), but doing so usually results in O(N^2)
-cost. If at all possible, avoid any flattening of depsets except for debugging
-purposes. 
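-
-For instance, a one-off `print()` while debugging is a reasonable place to
-flatten. A minimal sketch, reusing `MyProvider` from the example above (remove
-it before shipping the rule):
-
-```python
-def _debug_impl(ctx):
-  all_things = depset(
-      transitive = [dep[MyProvider].all_things for dep in ctx.attr.deps],
-  )
-  # Fine for temporary debugging; O(N^2) if it stays in rule logic.
-  print("transitive contents:", all_things.to_list())
-```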
-
-A common misconception is that you can freely flatten depsets if you only do it
-at top-level targets, such as a `<xx>_binary` rule, since then the cost is not
-accumulated over each level of the build graph. But this is *still* O(N^2) when
-you build a set of targets with overlapping dependencies. This happens when
-building your tests `//foo/tests/...`, or when importing an IDE project.
-
-### Reduce the number of calls to `depset`
-
-Calling `depset` inside a loop is often a mistake. It can lead to depsets with
-very deep nesting, which perform poorly. For example:
-
-```python
-x = depset()
-for i in inputs:
-    # Do not do that.
-    x = depset(transitive = [x, i.deps])
-```
-
-This code can be replaced easily. First, collect the transitive depsets and
-merge them all at once:
-
-```python
-transitive = []
-
-for i in inputs:
-    transitive.append(i.deps)
-
-x = depset(transitive = transitive)
-```
-
-This can sometimes be reduced using a list comprehension:
-
-```python
-x = depset(transitive = [i.deps for i in inputs])
-```
-
-## Use ctx.actions.args() for command lines
-
-When building command lines you should use [ctx.actions.args()](lib/Args).
-This defers expansion of any depsets to the execution phase.
-
-Apart from being strictly faster, this will reduce the memory consumption of
-your rules -- sometimes by 90% or more.
-
-Here are some tricks:
-
-* Pass depsets and lists directly as arguments, instead of flattening them
-yourself. They will get expanded by `ctx.actions.args()` for you.
-If you need any transformations on the depset contents, look at
-[ctx.actions.args#add](lib/Args#add) to see if anything fits the bill.
-
-* Are you passing `File#path` as arguments? No need. Any
-[File](lib/File) is automatically turned into its
-[path](lib/File#path), deferred to expansion time.
-
-* Avoid constructing strings by concatenating them together.
-The best string argument is a constant, as its memory will be shared between
-all instances of your rule.
-
-* If the args are too long for the command line, a `ctx.actions.args()` object
-can be conditionally or unconditionally written to a param file using
-[`ctx.actions.args#use_param_file`](lib/Args#use_param_file). This is
-done behind the scenes when the action is executed. If you need to explicitly
-control the params file you can write it manually using
-[`ctx.actions.write`](lib/actions#write).
-
-Example:
-
-```
-def _impl(ctx):
-  ...
-  args = ctx.actions.args()
-  file = ctx.actions.declare_file(...)
-  files = depset(...)
-
-  # Bad, constructs a full string "--foo=<file path>" for each rule instance
-  args.add("--foo=" + file.path)
-
-  # Good, shares "--foo" among all rule instances, and defers file.path to later
-  # It will however pass ["--foo", <file path>] to the action command line,
-  # instead of ["--foo=<file path>"]
-  args.add("--foo", file)
-
-  # Use format if you prefer ["--foo=<file path>"] to ["--foo", <file path>]
-  args.add(file, format="--foo=%s")
-
-  # Bad, makes a giant string of a whole depset
-  args.add(" ".join(["-I%s" % file.short_path for file in files]))
-
-  # Good, only stores a reference to the depset
-  args.add_all(files, format_each="-I%s", map_each=_to_short_path)
-
-# Function passed to map_each above
-def _to_short_path(f):
-  return f.short_path
-```
-
-## Transitive action inputs should be depsets
-
-When building an action using [ctx.actions.run](lib/actions#run), do not
-forget that the `inputs` field accepts a depset. Use this whenever inputs are
-collected from dependencies transitively.
-
-```
-inputs = depset(...) 
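-# The depset can mix direct files with transitive depsets from dependencies;
-# Bazel flattens it internally, without materializing a Starlark list.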
-ctx.actions.run(
-  inputs = inputs,  # Do *not* turn inputs into a list
-  ...
-)
-```
-
-## Hanging
-
-If Bazel appears to be hung, you can hit Ctrl-\ or send
-Bazel a `SIGQUIT` signal (`kill -3 $(bazel info server_pid)`) to get a thread
-dump in the file `$(bazel info output_base)/server/jvm.out`.
-
-Since you may not be able to run `bazel info` if Bazel is hung, the
-`output_base` directory is usually the parent of the `bazel-<workspace>`
-symlink in your workspace directory.
-
-## Performance profiling
-
-Bazel writes a JSON profile to `command.profile.gz` in the output base by
-default. You can configure the location with the
-[`--profile`](/docs/user-manual#profile) flag, for example
-`--profile=/tmp/profile.gz`. Locations ending with `.gz` are compressed with
-GZIP.
-
-To see the results, open `chrome://tracing` in a Chrome browser tab, click
-"Load" and pick the (potentially compressed) profile file. For more detailed
-results, click the boxes in the lower left corner.
-
-You can use these keyboard controls to navigate:
-
-* Press `1` for "select" mode. In this mode, you can select
-  particular boxes to inspect the event details (see lower left corner).
-  Select multiple events to get a summary and aggregated statistics.
-* Press `2` for "pan" mode. Then drag the mouse to move the view. You
-  can also use `a`/`d` to move left/right.
-* Press `3` for "zoom" mode. Then drag the mouse to zoom. You can
-  also use `w`/`s` to zoom in/out.
-* Press `4` for "timing" mode where you can measure the distance
-  between two events.
-* Press `?` to learn about all controls.
-
-### Profile information
-
-Example profile:
-
-![Example profile](/rules/profile.png "Example profile")
-
-**Figure 1.** Example profile.
-
-There are some special rows:
-
-* `action counters`: Displays how many concurrent actions are in flight. Click
-  on it to see the actual value. Should go up to the value of `--jobs` in
-  clean builds.
-* `cpu counters`: For each second of the build, displays the amount of CPU
-  that is used by Bazel (a value of 1 equals one core being 100% busy).
-* `Critical Path`: Displays one block for each action on the critical path.
-* `grpc-command-1`: Bazel's main thread. Useful to get a high-level picture of
-  what Bazel is doing, for example "Launch Bazel", "evaluateTargetPatterns",
-  and "runAnalysisPhase".
-* `Service Thread`: Displays minor and major Garbage Collection (GC) pauses.
-
-Other rows represent Bazel threads and show all events on that thread.
-
-### Common performance issues
-
-When analyzing performance profiles, look for:
-
-* Slower than expected analysis phase (`runAnalysisPhase`), especially on
-  incremental builds. This can be a sign of a poor rule implementation, for
-  example one that flattens depsets. Package loading can be slowed down by an
-  excessive number of targets, complex macros, or recursive globs.
-* Individual slow actions, especially those on the critical path. It might be
-  possible to split large actions into multiple smaller actions or reduce the
-  set of (transitive) dependencies to speed them up. Also check for an
-  unusually high non-`PROCESS_TIME` (such as `REMOTE_SETUP` or `FETCH`).
-* Bottlenecks, that is, a small number of threads being busy while all others
-  are idle, waiting for the result (see around 15s-30s in the screenshot
-  above). Optimizing this will most likely require touching the rule
-  implementations or Bazel itself to introduce more parallelism. This can also
-  happen when there is an unusual amount of GC. 
-
-### Profile file format
-
-The top-level object contains metadata (`otherData`) and the actual tracing data
-(`traceEvents`). The metadata contains extra info, for example the invocation ID
-and date of the Bazel invocation.
-
-Example:
-
-```json
-{
-  "otherData": {
-    "build_id": "101bff9a-7243-4c1a-8503-9dc6ae4c3b05",
-    "date": "Tue Jun 16 08:30:21 CEST 2020",
-    "profile_finish_ts": "1677666095162000",
-    "output_base": "/usr/local/google/_bazel_johndoe/573d4be77eaa72b91a3dfaa497bf8cd0"
-  },
-  "traceEvents": [
-    {"name":"thread_name","ph":"M","pid":1,"tid":0,"args":{"name":"Critical Path"}},
-    {"cat":"build phase marker","name":"Launch Bazel","ph":"X","ts":-1824000,"dur":1824000,"pid":1,"tid":60},
-    ...
-    {"cat":"general information","name":"NoSpawnCacheModule.beforeCommand","ph":"X","ts":116461,"dur":419,"pid":1,"tid":60},
-    ...
-    {"cat":"package creation","name":"src","ph":"X","ts":279844,"dur":15479,"pid":1,"tid":838},
-    ...
-    {"name":"thread_name","ph":"M","pid":1,"tid":11,"args":{"name":"Service Thread"}},
-    {"cat":"gc notification","name":"minor GC","ph":"X","ts":334626,"dur":13000,"pid":1,"tid":11},
-
-    ...
-    {"cat":"action processing","name":"Compiling third_party/grpc/src/core/lib/transport/status_conversion.cc","ph":"X","ts":12630845,"dur":136644,"pid":1,"tid":1546}
-  ]
-}
-```
-
-Timestamps (`ts`) and durations (`dur`) in the trace events are given in
-microseconds. The category (`cat`) is one of the enum values of `ProfilerTask`.
-Note that some events are merged together if they are very short and close to
-each other; pass `--noslim_json_profile` if you would like to
-prevent event merging.
-
-See also the
-[Chrome Trace Event Format Specification](https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview).
-
-### analyze-profile
-
-This profiling method consists of two steps. First, execute your build/test
-with the `--profile` flag, for example:
-
-```
-$ bazel build --profile=/tmp/prof //path/to:target
-```
-
-The file generated (in this case `/tmp/prof`) is a binary file, which can be
-postprocessed and analyzed by the `analyze-profile` command:
-
-```
-$ bazel analyze-profile /tmp/prof
-```
-
-By default, it prints summary analysis information for the specified profile
-datafile. This includes cumulative statistics for different task types for each
-build phase and an analysis of the critical path. 
- -The first section of the default output is an overview of the time spent -on the different build phases: - -``` -INFO: Profile created on Tue Jun 16 08:59:40 CEST 2020, build ID: 0589419c-738b-4676-a374-18f7bbc7ac23, output base: /home/johndoe/.cache/bazel/_bazel_johndoe/d8eb7a85967b22409442664d380222c0 - -=== PHASE SUMMARY INFORMATION === - -Total launch phase time 1.070 s 12.95% -Total init phase time 0.299 s 3.62% -Total loading phase time 0.878 s 10.64% -Total analysis phase time 1.319 s 15.98% -Total preparation phase time 0.047 s 0.57% -Total execution phase time 4.629 s 56.05% -Total finish phase time 0.014 s 0.18% ------------------------------------------------- -Total run time 8.260 s 100.00% - -Critical path (4.245 s): - Time Percentage Description - 8.85 ms 0.21% _Ccompiler_Udeps for @local_config_cc// compiler_deps - 3.839 s 90.44% action 'Compiling external/com_google_protobuf/src/google/protobuf/compiler/php/php_generator.cc [for host]' - 270 ms 6.36% action 'Linking external/com_google_protobuf/protoc [for host]' - 0.25 ms 0.01% runfiles for @com_google_protobuf// protoc - 126 ms 2.97% action 'ProtoCompile external/com_google_protobuf/python/google/protobuf/compiler/plugin_pb2.py' - 0.96 ms 0.02% runfiles for //tools/aquery_differ aquery_differ -``` - -## Memory profiling - -Bazel comes with a built-in memory profiler that can help you check your rule's -memory use. If there is a problem you can dump the heap to find the -exact line of code that is causing the problem. - -### Enabling memory tracking - -You must pass these two startup flags to *every* Bazel invocation: - - ``` - STARTUP_FLAGS=\ - --host_jvm_args=-javaagent:$(BAZEL)/third_party/allocation_instrumenter/java-allocation-instrumenter-3.3.0.jar \ - --host_jvm_args=-DRULE_MEMORY_TRACKER=1 - ``` -Note: The bazel repository comes with an allocation instrumenter. -Make sure to adjust `$(BAZEL)` for your repository location. - -These start the server in memory tracking mode. If you forget these for even -one Bazel invocation the server will restart and you will have to start over. - -### Using the Memory Tracker - -As an example, look at the target `foo` and see what it does. To only -run the analysis and not run the build execution phase, add the -`--nobuild` flag. - -``` -$ bazel $(STARTUP_FLAGS) build --nobuild //foo:foo -``` - -Next, see how much memory the whole Bazel instance consumes: - -``` -$ bazel $(STARTUP_FLAGS) info used-heap-size-after-gc -> 2594MB -``` - -Break it down by rule class by using `bazel dump --rules`: - -``` -$ bazel $(STARTUP_FLAGS) dump --rules -> - -RULE COUNT ACTIONS BYTES EACH -genrule 33,762 33,801 291,538,824 8,635 -config_setting 25,374 0 24,897,336 981 -filegroup 25,369 25,369 97,496,272 3,843 -cc_library 5,372 73,235 182,214,456 33,919 -proto_library 4,140 110,409 186,776,864 45,115 -android_library 2,621 36,921 218,504,848 83,366 -java_library 2,371 12,459 38,841,000 16,381 -_gen_source 719 2,157 9,195,312 12,789 -_check_proto_library_deps 719 668 1,835,288 2,552 -... (more output) -``` - -Look at where the memory is going by producing a `pprof` file -using `bazel dump --skylark_memory`: - -``` -$ bazel $(STARTUP_FLAGS) dump --skylark_memory=$HOME/prof.gz -> Dumping Starlark heap to: /usr/local/google/home/$USER/prof.gz -``` - -Use the `pprof` tool to investigate the heap. A good starting point is -getting a flame graph by using `pprof -flame $HOME/prof.gz`. - -Get `pprof` from [https://github.com/google/pprof](https://github.com/google/pprof). 
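-
-If `pprof` is not already on your `PATH`, one way to install it (assuming a
-working Go toolchain) is:
-
-```
-$ go install github.com/google/pprof@latest
-```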
- -Get a text dump of the hottest call sites annotated with lines: - -``` -$ pprof -text -lines $HOME/prof.gz -> - flat flat% sum% cum cum% - 146.11MB 19.64% 19.64% 146.11MB 19.64% android_library :-1 - 113.02MB 15.19% 34.83% 113.02MB 15.19% genrule :-1 - 74.11MB 9.96% 44.80% 74.11MB 9.96% glob :-1 - 55.98MB 7.53% 52.32% 55.98MB 7.53% filegroup :-1 - 53.44MB 7.18% 59.51% 53.44MB 7.18% sh_test :-1 - 26.55MB 3.57% 63.07% 26.55MB 3.57% _generate_foo_files /foo/tc/tc.bzl:491 - 26.01MB 3.50% 66.57% 26.01MB 3.50% _build_foo_impl /foo/build_test.bzl:78 - 22.01MB 2.96% 69.53% 22.01MB 2.96% _build_foo_impl /foo/build_test.bzl:73 - ... (more output) -``` diff --git a/6.5.0/rules/rules-tutorial.mdx b/6.5.0/rules/rules-tutorial.mdx deleted file mode 100644 index 702acdb..0000000 --- a/6.5.0/rules/rules-tutorial.mdx +++ /dev/null @@ -1,369 +0,0 @@ ---- -title: 'Rules Tutorial' ---- - - - -[Starlark](https://github.com/bazelbuild/starlark) is a Python-like -configuration language originally developed for use in Bazel and since adopted -by other tools. Bazel's `BUILD` and `.bzl` files are written in a dialect of -Starlark properly known as the "Build Language", though it is often simply -referred to as "Starlark", especially when emphasizing that a feature is -expressed in the Build Language as opposed to being a built-in or "native" part -of Bazel. Bazel augments the core language with numerous build-related functions -such as `glob`, `genrule`, `java_binary`, and so on. - -See the -[Bazel](/start/getting-started) and [Starlark](/rules/concepts) documentation for -more details, and the -[Rules SIG template](https://github.com/bazel-contrib/rules-template) as a -starting point for new rulesets. - -## The empty rule - -To create your first rule, create the file `foo.bzl`: - -```python -def _foo_binary_impl(ctx): - pass - -foo_binary = rule( - implementation = _foo_binary_impl, -) -``` - -When you call the [`rule`](lib/globals#rule) function, you -must define a callback function. The logic will go there, but you -can leave the function empty for now. The [`ctx`](lib/ctx) argument -provides information about the target. - -You can load the rule and use it from a `BUILD` file. - -Create a `BUILD` file in the same directory: - -```python -load(":foo.bzl", "foo_binary") - -foo_binary(name = "bin") -``` - -Now, the target can be built: - -``` -$ bazel build bin -INFO: Analyzed target //:bin (2 packages loaded, 17 targets configured). -INFO: Found 1 target... -Target //:bin up-to-date (nothing to build) -``` - -Even though the rule does nothing, it already behaves like other rules: it has a -mandatory name, it supports common attributes like `visibility`, `testonly`, and -`tags`. - -## Evaluation model - -Before going further, it's important to understand how the code is evaluated. - -Update `foo.bzl` with some print statements: - -```python -def _foo_binary_impl(ctx): - print("analyzing", ctx.label) - -foo_binary = rule( - implementation = _foo_binary_impl, -) - -print("bzl file evaluation") -``` - -and BUILD: - -```python -load(":foo.bzl", "foo_binary") - -print("BUILD file") -foo_binary(name = "bin1") -foo_binary(name = "bin2") -``` - -[`ctx.label`](lib/ctx#label) -corresponds to the label of the target being analyzed. The `ctx` object has -many useful fields and methods; you can find an exhaustive list in the -[API reference](lib/ctx). 
-
-Query the code:
-
-```
-$ bazel query :all
-DEBUG: /usr/home/bazel-codelab/foo.bzl:8:1: bzl file evaluation
-DEBUG: /usr/home/bazel-codelab/BUILD:2:1: BUILD file
-//:bin2
-//:bin1
-```
-
-Make a few observations:
-
-* "bzl file evaluation" is printed first. Before evaluating the `BUILD` file,
-  Bazel evaluates all the files it loads. If multiple `BUILD` files load
-  `foo.bzl`, you would see only one occurrence of "bzl file evaluation" because
-  Bazel caches the result of the evaluation.
-* The callback function `_foo_binary_impl` is not called. Bazel query loads
-  `BUILD` files, but doesn't analyze targets.
-
-To analyze the targets, use the [`cquery`](/docs/cquery) ("configured
-query") or the `build` command:
-
-```
-$ bazel build :all
-DEBUG: /usr/home/bazel-codelab/foo.bzl:8:1: bzl file evaluation
-DEBUG: /usr/home/bazel-codelab/BUILD:2:1: BUILD file
-DEBUG: /usr/home/bazel-codelab/foo.bzl:2:5: analyzing //:bin1
-DEBUG: /usr/home/bazel-codelab/foo.bzl:2:5: analyzing //:bin2
-INFO: Analyzed 2 targets (0 packages loaded, 0 targets configured).
-INFO: Found 2 targets...
-```
-
-As you can see, `_foo_binary_impl` is now called twice - once for each target.
-
-Some readers will notice that "bzl file evaluation" is printed again, although
-the evaluation of `foo.bzl` is cached after the call to `bazel query`. Bazel
-doesn't reevaluate the code; it only replays the print events. Regardless of
-the cache state, you get the same output.
-
-## Creating a file
-
-To make your rule more useful, update it to generate a file. First, declare the
-file and give it a name. In this example, create a file with the same name as
-the target:
-
-```python
-ctx.actions.declare_file(ctx.label.name)
-```
-
-If you run `bazel build :all` now, you will get an error:
-
-```
-The following files have no generating action:
-bin2
-```
-
-Whenever you declare a file, you have to tell Bazel how to generate it by
-creating an action. Use [`ctx.actions.write`](lib/actions#write)
-to create a file with the given content.
-
-```python
-def _foo_binary_impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name)
-    ctx.actions.write(
-        output = out,
-        content = "Hello\n",
-    )
-```
-
-The code is valid, but it won't do anything:
-
-```
-$ bazel build bin1
-Target //:bin1 up-to-date (nothing to build)
-```
-
-The `ctx.actions.write` function registered an action, which taught Bazel
-how to generate the file. But Bazel won't create the file until it is
-actually requested. So the last thing to do is tell Bazel that the file
-is an output of the rule, and not a temporary file used within the rule
-implementation.
-
-```python
-def _foo_binary_impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name)
-    ctx.actions.write(
-        output = out,
-        content = "Hello!\n",
-    )
-    return [DefaultInfo(files = depset([out]))]
-```
-
-You will look at the `DefaultInfo` and `depset` functions later. For now,
-assume that the last line is the way to choose the outputs of a rule.
-
-Now, run Bazel:
-
-```
-$ bazel build bin1
-INFO: Found 1 target...
-Target //:bin1 up-to-date:
-  bazel-bin/bin1
-
-$ cat bazel-bin/bin1
-Hello!
-```
-
-You have successfully generated a file!
-
-## Attributes
-
-To make the rule more useful, add new attributes using
-[the `attr` module](lib/attr) and update the rule definition. 
-
-Add a string attribute called `username`:
-
-```python
-foo_binary = rule(
-    implementation = _foo_binary_impl,
-    attrs = {
-        "username": attr.string(),
-    },
-)
-```
-
-Next, set it in the `BUILD` file:
-
-```python
-foo_binary(
-    name = "bin",
-    username = "Alice",
-)
-```
-
-To access the value in the callback function, use `ctx.attr.username`. For
-example:
-
-```python
-def _foo_binary_impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name)
-    ctx.actions.write(
-        output = out,
-        content = "Hello {}!\n".format(ctx.attr.username),
-    )
-    return [DefaultInfo(files = depset([out]))]
-```
-
-Note that you can make the attribute mandatory or set a default value. Look at
-the documentation of [`attr.string`](lib/attr#string).
-You may also use other types of attributes, such as [boolean](lib/attr#bool)
-or [list of integers](lib/attr#int_list).
-
-## Dependencies
-
-Dependency attributes, such as [`attr.label`](lib/attr#label)
-and [`attr.label_list`](lib/attr#label_list),
-declare a dependency from the target that owns the attribute to the target whose
-label appears in the attribute's value. This kind of attribute forms the basis
-of the target graph.
-
-In the `BUILD` file, the target label appears as a string object, such as
-`//pkg:name`. In the implementation function, the target will be accessible as a
-[`Target`](lib/Target) object. For example, view the files returned
-by the target using [`Target.files`](lib/Target#modules.Target.files).
-
-### Multiple files
-
-By default, only targets created by rules may appear as dependencies (such as a
-`foo_library()` target). If you want the attribute to accept targets that are
-input files (such as source files in the repository), you can do it with
-`allow_files` and specify the list of accepted file extensions (or `True` to
-allow any file extension):
-
-```python
-"srcs": attr.label_list(allow_files = [".java"]),
-```
-
-The list of files can be accessed with `ctx.files.<attribute name>`. For
-example, the list of files in the `srcs` attribute can be accessed through
-
-```python
-ctx.files.srcs
-```
-
-### Single file
-
-If you need only one file, use `allow_single_file`:
-
-```python
-"src": attr.label(allow_single_file = [".java"])
-```
-
-This file is then accessible under `ctx.file.<attribute name>`:
-
-```python
-ctx.file.src
-```
-
-## Create a file with a template
-
-You can create a rule that generates a .cc file based on a template. You could
-instead use `ctx.actions.write` to output a string constructed in the rule
-implementation function, but this has two problems. First, as the template gets
-bigger, it becomes more memory efficient to put it in a separate file and avoid
-constructing large strings during the analysis phase. Second, using a separate
-file is more convenient for the user. Instead, use
-[`ctx.actions.expand_template`](lib/actions#expand_template),
-which performs substitutions on a template file. 
- -Create a `template` attribute to declare a dependency on the template -file: - -```python -def _hello_world_impl(ctx): - out = ctx.actions.declare_file(ctx.label.name + ".cc") - ctx.actions.expand_template( - output = out, - template = ctx.file.template, - substitutions = {"{NAME}": ctx.attr.username}, - ) - return [DefaultInfo(files = depset([out]))] - -hello_world = rule( - implementation = _hello_world_impl, - attrs = { - "username": attr.string(default = "unknown person"), - "template": attr.label( - allow_single_file = [".cc.tpl"], - mandatory = True, - ), - }, -) -``` - -Users can use the rule like this: - -```python -hello_world( - name = "hello", - username = "Alice", - template = "file.cc.tpl", -) - -cc_binary( - name = "hello_bin", - srcs = [":hello"], -) -``` - -If you don't want to expose the template to the end-user and always use the -same one, you can set a default value and make the attribute private: - -```python - "_template": attr.label( - allow_single_file = True, - default = "file.cc.tpl", - ), -``` - -Attributes that start with an underscore are private and cannot be set in a -`BUILD` file. The template is now an _implicit dependency_: Every `hello_world` -target has a dependency on this file. Don't forget to make this file visible -to other packages by updating the `BUILD` file and using -[`exports_files`](/reference/be/functions#exports_files): - -```python -exports_files(["file.cc.tpl"]) -``` - -## Going further - -* Take a look at the [reference documentation for rules](rules#contents). -* Get familiar with [depsets](depsets). -* Check out the [examples repository](https://github.com/bazelbuild/examples/tree/master/rules) - which includes additional examples of rules. diff --git a/6.5.0/rules/testing.mdx b/6.5.0/rules/testing.mdx deleted file mode 100644 index eb22677..0000000 --- a/6.5.0/rules/testing.mdx +++ /dev/null @@ -1,473 +0,0 @@ ---- -title: 'Testing' ---- - - -There are several different approaches to testing Starlark code in Bazel. This -page gathers the current best practices and frameworks by use case. - -## Testing rules - -[Skylib](https://github.com/bazelbuild/bazel-skylib) has a test framework called -[`unittest.bzl`](https://github.com/bazelbuild/bazel-skylib/blob/main/lib/unittest.bzl) -for checking the analysis-time behavior of rules, such as their actions and -providers. Such tests are called "analysis tests" and are currently the best -option for testing the inner workings of rules. - -Some caveats: - -* Test assertions occur within the build, not a separate test runner process. - Targets that are created by the test must be named such that they do not - collide with targets from other tests or from the build. An error that - occurs during the test is seen by Bazel as a build breakage rather than a - test failure. - -* It requires a fair amount of boilerplate to set up the rules under test and - the rules containing test assertions. This boilerplate may seem daunting at - first. It helps to [keep in mind](/rules/concepts#evaluation-model) that macros - are evaluated and targets generated during the loading phase, while rule - implementation functions don't run until later, during the analysis phase. - -* Analysis tests are intended to be fairly small and lightweight. Certain - features of the analysis testing framework are restricted to verifying - targets with a maximum number of transitive dependencies (currently 500). - This is due to performance implications of using these features with larger - tests. 
- -The basic principle is to define a testing rule that depends on the -rule-under-test. This gives the testing rule access to the rule-under-test's -providers. - -The testing rule's implementation function carries out assertions. If there are -any failures, these are not raised immediately by calling `fail()` (which would -trigger an analysis-time build error), but rather by storing the errors in a -generated script that fails at test execution time. - -See below for a minimal toy example, followed by an example that checks actions. - -### Minimal example - -`//mypkg/myrules.bzl`: - -```python -MyInfo = provider(fields = { - "val": "string value", - "out": "output File", -}) - -def _myrule_impl(ctx): - """Rule that just generates a file and returns a provider.""" - out = ctx.actions.declare_file(ctx.label.name + ".out") - ctx.actions.write(out, "abc") - return [MyInfo(val="some value", out=out)] - -myrule = rule( - implementation = _myrule_impl, -) -``` - -`//mypkg/myrules_test.bzl`: - - -```python -load("@bazel_skylib//lib:unittest.bzl", "asserts", "analysistest") -load(":myrules.bzl", "myrule", "MyInfo") - -# ==== Check the provider contents ==== - -def _provider_contents_test_impl(ctx): - env = analysistest.begin(ctx) - - target_under_test = analysistest.target_under_test(env) - # If preferred, could pass these values as "expected" and "actual" keyword - # arguments. - asserts.equals(env, "some value", target_under_test[MyInfo].val) - - # If you forget to return end(), you will get an error about an analysis - # test needing to return an instance of AnalysisTestResultInfo. - return analysistest.end(env) - -# Create the testing rule to wrap the test logic. This must be bound to a global -# variable, not called in a macro's body, since macros get evaluated at loading -# time but the rule gets evaluated later, at analysis time. Since this is a test -# rule, its name must end with "_test". -provider_contents_test = analysistest.make(_provider_contents_test_impl) - -# Macro to setup the test. -def _test_provider_contents(): - # Rule under test. Be sure to tag 'manual', as this target should not be - # built using `:all` except as a dependency of the test. - myrule(name = "provider_contents_subject", tags = ["manual"]) - # Testing rule. - provider_contents_test(name = "provider_contents_test", - target_under_test = ":provider_contents_subject") - # Note the target_under_test attribute is how the test rule depends on - # the real rule target. - -# Entry point from the BUILD file; macro for running each test case's macro and -# declaring a test suite that wraps them together. -def myrules_test_suite(name): - # Call all test functions and wrap their targets in a suite. - _test_provider_contents() - # ... - - native.test_suite( - name = name, - tests = [ - ":provider_contents_test", - # ... - ], - ) -``` - -`//mypkg/BUILD`: - -```python -load(":myrules.bzl", "myrule") -load(":myrules_test.bzl", "myrules_test_suite") - -# Production use of the rule. -myrule( - name = "mytarget", -) - -# Call a macro that defines targets that perform the tests at analysis time, -# and that can be executed with "bazel test" to return the result. -myrules_test_suite(name = "myrules_test") -``` - -The test can be run with `bazel test //mypkg:myrules_test`. 
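-
-When an assertion fails, passing `--test_output=errors` prints the failure
-message directly in the terminal instead of leaving it in the test log:
-
-```
-$ bazel test //mypkg:myrules_test --test_output=errors
-```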
-
-Aside from the initial `load()` statements, there are two main parts to the
-file:
-
-* The tests themselves, each of which consists of 1) an analysis-time
-  implementation function for the testing rule, 2) a declaration of the
-  testing rule via `analysistest.make()`, and 3) a loading-time function
-  (macro) for declaring the rule-under-test (and its dependencies) and testing
-  rule. If the assertions do not change between test cases, 1) and 2) may be
-  shared by multiple test cases.
-
-* The test suite function, which calls the loading-time functions for each
-  test, and declares a `test_suite` target bundling all tests together.
-
-For consistency, follow the recommended naming convention: Let `foo` stand for
-the part of the test name that describes what the test is checking
-(`provider_contents` in the above example). For example, a JUnit test method
-would be named `testFoo`.
-
-Then:
-
-* the macro which generates the test and target under test should be
-  named `_test_foo` (`_test_provider_contents`)
-
-* its test rule type should be named `foo_test` (`provider_contents_test`)
-
-* the label of the target of this rule type should be `foo_test`
-  (`provider_contents_test`)
-
-* the implementation function for the testing rule should be named
-  `_foo_test_impl` (`_provider_contents_test_impl`)
-
-* the labels of the targets of the rules under test and their dependencies
-  should be prefixed with `foo_` (`provider_contents_`)
-
-Note that the labels of all targets can conflict with other labels in the same
-BUILD package, so it's helpful to use a unique name for the test.
-
-### Failure testing
-
-It may be useful to verify that a rule fails given certain inputs or in a
-certain state. This can be done using the analysis test framework:
-
-The test rule created with `analysistest.make` should specify `expect_failure`:
-
-```python
-failure_testing_test = analysistest.make(
-    _failure_testing_test_impl,
-    expect_failure = True,
-)
-```
-
-The test rule implementation should make assertions on the nature of the failure
-that took place (specifically, the failure message):
-
-```python
-def _failure_testing_test_impl(ctx):
-    env = analysistest.begin(ctx)
-    asserts.expect_failure(env, "This rule should never work")
-    return analysistest.end(env)
-```
-
-Also make sure that your target under test is specifically tagged 'manual'.
-Without this, building all targets in your package using `:all` will result in a
-build of the intentionally-failing target and will exhibit a build failure. With
-'manual', your target under test will build only if explicitly specified, or as
-a dependency of a non-manual target (such as your test rule):
-
-```python
-def _test_failure():
-    myrule(name = "this_should_fail", tags = ["manual"])
-
-    failure_testing_test(name = "failure_testing_test",
-                         target_under_test = ":this_should_fail")
-
-# Then call _test_failure() in the macro which generates the test suite and add
-# ":failure_testing_test" to the suite's test targets.
-```
-
-### Verifying registered actions
-
-You may want to write tests which make assertions about the actions that your
-rule registers, for example, using `ctx.actions.run()`. This can be done in your
-analysis test rule implementation function. 
An example: - -```python -def _inspect_actions_test_impl(ctx): - env = analysistest.begin(ctx) - - target_under_test = analysistest.target_under_test(env) - actions = analysistest.target_actions(env) - asserts.equals(env, 1, len(actions)) - action_output = actions[0].outputs.to_list()[0] - asserts.equals( - env, target_under_test.label.name + ".out", action_output.basename) - return analysistest.end(env) -``` - -Note that `analysistest.target_actions(env)` returns a list of -[`Action`](lib/Action) objects which represent actions registered by the -target under test. - -### Verifying rule behavior under different flags - -You may want to verify your real rule behaves a certain way given certain build -flags. For example, your rule may behave differently if a user specifies: - -```shell -bazel build //mypkg:real_target -c opt -``` - -versus - -```shell -bazel build //mypkg:real_target -c dbg -``` - -At first glance, this could be done by testing the target under test using the -desired build flags: - -```shell -bazel test //mypkg:myrules_test -c opt -``` - -But then it becomes impossible for your test suite to simultaneously contain a -test which verifies the rule behavior under `-c opt` and another test which -verifies the rule behavior under `-c dbg`. Both tests would not be able to run -in the same build! - -This can be solved by specifying the desired build flags when defining the test -rule: - -```python -myrule_c_opt_test = analysistest.make( - _myrule_c_opt_test_impl, - config_settings = { - "//command_line_option:compilation_mode": "opt", - }, -) -``` - -Normally, a target under test is analyzed given the current build flags. -Specifying `config_settings` overrides the values of the specified command line -options. (Any unspecified options will retain their values from the actual -command line). - -In the specified `config_settings` dictionary, command line flags must be -prefixed with a special placeholder value `//command_line_option:`, as is shown -above. - - -## Validating artifacts - -The main ways to check that your generated files are correct are: - -* You can write a test script in shell, Python, or another language, and - create a target of the appropriate `*_test` rule type. - -* You can use a specialized rule for the kind of test you want to perform. - -### Using a test target - -The most straightforward way to validate an artifact is to write a script and -add a `*_test` target to your BUILD file. The specific artifacts you want to -check should be data dependencies of this target. If your validation logic is -reusable for multiple tests, it should be a script that takes command line -arguments that are controlled by the test target's `args` attribute. Here's an -example that validates that the output of `myrule` from above is `"abc"`. - -`//mypkg/myrule_validator.sh`: - -```shell -if [ "$(cat $1)" = "abc" ]; then - echo "Passed" - exit 0 -else - echo "Failed" - exit 1 -fi -``` - -`//mypkg/BUILD`: - -```python -... - -myrule( - name = "mytarget", -) - -... - -# Needed for each target whose artifacts are to be checked. -sh_test( - name = "validate_mytarget", - srcs = [":myrule_validator.sh"], - args = ["$(location :mytarget.out)"], - data = [":mytarget.out"], -) -``` - -### Using a custom rule - -A more complicated alternative is to write the shell script as a template that -gets instantiated by a new rule. This involves more indirection and Starlark -logic, but leads to cleaner BUILD files. 
As a side-benefit, any argument
-preprocessing can be done in Starlark instead of the script, and the script is
-slightly more self-documenting since it uses symbolic placeholders (for
-substitutions) instead of numeric ones (for arguments).
-
-`//mypkg/myrule_validator.sh.template`:
-
-```shell
-if [ "$(cat %TARGET%)" = "abc" ]; then
-  echo "Passed"
-  exit 0
-else
-  echo "Failed"
-  exit 1
-fi
-```
-
-`//mypkg/myrule_validation.bzl`:
-
-```python
-def _myrule_validation_test_impl(ctx):
-  """Rule for instantiating myrule_validator.sh.template for a given target."""
-  exe = ctx.outputs.executable
-  target = ctx.file.target
-  ctx.actions.expand_template(output = exe,
-                              template = ctx.file._script,
-                              is_executable = True,
-                              substitutions = {
-                                "%TARGET%": target.short_path,
-                              })
-  # This is needed to make sure the output file of myrule is visible to the
-  # resulting instantiated script.
-  return [DefaultInfo(runfiles=ctx.runfiles(files=[target]))]
-
-myrule_validation_test = rule(
-  implementation = _myrule_validation_test_impl,
-  attrs = {"target": attr.label(allow_single_file=True),
-           # You need an implicit dependency in order to access the template.
-           # A target could potentially override this attribute to modify
-           # the test logic.
-           "_script": attr.label(allow_single_file=True,
-                                 default=Label("//mypkg:myrule_validator"))},
-  test = True,
-)
-```
-
-`//mypkg/BUILD`:
-
-```python
-...
-
-myrule(
-    name = "mytarget",
-)
-
-...
-
-# Needed just once, to expose the template. Could have also used exports_files(),
-# and made the _script attribute set allow_files=True.
-filegroup(
-    name = "myrule_validator",
-    srcs = [":myrule_validator.sh.template"],
-)
-
-# Needed for each target whose artifacts are to be checked. Notice that you no
-# longer have to specify the output file name in a data attribute, or its
-# $(location) expansion in an args attribute, or the label for the script
-# (unless you want to override it).
-myrule_validation_test(
-    name = "validate_mytarget",
-    target = ":mytarget",
-)
-```
-
-Alternatively, instead of using a template expansion action, you could have
-inlined the template into the .bzl file as a string and expanded it during the
-analysis phase using the `str.format` method or `%`-formatting.
-
-## Testing Starlark utilities
-
-[Skylib](https://github.com/bazelbuild/bazel-skylib)'s
-[`unittest.bzl`](https://github.com/bazelbuild/bazel-skylib/blob/main/lib/unittest.bzl)
-framework can be used to test utility functions (that is, functions that are
-neither macros nor rule implementations). Instead of using `unittest.bzl`'s
-`analysistest` library, `unittest` may be used. For such test suites, the
-convenience function `unittest.suite()` can be used to reduce boilerplate.
-
-`//mypkg/myhelpers.bzl`:
-
-```python
-def myhelper():
-    return "abc"
-```
-
-`//mypkg/myhelpers_test.bzl`:
-
-
-```python
-load("@bazel_skylib//lib:unittest.bzl", "asserts", "unittest")
-load(":myhelpers.bzl", "myhelper")
-
-def _myhelper_test_impl(ctx):
-  env = unittest.begin(ctx)
-  asserts.equals(env, "abc", myhelper())
-  return unittest.end(env)
-
-myhelper_test = unittest.make(_myhelper_test_impl)
-
-# No need for a test_myhelper() setup function.
-
-def myhelpers_test_suite(name):
-  # unittest.suite() takes care of instantiating the testing rules and creating
-  # a test_suite.
-  unittest.suite(
-    name,
-    myhelper_test,
-    # ... 
-  )
-```
-
-`//mypkg/BUILD`:
-
-```python
-load(":myhelpers_test.bzl", "myhelpers_test_suite")
-
-myhelpers_test_suite(name = "myhelpers_tests")
-```
-
-For more examples, see Skylib's own [tests](https://github.com/bazelbuild/bazel-skylib/blob/main/tests/BUILD).
diff --git a/6.5.0/rules/verbs-tutorial.mdx b/6.5.0/rules/verbs-tutorial.mdx
deleted file mode 100644
index 37015d4..0000000
--- a/6.5.0/rules/verbs-tutorial.mdx
+++ /dev/null
@@ -1,154 +0,0 @@
----
-title: 'Using Macros to Create Custom Verbs'
----
-
-
-Day-to-day interaction with Bazel happens primarily through a few commands:
-`build`, `test`, and `run`. At times, though, these can feel limited: you may
-want to push packages to a repository, publish documentation for end-users, or
-deploy an application with Kubernetes. But Bazel doesn't have a `publish` or
-`deploy` command – where do these actions fit in?
-
-## The bazel run command
-
-Bazel's focus on hermeticity, reproducibility, and incrementality means the
-`build` and `test` commands aren't helpful for the above tasks. These actions
-may run in a sandbox, with limited network access, and aren't guaranteed to be
-re-run with every `bazel build`.
-
-Instead, rely on `bazel run`: the workhorse for tasks that you *want* to have
-side effects. Bazel users are accustomed to rules that create executables, and
-rule authors can follow a common set of patterns to extend this to
-"custom verbs".
-
-### In the wild: rules_k8s
-
-For example, consider [`rules_k8s`](https://github.com/bazelbuild/rules_k8s),
-the Kubernetes rules for Bazel. Suppose you have the following target:
-
-```python
-# BUILD file in //application/k8s
-k8s_object(
-    name = "staging",
-    kind = "deployment",
-    cluster = "testing",
-    template = "deployment.yaml",
-)
-```
-
-The [`k8s_object` rule](https://github.com/bazelbuild/rules_k8s#usage) builds a
-standard Kubernetes YAML file when `bazel build` is used on the `staging`
-target. However, the `k8s_object` macro also creates additional targets with
-names like `:staging.apply` and `:staging.delete`. These build scripts that
-perform those actions, and when executed with `bazel run :staging.apply`, they
-behave like our own `bazel k8s-apply` or `bazel k8s-delete` commands.
-
-### Another example: ts_api_guardian_test
-
-This pattern can also be seen in the Angular project. The
-[`ts_api_guardian_test` macro](https://github.com/angular/angular/blob/16ac611a8410e6bcef8ffc779f488ca4fa102155/tools/ts-api-guardian/index.bzl#L22)
-produces two targets. The first is a standard `nodejs_test` target which compares
-some generated output against a "golden" file (that is, a file containing the
-expected output). This can be built and run with a normal `bazel
-test` invocation. In `angular-cli`, you can run [one such
-target](https://github.com/angular/angular-cli/blob/e1269cb520871ee29b1a4eec6e6c0e4a94f0b5fc/etc/api/BUILD)
-with `bazel test //etc/api:angular_devkit_core_api`.
-
-Over time, this golden file may need to be updated for legitimate reasons.
-Updating this manually is tedious and error-prone, so this macro also provides
-a `nodejs_binary` target that updates the golden file, instead of comparing
-against it. Effectively, the same test script can be written to run in "verify"
-or "accept" mode, based on how it's invoked. This follows the same pattern
-you've learned already: there is no native `bazel test-accept` command, but the
-same effect can be achieved with
-`bazel run //etc/api:angular_devkit_core_api.accept`. 
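-
-The core of such a dual-mode script is small. Here is a minimal sketch (the
-file name and arguments are hypothetical, not taken from the Angular
-implementation):
-
-```shell
-#!/bin/bash
-# Usage: golden.sh <golden_file> <actual_output> [--accept]
-if [ "$3" = "--accept" ]; then
-  cp "$2" "$1"        # accept mode: overwrite the golden file
-else
-  diff -u "$1" "$2"   # verify mode: a non-zero exit fails the test
-fi
-```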
- -This pattern can be quite powerful, and turns out to be quite common once you -learn to recognize it. - -## Adapting your own rules - -[Macros](/rules/macros) are the heart of this pattern. Macros are used like -rules, but they can create several targets. Typically, they will create a -target with the specified name which performs the primary build action: perhaps -it builds a normal binary, a Docker image, or an archive of source code. In -this pattern, additional targets are created to produce scripts performing side -effects based on the output of the primary target, like publishing the -resulting binary or updating the expected test output. - -To illustrate this, wrap an imaginary rule that generates a website with -[Sphinx](https://www.sphinx-doc.org) with a macro to create an additional -target that allows the user to publish it when ready. Consider the following -existing rule for generating a website with Sphinx: - -```python -_sphinx_site = rule( - implementation = _sphinx_impl, - attrs = {"srcs": attr.label_list(allow_files = [".rst"])}, -) -``` - -Next, consider a rule like the following, which builds a script that, when run, -publishes the generated pages: - -```python -_sphinx_publisher = rule( - implementation = _publish_impl, - attrs = { - "site": attr.label(), - "_publisher": attr.label( - default = "//internal/sphinx:publisher", - executable = True, - ), - }, - executable = True, -) -``` - -Finally, define the following macro to create targets for both of the above -rules together: - -```python -def sphinx_site(name, srcs = [], **kwargs): - # This creates the primary target, producing the Sphinx-generated HTML. - _sphinx_site(name = name, srcs = srcs, **kwargs) - # This creates the secondary target, which produces a script for publishing - # the site generated above. - _sphinx_publisher(name = "%s.publish" % name, site = name, **kwargs) -``` - -In the `BUILD` files, use the macro as though it just creates the primary -target: - -```python -sphinx_site( - name = "docs", - srcs = ["index.md", "providers.md"], -) -``` - -In this example, a "docs" target is created, just as though the macro were a -standard, single Bazel rule. When built, the rule generates some configuration -and runs Sphinx to produce an HTML site, ready for manual inspection. However, -an additional "docs.publish" target is also created, which builds a script for -publishing the site. Once you check the output of the primary target, you can -use `bazel run :docs.publish` to publish it for public consumption, just like -an imaginary `bazel publish` command. - -It's not immediately obvious what the implementation of the `_sphinx_publisher` -rule might look like. Often, actions like this write a _launcher_ shell script. -This method typically involves using -[`ctx.actions.expand_template`](lib/actions#expand_template) -to write a very simple shell script, in this case invoking the publisher binary -with a path to the output of the primary target. This way, the publisher -implementation can remain generic, the `_sphinx_site` rule can just produce -HTML, and this small script is all that's necessary to combine the two -together. 
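-
-For illustration, here is a rough sketch of what `_publish_impl` could look
-like, assuming a hypothetical `publish.sh.tpl` template wired in through an
-additional private `_template` attribute (declared with
-`allow_single_file = True`); it is a sketch, not a drop-in implementation:
-
-```python
-def _publish_impl(ctx):
-    site_files = ctx.attr.site[DefaultInfo].files.to_list()
-    script = ctx.actions.declare_file(ctx.label.name + ".sh")
-    ctx.actions.expand_template(
-        output = script,
-        template = ctx.file._template,  # hypothetical publish.sh.tpl
-        is_executable = True,
-        substitutions = {
-            "%PUBLISHER%": ctx.executable._publisher.short_path,
-            "%SITE%": " ".join([f.short_path for f in site_files]),
-        },
-    )
-    # The publisher binary and the generated site must be in the runfiles so
-    # that the launcher can find them when invoked via `bazel run`.
-    runfiles = ctx.runfiles(files = site_files + [ctx.executable._publisher])
-    return [DefaultInfo(executable = script, runfiles = runfiles)]
-```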
-
-In `rules_k8s`, this is indeed what `.apply` does:
-[`expand_template`](https://github.com/bazelbuild/rules_k8s/blob/f10e7025df7651f47a76abf1db5ade1ffeb0c6ac/k8s/object.bzl#L213-L241)
-writes a very simple Bash script, based on
-[`apply.sh.tpl`](https://github.com/bazelbuild/rules_k8s/blob/f10e7025df7651f47a76abf1db5ade1ffeb0c6ac/k8s/apply.sh.tpl),
-which runs `kubectl` with the output of the primary target. This script can
-then be built and run with `bazel run :staging.apply`, effectively providing a
-`k8s-apply` command for `k8s_object` targets.
diff --git a/6.5.0/run/bazelrc.mdx b/6.5.0/run/bazelrc.mdx
deleted file mode 100644
index ec0a214..0000000
--- a/6.5.0/run/bazelrc.mdx
+++ /dev/null
@@ -1,244 +0,0 @@
----
-title: 'Write bazelrc configuration files'
----
-
-
-
-Bazel accepts many options. Some options are varied frequently (for example,
-`--subcommands`) while others stay the same across several builds (such as
-`--package_path`). To avoid specifying these unchanged options for every build
-(and other commands), you can specify options in a configuration file, called
-`.bazelrc`.
-
-### Where are the `.bazelrc` files?
-
-Bazel looks for optional configuration files in the following locations,
-in the order shown below. The options are interpreted in this order, so
-options in later files can override a value from an earlier file if a
-conflict arises. All options that control which of these files are loaded are
-startup options, which means they must occur after `bazel` and
-before the command (`build`, `test`, etc).
-
-1. **The system RC file**, unless `--nosystem_rc` is present.
-
-   Path:
-
-   - On Linux/macOS/Unixes: `/etc/bazel.bazelrc`
-   - On Windows: `%ProgramData%\bazel.bazelrc`
-
-   It is not an error if this file does not exist.
-
-   If another system-specified location is required, you must build a custom
-   Bazel binary, overriding the `BAZEL_SYSTEM_BAZELRC_PATH` value in
-   [`//src/main/cpp:option_processor`](https://github.com/bazelbuild/bazel/blob/0.28.0/src/main/cpp/BUILD#L141).
-   The system-specified location may contain environment variable references,
-   such as `${VAR_NAME}` on Unix or `%VAR_NAME%` on Windows.
-
-2. **The workspace RC file**, unless `--noworkspace_rc` is present.
-
-   Path: `.bazelrc` in your workspace directory (next to the main
-   `WORKSPACE` file).
-
-   It is not an error if this file does not exist.
-
-3. **The home RC file**, unless `--nohome_rc` is present.
-
-   Path:
-
-   - On Linux/macOS/Unixes: `$HOME/.bazelrc`
-   - On Windows: `%USERPROFILE%\.bazelrc` if it exists, or `%HOME%/.bazelrc`
-
-   It is not an error if this file does not exist.
-
-4. **The user-specified RC file**, if specified with
-   `--bazelrc=file`
-
-   This flag is optional and can be specified multiple times.
-
-   `/dev/null` indicates that all further `--bazelrc`s will be ignored, which
-   is useful to disable the search for a user rc file, such as in release builds.
-
-   For example:
-
-   ```
-   --bazelrc=x.rc --bazelrc=y.rc --bazelrc=/dev/null --bazelrc=z.rc
-   ```
-
-   - `x.rc` and `y.rc` are read.
-   - `z.rc` is ignored due to the prior `/dev/null`.
-
-In addition to this optional configuration file, Bazel looks for a global rc
-file. For more details, see the [global bazelrc section](#global-bazelrc).
-
-
-### `.bazelrc` syntax and semantics
-
-Like all UNIX "rc" files, the `.bazelrc` file is a text file with a line-based
-grammar. Empty lines and lines starting with `#` (comments) are ignored. 
Each line contains a sequence of words, which are tokenized according to the
-same rules as the Bourne shell.
-
-#### Imports
-
-Lines that start with `import` or `try-import` are special: use these to load
-other "rc" files. To specify a path that is relative to the workspace root,
-write `import %workspace%/path/to/bazelrc`.
-
-The difference between `import` and `try-import` is that Bazel fails if the
-`import`'ed file is missing (or can't be read), but not so for a `try-import`'ed
-file.
-
-Import precedence:
-
-- Options in the imported file take precedence over options specified before
-  the import statement.
-- Options specified after the import statement take precedence over the
-  options in the imported file.
-- Options in files imported later take precedence over files imported earlier.
-
-#### Option defaults
-
-Most lines of a bazelrc define default option values. The first word on each
-line specifies when these defaults are applied:
-
-- `startup`: startup options, which go before the command, and are described
-  in `bazel help startup_options`.
-- `common`: options that should be applied to all Bazel commands that support
-  them. If a command does not support an option specified in this way, the
-  option is ignored so long as it is valid for *some* other Bazel command.
-  Note that this only applies to option names: if the current command accepts
-  an option with the specified name, but doesn't support the specified value,
-  it will fail.
-- `always`: options that apply to all Bazel commands. If a command does not
-  support an option specified in this way, it will fail.
-- _`command`_: Bazel command, such as `build` or `query`, to which the options
-  apply. These options also apply to all commands that inherit from the
-  specified command. (For example, `test` inherits from `build`.)
-
-Each of these lines may be used more than once, and the arguments that follow
-the first word are combined as if they had appeared on a single line. (Users of
-CVS, another tool with a "Swiss army knife" command-line interface, will find
-the syntax similar to that of `.cvsrc`.) For example, the lines:
-
-```posix-terminal
-build --test_tmpdir=/tmp/foo --verbose_failures
-
-build --test_tmpdir=/tmp/bar
-```
-
-are combined as:
-
-```posix-terminal
-build --test_tmpdir=/tmp/foo --verbose_failures --test_tmpdir=/tmp/bar
-```
-
-so the effective flags are `--verbose_failures` and `--test_tmpdir=/tmp/bar`.
-
-Option precedence:
-
-- Options on the command line always take precedence over those in rc files.
-  For example, if an rc file says `build -c opt` but the command line flag is
-  `-c dbg`, the command line flag takes precedence.
-- Within the rc file, precedence is governed by specificity: lines for a more
-  specific command take precedence over lines for a less specific command.
-
-  Specificity is defined by inheritance. Some commands inherit options from
-  other commands, making the inheriting command more specific than the base
-  command. For example `test` inherits from the `build` command, so all `bazel
-  build` flags are valid for `bazel test`, and all `build` lines apply also to
-  `bazel test` unless there's a `test` line for the same option. If the rc
-  file says:
-
-  ```posix-terminal
-  test -c dbg --test_env=PATH
-
-  build -c opt --verbose_failures
-  ```
-
-  then `bazel build //foo` will use `-c opt --verbose_failures`, and `bazel
-  test //foo` will use `--verbose_failures -c dbg --test_env=PATH`.
-
-  The inheritance (specificity) graph is:
-
-  * Every command inherits from `common`
-  * The following commands inherit from (and are more specific than)
-    `build`: `test`, `run`, `clean`, `mobile-install`, `info`,
-    `print_action`, `config`, `cquery`, and `aquery`
-  * `coverage` inherits from `test`
-
-- Two lines specifying options for the same command at equal specificity are
-  parsed in the order in which they appear within the file.
-
-- Because this precedence rule does not match the file order, it helps
-  readability if you follow the precedence order within rc files: start with
-  `common` options at the top, and end with the most-specific commands at the
-  bottom of the file. This way, the order in which the options are read is the
-  same as the order in which they are applied, which is more intuitive.
-
-The arguments specified on a line of an rc file may include arguments that are
-not options, such as the names of build targets, and so on. These, like the
-options specified in the same files, have lower precedence than their siblings
-on the command line, and are always prepended to the explicit list of
-non-option arguments.
-
-#### `--config`
-
-In addition to setting option defaults, the rc file can be used to group options
-and provide a shorthand for common groupings. This is done by adding a `:name`
-suffix to the command. These options are ignored by default, but will be
-included when the option `--config=name` is present,
-either on the command line or in a `.bazelrc` file, recursively, even inside of
-another config definition. The options specified by `command:name` will only be
-expanded for applicable commands, in the precedence order described above.
-
-Note: Configs can be defined in any `.bazelrc` file, and all lines of the form
-`command:name` (for applicable commands) will be expanded, across the
-different rc files. In order to avoid unintentional name sharing, we suggest
-that configs defined in personal rc files start with an underscore (`_`).
-
-`--config=foo` expands to the options defined in
-[the rc files](#bazelrc-file-locations) "in-place" so that the options
-specified for the config have the same precedence that the `--config=foo` option
-had.
-
-This syntax does not extend to the use of `startup` to set
-[startup options](#option-defaults). Setting
-`startup:config-name --some_startup_option` in the `.bazelrc` will be ignored.
-
-#### Example
-
-Here's an example `~/.bazelrc` file:
-
-```
-# Bob's Bazel option defaults
-
-startup --host_jvm_args=-XX:-UseParallelGC
-import /home/bobs_project/bazelrc
-build --show_timestamps --keep_going --jobs 600
-build --color=yes
-query --keep_going
-
-# Definition of --config=memcheck
-build:memcheck --strip=never --test_timeout=3600
-```
-
-### Other files governing Bazel's behavior
-
-#### `.bazelignore`
-
-You can specify directories within the workspace
-that you want Bazel to ignore, such as related projects
-that use other build systems. Place a file called
-`.bazelignore` at the root of the workspace
-and add the directories you want Bazel to ignore, one per
-line. Entries are relative to the workspace root.
-
-### The global bazelrc file
-
-Bazel reads optional bazelrc files in this order:
-
-- System rc-file located at `/etc/bazel.bazelrc`.
-- Workspace rc-file located at `$workspace/tools/bazel.rc`.
-- Home rc-file located at `$HOME/.bazelrc`.
-
-Each bazelrc file listed here has a corresponding flag which can be used to
-disable them (e.g.
`--nosystem_rc`, `--noworkspace_rc`, `--nohome_rc`). You can also make Bazel
-ignore all bazelrcs by passing the `--ignore_all_rc_files` startup option.
diff --git a/6.5.0/run/client-server.mdx b/6.5.0/run/client-server.mdx
deleted file mode 100644
index be1f600..0000000
--- a/6.5.0/run/client-server.mdx
+++ /dev/null
@@ -1,48 +0,0 @@
----
-title: 'Client/server implementation'
----
-
-
-
-The Bazel system is implemented as a long-lived server process. This allows it
-to perform many optimizations not possible with a batch-oriented implementation,
-such as caching of BUILD files, dependency graphs, and other metadata from one
-build to the next. This improves the speed of incremental builds, and allows
-different commands, such as `build` and `query`, to share the same cache of
-loaded packages, making queries very fast.
-
-When you run `bazel`, you're running the client. The client finds the server
-based on the output base, which by default is determined by the path of the base
-workspace directory and your userid, so if you build in multiple workspaces,
-you'll have multiple output bases and thus multiple Bazel server processes.
-Multiple users on the same workstation can build concurrently in the same
-workspace because their output bases will differ (different userids). If the
-client cannot find a running server instance, it starts a new one. The server
-process will stop after a period of inactivity (3 hours by default, which can
-be modified using the startup option `--max_idle_secs`).
-
-For the most part, the fact that there is a server running is invisible to the
-user, but sometimes it helps to bear this in mind. For example, if you're
-running scripts that perform a lot of automated builds in different directories,
-it's important to ensure that you don't accumulate a lot of idle servers; you
-can do this by explicitly shutting them down when you're finished with them, or
-by specifying a short timeout period.
-
-The name of a Bazel server process appears in the output of `ps x` or `ps -e f`
-as `bazel(dirname)`, where _dirname_ is the basename of the
-directory enclosing the root of your workspace directory. For example:
-
-```posix-terminal
-ps -e f
-16143 ?        Sl     3:00 bazel(src-johndoe2) -server -Djava.library.path=...
-```
-
-This makes it easier to find out which server process belongs to a given
-workspace. (Beware that with certain other options to `ps`, Bazel server
-processes may be named just `java`.) Bazel servers can be stopped using the
-[shutdown](/docs/user-manual#shutdown) command.
-
-When running `bazel`, the client first checks that the server is the appropriate
-version; if not, the server is stopped and a new one started. This ensures that
-the use of a long-running server process doesn't interfere with proper
-versioning.
diff --git a/6.5.0/run/scripts.mdx b/6.5.0/run/scripts.mdx
deleted file mode 100644
index 612bace..0000000
--- a/6.5.0/run/scripts.mdx
+++ /dev/null
@@ -1,125 +0,0 @@
----
-title: 'Calling Bazel from scripts'
----
-
-
-You can call Bazel from scripts to perform a build, run tests, or query
-the dependency graph. Bazel has been designed to enable effective scripting, but
-this section lists some details to bear in mind to make your scripts more
-robust.
-
-### Choosing the output base
-
-The `--output_base` option controls where the Bazel process writes the outputs
-of a build, as well as the various working files Bazel uses internally. One of
-these is a lock that guards against concurrent mutation of the output base by
-multiple Bazel processes.
-
-Choosing the correct output base directory for your script depends on several
-factors. If you need to put the build outputs in a specific location, this will
-dictate the output base you need to use. If you are making a "read only" call to
-Bazel (such as `bazel query`), the locking factors will be more important. In
-particular, if you need to run multiple instances of your script concurrently,
-you will need to give each one a different (or random) output base.
-
-If you use the default output base value, you will be contending for the same
-lock used by the user's interactive Bazel commands. If the user issues
-long-running commands such as builds, your script will have to wait for those
-commands to complete before it can continue.
-
-### Notes about server mode
-
-By default, Bazel uses a long-running [server process](/docs/client-server) as an
-optimization. When running Bazel in a script, don't forget to call `shutdown`
-when you're finished with the server, or specify `--max_idle_secs=5` so that
-idle servers shut themselves down promptly.
-
-### What exit code will I get?
-
-Bazel attempts to differentiate failures due to the source code under
-consideration from external errors that prevent Bazel from executing properly.
-Bazel execution can result in the following exit codes:
-
-**Exit codes common to all commands:**
-
-- `0` - Success
-- `2` - Command line problem, bad or illegal flags or command combination, or
-  bad environment variables. Your command line must be modified.
-- `8` - Build interrupted, but Bazel terminated with an orderly shutdown.
-- `9` - The server lock is held and `--noblock_for_lock` was passed.
-- `32` - External environment failure not on this machine.
-- `33` - Bazel ran out of memory and crashed. You need to modify your command line.
-- `34` - Reserved for Google-internal use.
-- `35` - Reserved for Google-internal use.
-- `36` - Local environmental issue, suspected permanent.
-- `37` - Unhandled exception / internal Bazel error.
-- `38` - Reserved for Google-internal use.
-- `39` - Blobs required by Bazel are evicted from the remote cache.
-- `41-44` - Reserved for Google-internal use.
-- `45` - Error publishing results to the Build Event Service.
-- `47` - Reserved for Google-internal use.
-
-**Return codes for commands `bazel build`, `bazel test`:**
-
-- `1` - Build failed.
-- `3` - Build OK, but some tests failed or timed out.
-- `4` - Build successful, but no tests were found even though testing was
-  requested.
-
-**For `bazel run`:**
-
-- `1` - Build failed.
-- If the build succeeds but the executed subprocess returns a non-zero exit
-  code, it will be the exit code of the command as well.
-
-**For `bazel query`:**
-
-- `3` - Partial success, but the query encountered one or more errors in the
-  input BUILD file set, and therefore the results of the operation are not 100%
-  reliable. This is likely due to a `--keep_going` option on the command line.
-- `7` - Command failure.
-
-Future Bazel versions may add additional exit codes, replacing the generic
-failure exit code `1` with a different non-zero value with a particular meaning.
-However, all non-zero exit values will always constitute an error.
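-
-Putting these pieces together, a script might pick a private output base, keep
-the server's idle timeout short, and branch on the exit code. This is a minimal
-sketch only; the `//main:app` target and the output file name are placeholders
-for your own project:
-
-```bash
-#!/bin/bash
-set -u
-
-# A private output base avoids contending for the lock used by
-# interactive Bazel commands in the same workspace.
-output_base="$(mktemp -d)"
-
-# Startup options go before the command; --max_idle_secs makes the
-# server started for this output base shut down promptly afterwards.
-bazel --output_base="$output_base" --max_idle_secs=5 \
-    query "deps(//main:app)" > deps.txt
-status=$?
-
-case "$status" in
-  0) echo "Query succeeded." ;;
-  3) echo "Partial success: some BUILD files had errors." ;;
-  *) echo "Query failed with exit code $status." >&2
-     exit "$status" ;;
-esac
-```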
-
-
-### Reading the .bazelrc file
-
-By default, Bazel reads the [`.bazelrc` file](/docs/bazelrc) from the base
-workspace directory or the user's home directory. Whether or not this is
-desirable is a choice for your script; if your script needs to be perfectly
-hermetic (such as when doing release builds), you should disable reading the
-`.bazelrc` file by using the option `--bazelrc=/dev/null`. If you want to
-perform a build using the user's preferred settings, the default behavior is
-better.
-
-### Command log
-
-The Bazel output is also available in a command log file, which you can find
-with the following command:
-
-```posix-terminal
-bazel info command_log
-```
-
-The command log file contains the interleaved stdout and stderr streams of the
-most recent Bazel command. Note that running `bazel info` will overwrite the
-contents of this file, since it then becomes the most recent Bazel command.
-However, the location of the command log file will not change unless you change
-the setting of the `--output_base` or `--output_user_root` options.
-
-### Parsing output
-
-The Bazel output is quite easy to parse for many purposes. Two options that may
-be helpful for your script are `--noshow_progress`, which suppresses progress
-messages, and `--show_result n`, which controls whether or
-not "build up-to-date" messages are printed; these messages may be parsed to
-discover which targets were successfully built, and the location of the output
-files they created. Be sure to specify a very large value of _n_ if you rely on
-these messages.
-
-## Troubleshooting performance by profiling
-
-See the [Performance Profiling](/rules/performance#performance-profiling) section.
diff --git a/6.5.0/start/android-app.mdx b/6.5.0/start/android-app.mdx
deleted file mode 100644
index 67ce43e..0000000
--- a/6.5.0/start/android-app.mdx
+++ /dev/null
@@ -1,422 +0,0 @@
----
-title: 'Bazel Tutorial: Build an Android App'
----
-
-
-This tutorial covers how to build a simple Android app using Bazel.
-
-Bazel supports building Android apps using the
-[Android rules](/reference/be/android).
-
-This tutorial is intended for Windows, macOS, and Linux users and does not
-require experience with Bazel or Android app development. You do not need to
-write any Android code in this tutorial.
-
-## What you'll learn
-
-In this tutorial you learn how to:
-
-* Set up your environment by installing Bazel and Android Studio, and
-  downloading the sample project.
-* Set up a Bazel [workspace](/reference/be/workspace) that contains the source code
-  for the app and a `WORKSPACE` file that identifies the top level of the
-  workspace directory.
-* Update the `WORKSPACE` file to contain references to the required
-  external dependencies, like the Android SDK.
-* Create a `BUILD` file.
-* Build the app with Bazel.
-* Deploy and run the app on an Android emulator or physical device.
-
-## Before you begin
-
-### Install Bazel
-
-Before you begin the tutorial, install the following software:
-
-* **Bazel.** To install, follow the [installation instructions](/install).
-* **Android Studio.** To install, follow the steps to [download Android
-  Studio](https://developer.android.com/sdk/index.html).
-  Execute the setup wizard to download the SDK and configure your environment.
-* (Optional) **Git.** Use `git` to download the Android app project.
-
-### Get the sample project
-
-For the sample project, use a basic Android app project in
-[Bazel's examples repository](https://github.com/bazelbuild/examples).
- -This app has a single button that prints a greeting when clicked: - -![Button greeting](/docs/images/android_tutorial_app.png "Tutorial app button greeting") - -**Figure 1.** Android app button greeting. - -Clone the repository with `git` (or [download the ZIP file -directly](https://github.com/bazelbuild/examples/archive/master.zip)): - -```posix-terminal -git clone https://github.com/bazelbuild/examples -``` - -The sample project for this tutorial is in `examples/android/tutorial`. For -the rest of the tutorial, you will be executing commands in this directory. - -### Review the source files - -Take a look at the source files for the app. - -``` -. -├── README.md -└── src - └── main - ├── AndroidManifest.xml - └── java - └── com - └── example - └── bazel - ├── AndroidManifest.xml - ├── Greeter.java - ├── MainActivity.java - └── res - ├── layout - │ └── activity_main.xml - └── values - ├── colors.xml - └── strings.xml -``` - -The key files and directories are: - -| Name | Location | -| ----------------------- | ---------------------------------------------------------------------------------------- | -| Android manifest files | `src/main/AndroidManifest.xml` and `src/main/java/com/example/bazel/AndroidManifest.xml` | -| Android source files | `src/main/java/com/example/bazel/MainActivity.java` and `Greeter.java` | -| Resource file directory | `src/main/java/com/example/bazel/res/` | - - -## Build with Bazel - -### Set up the workspace - -A [workspace](/concepts/build-ref#workspace) is a directory that contains the -source files for one or more software projects, and has a `WORKSPACE` file at -its root. - -The `WORKSPACE` file may be empty or may contain references to [external -dependencies](/docs/external) required to build your project. - -First, run the following command to create an empty `WORKSPACE` file: - -| OS | Command | -| ------------------------ | ----------------------------------- | -| Linux, macOS | `touch WORKSPACE` | -| Windows (Command Prompt) | `type nul > WORKSPACE` | -| Windows (PowerShell) | `New-Item WORKSPACE -ItemType file` | - -### Running Bazel - -You can now check if Bazel is running correctly with the command: - -```posix-terminal -bazel info workspace -``` - -If Bazel prints the path of the current directory, you're good to go! If the -`WORKSPACE` file does not exist, you may see an error message like: - -``` -ERROR: The 'info' command is only supported from within a workspace. -``` - -### Integrate with the Android SDK - -Bazel needs to run the Android SDK -[build tools](https://developer.android.com/tools/revisions/build-tools.html) -to build the app. This means that you need to add some information to your -`WORKSPACE` file so that Bazel knows where to find them. - -Add the following line to your `WORKSPACE` file: - -```python -android_sdk_repository(name = "androidsdk") -``` - -This will use the Android SDK at the path referenced by the `ANDROID_HOME` -environment variable, and automatically detect the highest API level and the -latest version of build tools installed within that location. - -You can set the `ANDROID_HOME` variable to the location of the Android SDK. Find -the path to the installed SDK using Android Studio's [SDK -Manager](https://developer.android.com/studio/intro/update#sdk-manager). 
-Assuming the SDK is installed to its default location, you can use the following
-commands to set the `ANDROID_HOME` variable:
-
-| OS                       | Command                                             |
-| ------------------------ | --------------------------------------------------- |
-| Linux                    | `export ANDROID_HOME=$HOME/Android/Sdk/`            |
-| macOS                    | `export ANDROID_HOME=$HOME/Library/Android/sdk`     |
-| Windows (Command Prompt) | `set ANDROID_HOME=%LOCALAPPDATA%\Android\Sdk`       |
-| Windows (PowerShell)     | `$env:ANDROID_HOME="$env:LOCALAPPDATA\Android\Sdk"` |
-
-The above commands set the variable only for the current shell session. To make
-them permanent, run the following commands:
-
-| OS                       | Command                                              |
-| ------------------------ | ---------------------------------------------------- |
-| Linux                    | `echo "export ANDROID_HOME=$HOME/Android/Sdk/" >> ~/.bashrc` |
-| macOS                    | `echo "export ANDROID_HOME=$HOME/Library/Android/sdk" >> ~/.bashrc` |
-| Windows (Command Prompt) | `setx ANDROID_HOME "%LOCALAPPDATA%\Android\Sdk"`     |
-| Windows (PowerShell)     | `[System.Environment]::SetEnvironmentVariable('ANDROID_HOME', "$env:LOCALAPPDATA\Android\Sdk", [System.EnvironmentVariableTarget]::User)` |
-
-You can also explicitly specify the absolute path of the Android SDK,
-the API level, and the version of build tools to use by including the `path`,
-`api_level`, and `build_tools_version` attributes. If `api_level` and
-`build_tools_version` are not specified, the `android_sdk_repository` rule will
-use the respective latest version available in the SDK. You can specify any
-combination of these attributes, as long as they are present in the SDK, for
-example:
-
-```python
-android_sdk_repository(
-    name = "androidsdk",
-    path = "/path/to/Android/sdk",
-    api_level = 25,
-    build_tools_version = "30.0.3",
-)
-```
-
-On Windows, note that the `path` attribute must use the mixed-style path, that
-is, a Windows path with forward slashes:
-
-```python
-android_sdk_repository(
-    name = "androidsdk",
-    path = "c:/path/to/Android/sdk",
-)
-```
-
-**Optional:** If you want to compile native code into your Android app, you
-also need to download the [Android
-NDK](https://developer.android.com/ndk/downloads/index.html)
-and tell Bazel where to find it by adding the following line to your `WORKSPACE` file:
-
-```python
-android_ndk_repository(name = "androidndk")
-```
-
-Similar to `android_sdk_repository`, the path to the Android NDK is inferred
-from the `ANDROID_NDK_HOME` environment variable by default. The path can also
-be explicitly specified with a `path` attribute on `android_ndk_repository`.
-
-For more information, read [Using the Android Native Development Kit with
-Bazel](/docs/android-ndk).
-
-`api_level` is the version of the Android API that the SDK and NDK
-target - for example, 23 for Android 6.0 and 25 for Android 7.1. If not
-explicitly set, `api_level` defaults to the highest available API level for
-`android_sdk_repository` and `android_ndk_repository`.
-
-It's not necessary to set the API levels to the same value for the SDK and NDK.
-[This page](https://developer.android.com/ndk/guides/stable_apis.html)
-contains a map from Android releases to NDK-supported API levels.
-
-### Create a BUILD file
-
-A [`BUILD` file](/concepts/build-files) describes the relationship
-between a set of build outputs, like compiled Android resources from `aapt` or
-class files from `javac`, and their dependencies. These dependencies may be
-source files (Java, C++) in your workspace or other build outputs. `BUILD` files
-are written in a language called **Starlark**.
- -`BUILD` files are part of a concept in Bazel known as the *package hierarchy*. -The package hierarchy is a logical structure that overlays the directory -structure in your workspace. Each [package](/concepts/build-ref#packages) is a -directory (and its subdirectories) that contains a related set of source files -and a `BUILD` file. The package also includes any subdirectories, excluding -those that contain their own `BUILD` file. The *package name* is the path to the -`BUILD` file relative to the `WORKSPACE`. - -Note that Bazel's package hierarchy is conceptually different from the Java -package hierarchy of your Android App directory where the `BUILD` file is -located, although the directories may be organized identically. - -For the simple Android app in this tutorial, the source files in `src/main/` -comprise a single Bazel package. A more complex project may have many nested -packages. - -#### Add an android_library rule - -A `BUILD` file contains several different types of declarations for Bazel. The -most important type is the -[build rule](/concepts/build-files#types-of-build-rules), which tells -Bazel how to build an intermediate or final software output from a set of source -files or other dependencies. Bazel provides two build rules, -[`android_library`](/reference/be/android#android_library) and -[`android_binary`](/reference/be/android#android_binary), that you can use to -build an Android app. - -For this tutorial, you'll first use the -`android_library` rule to tell Bazel to build an [Android library -module](http://developer.android.com/tools/projects/index.html#LibraryProjects) -from the app source code and resource files. You'll then use the -`android_binary` rule to tell Bazel how to build the Android application package. - -Create a new `BUILD` file in the `src/main/java/com/example/bazel` directory, -and declare a new `android_library` target: - -`src/main/java/com/example/bazel/BUILD`: - -```python -package( - default_visibility = ["//src:__subpackages__"], -) - -android_library( - name = "greeter_activity", - srcs = [ - "Greeter.java", - "MainActivity.java", - ], - manifest = "AndroidManifest.xml", - resource_files = glob(["res/**"]), -) -``` - -The `android_library` build rule contains a set of attributes that specify the -information that Bazel needs to build a library module from the source files. -Note also that the name of the rule is `greeter_activity`. You'll reference the -rule using this name as a dependency in the `android_binary` rule. - -#### Add an android_binary rule - -The [`android_binary`](/reference/be/android#android_binary) rule builds -the Android application package (`.apk` file) for your app. - -Create a new `BUILD` file in the `src/main/` directory, -and declare a new `android_binary` target: - -`src/main/BUILD`: - -```python -android_binary( - name = "app", - manifest = "AndroidManifest.xml", - deps = ["//src/main/java/com/example/bazel:greeter_activity"], -) -``` - -Here, the `deps` attribute references the output of the `greeter_activity` rule -you added to the `BUILD` file above. This means that when Bazel builds the -output of this rule it checks first to see if the output of the -`greeter_activity` library rule has been built and is up-to-date. If not, Bazel -builds it and then uses that output to build the application package file. - -Now, save and close the file. - -### Build the app - -Try building the app! 
Run the following command to build the
-`android_binary` target:
-
-```posix-terminal
-bazel build //src/main:app
-```
-
-The [`build`](/docs/user-manual#build) subcommand instructs Bazel to build the
-target that follows. The target is specified as the name of a build rule inside
-a `BUILD` file, along with the package path relative to your workspace
-directory. For this example, the target is `app` and the package path is
-`//src/main/`.
-
-Note that you can sometimes omit the package path or target name, depending on
-your current working directory at the command line and the name of the target.
-For more details about target labels and paths, see [Labels](/concepts/labels).
-
-Bazel will start to build the sample app. During the build process, its output
-will appear similar to the following:
-
-```bash
-INFO: Analysed target //src/main:app (0 packages loaded, 0 targets configured).
-INFO: Found 1 target...
-Target //src/main:app up-to-date:
-  bazel-bin/src/main/app_deploy.jar
-  bazel-bin/src/main/app_unsigned.apk
-  bazel-bin/src/main/app.apk
-```
-
-#### Locate the build outputs
-
-Bazel puts the outputs of both intermediate and final build operations in a set
-of per-user, per-workspace output directories. These directories are symlinked
-from the following locations at the top level of the project directory, where
-the `WORKSPACE` file is:
-
-* `bazel-bin` stores binary executables and other runnable build outputs
-* `bazel-genfiles` stores intermediary source files that are generated by
-  Bazel rules
-* `bazel-out` stores other types of build outputs
-
-Bazel stores the Android `.apk` file generated using the `android_binary` rule
-in the `bazel-bin/src/main` directory, where the subdirectory name `src/main` is
-derived from the name of the Bazel package.
-
-At a command prompt, list the contents of this directory and find the `app.apk`
-file:
-
-| OS                       | Command                  |
-| ------------------------ | ------------------------ |
-| Linux, macOS             | `ls bazel-bin/src/main`  |
-| Windows (Command Prompt) | `dir bazel-bin\src\main` |
-| Windows (PowerShell)     | `ls bazel-bin\src\main`  |
-
-### Run the app
-
-You can now deploy the app to a connected Android device or emulator from the
-command line using the [`bazel
-mobile-install`](/docs/user-manual#mobile-install) command. This command uses
-the Android Debug Bridge (`adb`) to communicate with the device. You must set up
-your device to use `adb` following the instructions in [Android Debug
-Bridge](http://developer.android.com/tools/help/adb.html) before deployment. You
-can also choose to install the app on the Android emulator included in Android
-Studio. Make sure the emulator is running before executing the command below.
-
-Enter the following:
-
-```posix-terminal
-bazel mobile-install //src/main:app
-```
-
-Next, find and launch the "Bazel Tutorial App":
-
-![Bazel tutorial app](/docs/images/android_tutorial_before.png "Bazel tutorial app")
-
-**Figure 2.** Bazel tutorial app.
-
-**Congratulations! You have just installed your first Bazel-built Android app.**
-
-Note that the `mobile-install` subcommand also supports the
-[`--incremental`](/docs/user-manual#mobile-install) flag, which can be used to
-deploy only those parts of the app that have changed since the last deployment.
-
-It also supports the `--start_app` flag to start the app immediately upon
-installing it.
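-
-For example, to redeploy only the changed parts of the app and launch it
-immediately, the two flags can be combined:
-
-```posix-terminal
-bazel mobile-install //src/main:app --incremental --start_app
-```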
-
-## Further reading
-
-For more details, see these pages:
-
-* Open issues on [GitHub](https://github.com/bazelbuild/bazel/issues)
-* More information on [mobile-install](/docs/mobile-install)
-* Integrate external dependencies like AppCompat, Guava and JUnit from Maven
-  repositories using [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external)
-* Run Robolectric tests with the [robolectric-bazel](https://github.com/robolectric/robolectric-bazel)
-  integration.
-* Testing your app with [Android instrumentation tests](/docs/android-instrumentation-test)
-* Integrating C and C++ code into your Android app with the [NDK](/docs/android-ndk)
-* See more Bazel example projects:
-  * [a Kotlin app](https://github.com/bazelbuild/rules_jvm_external/tree/master/examples/android_kotlin_app)
-  * [Robolectric testing](https://github.com/bazelbuild/rules_jvm_external/tree/master/examples/android_local_test)
-  * [Espresso testing](https://github.com/bazelbuild/rules_jvm_external/tree/master/examples/android_instrumentation_test)
-
-Happy building!
diff --git a/6.5.0/start/cpp.mdx b/6.5.0/start/cpp.mdx
deleted file mode 100644
index cb81696..0000000
--- a/6.5.0/start/cpp.mdx
+++ /dev/null
@@ -1,406 +0,0 @@
----
-title: 'Bazel Tutorial: Build a C++ Project'
----
-
-
-## Introduction
-
-New to Bazel? You're in the right place. Follow this First Build tutorial for a
-simplified introduction to using Bazel. This tutorial defines key terms as they
-are used in Bazel's context and walks you through the basics of the Bazel
-workflow. Starting with the tools you need, you will build and run three
-projects with increasing complexity and learn how and why they get more complex.
-
-While Bazel is a [build system](https://bazel.build/basics/build-systems) that
-supports multi-language builds, this tutorial uses a C++ project as an example
-and provides the general guidelines and flow that apply to most languages.
-
-Estimated completion time: 30 minutes.
-
-### Prerequisites
-
-Start by [installing Bazel](https://bazel.build/install), if you haven't
-already. This tutorial uses Git for source control, so for best results
-[install Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) as
-well.
-
-Next, retrieve the sample project from Bazel's GitHub repository by running the
-following in your command-line tool of choice:
-
-```posix-terminal
-git clone https://github.com/bazelbuild/examples
-```
-
-The sample project for this tutorial is in the `examples/cpp-tutorial` directory.
-
-Take a look below at how it's structured:
-
-```
-examples
-└── cpp-tutorial
-    ├──stage1
-    │  ├── main
-    │  │   ├── BUILD
-    │  │   └── hello-world.cc
-    │  └── WORKSPACE
-    ├──stage2
-    │  ├── main
-    │  │   ├── BUILD
-    │  │   ├── hello-world.cc
-    │  │   ├── hello-greet.cc
-    │  │   └── hello-greet.h
-    │  └── WORKSPACE
-    └──stage3
-       ├── main
-       │   ├── BUILD
-       │   ├── hello-world.cc
-       │   ├── hello-greet.cc
-       │   └── hello-greet.h
-       ├── lib
-       │   ├── BUILD
-       │   ├── hello-time.cc
-       │   └── hello-time.h
-       └── WORKSPACE
-```
-
-There are three sets of files, each set representing a stage in this tutorial.
-In the first stage, you will build a single
-[target](https://bazel.build/reference/glossary#target) residing in a single
-[package](https://bazel.build/reference/glossary#package). In the second stage,
-you will build both a binary and a library from a single package. In
-the third and final stage, you will build a project with multiple packages and
-build it with multiple targets.
-
-### Summary: Introduction
-
-By installing Bazel (and Git) and cloning the repository for this tutorial, you
-have laid the foundation for your first build with Bazel. Continue to the next
-section to define some terms and set up your
-[workspace](https://bazel.build/reference/glossary#workspace).
-
-## Getting started
-
-### Set up the workspace
-
-Before you can build a project, you need to set up its workspace. A workspace is
-a directory that holds your project's source files and Bazel's build outputs. It
-also contains these significant files:
-
-* The [`WORKSPACE` file](https://bazel.build/reference/glossary#workspace-file),
-  which identifies the directory and its contents as a Bazel workspace and
-  lives at the root of the project's directory structure.
-* One or more [`BUILD` files](https://bazel.build/reference/glossary#build-file),
-  which tell Bazel how to build different parts of the project. A
-  directory within the workspace that contains a `BUILD` file is a
-  [package](https://bazel.build/reference/glossary#package). (More on packages
-  later in this tutorial.)
-
-In future projects, to designate a directory as a Bazel workspace, create an
-empty file named `WORKSPACE` in that directory. For the purposes of this tutorial,
-a `WORKSPACE` file is already present in each stage.
-
-**NOTE**: When Bazel builds the project, all inputs must be in
-the same workspace. Files residing in different workspaces are independent of
-one another unless linked. More detailed information about workspace rules can
-be found in [this guide](https://bazel.build/reference/be/workspace).
-
-### Understand the BUILD file
-
-A `BUILD` file contains several different types of instructions for Bazel. Each
-`BUILD` file requires at least one [rule](https://bazel.build/reference/glossary#rule)
-as a set of instructions, which tells Bazel how to build the desired outputs,
-such as executable binaries or libraries. Each instance of a build rule in the
-`BUILD` file is called a [target](https://bazel.build/reference/glossary#target)
-and points to a specific set of source files and [dependencies](https://bazel.build/reference/glossary#dependency).
-A target can also point to other targets.
-
-Take a look at the `BUILD` file in the `cpp-tutorial/stage1/main` directory:
-
-```
-cc_binary(
-    name = "hello-world",
-    srcs = ["hello-world.cc"],
-)
-```
-
-In our example, the `hello-world` target instantiates Bazel's built-in
-[cc_binary rule](https://bazel.build/reference/be/c-cpp#cc_binary).
-The rule tells Bazel to build a self-contained executable binary from the
-`hello-world.cc` source file with no dependencies.
-
-### Summary: getting started
-
-Now you are familiar with some key terms, and what they mean in the context of
-this project and Bazel in general. In the next section, you will build and test
-Stage 1 of the project.
-
-## Stage 1: single target, single package
-
-It's time to build the first part of the project. For a visual reference, the
-structure of the Stage 1 section of the project is:
-
-```
-examples
-└── cpp-tutorial
-    └──stage1
-       ├── main
-       │   ├── BUILD
-       │   └── hello-world.cc
-       └── WORKSPACE
-```
-
-Run the following to move to the `cpp-tutorial/stage1` directory:
-
-```posix-terminal
-cd cpp-tutorial/stage1
-```
-
-Next, run:
-
-```posix-terminal
-bazel build //main:hello-world
-```
-
-In the target label, the `//main:` part is the location of the `BUILD` file
-relative to the root of the workspace, and `hello-world` is the target name in
-the `BUILD` file.
-
-Bazel produces something that looks like this:
-
-```
-INFO: Found 1 target...
-Target //main:hello-world up-to-date:
-  bazel-bin/main/hello-world
-INFO: Elapsed time: 2.267s, Critical Path: 0.25s
-```
-
-You just built your first Bazel target. Bazel places build outputs in the
-`bazel-bin` directory at the root of the workspace.
-
-Now test your freshly built binary:
-
-```posix-terminal
-bazel-bin/main/hello-world
-```
-
-This results in a printed "`Hello world`" message.
-
-Here's the dependency graph of Stage 1:
-
-![Dependency graph for hello-world displays a single target with a single source file.](/docs/images/cpp-tutorial-stage1.png "Dependency graph for hello-world displays a single target with a single source file.")
-
-### Summary: stage 1
-
-Now that you have completed your first build, you have a basic idea of how a build
-is structured. In the next stage, you will add complexity by adding another
-target.
-
-## Stage 2: multiple build targets
-
-While a single target is sufficient for small projects, you may want to split
-larger projects into multiple targets and packages. This allows for fast
-incremental builds – that is, Bazel only rebuilds what's changed – and speeds up
-your builds by building multiple parts of a project at once. This stage of the
-tutorial adds a target, and the next adds a package.
-
-This is the directory you are working with for Stage 2:
-
-```
- ├──stage2
- │  ├── main
- │  │   ├── BUILD
- │  │   ├── hello-world.cc
- │  │   ├── hello-greet.cc
- │  │   └── hello-greet.h
- │  └── WORKSPACE
-```
-
-Take a look below at the `BUILD` file in the `cpp-tutorial/stage2/main` directory:
-
-```
-cc_library(
-    name = "hello-greet",
-    srcs = ["hello-greet.cc"],
-    hdrs = ["hello-greet.h"],
-)
-
-cc_binary(
-    name = "hello-world",
-    srcs = ["hello-world.cc"],
-    deps = [
-        ":hello-greet",
-    ],
-)
-```
-
-With this `BUILD` file, Bazel first builds the `hello-greet` library
-(using Bazel's built-in [cc_library rule](https://bazel.build/reference/be/c-cpp#cc_library)),
-then the `hello-world` binary. The `deps` attribute in
-the `hello-world` target tells Bazel that the `hello-greet`
-library is required to build the `hello-world` binary.
-
-Before you can build this new version of the project, you need to change
-directories, switching to the `cpp-tutorial/stage2` directory by running:
-
-```posix-terminal
-cd ../stage2
-```
-
-Now you can build the new binary using the following familiar command:
-
-```posix-terminal
-bazel build //main:hello-world
-```
-
-Once again, Bazel produces something that looks like this:
-
-```
-INFO: Found 1 target...
-Target //main:hello-world up-to-date:
-  bazel-bin/main/hello-world
-INFO: Elapsed time: 2.399s, Critical Path: 0.30s
-```
-
-Now you can test your freshly built binary, which returns another "`Hello world`":
-
-```posix-terminal
-bazel-bin/main/hello-world
-```
-
-If you now modify `hello-greet.cc` and rebuild the project, Bazel only recompiles
-that file.
-
-Looking at the dependency graph, you can see that `hello-world` depends on the
-same inputs as it did before, but the structure of the build is different:
-
-![Dependency graph for `hello-world` displays structure changes after modification to the file.](/docs/images/cpp-tutorial-stage2.png "Dependency graph for `hello-world` displays structure changes after modification to the file.")
-
-### Summary: stage 2
-
-You've now built the project with two targets.
The `hello-world` target builds
-one source file and depends on one other target (`//main:hello-greet`), which
-builds two additional source files. In the next section, take it a step further
-and add another package.
-
-## Stage 3: multiple packages
-
-This next stage adds another layer of complexity and builds a project with
-multiple packages. Take a look below at the structure and contents of the
-`cpp-tutorial/stage3` directory:
-
-```
-└──stage3
-   ├── main
-   │   ├── BUILD
-   │   ├── hello-world.cc
-   │   ├── hello-greet.cc
-   │   └── hello-greet.h
-   ├── lib
-   │   ├── BUILD
-   │   ├── hello-time.cc
-   │   └── hello-time.h
-   └── WORKSPACE
-```
-
-You can see that now there are two sub-directories, and each contains a `BUILD`
-file. Therefore, to Bazel, the workspace now contains two packages: `lib` and
-`main`.
-
-Take a look at the `lib/BUILD` file:
-
-```
-cc_library(
-    name = "hello-time",
-    srcs = ["hello-time.cc"],
-    hdrs = ["hello-time.h"],
-    visibility = ["//main:__pkg__"],
-)
-```
-
-And at the `main/BUILD` file:
-
-```
-cc_library(
-    name = "hello-greet",
-    srcs = ["hello-greet.cc"],
-    hdrs = ["hello-greet.h"],
-)
-
-cc_binary(
-    name = "hello-world",
-    srcs = ["hello-world.cc"],
-    deps = [
-        ":hello-greet",
-        "//lib:hello-time",
-    ],
-)
-```
-
-The `hello-world` target in the `main` package depends on the `hello-time` target
-in the `lib` package (hence the target label `//lib:hello-time`) - Bazel knows
-this through the `deps` attribute. You can see this reflected in the dependency
-graph:
-
-![Dependency graph for `hello-world` displays how the target in the main package depends on the target in the `lib` package.](/docs/images/cpp-tutorial-stage3.png "Dependency graph for `hello-world` displays how the target in the main package depends on the target in the `lib` package.")
-
-For the build to succeed, you make the `//lib:hello-time` target in `lib/BUILD`
-explicitly visible to targets in `main/BUILD` using the `visibility` attribute.
-This is because by default targets are only visible to other targets in the same
-`BUILD` file. Bazel uses target visibility to prevent issues such as libraries
-containing implementation details leaking into public APIs.
-
-Now build this final version of the project. Switch to the `cpp-tutorial/stage3`
-directory by running:
-
-```posix-terminal
-cd ../stage3
-```
-
-Once again, run the following command:
-
-```posix-terminal
-bazel build //main:hello-world
-```
-
-Bazel produces something that looks like this:
-
-```
-INFO: Found 1 target...
-Target //main:hello-world up-to-date:
-  bazel-bin/main/hello-world
-INFO: Elapsed time: 0.167s, Critical Path: 0.00s
-```
-
-Now test the last binary of this tutorial for a final `Hello world` message:
-
-```posix-terminal
-bazel-bin/main/hello-world
-```
-
-### Summary: stage 3
-
-You've now built the project as two packages with three targets and understand
-the dependencies between them, which equips you to go forth and build future
-projects with Bazel. In the next section, take a look at how to continue your
-Bazel journey.
-
-## Next steps
-
-You've now completed your first basic build with Bazel, but this is just the
-start. Here are some more resources to continue learning with Bazel:
-
-* To keep focusing on C++, read about common [C++ build use cases](https://bazel.build/tutorials/cpp-use-cases).
-* To get started with building other applications with Bazel, see the tutorials
-  for [Java](https://bazel.build/tutorials/java), [Android application](https://bazel.build/tutorials/android-app),
-  or [iOS application](https://bazel.build/tutorials/ios-app).
-* To learn more about working with local and remote repositories, read about
-  [external dependencies](https://bazel.build/docs/external).
-* To learn more about Bazel's other rules, see this [reference guide](https://bazel.build/rules).
-
-Happy building!
diff --git a/6.5.0/start/ios-app.mdx b/6.5.0/start/ios-app.mdx
deleted file mode 100644
index a46b8dc..0000000
--- a/6.5.0/start/ios-app.mdx
+++ /dev/null
@@ -1,361 +0,0 @@
----
-title: 'Bazel Tutorial: Build an iOS App'
----
-
-
-This tutorial covers how to build a simple iOS app using Bazel.
-
-## What you'll learn
-
-In this tutorial, you learn how to:
-
-* Set up the environment by installing Bazel and Xcode, and downloading the
-  sample project
-* Set up a Bazel [workspace](/concepts/build-ref#workspace) that contains the source code
-  for the app and a `WORKSPACE` file that identifies the top level of the
-  workspace directory
-* Update the `WORKSPACE` file to contain references to the required
-  external dependencies
-* Create a `BUILD` file
-* Run Bazel to build the app for the simulator and an iOS device
-* Run the app in the simulator and on an iOS device
-
-## Set up your environment
-
-To get started, install Bazel and Xcode, and get the sample project.
-
-### Install Bazel
-
-Follow the [installation instructions](/install/) to install Bazel and
-its dependencies.
-
-### Install Xcode
-
-Download and install [Xcode](https://developer.apple.com/xcode/downloads/).
-Xcode contains the compilers, SDKs, and other tools required by Bazel to build
-Apple applications.
-
-### Get the sample project
-
-You also need to get the sample project for the tutorial from GitHub. The GitHub
-repo has two branches: `source-only` and `main`. The `source-only` branch
-contains the source files for the project only. You'll use the files in this
-branch for this tutorial. The `main` branch contains both the source files
-and completed Bazel `WORKSPACE` and `BUILD` files. You can use the files in this
-branch to check your work when you've completed the tutorial steps.
-
-Enter the following at the command line to get the files in the `source-only`
-branch:
-
-```bash
-cd $HOME
-git clone -b source-only https://github.com/bazelbuild/examples
-```
-
-The `git clone` command creates a directory named `$HOME/examples/`. This
-directory contains several sample projects for Bazel. The project files for this
-tutorial are in `$HOME/examples/tutorial/ios-app`.
-
-## Set up a workspace
-
-A [workspace](/concepts/build-ref#workspace) is a directory that contains the
-source files for one or more software projects, as well as a `WORKSPACE` file
-and `BUILD` files that contain the instructions that Bazel uses to build
-the software. The workspace may also contain symbolic links to output
-directories.
-
-A workspace directory can be located anywhere on your filesystem and is denoted
-by the presence of the `WORKSPACE` file at its root. In this tutorial, your
-workspace directory is `$HOME/examples/tutorial/`, which contains the sample
-project files you cloned from the GitHub repo in the previous step.
-
-Note: Bazel itself doesn't impose any requirements for organizing source
-files in your workspace. The sample source files in this tutorial are organized
-according to conventions for the target platform.
-
-For your convenience, set the `$WORKSPACE` environment variable now to refer to
-your workspace directory. At the command line, enter:
-
-```bash
-export WORKSPACE=$HOME/examples/tutorial
-```
-
-### Create a WORKSPACE file
-
-Every workspace must have a text file named `WORKSPACE` located in the top-level
-workspace directory. This file may be empty or it may contain references
-to [external dependencies](/docs/external) required to build the
-software.

-For now, you'll create an empty `WORKSPACE` file, which simply serves to
-identify the workspace directory. In later steps, you'll update the file to add
-external dependency information.
-
-Enter the following at the command line:
-
-```bash
-touch $WORKSPACE/WORKSPACE
-open -a Xcode $WORKSPACE/WORKSPACE
-```
-
-This creates and opens the empty `WORKSPACE` file.
-
-### Update the WORKSPACE file
-
-To build applications for Apple devices, Bazel needs to pull the latest
-[Apple build rules](https://github.com/bazelbuild/rules_apple)
-from its GitHub repository. To enable this, add the following
-[`git_repository`](/reference/be/workspace#git_repository)
-rules to your `WORKSPACE` file:
-
-```python
-load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
-
-git_repository(
-    name = "build_bazel_rules_apple",
-    remote = "https://github.com/bazelbuild/rules_apple.git",
-    tag = "0.19.0",
-)
-
-git_repository(
-    name = "build_bazel_rules_swift",
-    remote = "https://github.com/bazelbuild/rules_swift.git",
-    tag = "0.13.0",
-)
-
-git_repository(
-    name = "build_bazel_apple_support",
-    remote = "https://github.com/bazelbuild/apple_support.git",
-    tag = "0.7.2",
-)
-
-git_repository(
-    name = "bazel_skylib",
-    remote = "https://github.com/bazelbuild/bazel-skylib.git",
-    tag = "0.9.0",
-)
-```
-
-Note: Always use the
-[latest version of the `rules_apple` rules](https://github.com/bazelbuild/rules_apple/releases)
-in the `tag` attribute. Make sure to check the latest dependencies required in
-`rules_apple`'s [project](https://github.com/bazelbuild/rules_apple).
-
-Note: You **must** set the value of the `name` attribute in the
-`git_repository` rule to `build_bazel_rules_apple` or the build will fail.
-
-## Review the source files
-
-Take a look at the source files for the app located in
-`$WORKSPACE/ios-app/UrlGet`. Again, you're just looking at these files now to
-become familiar with the structure of the app. You don't have to edit any of the
-source files to complete this tutorial.
-
-## Create a BUILD file
-
-At a command-line prompt, open a new `BUILD` file for editing:
-
-```bash
-touch $WORKSPACE/ios-app/BUILD
-open -a Xcode $WORKSPACE/ios-app/BUILD
-```
-
-### Add the rule load statement
-
-To build iOS targets, Bazel needs to load build rules from its GitHub repository
-whenever the build runs. To make these rules available to your project, add the
-following load statement to the beginning of your `BUILD` file:
-
-```
-load("@build_bazel_rules_apple//apple:ios.bzl", "ios_application")
-```
-
-You only need to load the `ios_application` rule because the `objc_library`
-rule is built into the Bazel package.
-
-### Add an objc_library rule
-
-Bazel provides several build rules that you can use to build an app for the
-iOS platform. For this tutorial, you'll first use the
-[`objc_library`](/reference/be/objective-c#objc_library) rule to tell Bazel
-how to build a static library from the app source code and Xib files.
Then -you'll use the -[`ios_application`](https://github.com/bazelbuild/rules_apple/tree/main/doc) -rule to tell it how to build the application binary and the `.ipa` bundle. - -Note: This tutorial presents a minimal use case of the Objective-C rules in -Bazel. For example, you have to use the `ios_application` rule to build -multi-architecture iOS apps. - -Add the following to your `BUILD` file: - -```python -objc_library( - name = "UrlGetClasses", - srcs = [ - "UrlGet/AppDelegate.m", - "UrlGet/UrlGetViewController.m", - "UrlGet/main.m", - ], - hdrs = glob(["UrlGet/*.h"]), - data = ["UrlGet/UrlGetViewController.xib"], -) -``` - -Note the name of the rule, `UrlGetClasses`. - -### Add an ios_application rule - -The -[`ios_application`](https://github.com/bazelbuild/rules_apple/tree/main/doc) -rule builds the application binary and creates the `.ipa` bundle file. - -Add the following to your `BUILD` file: - -```python -ios_application( - name = "ios-app", - bundle_id = "Google.UrlGet", - families = [ - "iphone", - "ipad", - ], - minimum_os_version = "9.0", - infoplists = [":UrlGet/UrlGet-Info.plist"], - visibility = ["//visibility:public"], - deps = [":UrlGetClasses"], -) -``` - -Note: Please update the `minimum_os_version` attribute to the minimum -version of iOS that you plan to support. - -Note how the `deps` attribute references the output of the `UrlGetClasses` rule -you added to the `BUILD` file above. - -Now, save and close the file. You can compare your `BUILD` file to the -[completed example](https://github.com/bazelbuild/examples/blob/main/tutorial/ios-app/BUILD) -in the `main` branch of the GitHub repo. - -## Build and deploy the app - -You are now ready to build your app and deploy it to a simulator and onto an -iOS device. - -Note: The app launches standalone but requires a backend server in order to -produce output. See the README file in the sample project directory to find out -how to build the backend server. - -The built app is located in the `$WORKSPACE/bazel-bin` directory. - -Completed `WORKSPACE` and `BUILD` files for this tutorial are located in the -[main branch](https://github.com/bazelbuild/examples/tree/main/tutorial) -of the GitHub repo. You can compare your work to the completed files for -additional help or troubleshooting. - -### Build the app for the simulator - -Make sure that your current working directory is inside your Bazel workspace: - -```bash -cd $WORKSPACE -``` - -Now, enter the following to build the sample app: - -```bash -bazel build //ios-app:ios-app -``` - -Bazel launches and builds the sample app. During the build process, its -output will appear similar to the following: - -```bash -INFO: Found 1 target... -Target //ios-app:ios-app up-to-date: - bazel-bin/ios-app/ios-app.ipa -INFO: Elapsed time: 0.565s, Critical Path: 0.44s -``` - -### Find the build outputs - -The `.ipa` file and other outputs are located in the -`$WORKSPACE/bazel-bin/ios-app` directory. - -### Run and debug the app in the simulator - -You can now run the app from Xcode using the iOS Simulator. First, -[generate an Xcode project using Tulsi](http://tulsi.bazel.build/). - -Then, open the project in Xcode, choose an iOS Simulator as the runtime scheme, -and click **Run**. - -Note: If you modify any project files in Xcode (for example, if you add or -remove a file, or add or change a dependency), you must rebuild the app using -Bazel, re-generate the Xcode project in Tulsi, and then re-open the project in -Xcode. 
-
-### Build the app for a device
-
-To build your app so that it installs and launches on an iOS device, Bazel needs
-the appropriate provisioning profile for that device model. Do the following:
-
-1. Go to your [Apple Developer Account](https://developer.apple.com/account)
-   and download the appropriate provisioning profile for your device. See
-   [Apple's documentation](https://developer.apple.com/library/ios/documentation/IDEs/Conceptual/AppDistributionGuide/MaintainingProfiles/MaintainingProfiles.html)
-   for more information.
-
-2. Move your profile into `$WORKSPACE`.
-
-3. (Optional) Add your profile to your `.gitignore` file.
-
-4. Add the following line to the `ios_application` target in your `BUILD` file:
-
-   ```python
-   provisioning_profile = ".mobileprovision",
-   ```
-
-Note: Ensure the profile is correct so that the app can be installed on a
-device.
-
-Now build the app for your device:
-
-```bash
-bazel build //ios-app:ios-app --ios_multi_cpus=armv7,arm64
-```
-
-This builds the app as a fat binary. To build for a specific device
-architecture, designate it in the build options.
-
-To build for a specific Xcode version, use the `--xcode_version` option. To
-build for a specific SDK version, use the `--ios_sdk_version` option. The
-`--xcode_version` option is sufficient in most scenarios.
-
-To specify a minimum required iOS version, add the `minimum_os_version`
-parameter to the `ios_application` build rule in your `BUILD` file.
-
-You can also use
-[Tulsi](http://tulsi.bazel.build/docs/gettingstarted.html) to
-build your app using a GUI rather than the command line.
-
-### Install the app on a device
-
-The easiest way to install the app on the device is to launch Xcode and use the
-`Window > Devices` command. Select your plugged-in device from the list on the
-left, then add the app by clicking the **Add** (plus sign) button under
-"Installed Apps" and selecting the `.ipa` file that you built.
-
-If your app fails to install on your device, ensure that you are specifying the
-correct provisioning profile in your `BUILD` file (step 4 in the previous
-section).
-
-If your app fails to launch, make sure that your device is part of your
-provisioning profile. The `View Device Logs` button on the `Devices` screen in
-Xcode may provide other information as to what has gone wrong.
-
-## Further reading
-
-For more details, see the
-[main branch](https://github.com/bazelbuild/examples/tree/main/tutorial)
-of the GitHub repo.
-
diff --git a/6.5.0/start/java.mdx b/6.5.0/start/java.mdx
deleted file mode 100644
index 7864b5f..0000000
--- a/6.5.0/start/java.mdx
+++ /dev/null
@@ -1,435 +0,0 @@
----
-title: 'Bazel Tutorial: Build a Java Project'
----
-
-
-This tutorial covers the basics of building Java applications with
-Bazel. You will set up your workspace and build a simple Java project that
-illustrates key Bazel concepts, such as targets and `BUILD` files.
-
-Estimated completion time: 30 minutes.
-
-## What you'll learn
-
-In this tutorial you learn how to:
-
-* Build a target
-* Visualize the project's dependencies
-* Split the project into multiple targets and packages
-* Control target visibility across packages
-* Reference targets through labels
-* Deploy a target
-
-## Before you begin
-
-### Install Bazel
-
-To prepare for the tutorial, first [install Bazel](/install) if
-you don't have it installed already.
-
-### Install the JDK
-
-1. Install the Java JDK (the preferred version is 11, though versions between 8
-   and 15 are supported).
-
-2.
Set the JAVA\_HOME environment variable to point to the JDK. - * On Linux/macOS: - - export JAVA_HOME="$(dirname $(dirname $(realpath $(which javac))))" - * On Windows: - 1. Open Control Panel. - 2. Go to "System and Security" > "System" > "Advanced System Settings" > "Advanced" tab > "Environment Variables..." . - 3. Under the "User variables" list (the one on the top), click "New...". - 4. In the "Variable name" field, enter `JAVA_HOME`. - 5. Click "Browse Directory...". - 6. Navigate to the JDK directory (for example `C:\Program Files\Java\jdk1.8.0_152`). - 7. Click "OK" on all dialog windows. - -### Get the sample project - -Retrieve the sample project from Bazel's GitHub repository: - -```posix-terminal -git clone https://github.com/bazelbuild/examples -``` - -The sample project for this tutorial is in the `examples/java-tutorial` -directory and is structured as follows: - -``` -java-tutorial -├── BUILD -├── src -│ └── main -│ └── java -│ └── com -│ └── example -│ ├── cmdline -│ │ ├── BUILD -│ │ └── Runner.java -│ ├── Greeting.java -│ └── ProjectRunner.java -└── WORKSPACE -``` - -## Build with Bazel - -### Set up the workspace - -Before you can build a project, you need to set up its workspace. A workspace is -a directory that holds your project's source files and Bazel's build outputs. It -also contains files that Bazel recognizes as special: - -* The `WORKSPACE` file, which identifies the directory and its contents as a - Bazel workspace and lives at the root of the project's directory structure, - -* One or more `BUILD` files, which tell Bazel how to build different parts of - the project. (A directory within the workspace that contains a `BUILD` file - is a *package*. You will learn about packages later in this tutorial.) - -To designate a directory as a Bazel workspace, create an empty file named -`WORKSPACE` in that directory. - -When Bazel builds the project, all inputs and dependencies must be in the same -workspace. Files residing in different workspaces are independent of one -another unless linked, which is beyond the scope of this tutorial. - -### Understand the BUILD file - -A `BUILD` file contains several different types of instructions for Bazel. -The most important type is the *build rule*, which tells Bazel how to build the -desired outputs, such as executable binaries or libraries. Each instance -of a build rule in the `BUILD` file is called a *target* and points to a -specific set of source files and dependencies. A target can also point to other -targets. - -Take a look at the `java-tutorial/BUILD` file: - -```python -java_binary( - name = "ProjectRunner", - srcs = glob(["src/main/java/com/example/*.java"]), -) -``` - -In our example, the `ProjectRunner` target instantiates Bazel's built-in -[`java_binary` rule](/reference/be/java#java_binary). The rule tells Bazel to -build a `.jar` file and a wrapper shell script (both named after the target). - -The attributes in the target explicitly state its dependencies and options. -While the `name` attribute is mandatory, many are optional. For example, in the -`ProjectRunner` rule target, `name` is the name of the target, `srcs` specifies -the source files that Bazel uses to build the target, and `main_class` specifies -the class that contains the main method. (You may have noticed that our example -uses [glob](/reference/be/functions#glob) to pass a set of source files to Bazel -instead of listing them one by one.) 
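-
-For comparison, a hypothetical equivalent of this target that lists each source
-file by hand instead of using `glob` (the file names come from the project
-structure shown above) would look like this:
-
-```python
-java_binary(
-    name = "ProjectRunner",
-    srcs = [
-        "src/main/java/com/example/Greeting.java",
-        "src/main/java/com/example/ProjectRunner.java",
-    ],
-)
-```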
-
-### Build the project
-
-To build your sample project, navigate to the `java-tutorial` directory
-and run:
-
-```posix-terminal
-bazel build //:ProjectRunner
-```
-In the target label, the `//` part is the location of the `BUILD` file
-relative to the root of the workspace (in this case, the root itself),
-and `ProjectRunner` is the target name in the `BUILD` file. (You will
-learn about target labels in more detail at the end of this tutorial.)
-
-Bazel produces output similar to the following:
-
-```bash
-   INFO: Found 1 target...
-   Target //:ProjectRunner up-to-date:
-      bazel-bin/ProjectRunner.jar
-      bazel-bin/ProjectRunner
-   INFO: Elapsed time: 1.021s, Critical Path: 0.83s
-```
-
-Congratulations, you just built your first Bazel target! Bazel places build
-outputs in the `bazel-bin` directory at the root of the workspace. Browse
-through its contents to get an idea of Bazel's output structure.
-
-Now test your freshly built binary:
-
-```posix-terminal
-bazel-bin/ProjectRunner
-```
-
-### Review the dependency graph
-
-Bazel requires build dependencies to be explicitly declared in BUILD files.
-Bazel uses those statements to create the project's dependency graph, which
-enables accurate incremental builds.
-
-To visualize the sample project's dependencies, you can generate a text
-representation of the dependency graph by running this command at the
-workspace root:
-
-```posix-terminal
-bazel query --notool_deps --noimplicit_deps "deps(//:ProjectRunner)" --output graph
-```
-
-The above command tells Bazel to look for all dependencies for the target
-`//:ProjectRunner` (excluding host and implicit dependencies) and format the
-output as a graph.
-
-Then, paste the text into [GraphViz](http://www.webgraphviz.com/).
-
-As you can see, the project has a single target that builds two source files with
-no additional dependencies:
-
-![Dependency graph of the target 'ProjectRunner'](/docs/images/tutorial_java_01.svg)
-
-After you set up your workspace, build your project, and examine its
-dependencies, you can add some complexity.
-
-## Refine your Bazel build
-
-While a single target is sufficient for small projects, you may want to split
-larger projects into multiple targets and packages to allow for fast incremental
-builds (that is, only rebuild what's changed) and to speed up your builds by
-building multiple parts of a project at once.
-
-### Specify multiple build targets
-
-You can split the sample project build into two targets. Replace the contents of
-the `java-tutorial/BUILD` file with the following:
-
-```python
-java_binary(
-    name = "ProjectRunner",
-    srcs = ["src/main/java/com/example/ProjectRunner.java"],
-    main_class = "com.example.ProjectRunner",
-    deps = [":greeter"],
-)
-
-java_library(
-    name = "greeter",
-    srcs = ["src/main/java/com/example/Greeting.java"],
-)
-```
-
-With this configuration, Bazel first builds the `greeter` library, then the
-`ProjectRunner` binary. The `deps` attribute in `java_binary` tells Bazel that
-the `greeter` library is required to build the `ProjectRunner` binary.
-
-To build this new version of the project, run the following command:
-
-```posix-terminal
-bazel build //:ProjectRunner
-```
-
-Bazel produces output similar to the following:
-
-```
-INFO: Found 1 target...
-Target //:ProjectRunner up-to-date: - bazel-bin/ProjectRunner.jar - bazel-bin/ProjectRunner -INFO: Elapsed time: 2.454s, Critical Path: 1.58s -``` - -Now test your freshly built binary: - -```posix-terminal -bazel-bin/ProjectRunner -``` - -If you now modify `ProjectRunner.java` and rebuild the project, Bazel only -recompiles that file. - -Looking at the dependency graph, you can see that `ProjectRunner` depends on the -same inputs as it did before, but the structure of the build is different: - -![Dependency graph of the target 'ProjectRunner' after adding a dependency]( -/docs/images/tutorial_java_02.svg) - -You've now built the project with two targets. The `ProjectRunner` target builds -two source files and depends on one other target (`:greeter`), which builds -one additional source file. - -### Use multiple packages - -Let’s now split the project into multiple packages. If you take a look at the -`src/main/java/com/example/cmdline` directory, you can see that it also contains -a `BUILD` file, plus some source files. Therefore, to Bazel, the workspace now -contains two packages, `//src/main/java/com/example/cmdline` and `//` (since -there is a `BUILD` file at the root of the workspace). - -Take a look at the `src/main/java/com/example/cmdline/BUILD` file: - -```python -java_binary( - name = "runner", - srcs = ["Runner.java"], - main_class = "com.example.cmdline.Runner", - deps = ["//:greeter"], -) -``` - -The `runner` target depends on the `greeter` target in the `//` package (hence -the target label `//:greeter`) - Bazel knows this through the `deps` attribute. -Take a look at the dependency graph: - -![Dependency graph of the target 'runner'](/docs/images/tutorial_java_03.svg) - -However, for the build to succeed, you must explicitly give the `runner` target -in `//src/main/java/com/example/cmdline/BUILD` visibility to targets in -`//BUILD` using the `visibility` attribute. This is because by default targets -are only visible to other targets in the same `BUILD` file. (Bazel uses target -visibility to prevent issues such as libraries containing implementation details -leaking into public APIs.) - -To do this, add the `visibility` attribute to the `greeter` target in -`java-tutorial/BUILD` as shown below: - -```python -java_library( - name = "greeter", - srcs = ["src/main/java/com/example/Greeting.java"], - visibility = ["//src/main/java/com/example/cmdline:__pkg__"], -) -``` - -Now you can build the new package by running the following command at the root -of the workspace: - -```posix-terminal -bazel build //src/main/java/com/example/cmdline:runner -``` - -Bazel produces output similar to the following: - -``` -INFO: Found 1 target... -Target //src/main/java/com/example/cmdline:runner up-to-date: - bazel-bin/src/main/java/com/example/cmdline/runner.jar - bazel-bin/src/main/java/com/example/cmdline/runner - INFO: Elapsed time: 1.576s, Critical Path: 0.81s -``` - -Now test your freshly built binary: - -```posix-terminal -./bazel-bin/src/main/java/com/example/cmdline/runner -``` - -You've now modified the project to build as two packages, each containing one -target, and understand the dependencies between them. - - -## Use labels to reference targets - -In `BUILD` files and at the command line, Bazel uses target labels to reference -targets - for example, `//:ProjectRunner` or -`//src/main/java/com/example/cmdline:runner`. 
Their syntax is as follows: - -``` -//path/to/package:target-name -``` - -If the target is a rule target, then `path/to/package` is the path to the -directory containing the `BUILD` file, and `target-name` is what you named the -target in the `BUILD` file (the `name` attribute). If the target is a file -target, then `path/to/package` is the path to the root of the package, and -`target-name` is the name of the target file, including its full path. - -When referencing targets at the repository root, the package path is empty, -just use `//:target-name`. When referencing targets within the same `BUILD` -file, you can even skip the `//` workspace root identifier and just use -`:target-name`. - -For example, for targets in the `java-tutorial/BUILD` file, you did not have to -specify a package path, since the workspace root is itself a package (`//`), and -your two target labels were simply `//:ProjectRunner` and `//:greeter`. - -However, for targets in the `//src/main/java/com/example/cmdline/BUILD` file you -had to specify the full package path of `//src/main/java/com/example/cmdline` -and your target label was `//src/main/java/com/example/cmdline:runner`. - -## Package a Java target for deployment - -Let’s now package a Java target for deployment by building the binary with all -of its runtime dependencies. This lets you run the binary outside of your -development environment. - -As you remember, the [java_binary](/reference/be/java#java_binary) build rule -produces a `.jar` and a wrapper shell script. Take a look at the contents of -`runner.jar` using this command: - -```posix-terminal -jar tf bazel-bin/src/main/java/com/example/cmdline/runner.jar -``` - -The contents are: - -``` -META-INF/ -META-INF/MANIFEST.MF -com/ -com/example/ -com/example/cmdline/ -com/example/cmdline/Runner.class -``` -As you can see, `runner.jar` contains `Runner.class`, but not its dependency, -`Greeting.class`. The `runner` script that Bazel generates adds `greeter.jar` -to the classpath, so if you leave it like this, it will run locally, but it -won't run standalone on another machine. Fortunately, the `java_binary` rule -allows you to build a self-contained, deployable binary. To build it, append -`_deploy.jar` to the target name: - -```posix-terminal -bazel build //src/main/java/com/example/cmdline:runner_deploy.jar -``` - -Bazel produces output similar to the following: - -``` -INFO: Found 1 target... -Target //src/main/java/com/example/cmdline:runner_deploy.jar up-to-date: - bazel-bin/src/main/java/com/example/cmdline/runner_deploy.jar -INFO: Elapsed time: 1.700s, Critical Path: 0.23s -``` -You have just built `runner_deploy.jar`, which you can run standalone away from -your development environment since it contains the required runtime -dependencies. Take a look at the contents of this standalone JAR using the -same command as before: - -```posix-terminal -jar tf bazel-bin/src/main/java/com/example/cmdline/runner_deploy.jar -``` - -The contents include all of the necessary classes to run: - -``` -META-INF/ -META-INF/MANIFEST.MF -build-data.properties -com/ -com/example/ -com/example/cmdline/ -com/example/cmdline/Runner.class -com/example/Greeting.class -``` - -## Further reading - -For more details, see: - -* [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external) for - rules to manage transitive Maven dependencies. - -* [External Dependencies](/docs/external) to learn more about working with - local and remote repositories. - -* The [other rules](/rules) to learn more about Bazel. 
- -* The [C++ build tutorial](/tutorials/cpp) to get started with building - C++ projects with Bazel. - -* The [Android application tutorial](/tutorials/android-app) and - [iOS application tutorial](/tutorials/ios-app) to get started with - building mobile applications for Android and iOS with Bazel. - -Happy building! diff --git a/6.5.0/tutorials/cpp-dependency.mdx b/6.5.0/tutorials/cpp-dependency.mdx deleted file mode 100644 index 6f8300b..0000000 --- a/6.5.0/tutorials/cpp-dependency.mdx +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: 'Review the dependency graph' ---- - - -A successful build has all of its dependencies explicitly stated in the `BUILD` -file. Bazel uses those statements to create the project's dependency graph, -which enables accurate incremental builds. - -To visualize the sample project's dependencies, you can generate a text -representation of the dependency graph by running this command at the -workspace root: - -``` -bazel query --notool_deps --noimplicit_deps "deps(//main:hello-world)" \ - --output graph -``` - -The above command tells Bazel to look for all dependencies for the target -`//main:hello-world` (excluding host and implicit dependencies) and format the -output as a graph. - -Then, paste the text into [GraphViz](http://www.webgraphviz.com/). - -On Ubuntu, you can view the graph locally by installing GraphViz and the xdot -Dot Viewer: - -``` -sudo apt update && sudo apt install graphviz xdot -``` - -Then you can generate and view the graph by piping the text output above -straight to xdot: - -``` -xdot <(bazel query --notool_deps --noimplicit_deps "deps(//main:hello-world)" \ - --output graph) -``` - -As you can see, the first stage of the sample project has a single target -that builds a single source file with no additional dependencies: - -![Dependency graph for 'hello-world'](/docs/images/cpp-tutorial-stage1.png "Dependency graph") - -**Figure 1.** Dependency graph for `hello-world` displays a single target with a single -source file. - -After you set up your workspace, build your project, and examine its -dependencies, then you can add some complexity. diff --git a/6.5.0/tutorials/cpp-labels.mdx b/6.5.0/tutorials/cpp-labels.mdx deleted file mode 100644 index 978952e..0000000 --- a/6.5.0/tutorials/cpp-labels.mdx +++ /dev/null @@ -1,26 +0,0 @@ ---- -title: 'Use labels to reference targets' ---- - - -In `BUILD` files and at the command line, Bazel uses *labels* to reference -targets - for example, `//main:hello-world` or `//lib:hello-time`. Their syntax -is: - -``` -//path/to/package:target-name -``` - -If the target is a rule target, then `path/to/package` is the path from the -workspace root (the directory containing the `WORKSPACE` file) to the directory -containing the `BUILD` file, and `target-name` is what you named the target -in the `BUILD` file (the `name` attribute). If the target is a file target, -then `path/to/package` is the path to the root of the package, and -`target-name` is the name of the target file, including its full -path relative to the root of the package (the directory containing the -package's `BUILD` file). - -When referencing targets at the repository root, the package path is empty, -just use `//:target-name`. When referencing targets within the same `BUILD` -file, you can even skip the `//` workspace root identifier and just use -`:target-name`. 
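-
-As an illustrative sketch (the targets come from the C++ tutorial; the exact
-`deps` arrangement here is hypothetical), a `BUILD` file in the `main` package
-could use both forms:
-
-```python
-cc_binary(
-    name = "hello-world",
-    srcs = ["hello-world.cc"],
-    deps = [
-        ":hello-greet",      # same package, so the short form works
-        "//lib:hello-time",  # different package, so the full label is required
-    ],
-)
-```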
diff --git a/6.5.0/tutorials/cpp-use-cases.mdx b/6.5.0/tutorials/cpp-use-cases.mdx
deleted file mode 100644
index e20e993..0000000
--- a/6.5.0/tutorials/cpp-use-cases.mdx
+++ /dev/null
@@ -1,258 +0,0 @@
----
-title: 'Common C++ Build Use Cases'
----
-
-
-Here you will find some of the most common use cases for building C++ projects
-with Bazel. If you have not done so already, get started with building C++
-projects with Bazel by completing the tutorial
-[Introduction to Bazel: Build a C++ Project](/tutorials/cpp).
-
-For information on `cc_library` and `hdrs` header files, see
-[`cc_library`](/reference/be/c-cpp#cc_library).
-
-## Including multiple files in a target
-
-You can include multiple files in a single target with
-[glob](/reference/be/functions#glob).
-For example:
-
-```python
-cc_library(
-    name = "build-all-the-files",
-    srcs = glob(["*.cc"]),
-    hdrs = glob(["*.h"]),
-)
-```
-
-With this target, Bazel will build all the `.cc` and `.h` files it finds in the
-same directory as the `BUILD` file that contains this target (excluding
-subdirectories).
-
-## Using transitive includes
-
-If a file includes a header, then any rule with that file as a source (that is,
-having that file in the `srcs`, `hdrs`, or `textual_hdrs` attribute) should
-depend on the included header's library rule. Conversely, only direct
-dependencies need to be specified as dependencies. For example, suppose
-`sandwich.h` includes `bread.h` and `bread.h` includes `flour.h`. `sandwich.h`
-doesn't include `flour.h` (who wants flour in their sandwich?), so the `BUILD`
-file would look like this:
-
-```python
-cc_library(
-    name = "sandwich",
-    srcs = ["sandwich.cc"],
-    hdrs = ["sandwich.h"],
-    deps = [":bread"],
-)
-
-cc_library(
-    name = "bread",
-    srcs = ["bread.cc"],
-    hdrs = ["bread.h"],
-    deps = [":flour"],
-)
-
-cc_library(
-    name = "flour",
-    srcs = ["flour.cc"],
-    hdrs = ["flour.h"],
-)
-```
-
-Here, the `sandwich` library depends on the `bread` library, which depends
-on the `flour` library.
-
-## Adding include paths
-
-Sometimes you cannot (or do not want to) root include paths at the workspace
-root. An existing library might already have an include directory that doesn't
-match its path in your workspace. For example, suppose you have the following
-directory structure:
-
-```
-└── my-project
-    ├── legacy
-    │   └── some_lib
-    │       ├── BUILD
-    │       ├── include
-    │       │   └── some_lib.h
-    │       └── some_lib.cc
-    └── WORKSPACE
-```
-
-Bazel will expect `some_lib.h` to be included as
-`legacy/some_lib/include/some_lib.h`, but suppose `some_lib.cc` includes
-`"some_lib.h"`. To make that include path valid,
-`legacy/some_lib/BUILD` will need to specify that the `some_lib/include`
-directory is an include directory:
-
-```python
-cc_library(
-    name = "some_lib",
-    srcs = ["some_lib.cc"],
-    hdrs = ["include/some_lib.h"],
-    copts = ["-Ilegacy/some_lib/include"],
-)
-```
-
-This is especially useful for external dependencies, as their header files
-must otherwise be included with a `/` prefix.
-
-## Including external libraries
-
-Suppose you are using [Google Test](https://github.com/google/googletest).
-You can use one of the repository functions in the `WORKSPACE` file to -download Google Test and make it available in your repository: - -```python -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") - -http_archive( - name = "gtest", - url = "https://github.com/google/googletest/archive/release-1.10.0.zip", - sha256 = "94c634d499558a76fa649edb13721dce6e98fb1e7018dfaeba3cd7a083945e91", - build_file = "@//:gtest.BUILD", -) -``` - -Note: If the destination already contains a `BUILD` file, you can leave -out the `build_file` attribute. - -Then create `gtest.BUILD`, a `BUILD` file used to compile Google Test. -Google Test has several "special" requirements that make its `cc_library` rule -more complicated: - -* `googletest-release-1.10.0/src/gtest-all.cc` `#include`s all other - files in `googletest-release-1.10.0/src/`: exclude it from the - compile to prevent link errors for duplicate symbols. - -* It uses header files that are relative to the -`googletest-release-1.10.0/include/` directory (`"gtest/gtest.h"`), so you must -add that directory to the include paths. - -* It needs to link in `pthread`, so add that as a `linkopt`. - -The final rule therefore looks like this: - -```python -cc_library( - name = "main", - srcs = glob( - ["googletest-release-1.10.0/src/*.cc"], - exclude = ["googletest-release-1.10.0/src/gtest-all.cc"] - ), - hdrs = glob([ - "googletest-release-1.10.0/include/**/*.h", - "googletest-release-1.10.0/src/*.h" - ]), - copts = [ - "-Iexternal/gtest/googletest-release-1.10.0/include", - "-Iexternal/gtest/googletest-release-1.10.0" - ], - linkopts = ["-pthread"], - visibility = ["//visibility:public"], -) -``` - -This is somewhat messy: everything is prefixed with `googletest-release-1.10.0` -as a byproduct of the archive's structure. You can make `http_archive` strip -this prefix by adding the `strip_prefix` attribute: - -```python -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") - -http_archive( - name = "gtest", - url = "https://github.com/google/googletest/archive/release-1.10.0.zip", - sha256 = "94c634d499558a76fa649edb13721dce6e98fb1e7018dfaeba3cd7a083945e91", - build_file = "@//:gtest.BUILD", - strip_prefix = "googletest-release-1.10.0", -) -``` - -Then `gtest.BUILD` would look like this: - -```python -cc_library( - name = "main", - srcs = glob( - ["src/*.cc"], - exclude = ["src/gtest-all.cc"] - ), - hdrs = glob([ - "include/**/*.h", - "src/*.h" - ]), - copts = ["-Iexternal/gtest/include"], - linkopts = ["-pthread"], - visibility = ["//visibility:public"], -) -``` - -Now `cc_` rules can depend on `@gtest//:main`. - -## Writing and running C++ tests - -For example, you could create a test `./test/hello-test.cc`, such as: - -```cpp -#include "gtest/gtest.h" -#include "main/hello-greet.h" - -TEST(HelloTest, GetGreet) { - EXPECT_EQ(get_greet("Bazel"), "Hello Bazel"); -} -``` - -Then create `./test/BUILD` file for your tests: - -```python -cc_test( - name = "hello-test", - srcs = ["hello-test.cc"], - copts = ["-Iexternal/gtest/include"], - deps = [ - "@gtest//:main", - "//main:hello-greet", - ], -) -``` - -To make `hello-greet` visible to `hello-test`, you must add -`"//test:__pkg__",` to the `visibility` attribute in `./main/BUILD`. - -Now you can use `bazel test` to run the test. - -``` -bazel test test:hello-test -``` - -This produces the following output: - -``` -INFO: Found 1 test target... 
-Target //test:hello-test up-to-date: - bazel-bin/test/hello-test -INFO: Elapsed time: 4.497s, Critical Path: 2.53s -//test:hello-test PASSED in 0.3s - -Executed 1 out of 1 tests: 1 test passes. -``` - - -## Adding dependencies on precompiled libraries - -If you want to use a library of which you only have a compiled version (for -example, headers and a `.so` file) wrap it in a `cc_library` rule: - -```python -cc_library( - name = "mylib", - srcs = ["mylib.so"], - hdrs = ["mylib.h"], -) -``` - -This way, other C++ targets in your workspace can depend on this rule. diff --git a/6.5.0/versions/index.mdx b/6.5.0/versions/index.mdx deleted file mode 100644 index f71981b..0000000 --- a/6.5.0/versions/index.mdx +++ /dev/null @@ -1,16 +0,0 @@ ---- -title: 'Documentation Versions' ---- - - -The documentation on this website represents the latest in Bazel. Documentation -is updated at head. Each major supported release will have a snapshot of the -narrative and reference documentation that follows the lifecycle of Bazel's -version support. - -For now, to see documentation for older Bazel versions, go to -[docs.bazel.build](https://docs.bazel.build/). - - - To the Archives! - diff --git a/7.6.1/about/faq.mdx b/7.6.1/about/faq.mdx deleted file mode 100644 index dd5be8a..0000000 --- a/7.6.1/about/faq.mdx +++ /dev/null @@ -1,209 +0,0 @@ ---- -title: 'FAQ' ---- - - - -If you have questions or need support, see [Getting Help](/help). - -## What is Bazel? - -Bazel is a tool that automates software builds and tests. Supported build tasks include running compilers and linkers to produce executable programs and libraries, and assembling deployable packages for Android, iOS and other target environments. Bazel is similar to other tools like Make, Ant, Gradle, Buck, Pants and Maven. - -## What is special about Bazel? - -Bazel was designed to fit the way software is developed at Google. It has the following features: - -* Multi-language support: Bazel supports [many languages](/reference/be/overview), and can be extended to support arbitrary programming languages. -* High-level build language: Projects are described in the `BUILD` language, a concise text format that describes a project as sets of small interconnected libraries, binaries and tests. In contrast, with tools like Make, you have to describe individual files and compiler invocations. -* Multi-platform support: The same tool and the same `BUILD` files can be used to build software for different architectures, and even different platforms. At Google, we use Bazel to build everything from server applications running on systems in our data centers to client apps running on mobile phones. -* Reproducibility: In `BUILD` files, each library, test and binary must specify its direct dependencies completely. Bazel uses this dependency information to know what must be rebuilt when you make changes to a source file, and which tasks can run in parallel. This means that all builds are incremental and will always produce the same result. -* Scalable: Bazel can handle large builds; at Google, it is common for a server binary to have 100k source files, and builds where no files were changed take about ~200ms. - -## Why doesn’t Google use...? - -* Make, Ninja: These tools give very exact control over what commands get invoked to build files, but it’s up to the user to write rules that are correct. - * Users interact with Bazel on a higher level. 
For example, Bazel has built-in rules for “Java test”, “C++ binary”, and notions such as “target platform” and “host platform”. These rules have been battle tested to be foolproof. -* Ant and Maven: Ant and Maven are primarily geared toward Java, while Bazel handles multiple languages. Bazel encourages subdividing codebases in smaller reusable units, and can rebuild only ones that need rebuilding. This speeds up development when working with larger codebases. -* Gradle: Bazel configuration files are much more structured than Gradle’s, letting Bazel understand exactly what each action does. This allows for more parallelism and better reproducibility. -* Pants, Buck: Both tools were created and developed by ex-Googlers at Twitter and Foursquare, and Facebook respectively. They have been modeled after Bazel, but their feature sets are different, so they aren’t viable alternatives for us. - -## Where did Bazel come from? - -Bazel is a flavor of the tool that Google uses to build its server software internally. It has expanded to build other software as well, like mobile apps (iOS, Android) that connect to our servers. - -## Did you rewrite your internal tool as open-source? Is it a fork? - -Bazel shares most of its code with the internal tool and its rules are used for millions of builds every day. - -## Why did Google build Bazel? - -A long time ago, Google built its software using large, generated Makefiles. These led to slow and unreliable builds, which began to interfere with our developers’ productivity and the company’s agility. Bazel was a way to solve these problems. - -## Does Bazel require a build cluster? - -Bazel runs build operations locally by default. However, Bazel can also connect to a build cluster for even faster builds and tests. See our documentation on [remote execution and caching](/remote/rbe) and [remote caching](/remote/caching) for further details. - -## How does the Google development process work? - -For our server code base, we use the following development workflow: - -* All our server code is in a single, gigantic version control system. -* Everybody builds their software with Bazel. -* Different teams own different parts of the source tree, and make their components available as `BUILD` targets. -* Branching is primarily used for managing releases, so everybody develops their software at the head revision. - -Bazel is a cornerstone of this philosophy: since Bazel requires all dependencies to be fully specified, we can predict which programs and tests are affected by a change, and vet them before submission. - -More background on the development process at Google can be found on the [eng tools blog](http://google-engtools.blogspot.com/). - -## Why did you open up Bazel? - -Building software should be fun and easy. Slow and unpredictable builds take the fun out of programming. - -## Why would I want to use Bazel? - -* Bazel may give you faster build times because it can recompile only the files that need to be recompiled. Similarly, it can skip re-running tests that it knows haven’t changed. -* Bazel produces deterministic results. This eliminates skew between incremental and clean builds, laptop and CI system, etc. -* Bazel can build different client and server apps with the same tool from the same workspace. For example, you can change a client/server protocol in a single commit, and test that the updated mobile app works with the updated server, building both with the same tool, reaping all the aforementioned benefits of Bazel. - -## Can I see examples? 
- -Yes; see a [simple example](https://github.com/bazelbuild/bazel/blob/master/examples/cpp/BUILD) -or read the [Bazel source code](https://github.com/bazelbuild/bazel/blob/master/src/BUILD) for a more complex example. - - -## What is Bazel best at? - -Bazel shines at building and testing projects with the following properties: - -* Projects with a large codebase -* Projects written in (multiple) compiled languages -* Projects that deploy on multiple platforms -* Projects that have extensive tests - -## Where can I run Bazel? - -Bazel runs on Linux, macOS (OS X), and Windows. - -Porting to other UNIX platforms should be relatively easy, as long as a JDK is available for the platform. - -## What should I not use Bazel for? - -* Bazel tries to be smart about caching. This means that it is not good for running build operations whose outputs should not be cached. For example, the following steps should not be run from Bazel: - * A compilation step that fetches data from the internet. - * A test step that connects to the QA instance of your site. - * A deployment step that changes your site’s cloud configuration. -* If your build consists of a few long, sequential steps, Bazel may not be able to help much. You’ll get more speed by breaking long steps into smaller, discrete targets that Bazel can run in parallel. - -## How stable is Bazel’s feature set? - -The core features (C++, Java, and shell rules) have extensive use inside Google, so they are thoroughly tested and have very little churn. Similarly, we test new versions of Bazel across hundreds of thousands of targets every day to find regressions, and we release new versions multiple times every month. - -In short, except for features marked as experimental, Bazel should Just Work. Changes to non-experimental rules will be backward compatible. A more detailed list of feature support statuses can be found in our [support document](/contribute/support). - -## How stable is Bazel as a binary? - -Inside Google, we make sure that Bazel crashes are very rare. This should also hold for our open source codebase. - -## How can I start using Bazel? - -See [Getting Started](/start/). - -## Doesn’t Docker solve the reproducibility problems? - -With Docker you can easily create sandboxes with fixed OS releases, for example, Ubuntu 12.04, Fedora 21. This solves the problem of reproducibility for the system environment – that is, “which version of /usr/bin/c++ do I need?” - -Docker does not address reproducibility with regard to changes in the source code. Running Make with an imperfectly written Makefile inside a Docker container can still yield unpredictable results. - -Inside Google, we check tools into source control for reproducibility. In this way, we can vet changes to tools (“upgrade GCC to 4.6.1”) with the same mechanism as changes to base libraries (“fix bounds check in OpenSSL”). - -## Can I build binaries for deployment on Docker? - -With Bazel, you can build standalone, statically linked binaries in C/C++, and self-contained jar files for Java. These run with few dependencies on normal UNIX systems, and as such should be simple to install inside a Docker container. - -Bazel has conventions for structuring more complex programs, for example, a Java program that consumes a set of data files, or runs another program as subprocess. It is possible to package up such environments as standalone archives, so they can be deployed on different systems, including Docker images. - -## Can I build Docker images with Bazel? 
-
-Yes, you can use our [Docker rules](https://github.com/bazelbuild/rules_docker) to build reproducible Docker images.
-
-## Will Bazel make my builds reproducible automatically?
-
-For Java and C++ binaries, yes, assuming you do not change the toolchain. If you have build steps that involve custom recipes (for example, executing binaries through a shell script inside a rule), you will need to take some extra care:
-
-* Do not use dependencies that were not declared. Sandboxed execution (--spawn\_strategy=sandboxed, only on Linux) can help find undeclared dependencies.
-* Avoid storing timestamps and user-IDs in generated files. ZIP files and other archives are especially prone to this.
-* Avoid connecting to the network. Sandboxed execution can help here too.
-* Avoid processes that use random numbers; in particular, dictionary traversal is randomized in many programming languages.
-
-## Do you have binary releases?
-
-Yes, you can find the latest [release binaries](https://github.com/bazelbuild/bazel/releases/latest) and review our [release policy](/release/).
-
-## I use Eclipse/IntelliJ/Xcode. How does Bazel interoperate with IDEs?
-
-For IntelliJ, check out the [IntelliJ with Bazel plugin](https://ij.bazel.build/).
-
-For Xcode, check out [Tulsi](http://tulsi.bazel.build/).
-
-For Eclipse, check out [E4B plugin](https://github.com/bazelbuild/e4b).
-
-For other IDEs, check out the [blog post](https://blog.bazel.build/2016/06/10/ide-support.html) on how these plugins work.
-
-## I use Jenkins/CircleCI/TravisCI. How does Bazel interoperate with CI systems?
-
-Bazel returns a non-zero exit code if the build or test invocation fails, and this should be enough for basic CI integration. Since Bazel does not need clean builds for correctness, the CI system should not be configured to clean before starting a build/test run.
-
-Further details on exit codes are in the [User Manual](/docs/user-manual).
-
-## What future features can we expect in Bazel?
-
-See our [Roadmaps](/about/roadmap).
-
-## Can I use Bazel for my INSERT LANGUAGE HERE project?
-
-Bazel is extensible. Anyone can add support for new languages. Many languages are supported: see the [build encyclopedia](/reference/be/overview) for a list of recommendations and [awesomebazel.com](https://awesomebazel.com/) for a more comprehensive list.
-
-If you would like to develop extensions or learn how they work, see the documentation for [extending Bazel](/extending/concepts).
-
-## Can I contribute to the Bazel code base?
-
-See our [contribution guidelines](/contribute/).
-
-## Why isn’t all development done in the open?
-
-We still have to refactor the interfaces between the public code in Bazel and our internal extensions frequently. This makes it hard to do much development in the open.
-
-## Are you done open sourcing Bazel?
-
-Open sourcing Bazel is a work-in-progress. In particular, we’re still working on open sourcing:
-
-* Many of our unit and integration tests (which should make contributing patches easier).
-* Full IDE integration.
-
-Beyond code, we’d like to eventually have all code reviews, bug tracking, and design decisions happen publicly, with the Bazel community involved. We are not there yet, so some changes will simply appear in the Bazel repository without clear explanation. Despite this lack of transparency, we want to support external developers and collaborate. Thus, we are opening up the code, even though some of the development is still happening internal to Google. Please let us know if anything seems unclear or unjustified as we transition to an open model.
-
-## Are there parts of Bazel that will never be open sourced?
-
-Yes, some of the code base either integrates with Google-specific technology or is code that we have been looking for an excuse to get rid of (or some combination of the two). These parts of the code base are not available on GitHub and probably never will be.
-
-## How do I contact the team?
-
-We are reachable at bazel-discuss@googlegroups.com.
-
-## Where do I report bugs?
-
-Open an issue [on GitHub](https://github.com/bazelbuild/bazel/issues).
-
-## What’s up with the word “Blaze” in the codebase?
-
-This is an internal name for the tool. Please refer to Blaze as Bazel.
-
-## Why do other Google projects (Android, Chrome) use other build tools?
-
-Until the first (Alpha) release, Bazel was not available externally, so open source projects such as Chromium and Android could not use it. In addition, the original lack of Windows support was a problem for building Windows applications, such as Chrome. Since the project has matured and become more stable, the [Android Open Source Project](https://source.android.com/) is in the process of migrating to Bazel.
-
-## How do you pronounce “Bazel”?
-
-The same way as “basil” (the herb) in US English: “BAY-zel”. It rhymes with “hazel”. IPA: /ˈbeɪzˌəl/
diff --git a/7.6.1/about/intro.mdx b/7.6.1/about/intro.mdx
deleted file mode 100644
index a531ac2..0000000
--- a/7.6.1/about/intro.mdx
+++ /dev/null
@@ -1,111 +0,0 @@
----
-title: 'Intro to Bazel'
----
-
-
-
-Bazel is an open-source build and test tool similar to Make, Maven, and Gradle.
-It uses a human-readable, high-level build language. Bazel supports projects in
-multiple languages and builds outputs for multiple platforms. Bazel supports
-large codebases across multiple repositories, and large numbers of users.
-
-## Benefits
-
-Bazel offers the following advantages:
-
-* **High-level build language.** Bazel uses an abstract, human-readable
-  language to describe the build properties of your project at a high
-  semantic level. Unlike other tools, Bazel operates on the *concepts*
-  of libraries, binaries, scripts, and data sets, shielding you from the
-  complexity of writing individual calls to tools such as compilers and
-  linkers.
-
-* **Bazel is fast and reliable.** Bazel caches all previously done work and
-  tracks changes to both file content and build commands. This way, Bazel
-  knows when something needs to be rebuilt, and rebuilds only that. To further
-  speed up your builds, you can set up your project to build in a highly
-  parallel and incremental fashion.
-
-* **Bazel is multi-platform.** Bazel runs on Linux, macOS, and Windows. Bazel
-  can build binaries and deployable packages for multiple platforms, including
-  desktop, server, and mobile, from the same project.
-
-* **Bazel scales.** Bazel maintains agility while handling builds with 100k+
-  source files. It works with multiple repositories and user bases in the tens
-  of thousands.
-
-* **Bazel is extensible.** Many [languages](/rules) are
-  supported, and you can extend Bazel to support any other language or
-  framework.
-
-## Using Bazel
-
-To build or test a project with Bazel, you typically do the following:
-
-1. **Set up Bazel.** Download and [install Bazel](/install).
-
-2. **Set up a project [workspace](/concepts/build-ref#workspaces)**, which is a
-   directory where Bazel looks for build inputs and `BUILD` files, and where it
-   stores build outputs.
-
-3. **Write a `BUILD` file**, which tells Bazel what to build and how to
-   build it.
-
-   You write your `BUILD` file by declaring build targets using
-   [Starlark](/rules/language), a domain-specific language. (See example
-   [here](https://github.com/bazelbuild/bazel/blob/master/examples/cpp/BUILD).)
-
-   A build target specifies a set of input artifacts that Bazel will build plus
-   their dependencies, the build rule Bazel will use to build it, and options
-   that configure the build rule.
-
-   A build rule specifies the build tools Bazel will use, such as compilers and
-   linkers, and their configurations. Bazel ships with a number of build rules
-   covering the most common artifact types in the supported languages on
-   supported platforms.
-
-4. **Run Bazel** from the [command line](/reference/command-line-reference). Bazel
-   places your outputs within the workspace.
-
-In addition to building, you can also use Bazel to run
-[tests](/reference/test-encyclopedia) and [query](/query/guide) the build
-to trace dependencies in your code.
-
-## Bazel build process
-
-When running a build or a test, Bazel does the following:
-
-1. **Loads** the `BUILD` files relevant to the target.
-
-2. **Analyzes** the inputs and their
-   [dependencies](/concepts/dependencies), applies the specified build
-   rules, and produces an [action](/extending/concepts#evaluation-model)
-   graph.
-
-3. **Executes** the build actions on the inputs until the final build outputs
-   are produced.
-
-Since all previous build work is cached, Bazel can identify and reuse cached
-artifacts and only rebuild or retest what's changed. To further enforce
-correctness, you can set up Bazel to run builds and tests
-[hermetically](/basics/hermeticity) through sandboxing, minimizing skew
-and maximizing [reproducibility](/run/build#correct-incremental-rebuilds).
-
-### Action graph
-
-The action graph represents the build artifacts, the relationships between them,
-and the build actions that Bazel will perform. Thanks to this graph, Bazel can
-[track](/run/build#build-consistency) changes to
-file content as well as changes to actions, such as build or test commands, and
-know what build work has previously been done. The graph also enables you to
-easily [trace dependencies](/query/guide) in your code.
-
-## Getting started tutorials
-
-To get started with Bazel, see [Getting Started](/start/) or jump
-directly to the Bazel tutorials:
-
-* [Tutorial: Build a C++ Project](/start/cpp)
-* [Tutorial: Build a Java Project](/start/java)
-* [Tutorial: Build an Android Application](/start/android-app)
-* [Tutorial: Build an iOS Application](/start/ios-app)
diff --git a/7.6.1/about/roadmap.mdx b/7.6.1/about/roadmap.mdx
deleted file mode 100644
index 2b14110..0000000
--- a/7.6.1/about/roadmap.mdx
+++ /dev/null
@@ -1,75 +0,0 @@
----
-title: 'Bazel roadmap'
----
-
-
-
-## Overview
-
-Happy new year to our Bazel community. With the new year, we plan to bring details of our 2023 roadmap. Last year, we published our 2022 roadmap with our Bazel 6.0 plans. We hope that the roadmap we provided informed your build tooling needs. As the Bazel project continues to evolve in response to your needs, we want to share our 2023 update.
-
-With these changes, we’re looking to keep our open-source community informed and included. This roadmap describes current initiatives and predictions for the future of Bazel development, giving you visibility into current priorities and ongoing projects.
-
-## Bazel 7.0 Release
-We plan to bring Bazel 7.0 [long term support (LTS)](https://bazel.build/release/versioning) to you in late 2023. With Bazel 7.0, we aim to deliver many of the in-progress items and continue to work through the feature improvements that our users have been asking for.
-
-### Better cross-platform cache sharing
-Enables [cached artifacts](https://docs.google.com/document/d/1o0mrl2DanfV_6kB_Kf_jUdge13CQ8CvCiqeni2o-rvA/edit#heading=h.mvuo768l4ja2) to be shared across different local (Mac) and remote (Linux) build platforms, primarily for Java/Kotlin and Android development, resulting in better performance and efficient cache usage.
-
-### Android app build with Bazel
-Manifest & Resource Merger updates to v30.1.3, so Android app developers can use newer manifest merging features like `tools:node="merge"`.
-
-### Remote execution improvements
-Bazel 7.0 provides support for asynchronous execution, speeding up remote execution via increased parallelism with the `--jobs` flag.
-
-### Bzlmod: external dependency management system
-[Bzlmod](https://bazel.build/docs/bzlmod) automatically resolves transitive dependencies, allowing projects to scale while staying fast and resource-efficient. Bazel 7.0 contains a number of enhancements to [Bazel's external dependency management](https://docs.google.com/document/d/1moQfNcEIttsk6vYanNKIy3ZuK53hQUFq1b1r0rmsYVg/edit#heading=h.lgyp7ubwxmjc) functionality, including:
-
-- Bzlmod turned on by default for external dependency management in Bazel
-- Lock file support — enables hermetic builds with Bzlmod
-- Vendor/offline mode support — allows users to run builds with pre-downloaded dependencies
-- Complete repository cache support (caching not only downloaded artifacts, but also the final repository content)
-- [Bazel Central Registry](https://registry.bazel.build/) includes regular community contributions and adoption of key Bazel rules & projects
-
-### Build analysis metrics
-Bazel 7.0 provides analysis-phase time metrics, letting developers optimize their own build performance.
-
-### Build without the Bytes turned on by default
-[Builds without the Bytes](https://github.com/bazelbuild/bazel/issues/6862) optimizes performance by avoiding the download of intermediate artifacts and preventing builds from bottlenecking on network bandwidth. Features added include:
-
-- [Support for remote cache eviction with a lease service](https://docs.google.com/document/d/1wM61xufcMS5W0LQ0ar5JBREiN9zKfgu6AnHVD7BSkR4/edit#heading=h.mflzzzunlhlz), so that users don’t run into errors when artifacts are evicted prematurely
-
-- Address feature gaps in symlink support
-- Provide options to retrieve intermediate outputs from remote actions
-
-### Build Productivity with Skymeld
-Bazel 7.0 introduces Skymeld — an evaluation mode that reduces the wall time of your multi-target builds. Skymeld eliminates the barrier between analysis and execution phases to improve build speeds, especially for builds with multiple top-level targets. However, for single-target builds, no significant difference is expected.
-
-## Bazel Ecosystem & Tooling
-
-### Android app build with Bazel
-- Migrate Android native rules to Starlark: For Bazel 7.0, the Android rules migrate to Starlark to decouple development from Bazel itself and to better enable community contributions. Additionally, we have made these rules independent of the core Bazel binary, allowing us to release more frequently.
-- [Migration of Android rules to Starlark](https://bazel.build/reference/be/android)
-- R8 support: Allows Android app developers to use R8's updated optimizations.
-- Mobile Install: Allows Android app developers to develop, test, and deploy Android app changes quickly through an updated version of [Mobile Install](https://bazel.build/docs/mobile-install).
-
-### Software Bill of Materials data generation (SBOMs) & OSS license compliance tools
-With Bazel, developers can generate data to help produce [SBOMs](https://security.googleblog.com/2022/06/sbom-in-action-finding-vulnerabilities.html). This data is output in text or JSON format, and can be easily formatted to meet [SPDX](https://spdx.dev/specifications/) or [CycloneDX](https://cyclonedx.org/specification/overview/) specifications. Additionally, the process provides rules to declare the licenses Bazel modules are made available under, and tools to build processes around those declarations. See the in-progress [rules_license implementation](https://github.com/bazelbuild/rules_license) on GitHub.
-
-### Signed builds
-Bazel provides trusted binaries for Windows and Mac signed with Google keys. This feature enables multi-platform developers/dev-ops to identify the source of Bazel binaries and protect their systems from potentially malicious, unverified binaries.
-
-### Migration of Java, C++, and Python rules to Starlark
-Complete migration of Java, C++, and Python rulesets to Starlark. This effort allows Bazel users to fork only the rulesets and not the Bazel binary codebase, so that they can
-
-- Update and customize rules as needed
-- Update rules independently of Bazel
-
-### Bazel-JetBrains* IntelliJ IDEA support
-Incremental IntelliJ plugin updates to support the latest JetBrains plugin release.
-
-*This roadmap is a snapshot of our current targets and should not be taken as a guarantee. Priorities are subject to change in response to developer and customer feedback, or new market opportunities.*
-
-*To be notified of new features — including updates to this roadmap — join the [Google Group](https://groups.google.com/g/bazel-discuss) community.*
-
-*Copyright © 2022 JetBrains s.r.o. JetBrains and IntelliJ are registered trademarks of JetBrains s.r.o.
diff --git a/7.6.1/about/vision.mdx b/7.6.1/about/vision.mdx
deleted file mode 100644
index da0ed02..0000000
--- a/7.6.1/about/vision.mdx
+++ /dev/null
@@ -1,97 +0,0 @@
----
-title: 'Bazel Vision'
----
-
-
-
-Any software developer can efficiently build, test, and package
-any project, of any size or complexity, with tooling that's easy to adopt and
-extend.
-
-* **Engineers can take build fundamentals for granted.** Software developers
-  focus on the creative process of authoring code because the mechanical
-  process of build and test is solved. When customizing the build system to
-  support new languages or unique organizational needs, users focus on the
-  aspects of extensibility that are unique to their use case, without having
-  to reinvent the basic plumbing.
-
-* **Engineers can easily contribute to any project.** A developer who wants to
-  start working on a new project can simply clone the project and run the
-  build. There's no need for local configuration - it just works. With
-  cross-platform remote execution, they can work on any machine anywhere and
-  fully test their changes against all platforms the project targets.
-  Engineers can quickly configure the build for a new project or incrementally
-  migrate an existing build.
- -* **Projects can scale to any size codebase, any size team.** Fast, - incremental testing allows teams to fully validate every change before it is - committed. This remains true even as repos grow, projects span multiple - repos, and multiple languages are introduced. Infrastructure does not force - developers to trade test coverage for build speed. - -**We believe Bazel has the potential to fulfill this vision.** - -Bazel was built from the ground up to enable builds that are reproducible (a -given set of inputs will always produce the same outputs) and portable (a build -can be run on any machine without affecting the output). - -These characteristics support safe incrementality (rebuilding only changed -inputs doesn't introduce the risk of corruption) and distributability (build -actions are isolated and can be offloaded). By minimizing the work needed to do -a correct build and parallelizing that work across multiple cores and remote -systems, Bazel can make any build fast. - -Bazel's abstraction layer — instructions specific to languages, platforms, and -toolchains implemented in a simple extensibility language — allows it to be -easily applied to any context. - -## Bazel core competencies - -1. Bazel supports **multi-language, multi-platform** builds and tests. You can - run a single command to build and test your entire source tree, no matter - which combination of languages and platforms you target. -1. Bazel builds are **fast and correct**. Every build and test run is - incremental, on your developers' machines and on CI. -1. Bazel provides a **uniform, extensible language** to define builds for any - language or platform. -1. Bazel allows your builds **to scale** by connecting to remote execution and - caching services. -1. Bazel works across **all major development platforms** (Linux, MacOS, and - Windows). -1. We accept that adopting Bazel requires effort, but **gradual adoption** is - possible. Bazel interfaces with de-facto standard tools for a given - language/platform. - -## Serving language communities - -Software engineering evolves in the context of language communities — typically, -self-organizing groups of people who use common tools and practices. - -To be of use to members of a language community, high-quality Bazel rules must be -available that integrate with the workflows and conventions of that community. - -Bazel is committed to be extensible and open, and to support good rulesets for -any language. - -### Requirements of a good ruleset - -1. The rules need to support efficient **building and testing** for the - language, including code coverage. -1. The rules need to **interface with a widely-used "package manager"** for the - language (such as Maven for Java), and support incremental migration paths - from other widely-used build systems. -1. The rules need to be **extensible and interoperable**, following - ["Bazel sandwich"](https://github.com/bazelbuild/bazel-website/blob/master/designs/_posts/2016-08-04-extensibility-for-native-rules.md) - principles. -1. The rules need to be **remote-execution ready**. In practice, this means - **configurable using the [toolchains](/extending/toolchains) mechanism**. -1. The rules (and Bazel) need to interface with a **widely-used IDE** for the - language, if there is one. -1. The rules need to have **thorough, usable documentation,** with introductory - material for new users, comprehensive docs for expert users. 
-
-Each of these items is essential and only together do they deliver on Bazel's
-competencies for their particular ecosystem.
-
-They are also, by and large, sufficient - once all are fulfilled, Bazel fully
-delivers its value to members of that language community.
diff --git a/7.6.1/advanced/performance/build-performance-breakdown.mdx b/7.6.1/advanced/performance/build-performance-breakdown.mdx
deleted file mode 100644
index 477e757..0000000
--- a/7.6.1/advanced/performance/build-performance-breakdown.mdx
+++ /dev/null
@@ -1,235 +0,0 @@
----
-title: 'Breaking down build performance'
----
-
-
-
-Bazel is complex and does a lot of different things over the course of a build,
-some of which can have an impact on build performance. This page attempts to map
-some of these Bazel concepts to their implications for build performance. While
-not exhaustive, we have included some examples of how to detect build performance
-issues through [extracting metrics](/configure/build-performance-metrics)
-and what you can do to fix them. With this, we hope you can apply these concepts
-when investigating build performance regressions.
-
-### Clean vs Incremental builds
-
-A clean build is one that builds everything from scratch, while an incremental
-build reuses some already completed work.
-
-We suggest looking at clean and incremental builds separately, especially when
-you are collecting / aggregating metrics that are dependent on the state of
-Bazel’s caches (for example
-[build request size metrics](#deterministic-build-metrics-as-a-proxy-for-build-performance)
-). They also represent two different user experiences. Compared to starting
-a clean build from scratch (which takes longer due to a cold cache), incremental
-builds happen far more frequently as developers iterate on code (typically
-faster since the cache is usually already warm).
-
-You can use the `CumulativeMetrics.num_analyses` field in the BEP to classify
-builds. If `num_analyses <= 1`, it is a clean build; otherwise, we can broadly
-categorize it as likely being an incremental build - the user could have switched
-to different flags or different targets, causing an effectively clean build. Any
-more rigorous definition of incrementality will likely have to come in the form
-of a heuristic, for example looking at the number of packages loaded
-(`PackageMetrics.packages_loaded`).
-
-### Deterministic build metrics as a proxy for build performance
-
-Measuring build performance can be difficult due to the non-deterministic nature
-of certain metrics (for example Bazel’s CPU time or queue times on a remote
-cluster). As such, it can be useful to use deterministic metrics as a proxy for
-the amount of work done by Bazel, which in turn affects its performance.
-
-The size of a build request can have a significant impact on build
-performance. A larger build could represent more work in analyzing and
-constructing the build graphs. Organic growth of builds comes naturally with
-development: as more dependencies are added or created, builds grow in
-complexity and become more expensive to build.
-
-We can slice this problem into the various build phases, and use the following
-metrics as proxy metrics for work done at each phase:
-
-1. `PackageMetrics.packages_loaded`: the number of packages successfully loaded.
-   A regression here represents more work that needs to be done to read and parse
-   each additional BUILD file in the loading phase.
-
-   - This is often due to the addition of dependencies and having to load their
-     transitive closure.
-   - Use [query](/query/quickstart) / [cquery](/query/cquery) to find
-     where new dependencies might have been added.
-
-2. `TargetMetrics.targets_configured`: the number of targets and
-   aspects configured in the build. A regression represents more work in
-   constructing and traversing the configured target graph.
-
-   - This is often due to the addition of dependencies and having to construct
-     the graph of their transitive closure.
-   - Use [cquery](/query/cquery) to find where new
-     dependencies might have been added.
-
-3. `ActionSummary.actions_created`: represents the actions created in the build,
-   and a regression represents more work in constructing the action graph. Note
-   that this also includes unused actions that might not have been executed.
-
-   - Use [aquery](/query/aquery) for debugging regressions;
-     we suggest starting with
-     [`--output=summary`](/reference/command-line-reference#flag--output)
-     before further drilling down with
-     [`--skyframe_state`](/reference/command-line-reference#flag--skyframe_state).
-
-4. `ActionSummary.actions_executed`: the number of actions executed; a
-   regression directly represents more work in executing these actions.
-
-   - The [BEP](/remote/bep) writes out the action statistics
-     `ActionData` that shows the most executed action types. By default, it
-     collects the top 20 action types, but you can pass in the
-     [`--experimental_record_metrics_for_all_mnemonics`](/reference/command-line-reference#flag--experimental_record_metrics_for_all_mnemonics)
-     flag to collect this data for all action types that were executed.
-   - This should help you figure out what kinds of actions were (additionally)
-     executed.
-
-5. `BuildGraphMetrics.outputArtifactCount`: the number of artifacts created by
-   the executed actions.
-
-   - If the number of actions executed did not increase, then it is likely that
-     a rule implementation was changed.
-
-
-These metrics are all affected by the state of the local cache; hence, you will
-want to ensure that the builds you extract these metrics from are
-**clean builds**.
-
-We have noted that a regression in any of these metrics can be accompanied by
-regressions in wall time, CPU time, and memory usage.
-
-### Usage of local resources
-
-Bazel consumes a variety of resources on your local machine, both for analyzing
-the build graph and driving the execution, and for running local actions. This
-can affect the performance and availability of your machine, both for performing
-the build and for other tasks.
-
-#### Time spent
-
-Perhaps the metrics most susceptible to noise (they can vary greatly from build
-to build) are the time metrics; in particular wall time, CPU time, and system
-time. You can use [bazel-bench](https://github.com/bazelbuild/bazel-bench) to get
-a benchmark for these metrics, and with a sufficient number of `--runs`, you can
-increase the statistical significance of your measurement.
-
-- **Wall time** is the real world time elapsed.
-
-  - If _only_ wall time regresses, we suggest collecting a
-    [JSON trace profile](/advanced/performance/json-trace-profile) and looking
-    for differences. Otherwise, it would likely be more efficient to
-    investigate other regressed metrics as they could have affected the wall
-    time.
-
-- **CPU time** is the time spent by the CPU executing user code.
-
-  - If the CPU time regresses across two project commits, we suggest collecting
-    a Starlark CPU profile. You should probably also use `--nobuild` to
-    restrict the build to the analysis phase, since that is where most of the
-    CPU-heavy work is done.
-
-- **System time** is the time spent by the CPU in the kernel.
-
-  - If system time regresses, it is mostly correlated with I/O when Bazel reads
-    files from your file system.
-
-#### System-wide load profiling
-
-Using the
-[`--experimental_collect_load_average_in_profiler`](https://github.com/bazelbuild/bazel/blob/6.0.0/src/main/java/com/google/devtools/build/lib/runtime/CommonCommandOptions.java#L306-L312)
-flag introduced in Bazel 6.0, the
-[JSON trace profiler](/advanced/performance/json-trace-profile) collects the
-system load average during the invocation.
-
-![Profile that includes system load average](/docs/images/json-trace-profile-system-load-average.png "Profile that includes system load average")
-
-**Figure 1.** Profile that includes system load average.
-
-A high load during a Bazel invocation can be an indication that Bazel schedules
-too many local actions in parallel for your machine. You might want to look into
-adjusting
-[`--local_cpu_resources`](/reference/command-line-reference#flag--local_cpu_resources)
-and [`--local_ram_resources`](/reference/command-line-reference#flag--local_ram_resources),
-especially in container environments (at least until
-[#16512](https://github.com/bazelbuild/bazel/pull/16512) is merged).
-
-
-#### Monitoring Bazel memory usage
-
-There are two main sources for Bazel's memory usage: Bazel `info` and the
-[BEP](/remote/bep).
-
-- `bazel info used-heap-size-after-gc`: The amount of used memory in bytes after
-  a call to `System.gc()`.
-  - [Bazel bench](https://github.com/bazelbuild/bazel-bench)
-    provides benchmarks for this metric as well.
-  - Additionally, there are `peak-heap-size`, `max-heap-size`, `used-heap-size`
-    and `committed-heap-size` (see
-    [documentation](/docs/user-manual#configuration-independent-data)), but they
-    are less relevant.
-
-- [BEP](/remote/bep)'s
-  `MemoryMetrics.peak_post_gc_heap_size`: Size of the peak JVM heap size in
-  bytes post GC (requires setting
-  [`--memory_profile`](/reference/command-line-reference#flag--memory_profile),
-  which attempts to force a full GC).
-
-A regression in memory usage is usually a result of a regression in
-[build request size metrics](#deterministic_build_metrics_as_a_proxy_for_build_performance),
-which are often due to the addition of dependencies or a change in the rule
-implementation.
-
-To analyze Bazel's memory footprint on a more granular level, we recommend using
-the [built-in memory profiler](/rules/performance#memory-profiling)
-for rules.
-
-#### Memory profiling of persistent workers
-
-While [persistent workers](/remote/persistent) can help to speed up builds
-significantly (especially for interpreted languages), their memory footprint can
-be problematic. Bazel collects metrics on its workers; in particular, the
-`WorkerMetrics.WorkerStats.worker_memory_in_kb` field tells you how much memory
-workers use (by mnemonic).
-
-The [JSON trace profiler](/advanced/performance/json-trace-profile) also
-collects persistent worker memory usage during the invocation if you pass the
-[`--experimental_collect_worker_data_in_profiler`](https://github.com/bazelbuild/bazel/blob/6.0.0/src/main/java/com/google/devtools/build/lib/runtime/CommonCommandOptions.java#L314-L320)
-flag (new in Bazel 6.0).
-
-![Profile that includes workers memory usage](/docs/images/json-trace-profile-workers-memory-usage.png "Profile that includes workers memory usage")
-
-**Figure 2.** Profile that includes workers memory usage.
-
-Lowering the value of
-[`--worker_max_instances`](/reference/command-line-reference#flag--worker_max_instances)
-(default 4) might help to reduce
-the amount of memory used by persistent workers. We are actively working on
-making Bazel's resource manager and scheduler smarter so that such fine tuning
-will be required less often in the future.
-
-### Monitoring network traffic for remote builds
-
-In remote execution, Bazel downloads artifacts that were built as a result of
-executing actions. As such, your network bandwidth can affect the performance
-of your build.
-
-If you are using remote execution for your builds, you might want to consider
-monitoring the network traffic during the invocation using the
-`NetworkMetrics.SystemNetworkStats` proto from the [BEP](/remote/bep)
-(requires passing `--experimental_collect_system_network_usage`).
-
-Furthermore, [JSON trace profiles](/advanced/performance/json-trace-profile)
-allow you to view system-wide network usage throughout the course of the build
-by passing the `--experimental_collect_system_network_usage` flag (new in Bazel
-6.0).
-
-![Profile that includes system-wide network usage](/docs/images/json-trace-profile-network-usage.png "Profile that includes system-wide network usage")
-
-**Figure 3.** Profile that includes system-wide network usage.
-
-A high but rather flat network usage when using remote execution might indicate
-that the network is the bottleneck in your build; if you are not using it
-already, consider turning on Build without the Bytes by passing
-[`--remote_download_minimal`](/reference/command-line-reference#flag--remote_download_minimal).
-This will speed up your builds by avoiding the download of unnecessary
-intermediate artifacts.
-
-Another option is to configure a local
-[disk cache](/reference/command-line-reference#flag--disk_cache) to save on
-download bandwidth.
diff --git a/7.6.1/advanced/performance/build-performance-metrics.mdx b/7.6.1/advanced/performance/build-performance-metrics.mdx
deleted file mode 100644
index 8391ea8..0000000
--- a/7.6.1/advanced/performance/build-performance-metrics.mdx
+++ /dev/null
@@ -1,97 +0,0 @@
----
-title: 'Extracting build performance metrics'
----
-
-
-Probably every Bazel user has experienced builds that were slow or slower than
-anticipated. Improving the performance of individual builds has particular value
-for targets with significant impact, such as:
-
-1. Core developer targets that are frequently iterated on and (re)built.
-
-2. Common libraries widely depended upon by other targets.
-
-3. A representative target from a class of targets (e.g. custom rules);
-   diagnosing and fixing issues in one build might help to resolve issues at the
-   larger scale.
-
-An important step in improving the performance of builds is to understand where
-resources are spent. This page lists different metrics you can collect.
-[Breaking down build performance](/configure/build-performance-breakdown) showcases
-how you can use these metrics to detect and fix build performance issues.
- -There are a few main ways to extract metrics from your Bazel builds, namely: - -## Build Event Protocol (BEP) - -Bazel outputs a variety of protocol buffers -[`build_event_stream.proto`](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto) -through the [Build Event Protocol (BEP)](/remote/bep), which -can be aggregated by a backend specified by you. Depending on your use cases, -you might decide to aggregate the metrics in various ways, but here we will go -over some concepts and proto fields that would be useful in general to consider. - -## Bazel’s query / cquery / aquery commands - -Bazel provides 3 different query modes ([query](/query/quickstart), -[cquery](/query/cquery) and [aquery](/query/aquery)) that allow users -to query the target graph, configured target graph and action graph -respectively. The query language provides a -[suite of functions](/query/language#functions) usable across the different -query modes, that allows you to customize your queries according to your needs. - -## JSON Trace Profiles - -For every build-like Bazel invocation, Bazel writes a trace profile in JSON -format. The [JSON trace profile](/advanced/performance/json-trace-profile) can -be very useful to quickly understand what Bazel spent time on during the -invocation. - -## Execution Log - -The [execution log](/remote/cache-remote) can help you to troubleshoot and fix -missing remote cache hits due to machine and environment differences or -non-deterministic actions. If you pass the flag -[`--experimental_execution_log_spawn_metrics`](/reference/command-line-reference#flag--experimental_execution_log_spawn_metrics) -(available from Bazel 5.2) it will also contain detailed spawn metrics, both for -locally and remotely executed actions. You can use these metrics for example to -make comparisons between local and remote machine performance or to find out -which part of the spawn execution is consistently slower than expected (for -example due to queuing). - -## Execution Graph Log - -While the JSON trace profile contains the critical path information, sometimes -you need additional information on the dependency graph of the executed actions. -Starting with Bazel 6.0, you can pass the flags -`--experimental_execution_graph_log` and -`--experimental_execution_graph_log_dep_type=all` to write out a log about the -executed actions and their inter-dependencies. - -This information can be used to understand the drag that is added by a node on -the critical path. The drag is the amount of time that can potentially be saved -by removing a particular node from the execution graph. - -The data helps you predict the impact of changes to the build and action graph -before you actually do them. - -## Benchmarking with bazel-bench - -[Bazel bench](https://github.com/bazelbuild/bazel-bench) is a -benchmarking tool for Git projects to benchmark build performance in the -following cases: - -* **Project benchmark:** Benchmarking two git commits against each other at a - single Bazel version. Used to detect regressions in your build (often through - the addition of dependencies). - -* **Bazel benchmark:** Benchmarking two versions of Bazel against each other at - a single git commit. Used to detect regressions within Bazel itself (if you - happen to maintain / fork Bazel). - -Benchmarks monitor wall time, CPU time and system time and Bazel’s retained -heap size. 
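-
-As a rough sketch of a project benchmark (the repository URL, commits, and
-target are placeholders, and the exact flag names should be verified against
-the bazel-bench README), an invocation from a bazel-bench checkout might look
-like:
-
-```
-$ bazel run :benchmark -- \
-    --project_source=https://github.com/my-org/my-project.git \
-    --project_commits=abc1234,def5678 \
-    --runs=5 \
-    -- build //some:target
-```
-
-A higher number of runs increases the statistical significance of the
-measurement.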
- -It is also recommended to run Bazel bench on dedicated, physical machines that -are not running other processes so as to reduce sources of variability. diff --git a/7.6.1/advanced/performance/iteration-speed.mdx b/7.6.1/advanced/performance/iteration-speed.mdx deleted file mode 100644 index 8ff923f..0000000 --- a/7.6.1/advanced/performance/iteration-speed.mdx +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: 'Optimize Iteration Speed' ---- - - - -This page describes how to optimize Bazel's build performance when running Bazel -repeatedly. - -## Bazel's Runtime State - -A Bazel invocation involves several interacting parts. - -* The `bazel` command line interface (CLI) is the user-facing front-end tool - and receives commands from the user. - -* The CLI tool starts a [*Bazel server*](https://bazel.build/run/client-server) - for each distinct [output base](https://bazel.build/remote/output-directories). - The Bazel server is generally persistent, but will shut down after some idle - time so as to not waste resources. - -* The Bazel server performs the loading and analysis steps for a given command - (`build`, `run`, `cquery`, etc.), in which it constructs the necessary parts - of the build graph in memory. The resulting data structures are retained in - the Bazel server as part of the *analysis cache*. - -* The Bazel server can also perform the action execution, or it can send - actions off for remote execution if it is set up to do so. The results of - action executions are also cached, namely in the *action cache* (or - *execution cache*, which may be either local or remote, and it may be shared - among Bazel servers). - -* The result of the Bazel invocation is made available in the output tree. - -## Running Bazel Iteratively - -In a typical developer workflow, it is common to build (or run) a piece of code -repeatedly, often at a very high frequency (e.g. to resolve some compilation -error or investigate a failing test). In this situation, it is important that -repeated invocations of `bazel` have as little overhead as possible relative to -the underlying, repeated action (e.g. invoking a compiler, or executing a test). - -With this in mind, we take another look at Bazel's runtime state: - -The analysis cache is a critical piece of data. A significant amount of time can -be spent just on the loading and analysis phases of a cold run (i.e. a run just -after the Bazel server was started or when the analysis cache was discarded). -For a single, successful cold build (e.g. for a production release) this cost is -bearable, but for repeatedly building the same target it is important that this -cost be amortized and not repeated on each invocation. - -The analysis cache is rather volatile. First off, it is part of the in-process -state of the Bazel server, so losing the server loses the cache. But the cache -is also *invalidated* very easily: for example, many `bazel` command line flags -cause the cache to be discarded. This is because many flags affect the build -graph (e.g. because of -[configurable attributes](https://bazel.build/configure/attributes)). Some flag -changes can also cause the Bazel server to be restarted (e.g. changing -[startup options](https://bazel.build/docs/user-manual#startup-options)). - -A good execution cache is also valuable for build performance. An execution -cache can be kept locally -[on disk](https://bazel.build/remote/caching#disk-cache), or -[remotely](https://bazel.build/remote/caching). The cache can be shared among -Bazel servers, and indeed among developers. 
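-
-For example, a minimal `.bazelrc` sketch along these lines (the path and the
-timeout are placeholder values to adapt) keeps action results in a local disk
-cache and lets the server stay alive longer between invocations:
-
-```
-# Store action results on disk so they survive Bazel server restarts.
-build --disk_cache=~/.cache/bazel-disk-cache
-
-# Keep the idle Bazel server (and its analysis cache) around for 3 hours.
-startup --max_idle_secs=10800
-```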
-
-## Avoid discarding the analysis cache
-
-Bazel will print a warning if either the analysis cache was discarded or the
-server was restarted. Either of these should be avoided during iterative use:
-
-* Be mindful of changing `bazel` flags in the middle of an iterative
-  workflow. For example, mixing a `bazel build -c opt` with a `bazel cquery`
-  causes each command to discard the analysis cache of the other. In general,
-  try to use a fixed set of flags for the duration of a particular workflow.
-
-* Losing the Bazel server loses the analysis cache. The Bazel server has a
-  [configurable](https://bazel.build/docs/user-manual#max-idle-secs) idle
-  time, after which it shuts down. You can configure this time via your
-  bazelrc file to suit your needs. The server is also restarted when startup
-  flags change, so, again, avoid changing those flags if possible.
-
-* Beware that the Bazel server is killed if you press
-  Ctrl-C repeatedly while Bazel is running. It is tempting to try to save time
-  by interrupting a running build that is no longer needed, but only press
-  Ctrl-C once to request a graceful end of the current invocation.
-
-* If you want to use multiple sets of flags from the same workspace, you can
-  use multiple, distinct output bases, switched with the `--output_base`
-  flag. Each output base gets its own Bazel server.
diff --git a/7.6.1/advanced/performance/json-trace-profile.mdx b/7.6.1/advanced/performance/json-trace-profile.mdx
deleted file mode 100644
index 3c53102..0000000
--- a/7.6.1/advanced/performance/json-trace-profile.mdx
+++ /dev/null
@@ -1,196 +0,0 @@
----
-title: 'JSON Trace Profile'
----
-
-
-The JSON trace profile can be very useful to quickly understand what Bazel spent
-time on during the invocation.
-
-By default, for all build-like commands and query, Bazel writes such a profile to
-`command.profile.gz`. You can configure whether a profile is written with the
-[`--generate_json_trace_profile`](/reference/command-line-reference#flag--generate_json_trace_profile)
-flag, and the location it is written to with the
-[`--profile`](/docs/user-manual#profile) flag. Locations ending with `.gz` are
-compressed with GZIP. Use the flag
-[`--experimental_announce_profile_path`](/reference/command-line-reference#flag--experimental_announce_profile_path)
-to print the path to this file to the log.
-
-## Tools
-
-You can load this profile into `chrome://tracing` or analyze and
-post-process it with other tools.
-
-### `chrome://tracing`
-
-To visualize the profile, open `chrome://tracing` in a Chrome browser tab,
-click "Load" and pick the (potentially compressed) profile file. For more
-detailed results, click the boxes in the lower left corner.
-
-Example profile:
-
-![Example profile](/docs/images/json-trace-profile.png "Example profile")
-
-**Figure 1.** Example profile.
-
-You can use these keyboard controls to navigate:
-
-* Press `1` for "select" mode. In this mode, you can select
-  particular boxes to inspect the event details (see lower left corner).
-  Select multiple events to get a summary and aggregated statistics.
-* Press `2` for "pan" mode. Then drag the mouse to move the view. You
-  can also use `a`/`d` to move left/right.
-* Press `3` for "zoom" mode. Then drag the mouse to zoom. You can
-  also use `w`/`s` to zoom in/out.
-* Press `4` for "timing" mode where you can measure the distance
-  between two events.
-* Press `?` to learn about all controls.
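-
-For example, to produce a compressed profile at a known location and have Bazel
-print its path (a sketch; the target label is a placeholder, and the flags are
-the ones described above), you might run:
-
-```
-$ bazel build \
-    --profile=/tmp/profile.gz \
-    --experimental_announce_profile_path \
-    //path/to:target
-```
-
-The resulting `/tmp/profile.gz` is the file to load into `chrome://tracing`.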
- -### `bazel analyze-profile` - -The Bazel subcommand [`analyze-profile`](/docs/user-manual#analyze-profile) -consumes a profile format and prints cumulative statistics for -different task types for each build phase and an analysis of the critical path. - -For example, the commands - -``` -$ bazel build --profile=/tmp/profile.gz //path/to:target -... -$ bazel analyze-profile /tmp/profile.gz -``` - -may yield output of this form: - -``` -INFO: Profile created on Tue Jun 16 08:59:40 CEST 2020, build ID: 0589419c-738b-4676-a374-18f7bbc7ac23, output base: /home/johndoe/.cache/bazel/_bazel_johndoe/d8eb7a85967b22409442664d380222c0 - -=== PHASE SUMMARY INFORMATION === - -Total launch phase time 1.070 s 12.95% -Total init phase time 0.299 s 3.62% -Total loading phase time 0.878 s 10.64% -Total analysis phase time 1.319 s 15.98% -Total preparation phase time 0.047 s 0.57% -Total execution phase time 4.629 s 56.05% -Total finish phase time 0.014 s 0.18% ------------------------------------------------- -Total run time 8.260 s 100.00% - -Critical path (4.245 s): - Time Percentage Description - 8.85 ms 0.21% _Ccompiler_Udeps for @local_config_cc// compiler_deps - 3.839 s 90.44% action 'Compiling external/com_google_protobuf/src/google/protobuf/compiler/php/php_generator.cc [for host]' - 270 ms 6.36% action 'Linking external/com_google_protobuf/protoc [for host]' - 0.25 ms 0.01% runfiles for @com_google_protobuf// protoc - 126 ms 2.97% action 'ProtoCompile external/com_google_protobuf/python/google/protobuf/compiler/plugin_pb2.py' - 0.96 ms 0.02% runfiles for //tools/aquery_differ aquery_differ -``` - -### Bazel Invocation Analyzer - -The open-source -[Bazel Invocation Analyzer](https://github.com/EngFlow/bazel_invocation_analyzer) -consumes a profile format and prints suggestions on how to improve -the build’s performance. This analysis can be performed using its CLI or on -[https://analyzer.engflow.com](https://analyzer.engflow.com). - -### `jq` - -`jq` is like `sed` for JSON data. An example usage of `jq` to extract all -durations of the sandbox creation step in local action execution: - -``` -$ zcat $(../bazel-6.0.0rc1-linux-x86_64 info output_base)/command.profile.gz | jq '.traceEvents | .[] | select(.name == "sandbox.createFileSystem") | .dur' -6378 -7247 -11850 -13756 -6555 -7445 -8487 -15520 -[...] -``` - -## Profile information - -The profile contains multiple rows. Usually the bulk of rows represent Bazel -threads and their corresponding events, but some special rows are also included. - -The special rows included depend on the version of Bazel invoked when the -profile was created, and may be customized by different flags. - -Figure 1 shows a profile created with Bazel v5.3.1 and includes these rows: - -* `action count`: Displays how many concurrent actions were in flight. Click - on it to see the actual value. Should go up to the value of - [`--jobs`](/reference/command-line-reference#flag--jobs) in clean - builds. -* `CPU usage (Bazel)`: For each second of the build, displays the amount of - CPU that was used by Bazel (a value of 1 equals one core being 100% busy). -* `Critical Path`: Displays one block for each action on the critical path. -* `Main Thread`: Bazel’s main thread. Useful to get a high-level picture of - what Bazel is doing, for example "Launch Blaze", "evaluateTargetPatterns", - and "runAnalysisPhase". -* `Garbage Collector`: Displays minor and major Garbage Collection (GC) - pauses. 
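-
-As another `jq` sketch, assuming a profile in the default location, the
-following lists the five longest-running actions by duration (the `cat`, `dur`,
-and `name` fields are part of the profile format described below):
-
-```
-$ zcat $(bazel info output_base)/command.profile.gz | \
-    jq -r '[.traceEvents[] | select(.cat == "action processing")]
-           | sort_by(-.dur) | .[:5][] | "\(.dur) \(.name)"'
-```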
-
-## Common performance issues
-
-When analyzing performance profiles, look for:
-
-* Slower than expected analysis phase (`runAnalysisPhase`), especially on
-  incremental builds. This can be a sign of a poor rule implementation, for
-  example one that flattens depsets. Package loading can be slowed down by an
-  excessive number of targets, complex macros, or recursive globs.
-* Individual slow actions, especially those on the critical path. It might be
-  possible to split large actions into multiple smaller actions or reduce the
-  set of (transitive) dependencies to speed them up. Also check for an unusually
-  high non-`PROCESS_TIME` (such as `REMOTE_SETUP` or `FETCH`).
-* Bottlenecks, that is, a small number of threads being busy while all others
-  are idling / waiting for the result (see around 22s and 29s in Figure 1).
-  Optimizing this will most likely require touching the rule implementations
-  or Bazel itself to introduce more parallelism. This can also happen when
-  there is an unusual amount of GC.
-
-## Profile file format
-
-The top-level object contains metadata (`otherData`) and the actual tracing data
-(`traceEvents`). The metadata contains extra info, for example the invocation ID
-and date of the Bazel invocation.
-
-Example:
-
-```json
-{
-  "otherData": {
-    "build_id": "101bff9a-7243-4c1a-8503-9dc6ae4c3b05",
-    "date": "Wed Oct 26 08:22:35 CEST 2022",
-    "profile_finish_ts": "1677666095162000",
-    "output_base": "/usr/local/google/_bazel_johndoe/573d4be77eaa72b91a3dfaa497bf8cd0"
-  },
-  "traceEvents": [
-    {"name":"thread_name","ph":"M","pid":1,"tid":0,"args":{"name":"Critical Path"}},
-    ...
-    {"cat":"build phase marker","name":"Launch Blaze","ph":"X","ts":-1306000,"dur":1306000,"pid":1,"tid":21},
-    ...
-    {"cat":"package creation","name":"foo","ph":"X","ts":2685358,"dur":784,"pid":1,"tid":246},
-    ...
-    {"name":"thread_name","ph":"M","pid":1,"tid":11,"args":{"name":"Garbage Collector"}},
-    {"cat":"gc notification","name":"minor GC","ph":"X","ts":825986,"dur":11000,"pid":1,"tid":11},
-    ...
-    {"cat":"action processing","name":"Compiling foo/bar.c","ph":"X","ts":54413389,"dur":357594,"pid":1,"args":{"mnemonic":"CppCompile"},"tid":341},
-  ]
-}
-```
-
-Timestamps (`ts`) and durations (`dur`) in the trace events are given in
-microseconds. The category (`cat`) is one of the enum values of `ProfilerTask`.
-Note that some events are merged together if they are very short and close to
-each other; pass
-[`--noslim_profile`](/reference/command-line-reference#flag--slim_profile)
-if you would like to prevent event merging.
-
-See also the
-[Chrome Trace Event Format Specification](https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview).
diff --git a/7.6.1/advanced/performance/memory.mdx b/7.6.1/advanced/performance/memory.mdx
deleted file mode 100644
index f1cbd9c..0000000
--- a/7.6.1/advanced/performance/memory.mdx
+++ /dev/null
@@ -1,47 +0,0 @@
----
-title: 'Optimize Memory'
----
-
-
-This page describes how to limit and reduce the memory Bazel uses.
-
-## Running Bazel with Limited RAM
-
-In certain situations, you may want Bazel to use minimal memory. You can set the
-maximum heap via the startup flag
-[`--host_jvm_args`](/docs/user-manual#host-jvm-args),
-like `--host_jvm_args=-Xmx2g`.
-
-However, if your builds are big enough, Bazel may throw an `OutOfMemoryError`
-(OOM) when it doesn't have enough memory.
-You can make Bazel use less memory, at
-the cost of slower incremental builds, by passing the following command flags:
-[`--discard_analysis_cache`](/docs/user-manual#discard-analysis-cache),
-[`--nokeep_state_after_build`](/reference/command-line-reference#flag--keep_state_after_build),
-and
-[`--notrack_incremental_state`](/reference/command-line-reference#flag--track_incremental_state).
-
-These flags will minimize the memory that Bazel uses in a build, at the cost of
-making future builds slower than a standard incremental build would be.
-
-You can also pass any one of these flags individually:
-
- * `--discard_analysis_cache` will reduce the memory used during execution (not
-   analysis). Incremental builds will not have to redo package loading, but will
-   have to redo analysis and execution (although the on-disk action cache can
-   prevent most re-execution).
- * `--notrack_incremental_state` will not store any edges in Bazel's internal
-   dependency graph, so that it is unusable for incremental builds. The next build
-   will discard that data, but it is preserved until then, for internal debugging,
-   unless `--nokeep_state_after_build` is specified.
- * `--nokeep_state_after_build` will discard all data after the build, so that
-   incremental builds have to build from scratch (except for the on-disk action
-   cache). Alone, it does not affect the high-water mark of the current build.
-
-## Memory Profiling
-
-Bazel comes with a built-in memory profiler that can help you check your rule's
-memory use. Read more about this process in the
-[Memory Profiling section](/rules/performance#memory-profiling) of our
-documentation on how to improve the performance of custom rules.
-
diff --git a/7.6.1/basics/artifact-based-builds.mdx b/7.6.1/basics/artifact-based-builds.mdx
deleted file mode 100644
index 79f3514..0000000
--- a/7.6.1/basics/artifact-based-builds.mdx
+++ /dev/null
@@ -1,279 +0,0 @@
----
-title: 'Artifact-Based Build Systems'
----
-
-
-This page covers artifact-based build systems and the philosophy behind their
-creation. Bazel is an artifact-based build system. While task-based build
-systems are a good step above build scripts, they give too much power to
-individual engineers by letting them define their own tasks.
-
-Artifact-based build systems have a small number of tasks defined by the system
-that engineers can configure in a limited way. Engineers still tell the system
-**what** to build, but the build system determines **how** to build it. As with
-task-based build systems, artifact-based build systems, such as Bazel, still
-have buildfiles, but the contents of those buildfiles are very different. Rather
-than being an imperative set of commands in a Turing-complete scripting language
-describing how to produce an output, buildfiles in Bazel are a declarative
-manifest describing a set of artifacts to build, their dependencies, and a
-limited set of options that affect how they're built. When engineers run `bazel`
-on the command line, they specify a set of targets to build (the **what**), and
-Bazel is responsible for configuring, running, and scheduling the compilation
-steps (the **how**). Because the build system now has full control over what
-tools to run when, it can make much stronger guarantees that allow it to be far
-more efficient while still guaranteeing correctness.
-
-## A functional perspective
-
-It's easy to make an analogy between artifact-based build systems and functional
-programming.
-Traditional imperative programming languages (such as Java, C, and
-Python) specify lists of statements to be executed one after another, in the
-same way that task-based build systems let programmers define a series of steps
-to execute. Functional programming languages (such as Haskell and ML), in
-contrast, are structured more like a series of mathematical equations. In
-functional languages, the programmer describes a computation to perform, but
-leaves the details of when and exactly how that computation is executed to the
-compiler.
-
-This maps to the idea of declaring a manifest in an artifact-based build system
-and letting the system figure out how to execute the build. Many problems can't
-be easily expressed using functional programming, but the ones that can benefit
-greatly from it: the language is often able to trivially parallelize such
-programs and make strong guarantees about their correctness that would be
-impossible in an imperative language. The easiest problems to express using
-functional programming are the ones that simply involve transforming one piece
-of data into another using a series of rules or functions. And that's exactly
-what a build system is: the whole system is effectively a mathematical function
-that takes source files (and tools like the compiler) as inputs and produces
-binaries as outputs. So, it's not surprising that it works well to base a build
-system around the tenets of functional programming.
-
-## Understanding artifact-based build systems
-
-Google's build system, Blaze, was the first artifact-based build system. Bazel
-is the open-sourced version of Blaze.
-
-Here's what a buildfile (normally named `BUILD`) looks like in Bazel:
-
-```python
-java_binary(
-    name = "MyBinary",
-    srcs = ["MyBinary.java"],
-    deps = [
-        ":mylib",
-    ],
-)
-java_library(
-    name = "mylib",
-    srcs = ["MyLibrary.java", "MyHelper.java"],
-    visibility = ["//java/com/example/myproduct:__subpackages__"],
-    deps = [
-        "//java/com/example/common",
-        "//java/com/example/myproduct/otherlib",
-    ],
-)
-```
-
-In Bazel, `BUILD` files define targets—the two types of targets here are
-`java_binary` and `java_library`. Every target corresponds to an artifact that
-can be created by the system: binary targets produce binaries that can be
-executed directly, and library targets produce libraries that can be used by
-binaries or other libraries. Every target has:
-
-* `name`: how the target is referenced on the command line and by other
-  targets
-* `srcs`: the source files to be compiled to create the artifact for the target
-* `deps`: other targets that must be built before this target and linked into
-  it
-
-Dependencies can either be within the same package (such as `MyBinary`'s
-dependency on `:mylib`) or on a different package in the same source hierarchy
-(such as `mylib`'s dependency on `//java/com/example/common`).
-
-As with task-based build systems, you perform builds using Bazel's command-line
-tool. To build the `MyBinary` target, you run `bazel build :MyBinary`. After
-entering that command for the first time in a clean repository, Bazel:
-
-1. Parses every `BUILD` file in the workspace to create a graph of dependencies
-   among artifacts.
-1. Uses the graph to determine the transitive dependencies of `MyBinary`; that
-   is, every target that `MyBinary` depends on and every target that those
-   targets depend on, recursively.
-1. Builds each of those dependencies, in order.
-   Bazel starts by building each
-   target that has no other dependencies and keeps track of which dependencies
-   still need to be built for each target. As soon as all of a target's
-   dependencies are built, Bazel starts building that target. This process
-   continues until every one of `MyBinary`'s transitive dependencies has been
-   built.
-1. Builds `MyBinary` to produce a final executable binary that links in all of
-   the dependencies that were built in step 3.
-
-Fundamentally, it might not seem like what's happening here is that much
-different than what happened when using a task-based build system. Indeed, the
-end result is the same binary, and the process for producing it involved
-analyzing a bunch of steps to find dependencies among them, and then running
-those steps in order. But there are critical differences. The first one appears
-in step 3: because Bazel knows that each target only produces a Java library, it
-knows that all it has to do is run the Java compiler rather than an arbitrary
-user-defined script, so it knows that it's safe to run these steps in parallel.
-This can produce an order of magnitude performance improvement over building
-targets one at a time on a multicore machine, and is only possible because the
-artifact-based approach leaves the build system in charge of its own execution
-strategy so that it can make stronger guarantees about parallelism.
-
-The benefits extend beyond parallelism, though. The next thing that this
-approach gives us becomes apparent when the developer types `bazel
-build :MyBinary` a second time without making any changes: Bazel exits in less
-than a second with a message saying that the target is up to date. This is
-possible due to the functional programming paradigm we talked about
-earlier—Bazel knows that each target is the result only of running a Java
-compiler, and it knows that the output from the Java compiler depends only on
-its inputs, so as long as the inputs haven't changed, the output can be reused.
-And this analysis works at every level; if `MyBinary.java` changes, Bazel knows
-to rebuild `MyBinary` but reuse `mylib`. If a source file for
-`//java/com/example/common` changes, Bazel knows to rebuild that library,
-`mylib`, and `MyBinary`, but reuse `//java/com/example/myproduct/otherlib`.
-Because Bazel knows about the properties of the tools it runs at every step,
-it's able to rebuild only the minimum set of artifacts each time while
-guaranteeing that it won't produce stale builds.
-
-Reframing the build process in terms of artifacts rather than tasks is subtle
-but powerful. By reducing the flexibility exposed to the programmer, the build
-system can know more about what is being done at every step of the build. It can
-use this knowledge to make the build far more efficient by parallelizing build
-processes and reusing their outputs. But this is really just the first step, and
-these building blocks of parallelism and reuse form the basis for a distributed
-and highly scalable build system.
-
-## Other nifty Bazel tricks
-
-Artifact-based build systems fundamentally solve the problems with parallelism
-and reuse that are inherent in task-based build systems. But there are still a
-few problems that came up earlier that we haven't addressed. Bazel has clever
-ways of solving each of these, and we should discuss them before moving on.
- -### Tools as dependencies - -One problem we ran into earlier was that builds depended on the tools installed -on our machine, and reproducing builds across systems could be difficult due to -different tool versions or locations. The problem becomes even more difficult -when your project uses languages that require different tools based on which -platform they’re being built on or compiled for (such as, Windows versus Linux), -and each of those platforms requires a slightly different set of tools to do the -same job. - -Bazel solves the first part of this problem by treating tools as dependencies to -each target. Every `java_library` in the workspace implicitly depends on a Java -compiler, which defaults to a well-known compiler. Whenever Bazel builds a -`java_library`, it checks to make sure that the specified compiler is available -at a known location. Just like any other dependency, if the Java compiler -changes, every artifact that depends on it is rebuilt. - -Bazel solves the second part of the problem, platform independence, by setting -[build configurations](/run/build#build-config-cross-compilation). Rather than -targets depending directly on their tools, they depend on types of configurations: - -* **Host configuration**: building tools that run during the build -* **Target configuration**: building the binary you ultimately requested - -### Extending the build system - -Bazel comes with targets for several popular programming languages out of the -box, but engineers will always want to do more—part of the benefit of task-based -systems is their flexibility in supporting any kind of build process, and it -would be better not to give that up in an artifact-based build system. -Fortunately, Bazel allows its supported target types to be extended by -[adding custom rules](/extending/rules). - -To define a rule in Bazel, the rule author declares the inputs that the rule -requires (in the form of attributes passed in the `BUILD` file) and the fixed -set of outputs that the rule produces. The author also defines the actions that -will be generated by that rule. Each action declares its inputs and outputs, -runs a particular executable or writes a particular string to a file, and can be -connected to other actions via its inputs and outputs. This means that actions -are the lowest-level composable unit in the build system—an action can do -whatever it wants so long as it uses only its declared inputs and outputs, and -Bazel takes care of scheduling actions and caching their results as appropriate. - -The system isn’t foolproof given that there’s no way to stop an action developer -from doing something like introducing a nondeterministic process as part of -their action. But this doesn’t happen very often in practice, and pushing the -possibilities for abuse all the way down to the action level greatly decreases -opportunities for errors. Rules supporting many common languages and tools are -widely available online, and most projects will never need to define their own -rules. Even for those that do, rule definitions only need to be defined in one -central place in the repository, meaning most engineers will be able to use -those rules without ever having to worry about their implementation. - -### Isolating the environment - -Actions sound like they might run into the same problems as tasks in other -systems—isn’t it still possible to write actions that both write to the same -file and end up conflicting with one another? 
Actually, Bazel makes these -conflicts impossible by using _[sandboxing](/docs/sandboxing)_. On supported -systems, every action is isolated from every other action via a filesystem -sandbox. Effectively, each action can see only a restricted view of the -filesystem that includes the inputs it has declared and any outputs it has -produced. This is enforced by systems such as LXC on Linux, the same technology -behind Docker. This means that it’s impossible for actions to conflict with one -another because they are unable to read any files they don’t declare, and any -files that they write but don’t declare will be thrown away when the action -finishes. Bazel also uses sandboxes to restrict actions from communicating via -the network. - -### Making external dependencies deterministic - -There’s still one problem remaining: build systems often need to download -dependencies (whether tools or libraries) from external sources rather than -directly building them. This can be seen in the example via the -`@com_google_common_guava_guava//jar` dependency, which downloads a `JAR` file -from Maven. - -Depending on files outside of the current workspace is risky. Those files could -change at any time, potentially requiring the build system to constantly check -whether they’re fresh. If a remote file changes without a corresponding change -in the workspace source code, it can also lead to unreproducible builds—a build -might work one day and fail the next for no obvious reason due to an unnoticed -dependency change. Finally, an external dependency can introduce a huge security -risk when it is owned by a third party: if an attacker is able to infiltrate -that third-party server, they can replace the dependency file with something of -their own design, potentially giving them full control over your build -environment and its output. - -The fundamental problem is that we want the build system to be aware of these -files without having to check them into source control. Updating a dependency -should be a conscious choice, but that choice should be made once in a central -place rather than managed by individual engineers or automatically by the -system. This is because even with a “Live at Head” model, we still want builds -to be deterministic, which implies that if you check out a commit from last -week, you should see your dependencies as they were then rather than as they are -now. - -Bazel and some other build systems address this problem by requiring a -workspacewide manifest file that lists a _cryptographic hash_ for every external -dependency in the workspace. The hash is a concise way to uniquely represent the -file without checking the entire file into source control. Whenever a new -external dependency is referenced from a workspace, that dependency’s hash is -added to the manifest, either manually or automatically. When Bazel runs a -build, it checks the actual hash of its cached dependency against the expected -hash defined in the manifest and redownloads the file only if the hash differs. - -If the artifact we download has a different hash than the one declared in the -manifest, the build will fail unless the hash in the manifest is updated. This -can be done automatically, but that change must be approved and checked into -source control before the build will accept the new dependency. This means that -there’s always a record of when a dependency was updated, and an external -dependency can’t change without a corresponding change in the workspace source. 
-It also means that, when checking out an older version of the source code, the -build is guaranteed to use the same dependencies that it was using at the point -when that version was checked in (or else it will fail if those dependencies are -no longer available). - -Of course, it can still be a problem if a remote server becomes unavailable or -starts serving corrupt data—this can cause all of your builds to begin failing -if you don’t have another copy of that dependency available. To avoid this -problem, we recommend that, for any nontrivial project, you mirror all of its -dependencies onto servers or services that you trust and control. Otherwise you -will always be at the mercy of a third party for your build system’s -availability, even if the checked-in hashes guarantee its security. diff --git a/7.6.1/basics/build-systems.mdx b/7.6.1/basics/build-systems.mdx deleted file mode 100644 index b3c6338..0000000 --- a/7.6.1/basics/build-systems.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: 'Why a Build System?' ---- - - - -This page discusses what build systems are, what they do, why you should use a -build system, and why compilers and build scripts aren't the best choice as your -organization starts to scale. It's intended for developers who don't have much -experience with a build system. - -## What is a build system? - -Fundamentally, all build systems have a straightforward purpose: they transform -the source code written by engineers into executable binaries that can be read -by machines. Build systems aren't just for human-authored code; they also allow -machines to create builds automatically, whether for testing or for releases to -production. In an organization with thousands of engineers, it's common that -most builds are triggered automatically rather than directly by engineers. - -### Can't I just use a compiler? - -The need for a build system might not be immediately obvious. Most engineers -don't use a build system while learning to code: most start by invoking tools -like `gcc` or `javac` directly from the command line, or the equivalent in an -integrated development environment (IDE). As long as all the source code is in -the same directory, a command like this works fine: - -```posix-terminal -javac *.java -``` - -This instructs the Java compiler to take every Java source file in the current -directory and turn it into a binary class file. In the simplest case, this is -all you need. - -However, as soon as code expands, the complications begin. `javac` is smart -enough to look in subdirectories of the current directory to find code to -import. But it has no way of finding code stored in _other parts_ of the -filesystem (perhaps a library shared by several projects). It also only knows -how to build Java code. Large systems often involve different pieces written in -a variety of programming languages with webs of dependencies among those pieces, -meaning no compiler for a single language can possibly build the entire system. - -Once you're dealing with code from multiple languages or multiple compilation -units, building code is no longer a one-step process. Now you must evaluate what -your code depends on and build those pieces in the proper order, possibly using -a different set of tools for each piece. If any dependencies change, you must -repeat this process to avoid depending on stale binaries. For a codebase of even -moderate size, this process quickly becomes tedious and error-prone. 
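-
-As a concrete sketch of the problem (the directory layout and module names here
-are hypothetical), even a two-module Java project already forces you to build in
-dependency order and wire up classpaths by hand:
-
-```posix-terminal
-javac -d out/lib $(find lib/src -name '*.java')
-
-javac -d out/app -cp out/lib $(find app/src -name '*.java')
-```
-
-Every additional module, language, or code generator adds more steps like these,
-and the knowledge of the correct order lives only in your head or in a script.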
- -The compiler also doesn’t know anything about how to handle external -dependencies, such as third-party `JAR` files in Java. Without a build system, -you could manage this by downloading the dependency from the internet, sticking -it in a `lib` folder on the hard drive, and configuring the compiler to read -libraries from that directory. Over time, it's difficult to maintain the -updates, versions, and source of these external dependencies. - -### What about shell scripts? - -Suppose that your hobby project starts out simple enough that you can build it -using just a compiler, but you begin running into some of the problems described -previously. Maybe you still don’t think you need a build system and can automate -away the tedious parts using some simple shell scripts that take care of -building things in the correct order. This helps out for a while, but pretty -soon you start running into even more problems: - -* It becomes tedious. As your system grows more complex, you begin spending - almost as much time working on your build scripts as on real code. Debugging - shell scripts is painful, with more and more hacks being layered on top of - one another. - -* It’s slow. To make sure you weren’t accidentally relying on stale libraries, - you have your build script build every dependency in order every time you - run it. You think about adding some logic to detect which parts need to be - rebuilt, but that sounds awfully complex and error prone for a script. Or - you think about specifying which parts need to be rebuilt each time, but - then you’re back to square one. - -* Good news: it’s time for a release! Better go figure out all the arguments - you need to pass to the jar command to make your final build. And remember - how to upload it and push it out to the central repository. And build and - push the documentation updates, and send out a notification to users. Hmm, - maybe this calls for another script... - -* Disaster! Your hard drive crashes, and now you need to recreate your entire - system. You were smart enough to keep all of your source files in version - control, but what about those libraries you downloaded? Can you find them - all again and make sure they were the same version as when you first - downloaded them? Your scripts probably depended on particular tools being - installed in particular places—can you restore that same environment so that - the scripts work again? What about all those environment variables you set a - long time ago to get the compiler working just right and then forgot about? - -* Despite the problems, your project is successful enough that you’re able to - begin hiring more engineers. Now you realize that it doesn’t take a disaster - for the previous problems to arise—you need to go through the same painful - bootstrapping process every time a new developer joins your team. And - despite your best efforts, there are still small differences in each - person’s system. Frequently, what works on one person’s machine doesn’t work - on another’s, and each time it takes a few hours of debugging tool paths or - library versions to figure out where the difference is. - -* You decide that you need to automate your build system. In theory, this is - as simple as getting a new computer and setting it up to run your build - script every night using cron. You still need to go through the painful - setup process, but now you don’t have the benefit of a human brain being - able to detect and resolve minor problems. 
Now, every morning when you get - in, you see that last night’s build failed because yesterday a developer - made a change that worked on their system but didn’t work on the automated - build system. Each time it’s a simple fix, but it happens so often that you - end up spending a lot of time each day discovering and applying these simple - fixes. - -* Builds become slower and slower as the project grows. One day, while waiting - for a build to complete, you gaze mournfully at the idle desktop of your - coworker, who is on vacation, and wish there were a way to take advantage of - all that wasted computational power. - -You’ve run into a classic problem of scale. For a single developer working on at -most a couple hundred lines of code for at most a week or two (which might have -been the entire experience thus far of a junior developer who just graduated -university), a compiler is all you need. Scripts can maybe take you a little bit -farther. But as soon as you need to coordinate across multiple developers and -their machines, even a perfect build script isn’t enough because it becomes very -difficult to account for the minor differences in those machines. At this point, -this simple approach breaks down and it’s time to invest in a real build system. diff --git a/7.6.1/basics/dependencies.mdx b/7.6.1/basics/dependencies.mdx deleted file mode 100644 index 165e1a7..0000000 --- a/7.6.1/basics/dependencies.mdx +++ /dev/null @@ -1,302 +0,0 @@ ---- -title: 'Dependency Management' ---- - - - -In looking through the previous pages, one theme repeats over and over: managing -your own code is fairly straightforward, but managing its dependencies is much -more difficult. There are all sorts of dependencies: sometimes there’s a -dependency on a task (such as “push the documentation before I mark a release as -complete”), and sometimes there’s a dependency on an artifact (such as “I need to -have the latest version of the computer vision library to build my code”). -Sometimes, you have internal dependencies on another part of your codebase, and -sometimes you have external dependencies on code or data owned by another team -(either in your organization or a third party). But in any case, the idea of “I -need that before I can have this” is something that recurs repeatedly in the -design of build systems, and managing dependencies is perhaps the most -fundamental job of a build system. - - -## Dealing with Modules and Dependencies - -Projects that use artifact-based build systems like Bazel are broken into a set -of modules, with modules expressing dependencies on one another via `BUILD` -files. Proper organization of these modules and dependencies can have a huge -effect on both the performance of the build system and how much work it takes to -maintain. - -## Using Fine-Grained Modules and the 1:1:1 Rule - -The first question that comes up when structuring an artifact-based build is -deciding how much functionality an individual module should encompass. In Bazel, -a _module_ is represented by a target specifying a buildable unit like a -`java_library` or a `go_binary`. At one extreme, the entire project could be -contained in a single module by putting one `BUILD` file at the root and -recursively globbing together all of that project’s source files. At the other -extreme, nearly every source file could be made into its own module, effectively -requiring each file to list in a `BUILD` file every other file it depends on. 
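-
-As a hypothetical illustration of the coarse extreme (all names here are
-invented), a single root `BUILD` file could glob the entire tree into one
-target:
-
-```python
-# One target for the whole project: easy to maintain, but the build system
-# can only schedule, cache, and test it as a single indivisible unit.
-java_library(
-    name = "everything",
-    srcs = glob(["src/**/*.java"]),
-)
-```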
-
-Most projects fall somewhere between these extremes, and the choice involves a
-trade-off between performance and maintainability. Using a single module for the
-entire project might mean that you never need to touch the `BUILD` file except
-when adding an external dependency, but it means that the build system must
-always build the entire project all at once. This means that it won't be able to
-parallelize or distribute parts of the build, nor will it be able to cache parts
-that it's already built. One-module-per-file is the opposite: the build system
-has the maximum flexibility in caching and scheduling steps of the build, but
-engineers need to expend more effort maintaining lists of dependencies whenever
-they change which files reference which.
-
-Though the exact granularity varies by language (and often even within a
-language), Google tends to favor significantly smaller modules than one might
-typically write in a task-based build system. A typical production binary at
-Google often depends on tens of thousands of targets, and even a moderate-sized
-team can own several hundred targets within its codebase. For languages like
-Java that have a strong built-in notion of packaging, each directory usually
-contains a single package, target, and `BUILD` file (Pants, another build system
-based on Blaze, calls this the 1:1:1 rule). Languages with weaker packaging
-conventions frequently define multiple targets per `BUILD` file.
-
-The benefits of smaller build targets really begin to show at scale because they
-lead to faster distributed builds and a less frequent need to rebuild targets.
-The advantages become even more compelling after testing enters the picture, as
-finer-grained targets mean that the build system can be much smarter about
-running only a limited subset of tests that could be affected by any given
-change. Because Google believes in the systemic benefits of using smaller
-targets, we've made some strides in mitigating the downside by investing in
-tooling to automatically manage `BUILD` files to avoid burdening developers.
-
-Some of these tools, such as `buildifier` and `buildozer`, are available with
-Bazel in the
-[`buildtools` directory](https://github.com/bazelbuild/buildtools).
-
-
-## Minimizing Module Visibility
-
-Bazel and other build systems allow each target to specify a visibility — a
-property that determines which other targets may depend on it. A private target
-can only be referenced within its own `BUILD` file. A target may grant broader
-visibility to the targets of an explicitly defined list of `BUILD` files, or, in
-the case of public visibility, to every target in the workspace.
-
-As with most programming languages, it is usually best to minimize visibility as
-much as possible. Generally, teams at Google will make targets public only if
-those targets represent widely used libraries available to any team at Google.
-Teams that require others to coordinate with them before using their code will
-maintain an allowlist of customer targets as their target's visibility. Each
-team's internal implementation targets will be restricted to only directories
-owned by the team, and most `BUILD` files will have only one target that isn't
-private.
-
-## Managing Dependencies
-
-Modules need to be able to refer to one another. The downside of breaking a
-codebase into fine-grained modules is that you need to manage the dependencies
-among those modules (though tools can help automate this).
-
-## Managing Dependencies
-
-Modules need to be able to refer to one another. The downside of breaking a
-codebase into fine-grained modules is that you need to manage the dependencies
-among those modules (though tools can help automate this). Expressing these
-dependencies usually ends up being the bulk of the content in a `BUILD` file.
-
-### Internal dependencies
-
-In a large project broken into fine-grained modules, most dependencies are
-likely to be internal; that is, on another target defined and built in the same
-source repository. Internal dependencies differ from external dependencies in
-that they are built from source rather than downloaded as a prebuilt artifact
-while running the build. This also means that there’s no notion of “version” for
-internal dependencies—a target and all of its internal dependencies are always
-built at the same commit/revision in the repository. One issue that should be
-handled carefully with regard to internal dependencies is how to treat
-transitive dependencies (Figure 1). Suppose target A depends on target B, which
-depends on a common library target C. Should target A be able to use classes
-defined in target C?
-
-[![Transitive dependencies](/images/transitive-dependencies.png)](/images/transitive-dependencies.png)
-
-**Figure 1**. Transitive dependencies
-
-As far as the underlying tools are concerned, there’s no problem with this; both
-B and C will be linked into target A when it is built, so any symbols defined in
-C are known to A. Bazel allowed this for many years, but as Google grew, we
-began to see problems. Suppose that B was refactored such that it no longer
-needed to depend on C. If B’s dependency on C was then removed, A and any other
-target that used C via a dependency on B would break. Effectively, a target’s
-dependencies became part of its public contract and could never be safely
-changed. This meant that dependencies accumulated over time and builds at Google
-started to slow down.
-
-Google eventually solved this issue by introducing a “strict transitive
-dependency mode” in Bazel. In this mode, Bazel detects whether a target tries to
-reference a symbol without depending on it directly and, if so, fails with an
-error and a shell command that can be used to automatically insert the
-dependency. Rolling this change out across Google’s entire codebase and
-refactoring every one of our millions of build targets to explicitly list their
-dependencies was a multiyear effort, but it was well worth it. Our builds are
-now much faster given that targets have fewer unnecessary dependencies, and
-engineers are empowered to remove dependencies they don’t need without worrying
-about breaking targets that depend on them.
-
-As usual, enforcing strict transitive dependencies involved a trade-off. It made
-build files more verbose, as frequently used libraries now need to be listed
-explicitly in many places rather than pulled in incidentally, and engineers
-needed to spend more effort adding dependencies to `BUILD` files. We’ve since
-developed tools that reduce this toil by automatically detecting many missing
-dependencies and adding them to `BUILD` files without any developer
-intervention. But even without such tools, we’ve found the trade-off to be well
-worth it as the codebase scales: explicitly adding a dependency to a `BUILD`
-file is a one-time cost, but dealing with implicit transitive dependencies can
-cause ongoing problems as long as the build target exists. Bazel
-[enforces strict transitive dependencies](https://blog.bazel.build/2017/06/28/sjd-unused_deps.html)
-on Java code by default.
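-
-As an illustration (the targets and files here are hypothetical), strict
-transitive dependencies mean that using a symbol requires declaring a direct
-edge to the target that defines it:
-
-```python
-java_library(name = "c", srcs = ["C.java"])
-
-java_library(
-    name = "b",
-    srcs = ["B.java"],
-    deps = [":c"],
-)
-
-java_library(
-    name = "a",
-    srcs = ["A.java"],  # A.java uses classes from both B and C.
-    deps = [
-        ":b",
-        ":c",  # Required under strict deps, even though :b already depends on :c.
-    ],
-)
-```
-
-If `:c` were missing from `:a`'s `deps`, a strict-deps build would fail with an
-error naming the missing dependency, and `:b` could later drop its own
-dependency on `:c` without breaking `:a`.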
-
-### External dependencies
-
-If a dependency isn’t internal, it must be external. External dependencies are
-those on artifacts that are built and stored outside of the build system. The
-dependency is imported directly from an artifact repository (typically accessed
-over the internet) and used as-is rather than being built from source. One of
-the biggest differences between external and internal dependencies is that
-external dependencies have versions, and those versions exist independently of
-the project’s source code.
-
-### Automatic versus manual dependency management
-
-Build systems can allow the versions of external dependencies to be managed
-either manually or automatically. When managed manually, the buildfile
-explicitly lists the version it wants to download from the artifact repository,
-often using a [semantic version string](https://semver.org/) such
-as `1.1.4`. When managed automatically, the buildfile specifies a range of
-acceptable versions, and the build system always downloads the latest one. For
-example, Gradle allows a dependency version to be declared as “1.+” to specify
-that any minor or patch version of a dependency is acceptable so long as the
-major version is 1.
-
-Automatically managed dependencies can be convenient for small projects, but
-they’re usually a recipe for disaster on projects of nontrivial size or that are
-being worked on by more than one engineer. The problem with automatically
-managed dependencies is that you have no control over when the version is
-updated. There’s no way to guarantee that external parties won’t make breaking
-updates (even when they claim to use semantic versioning), so a build that
-worked one day might be broken the next with no easy way to detect what changed
-or to roll it back to a working state. Even if the build doesn’t break, there
-can be subtle behavior or performance changes that are impossible to track down.
-
-In contrast, because manually managed dependencies require a change in source
-control, they can be easily discovered and rolled back, and it’s possible to
-check out an older version of the repository to build with older dependencies.
-Bazel requires that versions of all dependencies be specified manually. At even
-moderate scales, the overhead of manual version management is well worth it for
-the stability it provides.
-
-### The One-Version Rule
-
-Different versions of a library are usually represented by different artifacts,
-so in theory there’s no reason that different versions of the same external
-dependency couldn’t both be declared in the build system under different names.
-That way, each target could choose which version of the dependency it wanted to
-use. This causes a lot of problems in practice, so Google enforces a strict
-[One-Version Rule](https://opensource.google/docs/thirdparty/oneversion/)
-for all third-party dependencies in our codebase.
-
-The biggest problem with allowing multiple versions is the diamond dependency
-issue. Suppose that target A depends on target B and on v1 of an external
-library. If target B is later refactored to add a dependency on v2 of the same
-external library, target A will break because it now depends implicitly on two
-different versions of the same library. Effectively, it’s never safe to add a
-new dependency from a target to any third-party library with multiple versions,
-because any of that target’s users could already be depending on a different
-version. Following the One-Version Rule makes this conflict impossible—if a
-target adds a dependency on a third-party library, any existing dependencies
-will already be on that same version, so they can happily coexist.
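-
-As a sketch (the repository and target names are invented, with the canonical
-label written in the style of tools like `rules_jvm_external`), the rule
-replaces per-target version choices with a single label per library:
-
-```python
-# Disallowed under the One-Version Rule: two targets in the same build
-# depending on two different versions of the same library.
-#   deps = ["@guava_v20//:guava"]  # used by target B
-#   deps = ["@guava_v25//:guava"]  # used by target A -> diamond dependency
-
-# Allowed: every target that needs the library depends on one canonical
-# target, pinned to a single version for the entire repository.
-java_library(
-    name = "a",
-    srcs = ["A.java"],
-    deps = [
-        ":b",
-        "@maven//:com_google_guava_guava",
-    ],
-)
-```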
-
-### Transitive external dependencies
-
-Dealing with the transitive dependencies of an external dependency can be
-particularly difficult. Many artifact repositories, such as Maven Central, allow
-artifacts to specify dependencies on particular versions of other artifacts in
-the repository. Build tools like Maven or Gradle often recursively download each
-transitive dependency by default, meaning that adding a single dependency in
-your project could potentially cause dozens of artifacts to be downloaded in
-total.
-
-This is very convenient: when adding a dependency on a new library, it would be
-a big pain to have to track down each of that library’s transitive dependencies
-and add them all manually. But there’s also a huge downside: because different
-libraries can depend on different versions of the same third-party library, this
-strategy necessarily violates the One-Version Rule and leads to the diamond
-dependency problem. If your target depends on two external libraries that use
-different versions of the same dependency, there’s no telling which one you’ll
-get. This also means that updating an external dependency could cause seemingly
-unrelated failures throughout the codebase if the new version begins pulling in
-conflicting versions of some of its dependencies.
-
-For this reason, Bazel does not automatically download transitive dependencies.
-And, unfortunately, there’s no silver bullet—Bazel’s alternative is to require a
-global file that lists every single one of the repository’s external
-dependencies and an explicit version used for that dependency throughout the
-repository. Fortunately, Bazel provides tools that can automatically generate
-such a file containing the transitive dependencies of a set of Maven
-artifacts. Such a tool can be run once to generate the initial `WORKSPACE` file
-for a project, and that file can then be manually updated to adjust the versions
-of each dependency.
-
-Yet again, the choice here is one between convenience and scalability. Small
-projects might prefer not having to worry about managing transitive dependencies
-themselves and might be able to get away with using automatic transitive
-dependencies. This strategy becomes less and less appealing as the organization
-and codebase grows, and conflicts and unexpected results become more and more
-frequent. At larger scales, the cost of manually managing dependencies is much
-less than the cost of dealing with issues caused by automatic dependency
-management.
-
-### Caching build results using external dependencies
-
-External dependencies are most often provided by third parties that release
-stable versions of libraries, perhaps without providing source code. Some
-organizations might also choose to make some of their own code available as
-artifacts, allowing other pieces of code to depend on them as third-party rather
-than internal dependencies. This can theoretically speed up builds if artifacts
-are slow to build but quick to download.
-
-However, this also introduces a lot of overhead and complexity: someone needs to
-be responsible for building each of those artifacts and uploading them to the
-artifact repository, and clients need to ensure that they stay up to date with
-the latest version.
Debugging also becomes much more difficult because different -parts of the system will have been built from different points in the -repository, and there is no longer a consistent view of the source tree. - -A better way to solve the problem of artifacts taking a long time to build is to -use a build system that supports remote caching, as described earlier. Such a -build system saves the resulting artifacts from every build to a location -that is shared across engineers, so if a developer depends on an artifact that -was recently built by someone else, the build system automatically downloads -it instead of building it. This provides all of the performance benefits of -depending directly on artifacts while still ensuring that builds are as -consistent as if they were always built from the same source. This is the -strategy used internally by Google, and Bazel can be configured to use a remote -cache. - -### Security and reliability of external dependencies - -Depending on artifacts from third-party sources is inherently risky. There’s an -availability risk if the third-party source (such as an artifact repository) goes -down, because your entire build might grind to a halt if it’s unable to download -an external dependency. There’s also a security risk: if the third-party system -is compromised by an attacker, the attacker could replace the referenced -artifact with one of their own design, allowing them to inject arbitrary code -into your build. Both problems can be mitigated by mirroring any artifacts you -depend on onto servers you control and blocking your build system from accessing -third-party artifact repositories like Maven Central. The trade-off is that -these mirrors take effort and resources to maintain, so the choice of whether to -use them often depends on the scale of the project. The security issue can also -be completely prevented with little overhead by requiring the hash of each -third-party artifact to be specified in the source repository, causing the build -to fail if the artifact is tampered with. Another alternative that completely -sidesteps the issue is to vendor your project’s dependencies. When a project -vendors its dependencies, it checks them into source control alongside the -project’s source code, either as source or as binaries. This effectively means -that all of the project’s external dependencies are converted to internal -dependencies. Google uses this approach internally, checking every third-party -library referenced throughout Google into a `third_party` directory at the root -of Google’s source tree. However, this works at Google only because Google’s -source control system is custom built to handle an extremely large monorepo, so -vendoring might not be an option for all organizations. diff --git a/7.6.1/basics/distributed-builds.mdx b/7.6.1/basics/distributed-builds.mdx deleted file mode 100644 index c32f44f..0000000 --- a/7.6.1/basics/distributed-builds.mdx +++ /dev/null @@ -1,137 +0,0 @@ ---- -title: 'Distributed Builds' ---- - - - -When you have a large codebase, chains of dependencies can become very deep. -Even simple binaries can often depend on tens of thousands of build targets. At -this scale, it’s simply impossible to complete a build in a reasonable amount -of time on a single machine: no build system can get around the fundamental -laws of physics imposed on a machine’s hardware. 
The only way to make this work
-is with a build system that supports distributed builds wherein the units of
-work being done by the system are spread across an arbitrary and scalable
-number of machines. Assuming we’ve broken the system’s work into small enough
-units (more on this later), this would allow us to complete any build of any
-size as quickly as we’re willing to pay for. This scalability is the holy grail
-we’ve been working toward by defining an artifact-based build system.
-
-## Remote caching
-
-The simplest type of distributed build is one that only leverages _remote
-caching_, which is shown in Figure 1.
-
-[![Distributed build with remote caching](/images/distributed-build-remote-cache.png)](/images/distributed-build-remote-cache.png)
-
-**Figure 1**. A distributed build showing remote caching
-
-Every system that performs builds, including both developer workstations and
-continuous integration systems, shares a reference to a common remote cache
-service. This service might be a fast and local short-term storage system like
-Redis or a cloud service like Google Cloud Storage. Whenever a user needs to
-build an artifact, whether directly or as a dependency, the system first checks
-with the remote cache to see if that artifact already exists there. If so, it
-can download the artifact instead of building it. If not, the system builds the
-artifact itself and uploads the result back to the cache. This means that
-low-level dependencies that don’t change very often can be built once and shared
-across users rather than having to be rebuilt by each user. At Google, many
-artifacts are served from a cache rather than built from scratch, vastly
-reducing the cost of running our build system.
-
-For a remote caching system to work, the build system must guarantee that builds
-are completely reproducible. That is, for any build target, it must be possible
-to determine the set of inputs to that target such that the same set of inputs
-will produce exactly the same output on any machine. This is the only way to
-ensure that the results of downloading an artifact are the same as the results
-of building it oneself. Note that this requires that each artifact in the cache
-be keyed on both its target and a hash of its inputs—that way, different
-engineers could make different modifications to the same target at the same
-time, and the remote cache would store all of the resulting artifacts and serve
-them appropriately without conflict.
-
-Of course, for there to be any benefit from a remote cache, downloading an
-artifact needs to be faster than building it. This is not always the case,
-especially if the cache server is far from the machine doing the build. Google’s
-network and build system are carefully tuned to be able to quickly share build
-results.
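-
-A toy sketch of this keying scheme (plain Python with invented names, not
-Bazel's actual cache protocol):
-
-```python
-import hashlib
-
-def cache_key(target_label, input_digests):
-    """Key a cached artifact on its target plus a hash of all of its inputs."""
-    h = hashlib.sha256()
-    h.update(target_label.encode())
-    for digest in sorted(input_digests):  # sorted so ordering cannot change the key
-        h.update(digest.encode())
-    return h.hexdigest()
-
-# Identical inputs produce identical keys, so one engineer's build can reuse
-# another's artifact; any change to any input yields a distinct key, so
-# concurrent modifications to the same target never collide in the cache.
-key = cache_key("//chat:direct_message", ["9f2a...", "77fe..."])
-```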
-
-## Remote execution
-
-Remote caching isn’t a true distributed build. If the cache is lost or if you
-make a low-level change that requires everything to be rebuilt, you still need
-to perform the entire build locally on your machine. The true goal is to support
-remote execution, in which the actual work of doing the build can be spread
-across any number of workers. Figure 2 depicts a remote execution system.
-
-[![Remote execution system](/images/remote-execution-system.png)](/images/remote-execution-system.png)
-
-**Figure 2**. A remote execution system
-
-The build tool running on each user’s machine (where users are either human
-engineers or automated build systems) sends requests to a central build master.
-The build master breaks the requests into their component actions and schedules
-the execution of those actions over a scalable pool of workers. Each worker
-performs the actions asked of it with the inputs specified by the user and
-writes out the resulting artifacts. These artifacts are shared across the other
-machines executing actions that require them until the final output can be
-produced and sent to the user.
-
-The trickiest part of implementing such a system is managing the communication
-between the workers, the master, and the user’s local machine. Workers might
-depend on intermediate artifacts produced by other workers, and the final output
-needs to be sent back to the user’s local machine. To do this, we can build on
-top of the distributed cache described previously by having each worker write
-its results to and read its dependencies from the cache. The master blocks
-workers from proceeding until everything they depend on has finished, at which
-point they’ll be able to read their inputs from the cache. The final product is
-also cached, allowing the local machine to download it. Note that we also need a
-separate means of exporting the local changes in the user’s source tree so that
-workers can apply those changes before building.
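-
-A greatly simplified sketch of the worker side of this protocol (Python
-pseudocode with invented names; a real system adds retries, timeouts, and
-authentication):
-
-```python
-def worker_loop(queue, cache):
-    while True:
-        action = queue.take()  # an action scheduled for us by the build master
-        # Block until every input this action needs has been written to the
-        # shared cache, either by an upstream worker or by the user's upload.
-        inputs = {path: cache.wait_for(digest)
-                  for path, digest in action.input_digests.items()}
-        outputs = action.run(inputs)
-        for path, blob in outputs.items():
-            # Downstream actions, and ultimately the user, read from here.
-            cache.put(path, blob)
-```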
-
-For this to work, all of the parts of the artifact-based build systems described
-earlier need to come together. Build environments must be completely
-self-describing so that we can spin up workers without human intervention. Build
-processes themselves must be completely self-contained because each step might
-be executed on a different machine. Outputs must be completely deterministic so
-that each worker can trust the results it receives from other workers. Such
-guarantees are extremely difficult for a task-based system to provide, which
-makes it nigh-impossible to build a reliable remote execution system on top of
-one.
-
-## Distributed builds at Google
-
-Since 2008, Google has been using a distributed build system that employs both
-remote caching and remote execution, which is illustrated in Figure 3.
-
-[![High-level build system](/images/high-level-build-system.png)](/images/high-level-build-system.png)
-
-**Figure 3**. Google’s distributed build system
-
-Google’s remote cache is called ObjFS. It consists of a backend that stores
-build outputs in Bigtables distributed throughout our fleet of production
-machines and a frontend FUSE daemon named objfsd that runs on each developer’s
-machine. The FUSE daemon allows engineers to browse build outputs as if they
-were normal files stored on the workstation, but with the file content
-downloaded on-demand only for the few files that are directly requested by the
-user. Serving file contents on-demand greatly reduces both network and disk
-usage, and the system is able to build twice as fast compared to when we stored
-all build output on the developer’s local disk.
-
-Google’s remote execution system is called Forge. A Forge client in Blaze
-(Bazel's internal equivalent) called
-the Distributor sends requests for each action to a job running in our
-datacenters called the Scheduler. The Scheduler maintains a cache of action
-results, allowing it to return a response immediately if the action has already
-been created by any other user of the system. If not, it places the action into
-a queue. A large pool of Executor jobs continually read actions from this queue,
-execute them, and store the results directly in the ObjFS Bigtables. These
-results are available to the executors for future actions, or to be downloaded
-by the end user via objfsd.
-
-The end result is a system that scales to efficiently support all builds
-performed at Google. And the scale of Google’s builds is truly massive: Google
-runs millions of builds executing millions of test cases and producing petabytes
-of build outputs from billions of lines of source code every day. Not only does
-such a system let our engineers build complex codebases quickly, it also allows
-us to implement a huge number of automated tools and systems that rely on our
-build.
diff --git a/7.6.1/basics/hermeticity.mdx b/7.6.1/basics/hermeticity.mdx
deleted file mode 100644
index 282aad8..0000000
--- a/7.6.1/basics/hermeticity.mdx
+++ /dev/null
@@ -1,109 +0,0 @@
----
-title: 'Hermeticity'
----
-
-
-
-This page covers hermeticity, the benefits of using hermetic builds, and
-strategies for identifying non-hermetic behavior in your builds.
-
-## Overview
-
-When given the same input source code and product configuration, a hermetic
-build system always returns the same output by isolating the build from changes
-to the host system.
-
-In order to isolate the build, hermetic builds are insensitive to libraries and
-other software installed on the local or remote host machine. They depend on
-specific versions of build tools, such as compilers, and dependencies, such as
-libraries. This makes the build process self-contained as it doesn't rely on
-services external to the build environment.
-
-The two important aspects of hermeticity are:
-
-* **Isolation**: Hermetic build systems treat tools as source code. They
-  download copies of tools and manage their storage and use inside managed file
-  trees. This creates isolation between the host machine and local user,
-  including installed versions of languages.
-* **Source identity**: Hermetic build systems try to ensure the sameness of
-  inputs. Code repositories, such as Git, identify sets of code mutations with a
-  unique hash code. Hermetic build systems use this hash to identify changes to
-  the build's input.
-
-## Benefits
-
-The major benefits of hermetic builds are:
-
-* **Speed**: The output of an action can be cached, and the action need not be
-  run again unless inputs change.
-* **Parallel execution**: For given input and output, the build system can
-  construct a graph of all actions to calculate efficient and parallel
-  execution. The build system loads the rules, calculates an action graph,
-  and hashes inputs to look up in the cache.
-* **Multiple builds**: You can build multiple hermetic builds on the same
-  machine, each build using different tools and versions.
-* **Reproducibility**: Hermetic builds are good for troubleshooting because you
-  know the exact conditions that produced the build.
-
-## Identifying non-hermeticity
-
-If you are preparing to switch to Bazel, migration is easier if you improve
-your existing builds' hermeticity in advance. Some common sources of
-non-hermeticity in builds are:
-
-* Arbitrary processing in `.mk` files
-* Actions or tooling that create files non-deterministically, usually involving
-  build IDs or timestamps
-* System binaries that differ across hosts (such as `/usr/bin` binaries, absolute
-  paths, system C++ compilers for native C++ rules autoconfiguration)
-* Writing to the source tree during the build. This prevents the same source
-  tree from being used for another target. The first build writes to the source
-  tree, fixing the source tree for target A. Then trying to build target B may
-  fail.
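-
-As a small illustration (a hypothetical rule, not taken from a real build), an
-action like the following is non-hermetic because its output embeds a
-timestamp and therefore differs on every run:
-
-```python
-genrule(
-    name = "stamped",
-    outs = ["build_info.txt"],
-    # `date` makes the output non-reproducible: same inputs, different bytes.
-    cmd = "echo \"built at $$(date)\" > $@",
-)
-```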
-
-## Troubleshooting non-hermetic builds
-
-Starting with local execution, issues that affect local cache hits reveal
-non-hermetic actions.
-
-* Ensure null sequential builds: If you run `make` and get a successful build,
-  running the build again should not rebuild any targets. If you run each build
-  step twice or on different systems and a hash of the file contents differs
-  between runs, the build is not reproducible.
-* Run steps to
-  [debug local cache hits](/remote/cache-remote#troubleshooting-cache-hits)
-  from a variety of potential client machines to ensure that you catch any
-  cases of client environment leaking into the actions.
-* Execute a build within a Docker container that contains nothing but the
-  checked-out source tree and explicit list of host tools. Build breakages and
-  error messages will catch implicit system dependencies.
-* Discover and fix hermeticity problems using
-  [remote execution rules](/remote/rules#overview).
-* Enable strict [sandboxing](/docs/sandboxing)
-  at the per-action level, since actions in a build can be stateful and affect
-  the build or the output.
-* [Workspace rules](/remote/workspace)
-  allow developers to add dependencies to external workspaces, but they are
-  rich enough to allow arbitrary processing to happen in the process. You can
-  get a log of some potentially non-hermetic actions in Bazel workspace rules by
-  adding the flag
-  `--experimental_workspace_rules_log_file=PATH` to
-  your Bazel command.
-
-Note: Make your build fully hermetic when mixing remote and local execution,
-using Bazel’s “dynamic strategy” functionality. Running Bazel inside the remote
-Docker container will enable the build to execute the same in both environments.
-
-## Hermeticity with Bazel
-
-For more information about how other projects have had success using hermetic
-builds with Bazel, see these BazelCon talks:
-
-* [Building Real-time Systems with Bazel](https://www.youtube.com/watch?v=t_3bckhV_YI) (SpaceX)
-* [Bazel Remote Execution and Remote Caching](https://www.youtube.com/watch?v=_bPyEbAyC0s) (Uber and TwoSigma)
-* [Faster Builds With Remote Execution and Caching](https://www.youtube.com/watch?v=MyuJRUwT5LI)
-* [Fusing Bazel: Faster Incremental Builds](https://www.youtube.com/watch?v=rQd9Zd1ONOw)
-* [Remote Execution vs Local Execution](https://www.youtube.com/watch?v=C8wHmIln--g)
-* [Improving the Usability of Remote Caching](https://www.youtube.com/watch?v=u5m7V3ZRHLA) (IBM)
-* [Building Self Driving Cars with Bazel](https://www.youtube.com/watch?v=Gh4SJuYUoQI&list=PLxNYxgaZ8Rsf-7g43Z8LyXct9ax6egdSj&index=4&t=0s) (BMW)
-* [Building Self Driving Cars with Bazel + Q&A](https://www.youtube.com/watch?v=fjfFe98LTm8&list=PLxNYxgaZ8Rsf-7g43Z8LyXct9ax6egdSj&index=29) (GM Cruise)
diff --git a/7.6.1/basics/index.mdx b/7.6.1/basics/index.mdx
deleted file mode 100644
index f3c833f..0000000
--- a/7.6.1/basics/index.mdx
+++ /dev/null
@@ -1,57 +0,0 @@
----
-title: 'Build Basics'
----
-
-
-
-A build system is one of the most important parts of an engineering organization
-because each developer interacts with it potentially dozens or hundreds of times
-per day. A fully featured build system is necessary to enable developer
-productivity as an organization scales. For individual developers, it's
-straightforward to just compile your code, and so a build system might seem
-excessive.
But at a larger scale, having a build system helps with managing
-shared dependencies, such as relying on another part of the code base, or an
-external resource, such as a library. Build systems help to make sure that you
-have everything you need to build your code before it starts building. Build
-systems also increase velocity when they're set up to help engineers share
-resources and results.
-
-This section covers some history and basics of building and build systems,
-including design decisions that went into making Bazel. If you're
-familiar with artifact-based build systems, such as Bazel, Buck, and Pants, you
-can skip this section, but it's a helpful overview to understand why
-artifact-based build systems are excellent at enabling scale.
-
-Note: Much of this section's content comes from the _Build Systems and
-Build Philosophy_ chapter of the
-[_Software Engineering at Google_ book](https://abseil.io/resources/swe-book/html/ch18.html).
-Thank you to the original author, Erik Kuefler, for allowing its reuse and
-modification here!
-
-* **[Why a Build System?](/basics/build-systems)**
-
-  If you haven't used a build system before, start here. This page covers why
-  you should use a build system, and why compilers and build scripts aren't
-  the best choice once your organization starts to scale beyond a few
-  developers.
-
-* **[Task-Based Build Systems](/basics/task-based-builds)**
-
-  This page discusses task-based build systems (such as Make, Maven, and
-  Gradle) and some of their challenges.
-
-* **[Artifact-Based Build Systems](/basics/artifact-based-builds)**
-
-  This page discusses artifact-based build systems in response to the pain
-  points of task-based build systems.
-
-* **[Distributed Builds](/basics/distributed-builds)**
-
-  This page covers distributed builds, or builds that are executed outside of
-  your local machine. This requires more robust infrastructure to share
-  resources and build results (and is where the true wizardry happens!)
-
-* **[Dependency Management](/basics/dependencies)**
-
-  This page covers some complications of dependencies at a large scale and
-  strategies to counteract those complications.
diff --git a/7.6.1/basics/task-based-builds.mdx b/7.6.1/basics/task-based-builds.mdx
deleted file mode 100644
index 9dd3f8c..0000000
--- a/7.6.1/basics/task-based-builds.mdx
+++ /dev/null
@@ -1,216 +0,0 @@
----
-title: 'Task-Based Build Systems'
----
-
-
-
-This page covers task-based build systems, how they work, and some of the
-complications that can occur with task-based systems. After shell scripts,
-task-based build systems are the next logical evolution of building.
-
-
-## Understanding task-based build systems
-
-In a task-based build system, the fundamental unit of work is the task. Each
-task is a script that can execute any sort of logic, and tasks specify other
-tasks as dependencies that must run before them. Most major build systems in use
-today, such as Ant, Maven, Gradle, Grunt, and Rake, are task based. Instead of
-shell scripts, most modern build systems require engineers to create build files
-that describe how to perform the build.
-
-Take this example from the
-[Ant manual](https://ant.apache.org/manual/using.html):
-
-```xml
-<project name="MyProject" default="dist" basedir=".">
-  <description>
-    simple example build file
-  </description>
-
-  <!-- set global properties for this build -->
-  <property name="src" location="src"/>
-  <property name="build" location="build"/>
-  <property name="dist" location="dist"/>
-
-  <target name="init">
-    <!-- Create the time stamp -->
-    <tstamp/>
-    <!-- Create the build directory structure used by compile -->
-    <mkdir dir="${build}"/>
-  </target>
-
-  <target name="compile" depends="init" description="compile the source">
-    <!-- Compile the Java code from ${src} into ${build} -->
-    <javac srcdir="${src}" destdir="${build}"/>
-  </target>
-
-  <target name="dist" depends="compile" description="generate the distribution">
-    <!-- Create the distribution directory -->
-    <mkdir dir="${dist}/lib"/>
-    <!-- Put everything in ${build} into the MyProject-${DSTAMP}.jar file -->
-    <jar jarfile="${dist}/lib/MyProject-${DSTAMP}.jar" basedir="${build}"/>
-  </target>
-
-  <target name="clean" description="clean up">
-    <!-- Delete the ${build} and ${dist} directory trees -->
-    <delete dir="${build}"/>
-    <delete dir="${dist}"/>
-  </target>
-</project>
-```
-
-The buildfile is written in XML and defines some simple metadata about the build
-along with a list of tasks (the `<target>` tags in the XML).
-(Ant uses the word _target_ to represent a _task_, and it uses the word
-_task_ to refer to _commands_.) Each task executes a list of possible commands
-defined by Ant, which here include creating and deleting directories, running
-`javac`, and creating a JAR file. This set of commands can be extended by
-user-provided plug-ins to cover any sort of logic. Each task can also define the
-tasks it depends on via the depends attribute. These dependencies form an
-acyclic graph, as seen in Figure 1.
-
-[![Acyclic graph showing dependencies](/images/task-dependencies.png)](/images/task-dependencies.png)
-
-Figure 1. An acyclic graph showing dependencies
-
-Users perform builds by providing tasks to Ant’s command-line tool. For example,
-when a user types `ant dist`, Ant takes the following steps:
-
-1. Loads a file named `build.xml` in the current directory and parses it to
-   create the graph structure shown in Figure 1.
-1. Looks for the task named `dist` that was provided on the command line and
-   discovers that it has a dependency on the task named `compile`.
-1. Looks for the task named `compile` and discovers that it has a dependency on
-   the task named `init`.
-1. Looks for the task named `init` and discovers that it has no dependencies.
-1. Executes the commands defined in the `init` task.
-1. Executes the commands defined in the `compile` task given that all of that
-   task’s dependencies have been run.
-1. Executes the commands defined in the `dist` task given that all of that
-   task’s dependencies have been run.
-
-In the end, the code executed by Ant when running the `dist` task is equivalent
-to the following shell script:
-
-```posix-terminal
-./createTimestamp.sh
-
-mkdir build/
-
-javac src/* -d build/
-
-mkdir -p dist/lib/
-
-jar cf dist/lib/MyProject-$(date --iso-8601).jar build/*
-```
-
-When the syntax is stripped away, the buildfile and the build script actually
-aren’t too different. But we’ve already gained a lot by doing this. We can
-create new buildfiles in other directories and link them together. We can easily
-add new tasks that depend on existing tasks in arbitrary and complex ways. We
-need only pass the name of a single task to the `ant` command-line tool, and it
-determines everything that needs to be run.
-
-Ant is an old piece of software, originally released in 2000. Other tools like
-Maven and Gradle have improved on Ant in the intervening years and essentially
-replaced it by adding features like automatic management of external
-dependencies and a cleaner syntax without any XML. But the nature of these newer
-systems remains the same: they allow engineers to write build scripts in a
-principled and modular way as tasks and provide tools for executing those tasks
-and managing dependencies among them.
-
-## The dark side of task-based build systems
-
-Because these tools essentially let engineers define any script as a task, they
-are extremely powerful, allowing you to do pretty much anything you can imagine
-with them. But that power comes with drawbacks, and task-based build systems can
-become difficult to work with as their build scripts grow more complex. The
-problem with such systems is that they actually end up giving _too much power to
-engineers and not enough power to the system_. Because the system has no idea
-what the scripts are doing, performance suffers, as it must be very conservative
-in how it schedules and executes build steps.
And there’s no way for the system -to confirm that each script is doing what it should, so scripts tend to grow in -complexity and end up being another thing that needs debugging. - -### Difficulty of parallelizing build steps - -Modern development workstations are quite powerful, with multiple cores that are -capable of executing several build steps in parallel. But task-based systems are -often unable to parallelize task execution even when it seems like they should -be able to. Suppose that task A depends on tasks B and C. Because tasks B and C -have no dependency on each other, is it safe to run them at the same time so -that the system can more quickly get to task A? Maybe, if they don’t touch any -of the same resources. But maybe not—perhaps both use the same file to track -their statuses and running them at the same time causes a conflict. There’s no -way in general for the system to know, so either it has to risk these conflicts -(leading to rare but very difficult-to-debug build problems), or it has to -restrict the entire build to running on a single thread in a single process. -This can be a huge waste of a powerful developer machine, and it completely -rules out the possibility of distributing the build across multiple machines. - -### Difficulty performing incremental builds - -A good build system allows engineers to perform reliable incremental builds such -that a small change doesn’t require the entire codebase to be rebuilt from -scratch. This is especially important if the build system is slow and unable to -parallelize build steps for the aforementioned reasons. But unfortunately, -task-based build systems struggle here, too. Because tasks can do anything, -there’s no way in general to check whether they’ve already been done. Many tasks -simply take a set of source files and run a compiler to create a set of -binaries; thus, they don’t need to be rerun if the underlying source files -haven’t changed. But without additional information, the system can’t say this -for sure—maybe the task downloads a file that could have changed, or maybe it -writes a timestamp that could be different on each run. To guarantee -correctness, the system typically must rerun every task during each build. Some -build systems try to enable incremental builds by letting engineers specify the -conditions under which a task needs to be rerun. Sometimes this is feasible, but -often it’s a much trickier problem than it appears. For example, in languages -like C++ that allow files to be included directly by other files, it’s -impossible to determine the entire set of files that must be watched for changes -without parsing the input sources. Engineers often end up taking shortcuts, and -these shortcuts can lead to rare and frustrating problems where a task result is -reused even when it shouldn’t be. When this happens frequently, engineers get -into the habit of running clean before every build to get a fresh state, -completely defeating the purpose of having an incremental build in the first -place. Figuring out when a task needs to be rerun is surprisingly subtle, and is -a job better handled by machines than humans. - -### Difficulty maintaining and debugging scripts - -Finally, the build scripts imposed by task-based build systems are often just -difficult to work with. Though they often receive less scrutiny, build scripts -are code just like the system being built, and are easy places for bugs to hide. 
-Here are some examples of bugs that are very common when working with a
-task-based build system:
-
-* Task A depends on task B to produce a particular file as output. The owner
-  of task B doesn’t realize that other tasks rely on it, so they change it to
-  produce output in a different location. This can’t be detected until someone
-  tries to run task A and finds that it fails.
-* Task A depends on task B, which depends on task C, which is producing a
-  particular file as output that’s needed by task A. The owner of task B
-  decides that it doesn’t need to depend on task C any more, which causes task
-  A to fail even though task B doesn’t care about task C at all!
-* The developer of a new task accidentally makes an assumption about the
-  machine running the task, such as the location of a tool or the value of
-  particular environment variables. The task works on their machine, but fails
-  whenever another developer tries it.
-* A task contains a nondeterministic component, such as downloading a file
-  from the internet or adding a timestamp to a build. Now, people get
-  potentially different results each time they run the build, meaning that
-  engineers won’t always be able to reproduce and fix one another’s failures
-  or failures that occur on an automated build system.
-* Tasks with multiple dependencies can create race conditions. If task A
-  depends on both task B and task C, and tasks B and C both modify the same
-  file, task A gets a different result depending on which one of tasks B and C
-  finishes first.
-
-There’s no general-purpose way to solve these performance, correctness, or
-maintainability problems within the task-based framework laid out here. So long
-as engineers can write arbitrary code that runs during the build, the system
-can’t have enough information to always be able to run builds quickly and
-correctly. To solve the problem, we need to take some power out of the hands of
-engineers and put it back in the hands of the system and reconceptualize the
-role of the system not as running tasks, but as producing artifacts.
-
-This approach led to the creation of artifact-based build systems, like Blaze
-and Bazel.
diff --git a/7.6.1/build/share-variables.mdx b/7.6.1/build/share-variables.mdx
deleted file mode 100644
index 6773e56..0000000
--- a/7.6.1/build/share-variables.mdx
+++ /dev/null
@@ -1,85 +0,0 @@
----
-title: 'Sharing Variables'
----
-
-
-
-`BUILD` files are intended to be simple and declarative. They will typically
-consist of a series of target declarations. As your code base and your `BUILD`
-files get larger, you will probably notice some duplication, such as:
-
-``` python
-cc_library(
-    name = "foo",
-    copts = ["-DVERSION=5"],
-    srcs = ["foo.cc"],
-)
-
-cc_library(
-    name = "bar",
-    copts = ["-DVERSION=5"],
-    srcs = ["bar.cc"],
-    deps = [":foo"],
-)
-```
-
-Code duplication in `BUILD` files is usually fine. This can make the file more
-readable: each declaration can be read and understood without any context. This
-is important, not only for humans, but also for external tools. For example, a
-tool might be able to read and update `BUILD` files to add missing dependencies.
-Code refactoring and code reuse might prevent this kind of automated
-modification.
- -If it is useful to share values (for example, if values must be kept in sync), -you can introduce a variable: - -``` python -COPTS = ["-DVERSION=5"] - -cc_library( - name = "foo", - copts = COPTS, - srcs = ["foo.cc"], -) - -cc_library( - name = "bar", - copts = COPTS, - srcs = ["bar.cc"], - deps = [":foo"], -) -``` - -Multiple declarations now use the value `COPTS`. By convention, use uppercase -letters to name global constants. - -## Sharing variables across multiple BUILD files - -If you need to share a value across multiple `BUILD` files, you have to put it -in a `.bzl` file. `.bzl` files contain definitions (variables and functions) -that can be used in `BUILD` files. - -In `path/to/variables.bzl`, write: - -``` python -COPTS = ["-DVERSION=5"] -``` - -Then, you can update your `BUILD` files to access the variable: - -``` python -load("//path/to:variables.bzl", "COPTS") - -cc_library( - name = "foo", - copts = COPTS, - srcs = ["foo.cc"], -) - -cc_library( - name = "bar", - copts = COPTS, - srcs = ["bar.cc"], - deps = [":foo"], -) -``` diff --git a/7.6.1/build/style-guide.mdx b/7.6.1/build/style-guide.mdx deleted file mode 100644 index 19a5216..0000000 --- a/7.6.1/build/style-guide.mdx +++ /dev/null @@ -1,232 +0,0 @@ ---- -title: 'BUILD Style Guide' ---- - - - -`BUILD` file formatting follows the same approach as Go, where a standardized -tool takes care of most formatting issues. -[Buildifier](https://github.com/bazelbuild/buildifier) is a tool that parses and -emits the source code in a standard style. Every `BUILD` file is therefore -formatted in the same automated way, which makes formatting a non-issue during -code reviews. It also makes it easier for tools to understand, edit, and -generate `BUILD` files. - -`BUILD` file formatting must match the output of `buildifier`. - -## Formatting example - -```python -# Test code implementing the Foo controller. -package(default_testonly = True) - -py_test( - name = "foo_test", - srcs = glob(["*.py"]), - data = [ - "//data/production/foo:startfoo", - "//foo", - "//third_party/java/jdk:jdk-k8", - ], - flaky = True, - deps = [ - ":check_bar_lib", - ":foo_data_check", - ":pick_foo_port", - "//pyglib", - "//testing/pybase", - ], -) -``` - -## File structure - -**Recommendation**: Use the following order (every element is optional): - -* Package description (a comment) - -* All `load()` statements - -* The `package()` function. - -* Calls to rules and macros - -Buildifier makes a distinction between a standalone comment and a comment -attached to an element. If a comment is not attached to a specific element, use -an empty line after it. The distinction is important when doing automated -changes (for example, to keep or remove a comment when deleting a rule). - -```python -# Standalone comment (such as to make a section in a file) - -# Comment for the cc_library below -cc_library(name = "cc") -``` - -## References to targets in the current package - -Files should be referred to by their paths relative to the package directory -(without ever using up-references, such as `..`). Generated files should be -prefixed with "`:`" to indicate that they are not sources. Source files -should not be prefixed with `:`. Rules should be prefixed with `:`. For -example, assuming `x.cc` is a source file: - -```python -cc_library( - name = "lib", - srcs = ["x.cc"], - hdrs = [":gen_header"], -) - -genrule( - name = "gen_header", - srcs = [], - outs = ["x.h"], - cmd = "echo 'int x();' > $@", -) -``` - -## Target naming - -Target names should be descriptive. 
If a target contains one source file,
-the target should generally have a name derived from that source (for example, a
-`cc_library` for `chat.cc` could be named `chat`, or a `java_library` for
-`DirectMessage.java` could be named `direct_message`).
-
-The eponymous target for a package (the target with the same name as the
-containing directory) should provide the functionality described by the
-directory name. If there is no such target, do not create an eponymous
-target.
-
-Prefer using the short name when referring to an eponymous target (`//x`
-instead of `//x:x`). If you are in the same package, prefer the local
-reference (`:x` instead of `//x`).
-
-Avoid using "reserved" target names which have special meaning. This includes
-`all`, `__pkg__`, and `__subpackages__`; these names have special
-semantics and can cause confusion and unexpected behaviors when they are used.
-
-In the absence of a prevailing team convention, these are some non-binding
-recommendations that are broadly used at Google:
-
-* In general, use ["snake_case"](https://en.wikipedia.org/wiki/Snake_case)
-  * For a `java_library` with one `src` this means using a name that is not
-    the same as the filename without the extension
-  * For Java `*_binary` and `*_test` rules, use
-    ["Upper CamelCase"](https://en.wikipedia.org/wiki/Camel_case).
-    This allows for the target name to match one of the `src`s. For
-    `java_test`, this makes it possible for the `test_class` attribute to be
-    inferred from the name of the target.
-* If there are multiple variants of a particular target then add a suffix to
-  disambiguate (such as `:foo_dev`, `:foo_prod` or `:bar_x86`, `:bar_x64`)
-* Suffix `_test` targets with `_test`, `_unittest`, `Test`, or `Tests`
-* Avoid meaningless suffixes like `_lib` or `_library` (unless necessary to
-  avoid conflicts between a `_library` target and its corresponding `_binary`)
-* For proto related targets:
-  * `proto_library` targets should have names ending in `_proto`
-  * Language-specific `*_proto_library` rules should match the underlying
-    proto but replace `_proto` with a language-specific suffix such as:
-    * **`cc_proto_library`**: `_cc_proto`
-    * **`java_proto_library`**: `_java_proto`
-    * **`java_lite_proto_library`**: `_java_proto_lite`
-
-## Visibility
-
-Visibility should be scoped as tightly as possible, while still allowing access
-by tests and reverse dependencies. Use `__pkg__` and `__subpackages__` as
-appropriate.
-
-Avoid setting package `default_visibility` to `//visibility:public`.
-`//visibility:public` should be individually set only for targets in the
-project's public API. These could be libraries that are designed to be depended
-on by external projects or binaries that could be used by an external project's
-build process.
-
-## Dependencies
-
-Dependencies should be restricted to direct dependencies (dependencies
-needed by the sources listed in the rule). Do not list transitive dependencies.
-
-Package-local dependencies should be listed first and referred to in a way
-compatible with the
-[References to targets in the current package](#targets-current-package)
-section above (not by their absolute package name).
-
-Prefer to list dependencies directly, as a single list. Putting the "common"
-dependencies of several targets into a variable reduces maintainability, makes
-it impossible for tools to change the dependencies of a target, and can lead to
-unused dependencies.
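-
-For example (the labels here are illustrative), a dependency list following
-these guidelines might look like:
-
-```python
-cc_library(
-    name = "chat",
-    srcs = ["chat.cc"],
-    deps = [
-        # Package-local dependencies first, by relative label.
-        ":chat_options",
-        # Then other direct dependencies; nothing transitive is listed.
-        "//base/strings",
-        "//net/rpc",
-    ],
-)
-```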
-
-## Globs
-
-Indicate "no targets" with `[]`. Do not use a glob that matches nothing: it
-is more error-prone and less obvious than an empty list.
-
-### Recursive
-
-Do not use recursive globs to match source files (for example,
-`glob(["**/*.java"])`).
-
-Recursive globs make `BUILD` files difficult to reason about because they skip
-subdirectories containing `BUILD` files.
-
-Recursive globs are generally less efficient than having a `BUILD` file per
-directory with a dependency graph defined between them as this enables better
-remote caching and parallelism.
-
-It is good practice to author a `BUILD` file in each directory and define a
-dependency graph between them.
-
-### Non-recursive
-
-Non-recursive globs are generally acceptable.
-
-## Other conventions
-
- * Use uppercase and underscores to declare constants (such as `GLOBAL_CONSTANT`),
-   and lowercase and underscores to declare variables (such as `my_variable`).
-
- * Labels should never be split, even if they are longer than 79 characters.
-   Labels should be string literals whenever possible. *Rationale*: It makes
-   find and replace easy. It also improves readability.
-
- * The value of the name attribute should be a literal constant string (except
-   in macros). *Rationale*: External tools use the name attribute to refer to a
-   rule. They need to find rules without having to interpret code.
-
- * When setting boolean-type attributes, use boolean values, not integer values.
-   For legacy reasons, rules still convert integers to booleans as needed,
-   but this is discouraged. *Rationale*: `flaky = 1` could be misread as saying
-   "deflake this target by rerunning it once". `flaky = True` unambiguously says
-   "this test is flaky".
-
-## Differences with Python style guide
-
-Although compatibility with
-[Python style guide](https://www.python.org/dev/peps/pep-0008/)
-is a goal, there are a few differences:
-
- * No strict line length limit. Long comments and long strings are often split
-   to 79 columns, but it is not required. It should not be enforced in code
-   reviews or presubmit scripts. *Rationale*: Labels can be long and exceed this
-   limit. It is common for `BUILD` files to be generated or edited by tools,
-   which does not go well with a line length limit.
-
- * Implicit string concatenation is not supported. Use the `+` operator.
-   *Rationale*: `BUILD` files contain many string lists. It is easy to forget a
-   comma, which leads to a completely different result. This has created many
-   bugs in the past. [See also this discussion.](https://lwn.net/Articles/551438/)
-
- * Use spaces around the `=` sign for keyword arguments in rules. *Rationale*:
-   Named arguments are much more frequent than in Python and are always on a
-   separate line. Spaces improve readability. This convention has been around
-   for a long time, and it is not worth modifying all existing `BUILD` files.
-
- * By default, use double quotation marks for strings. *Rationale*: This is not
-   specified in the Python style guide, but it recommends consistency. So we
-   decided to use only double-quoted strings. Many languages use double-quotes
-   for string literals.
-
- * Use a single blank line between two top-level definitions. *Rationale*: The
-   structure of a `BUILD` file is not like a typical Python file. It has only
-   top-level statements. Using a single blank line makes `BUILD` files shorter.
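-
-As a short sketch pulling several of these conventions together (the names are
-hypothetical):
-
-```python
-CHAT_COPTS = ["-DVERSION=5"]  # a constant: uppercase with underscores
-
-cc_test(
-    name = "chat_test",  # double quotes for strings; spaces around `=`
-    srcs = ["chat_test.cc"],
-    copts = CHAT_COPTS,
-    flaky = True,  # a boolean value, not the legacy integer `1`
-    deps = [":chat"],
-)
-```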
diff --git a/7.6.1/community/recommended-rules.mdx b/7.6.1/community/recommended-rules.mdx
deleted file mode 100644
index 86daa05..0000000
--- a/7.6.1/community/recommended-rules.mdx
+++ /dev/null
@@ -1,54 +0,0 @@
----
-title: 'Recommended Rules'
----
-
-
-
-In the documentation, we provide a list of
-[recommended rules](/rules).
-
-This is a set of high-quality rules, which will provide a good experience to our
-users. We make a distinction between the supported rules and the hundreds of
-rules you can find on the Internet.
-
-## Nomination
-
-If a ruleset meets the requirements below, a rule maintainer can nominate it
-to be part of the _recommended rules_ by filing a
-[GitHub issue](https://github.com/bazelbuild/bazel/).
-
-After a review by the [Bazel core team](/contribute/policy), it
-will be recommended on the Bazel website.
-
-## Requirements for the rule maintainers
-
-* The ruleset provides an important feature, useful to a large number of Bazel
-  users (for example, support for a widely popular language).
-* The ruleset is well maintained. There must be at least two active maintainers.
-* The ruleset is well documented, with examples, and easy to use.
-* The ruleset follows the best practices and is performant (see
-  [the performance guide](/rules/performance)).
-* The ruleset has sufficient test coverage.
-* The ruleset is tested on
-  [BuildKite](https://github.com/bazelbuild/continuous-integration/blob/master/buildkite/README.md)
-  with the latest version of Bazel. Tests should always pass (when used as a
-  presubmit check).
-* The ruleset is also tested with the upcoming incompatible changes. Breakages
-  should be fixed within two weeks. Migration issues should be reported to the
-  Bazel team quickly.
-
-## Requirements for Bazel developers
-
-* Recommended rules are frequently tested with Bazel at head (at least once a
-  day).
-* No change in Bazel may break a recommended rule (with the default set of
-  flags). If it happens, the change should be fixed or rolled back.
-
-## Demotion
-
-If there is a concern that a particular ruleset is no longer meeting the
-requirements, a [GitHub issue](https://github.com/bazelbuild/bazel/) should be
-filed.
-
-Rule maintainers will be contacted and need to respond within two weeks. Based
-on the outcome, the Bazel core team might decide to demote the ruleset.
diff --git a/7.6.1/community/remote-execution-services.mdx b/7.6.1/community/remote-execution-services.mdx
deleted file mode 100644
index 90949ce..0000000
--- a/7.6.1/community/remote-execution-services.mdx
+++ /dev/null
@@ -1,28 +0,0 @@
----
-title: 'Remote Execution Services'
----
-
-
-
-Use the following services to run Bazel with remote execution:
-
-* Manual
-
-  * Use the [gRPC protocol](https://github.com/bazelbuild/remote-apis)
-    directly to create your own remote execution service.
-
-* Self-service
-
-  * [Buildbarn](https://github.com/buildbarn)
-  * [Buildfarm](https://github.com/bazelbuild/bazel-buildfarm)
-  * [BuildGrid](https://gitlab.com/BuildGrid/buildgrid)
-  * [Scoot](https://github.com/twitter/scoot)
-  * [TurboCache](https://github.com/allada/turbo-cache)
-
-* Commercial
-
-  * [EngFlow Remote Execution](https://www.engflow.com) - Remote execution
-    and remote caching service. Can be self-hosted or hosted.
-  * [BuildBuddy](https://www.buildbuddy.io) - Remote build execution,
-    caching, and results UI.
-  * [Bitrise](https://bitrise.io/why/features/mobile-build-caching-for-better-build-test-performance) - Providing the world's leading mobile-first CI/CD and remote build caching platform.
diff --git a/7.6.1/community/roadmaps-starlark.mdx b/7.6.1/community/roadmaps-starlark.mdx
deleted file mode 100644
index 5ce476d..0000000
--- a/7.6.1/community/roadmaps-starlark.mdx
+++ /dev/null
@@ -1,91 +0,0 @@
----
-title: 'Starlark Roadmap'
----
-
-
-
-*Last verified: 2020-04-21*
-([update history](https://github.com/bazelbuild/bazel-website/commits/master/roadmaps/starlark.md))
-
-*Point of contact:* [laurentlb](https://github.com/laurentlb)
-
-## Goal
-
-Our goal is to make Bazel more extensible. Users should be able to easily
-implement their own rules, and support new languages and tools. We want to
-improve the experience of writing and maintaining those rules.
-
-We focus on two areas:
-
-* Make the language and API simple, yet powerful.
-* Provide better tooling for reading, writing, updating, debugging, and testing the code.
-
-
-## Q2 2020
-
-Build health and Best practices:
-
-* P0. Discourage macros that don’t have a name, and ensure the name is a unique
-  string literal. This work is focused on Google’s codebase, but may impact
-  tooling available publicly.
-* P0. Make Buildozer commands reliable with regard to selects and variables.
-* P1. Make Buildifier remove duplicates in lists that we don’t sort because of
-  comments.
-* P1. Update Buildifier linter to recommend inlining trivial expressions.
-* P2. Study use cases for native.existing_rule[s]() and propose alternatives.
-* P2. Study use cases for the prelude file and propose alternatives.
-
-Performance:
-
-* P1. Optimize the Starlark interpreter using flat environments and bytecode
-  compilation.
-
-Technical debt reduction:
-
-* P0. Add ability to port native symbols to Starlark underneath @bazel_tools.
-* P1. Delete obsolete flags (some of them are still used at Google, so we need to
-  clean the codebase first): `incompatible_always_check_depset_elements`,
-  `incompatible_disable_deprecated_attr_params`,
-  `incompatible_no_support_tools_in_action_inputs`, `incompatible_new_actions_api`.
-* P1. Ensure the following flags can be flipped in Bazel 4.0:
-  `incompatible_disable_depset_items`, `incompatible_no_implicit_file_export`,
-  `incompatible_run_shell_command_string`,
-  `incompatible_restrict_string_escapes`.
-* P1. Finish lib.syntax work (API cleanup, separation from Bazel).
-* P2. Reduce by 50% the build+test latency of a trivial edit to Bazel’s Java packages.
-
-Community:
-
-* `rules_python` is active and well-maintained by the community.
-* Continuous support for rules_jvm_external (no outstanding pull requests, issue
-  triage, making releases).
-* Maintain Bazel documentation infrastructure: centralize and canonicalize CSS
-  styles across bazel-website, bazel-blog, docs
-* Bazel docs: add CI tests for e2e doc site build to prevent regressions.
-
-## Q1 2020
-
-Build health and Best practices:
-
-* Allow targets to track their macro call stack, for exporting via `bazel query`
-* Implement `--incompatible_no_implicit_file_export`
-* Remove the deprecated depset APIs (#5817, #10313, #9017).
-* Add a cross file analyzer in Buildifier, implement a check for deprecated
-  functions.
-
-Performance:
-
-* Make Bazel’s own Java-based tests 2x faster.
-* Implement a Starlark CPU profiler.
-
-Technical debt reduction:
-
-* Remove 8 incompatible flags (after flipping them).
-* Finish lib.syntax cleanup work (break dependencies).
-* Starlark optimization: flat environment, bytecode compilation -* Delete all serialization from analysis phase, if possible -* Make a plan for simplifying/optimizing lib.packages - -Community: - -* Publish a Glossary containing definitions for all the Bazel-specific terms diff --git a/7.6.1/community/sig.mdx b/7.6.1/community/sig.mdx deleted file mode 100644 index ae5f918..0000000 --- a/7.6.1/community/sig.mdx +++ /dev/null @@ -1,158 +0,0 @@ ---- -title: 'Bazel Special Interest Groups' ---- - - - -Bazel hosts Special Interest Groups (SIGs) to focus collaboration on particular -areas and to support communication and coordination between [Bazel owners, -maintainers, and contributors](/contribute/policy). This policy -applies to [`bazelbuild`](http://github.com/bazelbuild). - -SIGs do their work in public. The ideal scope for a SIG covers a well-defined -domain, where the majority of participation is from the community. SIGs may -focus on community maintained repositories in `bazelbuild` (such as language -rules) or focus on areas of code in the Bazel repository (such as Remote -Execution). - -While not all SIGs will have the same level of energy, breadth of scope, or -governance models, there should be sufficient evidence that there are community -members willing to engage and contribute should the interest group be -established. Before joining, review the group's work, and then get in touch -with the SIG leader. Membership policies vary on a per-SIG basis. - -See the complete list of -[Bazel SIGs](https://github.com/bazelbuild/community/tree/main/sigs). - -### Non-goals: What a SIG is not - -SIGs are intended to facilitate collaboration on shared work. A SIG is -therefore: - -- *Not a support forum:* a mailing list and a SIG is not the same thing -- *Not immediately required:* early on in a project's life, you may not know - if you have shared work or collaborators -- *Not free labor:* energy is required to grow and coordinate the work - collaboratively - -Bazel Owners take a conservative approach to SIG creation—thanks to the ease of -starting projects on GitHub, there are many avenues where collaboration can -happen without the need for a SIG. - -## SIG lifecycle - -This section covers how to create a SIG. - -### Research and consultation - -To propose a new SIG group, first gather evidence for approval, as specified -below. Some possible avenues to consider are: - -- A well-defined problem or set of problems the group would solve -- Consultation with community members who would benefit, assessing both the - benefit and their willingness to commit -- For existing projects, evidence from issues and PRs that contributors care - about the topic -- Potential goals for the group to achieve -- Resource requirements of running the group - -Even if the need for a SIG seems self-evident, the research and consultation is -still important to the success of the group. - -### Create the new group - -The new group should follow the below process for chartering. In particular, it -must demonstrate: - -- A clear purpose and benefit to Bazel (either around a sub-project or - application area) -- Two or more contributors willing to act as group leads, existence of other - contributors, and evidence of demand for the group -- Each group needs to use at least one publicly accessible mailing list. 
A SIG - may reuse one of the public lists, such as - [bazel-discuss](https://groups.google.com/g/bazel-discuss), ask for a list - for @bazel.build, or create their own list -- Resources the SIG initially requires (usually, a mailing list and a regular - video call). -- SIGs can serve documents and files from their directory in - [`bazelbuild/community`](https://github.com/bazelbuild/community) - or from their own repository in the - [`bazelbuild`](https://github.com/bazelbuild) GitHub - organization. SIGs may link to external resources if they choose to organize - their work outside of the `bazelbuild` GitHub organization -- Bazel Owners approve or reject SIG applications and consult other - stakeholders as necessary - -Before entering the formal parts of the process, you should consult with -the Bazel product team, at product@bazel.build. Most SIGs require conversation -and iteration before approval. - -The formal request for the new group is done by submitting a charter as a PR to -[`bazelbuild/community`](https://github.com/bazelbuild/community), -and including the request in the comments on the PR following the template -below. On approval, the PR for the group is merged and the required resources -created. - -### Template Request for New SIG - -To request a new SIG, use the template in the community repo: -[SIG-request-template.md](https://github.com/bazelbuild/community/blob/main/governance/SIG-request-template.md). - -### Chartering - -To establish a group, you need a charter and must follow the Bazel -[code of conduct](https://github.com/bazelbuild/bazel/blob/HEAD/CODE_OF_CONDUCT.md). -Archives of the group will be public. Membership may either be open to all -without approval, or available on request, pending approval of the group -administrator. - -The charter must nominate an administrator. As well as an administrator, the -group must include at least one person as lead (these may be the same person), -who serves as point of contact for coordination as required with the Bazel -product team. - -Group creators must post their charter to the group mailing list. The community -repository in the Bazel GitHub organization archives such documents and -policies. As groups evolve their practices and conventions, they should update -their charters within the relevant part of the community repository. - -### Collaboration and inclusion - -While not mandated, the group should use scheduled conference calls or chat -channels to conduct its meetings. Any such -meetings should be advertised on the mailing list, and notes posted to the -mailing list afterwards. Regular meetings help drive accountability and progress -in a SIG. - -Bazel product team members may proactively monitor the group and encourage -discussion and action as appropriate. - -### Launch a SIG - -Required activities: - -- Notify Bazel general discussion groups - ([bazel-discuss](https://groups.google.com/g/bazel-discuss), - [bazel-dev](https://groups.google.com/g/bazel-dev)). - -Optional activities: - -- Create a blog post for the Bazel blog - -### Health and termination of SIGs - -The Bazel owners make a best effort to ensure the health of SIGs. Bazel owners -occasionally request the SIG lead to report on the SIG's work, to inform the -broader Bazel community of the group's activity. - -If a SIG no longer has a useful purpose or interested community, it may be -archived and cease operation.
The Bazel product team reserves the right to -archive such inactive SIGs to maintain the overall health of the project, -though this is the less preferred outcome. A SIG may also opt to disband if -it recognizes it has reached the end of its useful life. - -## Note - -*This content has been adapted from Tensorflow’s -[SIG playbook](https://www.tensorflow.org/community/sig_playbook) -with modifications.* diff --git a/7.6.1/community/update.mdx b/7.6.1/community/update.mdx deleted file mode 100644 index be0e07d..0000000 --- a/7.6.1/community/update.mdx +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: 'Community updates' ---- - - - -Join Bazel developer relations engineers for the monthly community update -livestream, or catch up on past ones. - -Title | Date | Description | Speakers -------- | -------- | -------- | -------- -[Roadmap Introduction](https://www.youtube.com/watch?v=gYrZDl7K9JM) | 5/19/2022 | The inaugural Bazel Community Update, introducing the community to some of Google's Bazel leadership to talk about the general state of the project and its upcoming roadmap | Sven Tiffe, Tony Aiuto, Radhika Advani -[Hands-On with Bzlmod](https://www.youtube.com/watch?v=MuW5XNcFukE) | 6/23/2022 | This month, we're joined by Google engineers Yun Peng and Xudong Yang to talk about Bzlmod, the new dependency system that is expected to go GA later this year. We'll cover the motivation behind the change, the new capabilities it brings to the table, and walk through some examples of it in action. | Yun Peng, Xudong Yang -[Extending Gazelle to generate BUILD files](https://www.youtube.com/watch?v=E1-U7EAfhXw) | 7/21/2022 | This month we're joined by Son Luong Ngoc who will be showing the Gazelle language extension system. We'll briefly touch on how it works under the covers, existing extensions, and how to go about writing your own extensions to ease the migration to Bazel. | Son Luong Ngoc -[Using Bazel for JavaScript Projects](https://www.youtube.com/watch?v=RIfYqX0JJYk) | 8/18/2022 | In this update, Alex Eagle joins us to talk about running JavaScript build tooling under Bazel. We'll look at a couple of examples: a Vue.js frontend and Nest backend. We'll cover the migration to newer rules_js provided by Aspect, and study how the tooling allows for fetching third-party dependencies and resolving them in the Node.js runtime. | Alex Eagle -[Like Peanut Butter & Jelly: Integrating Bazel with JetBrains IntelliJ](https://www.youtube.com/watch?v=wMrua-W-LC4) | 9/15/2022 | Bazel is awesome. IntelliJ is awesome. Naturally, they are more awesome together. Bazel IntelliJ plugin gurus Mai Hussien from Google and Justin Kaeser from JetBrains join us this month to give a live demo and walkthrough of the plugin's capabilities. Both new and experienced plugin users are welcome to come with questions. | Mai Hussien, Justin Kaeser -[Bazel at scale for surgical robots](https://www.youtube.com/watch?v=kCs1xa45yjM) | 10/27/2022 | What do you do when CMake CI runs for four hours? Join Guillaume Maudoux of Tweag to learn about how they migrated large, embedded robotic applications to Bazel. Topics include configuring toolchains for cross compilation, improving CI performance, managing third-party dependencies, and creating a positive developer experience — everything needed to ensure that Bazel lives up to “{Fast, Correct} — Choose Two”.
| Guillaume Maudoux -[The Ghosts of Bazel Past, Present, and Future](https://www.youtube.com/watch?v=uRjSghJQlsw) | 12/22/2022 | For our special holiday Community Update and last of 2022, I'll be joined by Google's Sven Tiffe and Radhika Advani where we'll be visited by the ghosts of Bazel Past (2022 year in review), Present (Bazel 6.0 release), and Future (what to expect in 2023). | Sven Tiffe, Radhika Advani diff --git a/7.6.1/concepts/build-ref.mdx b/7.6.1/concepts/build-ref.mdx deleted file mode 100644 index e8839d4..0000000 --- a/7.6.1/concepts/build-ref.mdx +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: 'Repositories, workspaces, packages, and targets' ---- - - - -Bazel builds software from source code organized in directory trees called -repositories. A defined set of repositories comprises the workspace. Source -files in repositories are organized in a nested hierarchy of packages, where -each package is a directory that contains a set of related source files and one -`BUILD` file. The `BUILD` file specifies what software outputs can be built from -the source. - -## Repositories - -Source files used in a Bazel build are organized in _repositories_ (often -shortened to _repos_). A repo is a directory tree with a boundary marker file at -its root; such a boundary marker file could be `MODULE.bazel`, `REPO.bazel`, or -in legacy contexts, `WORKSPACE` or `WORKSPACE.bazel`. - -The repo in which the current Bazel command is being run is called the _main -repo_. Other (external) repos are defined by _repo rules_; see [external -dependencies overview](/external/overview) for more information. - -## Workspace - -A _workspace_ is the environment shared by all Bazel commands run from the same -main repo. It encompasses the main repo and the set of all defined external -repos. - -Note that historically the concepts of "repository" and "workspace" have been -conflated; the term "workspace" has often been used to refer to the main -repository, and sometimes even used as a synonym of "repository". - -## Packages - -The primary unit of code organization in a repository is the _package_. A -package is a collection of related files and a specification of how they can be -used to produce output artifacts. - -A package is defined as a directory containing a -[`BUILD` file](/concepts/build-files) named either `BUILD` or `BUILD.bazel`. A -package includes all files in its directory, plus all subdirectories beneath it, -except those which themselves contain a `BUILD` file. From this definition, no -file or directory may be a part of two different packages. - -For example, in the following directory tree there are two packages, `my/app`, -and the subpackage `my/app/tests`. Note that `my/app/data` is not a package, but -a directory belonging to package `my/app`. - -``` -src/my/app/BUILD -src/my/app/app.cc -src/my/app/data/input.txt -src/my/app/tests/BUILD -src/my/app/tests/test.cc -``` - -## Targets - -A package is a container of _targets_, which are defined in the package's -`BUILD` file. Most targets are one of two principal kinds: _files_ and _rules_. - -Files are further divided into two kinds. _Source files_ are usually written by -the efforts of people, and checked in to the repository. _Generated files_, -sometimes called derived files or output files, are not checked in, but are -generated from source files. - -The second kind of target is declared with a _rule_. Each rule instance -specifies the relationship between a set of input and a set of output files.
The -inputs to a rule may be source files, but they also may be the outputs of other -rules. - -Whether the input to a rule is a source file or a generated file is in most -cases immaterial; what matters is only the contents of that file. This fact -makes it easy to replace a complex source file with a generated file produced by -a rule, such as happens when the burden of manually maintaining a highly -structured file becomes too tiresome, and someone writes a program to derive it. -No change is required to the consumers of that file. Conversely, a generated -file may easily be replaced by a source file with only local changes. - -The inputs to a rule may also include _other rules_. The precise meaning of such -relationships is often quite complex and language- or rule-dependent, but -intuitively it is simple: a C++ library rule A might have another C++ library -rule B for an input. The effect of this dependency is that B's header files are -available to A during compilation, B's symbols are available to A during -linking, and B's runtime data is available to A during execution. - -An invariant of all rules is that the files generated by a rule always belong to -the same package as the rule itself; it is not possible to generate files into -another package. It is not uncommon for a rule's inputs to come from another -package, though. - -Package groups are sets of packages whose purpose is to limit accessibility of -certain rules. Package groups are defined by the `package_group` function. They -have three properties: the list of packages they contain, their name, and other -package groups they include. The only allowed ways to refer to them are from the -`visibility` attribute of rules or from the `default_visibility` attribute of -the `package` function; they do not generate or consume files. For more -information, refer to the [`package_group` -documentation](/reference/be/functions#package_group). - diff --git a/7.6.1/concepts/platforms.mdx b/7.6.1/concepts/platforms.mdx deleted file mode 100644 index 22718f7..0000000 --- a/7.6.1/concepts/platforms.mdx +++ /dev/null @@ -1,429 +0,0 @@ ---- -title: 'Migrating to Platforms' ---- - - - -Bazel has sophisticated [support](#background) for modeling -[platforms][Platforms] and [toolchains][Toolchains] for multi-architecture and -cross-compiled builds. - -This page summarizes the state of this support. - -Key Point: Bazel's platform and toolchain APIs are available today. Not all -languages support them. Use these APIs with your project if you can. Bazel is -migrating all major languages so eventually all builds will be platform-based. - -See also: - -* [Platforms][Platforms] -* [Toolchains][Toolchains] -* [Background][Background] - -## Status - -### C++ - -C++ rules use platforms to select toolchains when -`--incompatible_enable_cc_toolchain_resolution` is set. - -This means you can configure a C++ project with: - -```posix-terminal -bazel build //:my_cpp_project --platforms=//:myplatform -``` - -instead of the legacy: - -```posix-terminal -bazel build //:my_cpp_project --cpu=... --crosstool_top=... --compiler=... -``` - -This will be enabled by default in Bazel 7.0 ([#7260](https://github.com/bazelbuild/bazel/issues/7260)). - -To test your C++ project with platforms, see -[Migrating Your Project](#migrating-your-project) and -[Configuring C++ toolchains]. - -### Java - -Java rules use platforms to select toolchains. - -This replaces legacy flags `--java_toolchain`, `--host_java_toolchain`, -`--javabase`, and `--host_javabase`.
- -See [Java and Bazel](/docs/bazel-and-java) for details. - -### Android - -Android rules use platforms to select toolchains when -`--incompatible_enable_android_toolchain_resolution` is set. - -This means you can configure an Android project with: - -```posix-terminal -bazel build //:my_android_project --android_platforms=//:my_android_platform -``` - -instead of with legacy flags like `--android_crosstool_top`, `--android_cpu`, -and `--fat_apk_cpu`. - -This will be enabled by default in Bazel 7.0 ([#16285](https://github.com/bazelbuild/bazel/issues/16285)). - -To test your Android project with platforms, see -[Migrating Your Project](#migrating-your-project). - -### Apple - -[Apple rules] do not support platforms and are not yet scheduled -for support. - -You can still use platform APIs with Apple builds (for example, when building -with a mixture of Apple rules and pure C++) with [platform -mappings](#platform-mappings). - -### Other languages - -* [Go rules] fully support platforms -* [Rust rules] fully support platforms. - -If you own a language rule set, see [Migrating your rule set] for adding -support. - -## Background - -*Platforms* and *toolchains* were introduced to standardize how software -projects target different architectures and cross-compile. - -This was -[inspired][Inspiration] -by the observation that language maintainers were already doing this in ad -hoc, incompatible ways. For example, C++ rules used `--cpu` and - `--crosstool_top` to declare a target CPU and toolchain. Neither of these -correctly models a "platform". This produced awkward and incorrect builds. - -Java, Android, and other languages evolved their own flags for similar purposes, -none of which interoperated with each other. This made cross-language builds -confusing and complicated. - -Bazel is intended for large, multi-language, multi-platform projects. This -demands more principled support for these concepts, including a clear -standard API. - -### Need for migration - -Upgrading to the new API requires two efforts: releasing the API and upgrading -rule logic to use it. - -The first is done but the second is ongoing. This consists of ensuring -language-specific platforms and toolchains are defined, language logic reads -toolchains through the new API instead of old flags like `--crosstool_top`, and -`config_setting`s select on the new API instead of old flags. - -This work is straightforward but requires a distinct effort for each language, -plus fair warning for project owners to test against upcoming changes. - -This is why this is an ongoing migration. - -### Goal - -This migration is complete when all projects build with the form: - -```posix-terminal -bazel build //:myproject --platforms=//:myplatform -``` - -This implies: - -1. Your project's rules choose the right toolchains for `//:myplatform`. -1. Your project's dependencies choose the right toolchains for `//:myplatform`. -1. `//:myplatform` references -[common declarations][Common Platform Declarations] -of `CPU`, `OS`, and other generic, language-independent properties -1. All relevant [`select()`s][select()] properly match `//:myplatform`. -1. `//:myplatform` is defined in a clear, accessible place: in your project's -repo if the platform is unique to your project, or some common place all -consuming projects can find it - -Old flags like `--cpu`, `--crosstool_top`, and `--fat_apk_cpu` will be -deprecated and removed as soon as it's safe to do so. - -Ultimately, this will be the *sole* way to configure architectures. 
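-For concreteness, a minimal sketch of what such a `//:myplatform` definition
-might look like. The constraint choices here are illustrative; a real project
-would pick the `@platforms` values matching the machines it actually targets:
-
-```python
-# BUILD (project root) - hypothetical definition of //:myplatform.
-platform(
-    name = "myplatform",
-    constraint_values = [
-        # Generic, language-independent properties from @platforms.
-        "@platforms//os:linux",
-        "@platforms//cpu:arm64",
-    ],
-)
-```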
- - -## Migrating your project - -If you build with languages that support platforms, your build should already -work with an invocation like: - -```posix-terminal -bazel build //:myproject --platforms=//:myplatform -``` - -See [Status](#status) and your language's documentation for precise details. - -If a language requires a flag to enable platform support, you also need to set -that flag. See [Status](#status) for details. - -For your project to build, you need to check the following: - -1. `//:myplatform` must exist. It's generally the project owner's responsibility - to define platforms because different projects target different machines. - See [Default platforms](#default-platforms). - -1. The toolchains you want to use must exist. If using stock toolchains, the - language owners should include instructions for how to register them. If - writing your own custom toolchains, you need to [register](https://bazel.build/extending/toolchains#registering-building-toolchains) them in your - `MODULE.bazel` file or with [`--extra_toolchains`](https://bazel.build/reference/command-line-reference#flag--extra_toolchains). - -1. `select()`s and [configuration transitions][Starlark transitions] must - resolve properly. See [select()](#select) and [Transitions](#transitions). - -1. If your build mixes languages that do and don't support platforms, you may - need platform mappings to help the legacy languages work with the new API. - See [Platform mappings](#platform-mappings) for details. - -If you still have problems, [reach out](#questions) for support. - -### Default platforms - -Project owners should define explicit -[platforms][Defining Constraints and Platforms] to describe the architectures -they want to build for. These are then triggered with `--platforms`. - -When `--platforms` isn't set, Bazel defaults to a `platform` representing the -local build machine. This is auto-generated at `@platforms//host` (aliased as -`@bazel_tools//tools:host_platform`) -so there's no need to explicitly define it. It maps the local machine's `OS` -and `CPU` with `constraint_value`s declared in -[`@platforms`](https://github.com/bazelbuild/platforms). - -### `select()` - -Projects can [`select()`][select()] on -[`constraint_value` targets][constraint_value Rule] but not complete -platforms. This is intentional so `select()` supports as wide a variety of -machines as possible. A library with `ARM`-specific sources should support *all* -`ARM`-powered machines unless there's reason to be more specific. - -To select on one or more `constraint_value`s, use: - -```python -config_setting( - name = "is_arm", - constraint_values = [ - "@platforms//cpu:arm", - ], -) -``` - -This is equivalent to traditionally selecting on `--cpu`: - -```python -config_setting( - name = "is_arm", - values = { - "cpu": "arm", - }, -) -``` - -More details [here][select() Platforms]. - -`select`s on `--cpu`, `--crosstool_top`, etc. don't understand `--platforms`. -When migrating your project to platforms, you must either convert them to -`constraint_values` or use [platform mappings](#platform-mappings) to support -both styles during migration. - -### Transitions - -[Starlark transitions][Starlark transitions] change -flags down parts of your build graph. If your project uses a transition that -sets `--cpu`, `--crosstool_top`, or other legacy flags, rules that read -`--platforms` won't see these changes.
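-A transition that sets `--platforms` instead avoids this problem. As a minimal
-sketch of that converted form (the `//:my_arm_platform` label and function
-names are illustrative, and the conversion itself is described next):
-
-```python
-# transitions.bzl - hypothetical sketch of a platform-aware transition.
-def _to_arm_impl(settings, attr):
-    # Set the target platform rather than legacy flags like --cpu.
-    return {"//command_line_option:platforms": "//:my_arm_platform"}
-
-to_arm_transition = transition(
-    implementation = _to_arm_impl,
-    inputs = [],
-    outputs = ["//command_line_option:platforms"],
-)
-```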
- -When migrating your project to platforms, you must either convert changes like -`return { "//command_line_option:cpu": "arm" }` to `return { -"//command_line_option:platforms": "//:my_arm_platform" }` or use [platform -mappings](#platform-mappings) to support both styles during migration. - -## Migrating your rule set - -If you own a rule set and want to support platforms, you need to: - -1. Have rule logic resolve toolchains with the toolchain API. See - [toolchain API][Toolchains] (`ctx.toolchains`). - -1. Optional: define an `--incompatible_enable_platforms_for_my_language` flag so - rule logic alternately resolves toolchains through the new API or old flags - like `--crosstool_top` during migration testing. - -1. Define the relevant properties that make up platform components. See - [Common platform properties](#common-platform-properties). - -1. Define standard toolchains and make them accessible to users through your - rule's registration instructions ([details](https://bazel.build/extending/toolchains#registering-building-toolchains)). - -1. Ensure [`select()`s](#select) and - [configuration transitions](#transitions) support platforms. This is the - biggest challenge. It's particularly challenging for multi-language projects - (which may fail unless *all* languages can read `--platforms`). - -If you need to mix with rules that don't support platforms, you may need -[platform mappings](#platform-mappings) to bridge the gap. - -### Common platform properties - -Common, cross-language platform properties like `OS` and `CPU` should be -declared in [`@platforms`](https://github.com/bazelbuild/platforms). -This encourages sharing, standardization, and cross-language compatibility. - -Properties unique to your rules should be declared in your rule's repo. This -lets you maintain clear ownership over the specific concepts your rules are -responsible for. - -If your rules use custom-purpose OSes or CPUs, these should be declared in your -rule's repo vs. -[`@platforms`](https://github.com/bazelbuild/platforms). - -## Platform mappings - -*Platform mappings* is a temporary API that lets platform-aware logic mix with -legacy logic in the same build. This is a blunt tool that's only intended to -smooth incompatibilities with different migration timeframes. - -Caution: Only use this if necessary, and expect to eventually eliminate it. - -A platform mapping maps either a `platform()` to a -corresponding set of legacy flags, or the reverse. For example: - -```python -platforms: - # Maps "--platforms=//platforms:ios" to "--cpu=ios_x86_64 --apple_platform_type=ios". - //platforms:ios - --cpu=ios_x86_64 - --apple_platform_type=ios - -flags: - # Maps "--cpu=ios_x86_64 --apple_platform_type=ios" to "--platforms=//platforms:ios". - --cpu=ios_x86_64 - --apple_platform_type=ios - //platforms:ios - - # Maps "--cpu=darwin_x86_64 --apple_platform_type=macos" to "--platforms=//platforms:macos". - --cpu=darwin_x86_64 - --apple_platform_type=macos - //platforms:macos -``` - -Bazel uses this to guarantee all settings, both platform-based and -legacy, are consistently applied throughout the build, including through -[transitions](#transitions). - -By default Bazel reads mappings from the `platform_mappings` file in your -workspace root. You can also set -`--platform_mappings=//:my_custom_mapping`. - -See the [platform mappings design] for details.
- -## API review - -A [`platform`][platform Rule] is a collection of -[`constraint_value` targets][constraint_value Rule]: - -```python -platform( - name = "myplatform", - constraint_values = [ - "@platforms//os:linux", - "@platforms//cpu:arm", - ], -) -``` - -A [`constraint_value`][constraint_value Rule] is a machine -property. Values of the same "kind" are grouped under a common -[`constraint_setting`][constraint_setting Rule]: - -```python -constraint_setting(name = "os") -constraint_value( - name = "linux", - constraint_setting = ":os", -) -constraint_value( - name = "mac", - constraint_setting = ":os", -) -``` - -A [`toolchain`][Toolchains] is a [Starlark rule][Starlark rule]. Its -attributes declare a language's tools (like `compiler = -"//mytoolchain:custom_gcc"`). Its [providers][Starlark Provider] pass -this information to rules that need to build with these tools. - -Toolchains declare the `constraint_value`s of machines they can -[target][target_compatible_with Attribute] -(`target_compatible_with = ["@platforms//os:linux"]`) and machines their tools can -[run on][exec_compatible_with Attribute] -(`exec_compatible_with = ["@platforms//os:mac"]`). - -When building `$ bazel build //:myproject --platforms=//:myplatform`, Bazel -automatically selects a toolchain that can run on the build machine and -build binaries for `//:myplatform`. This is known as *toolchain resolution*. - -The set of available toolchains can be registered in the `MODULE.bazel` file -with [`register_toolchains`][register_toolchains Function] or at the -command line with [`--extra_toolchains`][extra_toolchains Flag]. - -For more information see [here][Toolchains]. - -## Questions - -For general support and questions about the migration timeline, contact -[bazel-discuss] or the owners of the appropriate rules. - -For discussions on the design and evolution of the platform/toolchain APIs, -contact [bazel-dev]. 
- -## See also - -* [Configurable Builds - Part 1] -* [Platforms] -* [Toolchains] -* [Bazel Platforms Cookbook] -* [Platforms examples] -* [Example C++ toolchain] - -[Android Rules]: /docs/bazel-and-android -[Apple Rules]: https://github.com/bazelbuild/rules_apple -[Background]: #background -[Bazel platforms Cookbook]: https://docs.google.com/document/d/1UZaVcL08wePB41ATZHcxQV4Pu1YfA1RvvWm8FbZHuW8/ -[bazel-dev]: https://groups.google.com/forum/#!forum/bazel-dev -[bazel-discuss]: https://groups.google.com/forum/#!forum/bazel-discuss -[Common Platform Declarations]: https://github.com/bazelbuild/platforms -[constraint_setting Rule]: /reference/be/platforms-and-toolchains#constraint_setting -[constraint_value Rule]: /reference/be/platforms-and-toolchains#constraint_value -[Configurable Builds - Part 1]: https://blog.bazel.build/2019/02/11/configurable-builds-part-1.html -[Configuring C++ toolchains]: /tutorials/cpp-toolchain-config -[Defining Constraints and Platforms]: /extending/platforms#constraints-platforms -[Example C++ toolchain]: https://github.com/gregestren/snippets/tree/master/custom_cc_toolchain_with_platforms -[exec_compatible_with Attribute]: /reference/be/platforms-and-toolchains#toolchain.exec_compatible_with -[extra_toolchains Flag]: /reference/command-line-reference#flag--extra_toolchains -[Go Rules]: https://github.com/bazelbuild/rules_go -[Inspiration]: https://blog.bazel.build/2019/02/11/configurable-builds-part-1.html -[Migrating your rule set]: #migrating-your-rule-set -[Platforms]: /extending/platforms -[Platforms examples]: https://github.com/hlopko/bazel_platforms_examples -[platform mappings design]: https://docs.google.com/document/d/1Vg_tPgiZbSrvXcJ403vZVAGlsWhH9BUDrAxMOYnO0Ls/edit -[platform Rule]: /reference/be/platforms-and-toolchains#platform -[register_toolchains Function]: /rules/lib/globals/module#register_toolchains -[Rust rules]: https://github.com/bazelbuild/rules_rust -[select()]: /docs/configurable-attributes -[select() Platforms]: /docs/configurable-attributes#platforms -[Starlark provider]: /extending/rules#providers -[Starlark rule]: /extending/rules -[Starlark transitions]: /extending/config#user-defined-transitions -[target_compatible_with Attribute]: /reference/be/platforms-and-toolchains#toolchain.target_compatible_with -[Toolchains]: /extending/toolchains diff --git a/7.6.1/concepts/visibility.mdx b/7.6.1/concepts/visibility.mdx deleted file mode 100644 index 90ba4d2..0000000 --- a/7.6.1/concepts/visibility.mdx +++ /dev/null @@ -1,462 +0,0 @@ ---- -title: 'Visibility' ---- - - - -This page covers Bazel's two visibility systems: -[target visibility](#target-visibility) and [load visibility](#load-visibility). - -Both types of visibility help other developers distinguish between your -library's public API and its implementation details, and help enforce structure -as your workspace grows. You can also use visibility when deprecating a public -API to allow current users while denying new ones. - -## Target visibility - -**Target visibility** controls who may depend on your target — that is, who may -use your target's label inside an attribute such as `deps`. - -A target `A` is visible to a target `B` if they are in the same package, or if -`A` grants visibility to `B`'s package. Thus, packages are the unit of -granularity for deciding whether or not to allow access. If `B` depends on `A` -but `A` is not visible to `B`, then any attempt to build `B` fails during -[analysis](/reference/glossary#analysis-phase).
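-As a minimal sketch of this rule (package and target names are illustrative;
-the `__pkg__` syntax is explained below):
-
-```python
-# a/BUILD
-cc_library(
-    name = "A",
-    srcs = ["a.cc"],
-    visibility = ["//b:__pkg__"],  # grants package //b the right to depend on :A
-)
-```
-
-```python
-# b/BUILD
-cc_library(
-    name = "B",
-    srcs = ["b.cc"],
-    deps = ["//a:A"],  # allowed, because //a:A is visible to //b
-)
-```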
- -Note that granting visibility to a package does not by itself grant visibility -to its subpackages. For more details on packages and subpackages, see -[Concepts and terminology](/concepts/build-ref). - -For prototyping, you can disable target visibility enforcement by setting the -flag `--check_visibility=false`. This should not be done for production usage in -submitted code. - -The primary way to control visibility is with the -[`visibility`](/reference/be/common-definitions#common.visibility) attribute on -rule targets. This section describes the format of this attribute, and how to -determine a target's visibility. - -### Visibility specifications - -All rule targets have a `visibility` attribute that takes a list of labels. Each -label has one of the following forms. With the exception of the last form, these -are just syntactic placeholders that do not correspond to any actual target. - -* `"//visibility:public"`: Grants access to all packages. (May not be combined - with any other specification.) - -* `"//visibility:private"`: Does not grant any additional access; only targets - in this package can use this target. (May not be combined with any other - specification.) - -* `"//foo/bar:__pkg__"`: Grants access to `//foo/bar` (but not its - subpackages). - -* `"//foo/bar:__subpackages__"`: Grants access to `//foo/bar` and all of its - direct and indirect subpackages. - -* `"//some_pkg:my_package_group"`: Grants access to all of the packages that - are part of the given [`package_group`](/reference/be/functions#package_group). - - * Package groups use a - [different syntax](/reference/be/functions#package_group.packages) for - specifying packages. Within a package group, the forms - `"//foo/bar:__pkg__"` and `"//foo/bar:__subpackages__"` are respectively - replaced by `"//foo/bar"` and `"//foo/bar/..."`. Likewise, - `"//visibility:public"` and `"//visibility:private"` are just `"public"` - and `"private"`. - -For example, if `//some/package:mytarget` has its `visibility` set to -`[":__subpackages__", "//tests:__pkg__"]`, then it could be used by any target -that is part of the `//some/package/...` source tree, as well as targets defined -in `//tests/BUILD`, but not by targets defined in `//tests/integration/BUILD`. - -**Best practice:** To make several targets visible to the same set -of packages, use a `package_group` instead of repeating the list in each -target's `visibility` attribute. This increases readability and prevents the -lists from getting out of sync. - -Note: The `visibility` attribute may not specify non-`package_group` targets. -Doing so triggers a "Label does not refer to a package group" or "Cycle in -dependency graph" error. - -### Rule target visibility - -A rule target's visibility is: - -1. The value of its `visibility` attribute, if set; or else - -2. The value of the -[`default_visibility`](/reference/be/functions#package.default_visibility) -argument of the [`package`](/reference/be/functions#package) statement in the -target's `BUILD` file, if such a declaration exists; or else - -3. `//visibility:private`. - -**Best practice:** Avoid setting `default_visibility` to public. It may be -convenient for prototyping or in small codebases, but the risk of inadvertently -creating public targets increases as the codebase grows. It's better to be -explicit about which targets are part of a package's public interface.
- -#### Example - -File `//frobber/bin/BUILD`: - -```python -# This target is visible to everyone -cc_binary( - name = "executable", - visibility = ["//visibility:public"], - deps = [":library"], -) - -# This target is visible only to targets declared in the same package -cc_library( - name = "library", - # No visibility -- defaults to private since no - # package(default_visibility = ...) was used. -) - -# This target is visible to targets in package //object and //noun -cc_library( - name = "subject", - visibility = [ - "//noun:__pkg__", - "//object:__pkg__", - ], -) - -# See package group "//frobber:friends" (below) for who can -# access this target. -cc_library( - name = "thingy", - visibility = ["//frobber:friends"], -) -``` - -File `//frobber/BUILD`: - -```python -# This is the package group declaration to which target -# //frobber/bin:thingy refers. -# -# Our friends are packages //frobber, //fribber and any -# subpackage of //fribber. -package_group( - name = "friends", - packages = [ - "//fribber/...", - "//frobber", - ], -) -``` - -### Generated file target visibility - -A generated file target has the same visibility as the rule target that -generates it. - -### Source file target visibility - -You can explicitly set the visibility of a source file target by calling -[`exports_files`](/reference/be/functions#exports_files). When no `visibility` -argument is passed to `exports_files`, it makes the visibility public. -`exports_files` may not be used to override the visibility of a generated file. - -For source file targets that do not appear in a call to `exports_files`, the -visibility depends on the value of the flag -[`--incompatible_no_implicit_file_export`](https://github.com/bazelbuild/bazel/issues/10225): - -* If the flag is set, the visibility is private. - -* Else, the legacy behavior applies: The visibility is the same as the - `BUILD` file's `default_visibility`, or private if a default visibility is - not specified. - -Avoid relying on the legacy behavior. Always write an `exports_files` -declaration whenever a source file target needs non-private visibility. - -**Best practice:** When possible, prefer to expose a rule target rather than a -source file. For example, instead of calling `exports_files` on a `.java` file, -wrap the file in a non-private `java_library` target. Generally, rule targets -should only directly reference source files that live in the same package. - -#### Example - -File `//frobber/data/BUILD`: - -```python -exports_files(["readme.txt"]) -``` - -File `//frobber/bin/BUILD`: - -```python -cc_binary( - name = "my-program", - data = ["//frobber/data:readme.txt"], -) -``` - -### Config setting visibility - -Historically, Bazel has not enforced visibility for -[`config_setting`](/reference/be/general#config_setting) targets that are -referenced in the keys of a [`select()`](/reference/be/functions#select). There -are two flags to remove this legacy behavior: - -* [`--incompatible_enforce_config_setting_visibility`](https://github.com/bazelbuild/bazel/issues/12932) - enables visibility checking for these targets. To assist with migration, it - also causes any `config_setting` that does not specify a `visibility` to be - considered public (regardless of package-level `default_visibility`). 
- -* [`--incompatible_config_setting_private_default_visibility`](https://github.com/bazelbuild/bazel/issues/12933) - causes `config_setting`s that do not specify a `visibility` to respect the - package's `default_visibility` and to fall back on private visibility, just - like any other rule target. It is a no-op if - `--incompatible_enforce_config_setting_visibility` is not set. - -Avoid relying on the legacy behavior. Any `config_setting` that is intended to -be used outside the current package should have an explicit `visibility`, if the -package does not already specify a suitable `default_visibility`. - -### Package group target visibility - -`package_group` targets do not have a `visibility` attribute. They are always -publicly visible. - -### Visibility of implicit dependencies - -Some rules have [implicit dependencies](/extending/rules#private_attributes_and_implicit_dependencies) — -dependencies that are not spelled out in a `BUILD` file but are inherent to -every instance of that rule. For example, a `cc_library` rule might create an -implicit dependency from each of its rule targets to an executable target -representing a C++ compiler. - -The visibility of such an implicit dependency is checked with respect to the -package containing the `.bzl` file in which the rule (or aspect) is defined. In -our example, the C++ compiler could be private so long as it lives in the same -package as the definition of the `cc_library` rule. As a fallback, if the -implicit dependency is not visible from the definition, it is checked with -respect to the `cc_library` target. - -You can change this behavior by disabling -[`--incompatible_visibility_private_attributes_at_definition`](https://github.com/bazelbuild/proposals/blob/master/designs/2019-10-15-tool-visibility.md). -When disabled, implicit dependencies are treated like any other dependency. -This means that the target being depended on (such as our C++ compiler) must be -visible to every instance of the rule. In practice this usually means the target -must have public visibility. - -If you want to restrict the usage of a rule to certain packages, use -[load visibility](#load-visibility) instead. - -## Load visibility - -**Load visibility** controls whether a `.bzl` file may be loaded from other -`BUILD` or `.bzl` files outside the current package. - -In the same way that target visibility protects source code that is encapsulated -by targets, load visibility protects build logic that is encapsulated by `.bzl` -files. For instance, a `BUILD` file author might wish to factor some repetitive -target definitions into a macro in a `.bzl` file. Without the protection of load -visibility, they might find their macro reused by other collaborators in the -same workspace, so that modifying the macro breaks other teams' builds. - -Note that a `.bzl` file may or may not have a corresponding source file target. -If it does, there is no guarantee that the load visibility and the target -visibility coincide. That is, the same `BUILD` file might be able to load the -`.bzl` file but not list it in the `srcs` of a [`filegroup`](/reference/be/general#filegroup), -or vice versa. This can sometimes cause problems for rules that wish to consume -`.bzl` files as source code, such as for documentation generation or testing. - -For prototyping, you may disable load visibility enforcement by setting -`--check_bzl_visibility=false`. As with `--check_visibility=false`, this should -not be done for submitted code. - -Load visibility is available as of Bazel 6.0.
- -### Declaring load visibility - -To set the load visibility of a `.bzl` file, call the -[`visibility()`](/rules/lib/globals/bzl#visibility) function from within the file. -The argument to `visibility()` is a list of package specifications, just like -the [`packages`](/reference/be/functions#package_group.packages) attribute of -`package_group`. However, `visibility()` does not accept negative package -specifications. - -The call to `visibility()` must only occur once per file, at the top level (not -inside a function), and ideally immediately following the `load()` statements. - -Unlike target visibility, the default load visibility is always public. Files -that do not call `visibility()` are always loadable from anywhere in the -workspace. It is a good idea to add `visibility("private")` to the top of any -new `.bzl` file that is not specifically intended for use outside the package. - -### Example - -```python -# //mylib/internal_defs.bzl - -# Available to subpackages and to mylib's tests. -visibility(["//mylib/...", "//tests/mylib/..."]) - -def helper(...): - ... -``` - -```python -# //mylib/rules.bzl - -load(":internal_defs.bzl", "helper") -# Set visibility explicitly, even though public is the default. -# Note the [] can be omitted when there's only one entry. -visibility("public") - -myrule = rule( - ... -) -``` - -```python -# //someclient/BUILD - -load("//mylib:rules.bzl", "myrule") # ok -load("//mylib:internal_defs.bzl", "helper") # error - -... -``` - -### Load visibility practices - -This section describes tips for managing load visibility declarations. - -#### Factoring visibilities - -When multiple `.bzl` files should have the same visibility, it can be helpful to -factor their package specifications into a common list. For example: - -```python -# //mylib/internal_defs.bzl - -visibility("private") - -clients = [ - "//foo", - "//bar/baz/...", - ... -] -``` - -```python -# //mylib/feature_A.bzl - -load(":internal_defs.bzl", "clients") -visibility(clients) - -... -``` - -```python -# //mylib/feature_B.bzl - -load(":internal_defs.bzl", "clients") -visibility(clients) - -... -``` - -This helps prevent accidental skew between the various `.bzl` files' -visibilities. It also is more readable when the `clients` list is large. - -#### Composing visibilities - -Sometimes a `.bzl` file might need to be visible to an allowlist that is -composed of multiple smaller allowlists. This is analogous to how a -`package_group` can incorporate other `package_group`s via its -[`includes`](/reference/be/functions#package_group.includes) attribute. - -Suppose you are deprecating a widely used macro. You want it to be visible only -to existing users and to the packages owned by your own team. You might write: - -```python -# //mylib/macros.bzl - -load(":internal_defs.bzl", "our_packages") -load("//some_big_client:defs.bzl", "their_remaining_uses") - -# List concatenation. Duplicates are fine. -visibility(our_packages + their_remaining_uses) -``` - -#### Deduplicating with package groups - -Unlike target visibility, you cannot define a load visibility in terms of a -`package_group`. If you want to reuse the same allowlist for both target -visibility and load visibility, it's best to move the list of package -specifications into a .bzl file, where both kinds of declarations may refer to -it. 
Building off the example in [Factoring visibilities](#factoring-visibilities) -above, you might write: - -```python -# //mylib/BUILD - -load(":internal_defs.bzl", "clients") - -package_group( - name = "my_pkg_grp", - packages = clients, -) -``` - -This only works if the list does not contain any negative package -specifications. - -#### Protecting individual symbols - -Any Starlark symbol whose name begins with an underscore cannot be loaded from -another file. This makes it easy to create private symbols, but does not allow -you to share these symbols with a limited set of trusted files. On the other -hand, load visibility gives you control over what other packages may see your -`.bzl` file, but does not allow you to prevent any non-underscored symbol from -being loaded. - -Luckily, you can combine these two features to get fine-grained control. - -```python -# //mylib/internal_defs.bzl - -# Can't be public, because internal_helper shouldn't be exposed to the world. -visibility("private") - -# Can't be underscore-prefixed, because this is -# needed by other .bzl files in mylib. -def internal_helper(...): - ... - -def public_util(...): - ... -``` - -```python -# //mylib/defs.bzl - -load(":internal_defs.bzl", "internal_helper", _public_util="public_util") -visibility("public") - -# internal_helper, as a loaded symbol, is available for use in this file but -# can't be imported by clients who load this file. -... - -# Re-export public_util from this file by assigning it to a global variable. -# We needed to import it under a different name ("_public_util") in order for -# this assignment to be legal. -public_util = _public_util -``` - -#### bzl-visibility Buildifier lint - -There is a [Buildifier lint](https://github.com/bazelbuild/buildtools/blob/master/WARNINGS.md#bzl-visibility) -that provides a warning if users load a file from a directory named `internal` -or `private`, when the user's file is not itself underneath the parent of that -directory. This lint predates the load visibility feature and is unnecessary in -workspaces where `.bzl` files declare visibilities. diff --git a/7.6.1/configure/attributes.mdx b/7.6.1/configure/attributes.mdx deleted file mode 100644 index 4fabae8..0000000 --- a/7.6.1/configure/attributes.mdx +++ /dev/null @@ -1,1097 +0,0 @@ ---- -title: 'Configurable Build Attributes' ---- - - - -**_Configurable attributes_**, commonly known as [`select()`]( /reference/be/functions#select), is a Bazel feature that lets users toggle the values -of build rule attributes at the command line. - -This can be used, for example, for a multiplatform library that automatically -chooses the appropriate implementation for the architecture, or for a -feature-configurable binary that can be customized at build time. - -## Example - -```python -# myapp/BUILD - -cc_binary( - name = "mybinary", - srcs = ["main.cc"], - deps = select({ - ":arm_build": [":arm_lib"], - ":x86_debug_build": [":x86_dev_lib"], - "//conditions:default": [":generic_lib"], - }), -) - -config_setting( - name = "arm_build", - values = {"cpu": "arm"}, -) - -config_setting( - name = "x86_debug_build", - values = { - "cpu": "x86", - "compilation_mode": "dbg", - }, -) -``` - -This declares a `cc_binary` that "chooses" its deps based on the flags at the -command line. Specifically, `deps` becomes:
Command | `deps =`
-------- | --------
`bazel build //myapp:mybinary --cpu=arm` | `[":arm_lib"]`
`bazel build //myapp:mybinary -c dbg --cpu=x86` | `[":x86_dev_lib"]`
`bazel build //myapp:mybinary --cpu=ppc` | `[":generic_lib"]`
`bazel build //myapp:mybinary -c dbg --cpu=ppc` | `[":generic_lib"]`
- -`select()` serves as a placeholder for a value that will be chosen based on -*configuration conditions*, which are labels referencing [`config_setting`](/reference/be/general#config_setting) -targets. By using `select()` in a configurable attribute, the attribute -effectively adopts different values when different conditions hold. - -Matches must be unambiguous: if multiple conditions match, then either -* They all resolve to the same value. For example, when running on Linux x86, this is unambiguous - `{"@platforms//os:linux": "Hello", "@platforms//cpu:x86_64": "Hello"}` because both branches resolve to "Hello". -* One's `values` is a strict superset of all others'. For example, `values = {"cpu": "x86", "compilation_mode": "dbg"}` - is an unambiguous specialization of `values = {"cpu": "x86"}`. - -The built-in condition [`//conditions:default`](#default-condition) automatically matches when -nothing else does. - -While this example uses `deps`, `select()` works just as well on `srcs`, -`resources`, `cmd`, and most other attributes. Only a small number of attributes -are *non-configurable*, and these are clearly annotated. For example, -`config_setting`'s own -[`values`](/reference/be/general#config_setting.values) attribute is non-configurable. - -## `select()` and dependencies - -Certain attributes change the build parameters for all transitive dependencies -under a target. For example, `genrule`'s `tools` changes `--cpu` to the CPU of -the machine running Bazel (which, thanks to cross-compilation, may be different -than the CPU the target is built for). This is known as a -[configuration transition](/reference/glossary#transition). - -Given - -```python -#myapp/BUILD - -config_setting( - name = "arm_cpu", - values = {"cpu": "arm"}, -) - -config_setting( - name = "x86_cpu", - values = {"cpu": "x86"}, -) - -genrule( - name = "my_genrule", - srcs = select({ - ":arm_cpu": ["g_arm.src"], - ":x86_cpu": ["g_x86.src"], - }), - tools = select({ - ":arm_cpu": [":tool1"], - ":x86_cpu": [":tool2"], - }), -) - -cc_binary( - name = "tool1", - srcs = select({ - ":arm_cpu": ["armtool.cc"], - ":x86_cpu": ["x86tool.cc"], - }), -) -``` - -running - -```sh -$ bazel build //myapp:my_genrule --cpu=arm -``` - -on an `x86` developer machine binds the build to `g_arm.src`, `tool1`, and -`x86tool.cc`. Both of the `select`s attached to `my_genrule` use `my_genrule`'s -build parameters, which include `--cpu=arm`. The `tools` attribute changes -`--cpu` to `x86` for `tool1` and its transitive dependencies. The `select` on -`tool1` uses `tool1`'s build parameters, which include `--cpu=x86`. - -## Configuration conditions - -Each key in a configurable attribute is a label reference to a -[`config_setting`](/reference/be/general#config_setting) or -[`constraint_value`](/reference/be/platforms-and-toolchains#constraint_value). - -`config_setting` is just a collection of -expected command line flag settings. By encapsulating these in a target, it's -easy to maintain "standard" conditions users can reference from multiple places. - -`constraint_value` provides support for [multi-platform behavior](#platforms). - -### Built-in flags - -Flags like `--cpu` are built into Bazel: the build tool natively understands -them for all builds in all projects.
These are specified with -[`config_setting`](/reference/be/general#config_setting)'s -[`values`](/reference/be/general#config_setting.values) attribute: - -```python -config_setting( - name = "meaningful_condition_name", - values = { - "flag1": "value1", - "flag2": "value2", - ... - }, -) -``` - -`flagN` is a flag name (without `--`, so `"cpu"` instead of `"--cpu"`). `valueN` -is the expected value for that flag. `:meaningful_condition_name` matches if -*every* entry in `values` matches. Order is irrelevant. - -`valueN` is parsed as if it was set on the command line. This means: - -* `values = { "compilation_mode": "opt" }` matches `bazel build -c opt` -* `values = { "force_pic": "true" }` matches `bazel build --force_pic=1` -* `values = { "force_pic": "0" }` matches `bazel build --noforce_pic` - -`config_setting` only supports flags that affect target behavior. For example, -[`--show_progress`](/docs/user-manual#show-progress) isn't allowed because -it only affects how Bazel reports progress to the user. Targets can't use that -flag to construct their results. The exact set of supported flags isn't -documented. In practice, most flags that "make sense" work. - -### Custom flags - -You can model your own project-specific flags with -[Starlark build settings][BuildSettings]. Unlike built-in flags, these are -defined as build targets, so Bazel references them with target labels. - -These are triggered with [`config_setting`](/reference/be/general#config_setting)'s -[`flag_values`](/reference/be/general#config_setting.flag_values) -attribute: - -```python -config_setting( - name = "meaningful_condition_name", - flag_values = { - "//myflags:flag1": "value1", - "//myflags:flag2": "value2", - ... - }, -) -``` - -Behavior is the same as for [built-in flags](#built-in-flags). See [here](https://github.com/bazelbuild/examples/tree/HEAD/configurations/select_on_build_setting) -for a working example. - -[`--define`](/reference/command-line-reference#flag--define) -is an alternative legacy syntax for custom flags (for example -`--define foo=bar`). This can be expressed either in the -[values](/reference/be/general#config_setting.values) attribute -(`values = {"define": "foo=bar"}`) or the -[define_values](/reference/be/general#config_setting.define_values) attribute -(`define_values = {"foo": "bar"}`). `--define` is only supported for backwards -compatibility. Prefer Starlark build settings whenever possible. - -`values`, `flag_values`, and `define_values` evaluate independently. The -`config_setting` matches if all values across all of them match. - -## The default condition - -The built-in condition `//conditions:default` matches when no other condition -matches. - -Because of the "exactly one match" rule, a configurable attribute with no match -and no default condition emits a `"no matching conditions"` error. This can -protect against silent failures from unexpected settings: - -```python -# myapp/BUILD - -config_setting( - name = "x86_cpu", - values = {"cpu": "x86"}, -) - -cc_library( - name = "x86_only_lib", - srcs = select({ - ":x86_cpu": ["lib.cc"], - }), -) -``` - -```sh -$ bazel build //myapp:x86_only_lib --cpu=arm -ERROR: Configurable attribute "srcs" doesn't match this configuration (would -a default condition help?). -Conditions checked: - //myapp:x86_cpu -``` - -For even clearer errors, you can set custom messages with `select()`'s -[`no_match_error`](#custom-error-messages) attribute. 
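-As a minimal sketch of that attribute in use (reusing `x86_only_lib` from the
-example above; the message text is illustrative):
-
-```python
-cc_library(
-    name = "x86_only_lib",
-    srcs = select(
-        {":x86_cpu": ["lib.cc"]},
-        # Shown instead of the generic "no matching conditions" error.
-        no_match_error = "x86_only_lib only supports x86; build with --cpu=x86.",
-    ),
-)
-```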
-
-## Platforms
-
-While the ability to specify multiple flags on the command line provides
-flexibility, it can also be burdensome to individually set each one every time
-you want to build a target. [Platforms](/extending/platforms)
-let you consolidate these into simple bundles.
-
-```python
-# myapp/BUILD
-
-sh_binary(
-    name = "my_rocks",
-    srcs = select({
-        ":basalt": ["pyroxene.sh"],
-        ":marble": ["calcite.sh"],
-        "//conditions:default": ["feldspar.sh"],
-    }),
-)
-
-config_setting(
-    name = "basalt",
-    constraint_values = [
-        ":black",
-        ":igneous",
-    ],
-)
-
-config_setting(
-    name = "marble",
-    constraint_values = [
-        ":white",
-        ":metamorphic",
-    ],
-)
-
-# constraint_setting acts as an enum type, and constraint_value as an enum value.
-constraint_setting(name = "color")
-constraint_value(name = "black", constraint_setting = "color")
-constraint_value(name = "white", constraint_setting = "color")
-constraint_setting(name = "texture")
-constraint_value(name = "smooth", constraint_setting = "texture")
-constraint_setting(name = "type")
-constraint_value(name = "igneous", constraint_setting = "type")
-constraint_value(name = "metamorphic", constraint_setting = "type")
-
-platform(
-    name = "basalt_platform",
-    constraint_values = [
-        ":black",
-        ":igneous",
-    ],
-)
-
-platform(
-    name = "marble_platform",
-    constraint_values = [
-        ":white",
-        ":smooth",
-        ":metamorphic",
-    ],
-)
-```
-
-The platform can be specified on the command line. It activates the
-`config_setting`s that contain a subset of the platform's `constraint_values`,
-allowing those `config_setting`s to match in `select()` expressions.
-
-For example, in order to set the `srcs` attribute of `my_rocks` to `calcite.sh`,
-you can simply run
-
-```sh
-bazel build //myapp:my_rocks --platforms=//myapp:marble_platform
-```
-
-Without platforms, this might look something like
-
-```sh
-bazel build //myapp:my_rocks --define color=white --define texture=smooth --define type=metamorphic
-```
-
-`select()` can also directly read `constraint_value`s:
-
-```python
-constraint_setting(name = "type")
-constraint_value(name = "igneous", constraint_setting = "type")
-constraint_value(name = "metamorphic", constraint_setting = "type")
-sh_binary(
-    name = "my_rocks",
-    srcs = select({
-        ":igneous": ["igneous.sh"],
-        ":metamorphic": ["metamorphic.sh"],
-    }),
-)
-```
-
-This saves the need for boilerplate `config_setting`s when you only need to
-check against single values.
-
-Platforms are still under development. See the
-[documentation](/concepts/platforms) for details.
-
-## Combining `select()`s
-
-`select` can appear multiple times in the same attribute:
-
-```python
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"] +
-        select({
-            ":armeabi_mode": ["armeabi_src.sh"],
-            ":x86_mode": ["x86_src.sh"],
-        }) +
-        select({
-            ":opt_mode": ["opt_extras.sh"],
-            ":dbg_mode": ["dbg_extras.sh"],
-        }),
-)
-```
-
-Note: Some restrictions apply to what can be combined in the `select`s' values:
-
- - Duplicate labels can appear in different paths of the same `select`.
- - Duplicate labels can *not* appear within the same path of a `select`.
- - Duplicate labels can *not* appear across multiple combined `select`s (no matter what path).
-
-`select` cannot appear inside another `select`. If you need to nest `select`s
-and your attribute takes other targets as values, use an intermediate target:
-
-```python
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"],
-    deps = select({
-        ":armeabi_mode": [":armeabi_lib"],
-        ...
- }), -) - -sh_library( - name = "armeabi_lib", - srcs = select({ - ":opt_mode": ["armeabi_with_opt.sh"], - ... - }), -) -``` - -If you need a `select` to match when multiple conditions match, consider [AND -chaining](#and-chaining). - -## OR chaining - -Consider the following: - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1": [":standard_lib"], - ":config2": [":standard_lib"], - ":config3": [":standard_lib"], - ":config4": [":special_lib"], - }), -) -``` - -Most conditions evaluate to the same dep. But this syntax is hard to read and -maintain. It would be nice to not have to repeat `[":standard_lib"]` multiple -times. - -One option is to predefine the value as a BUILD variable: - -```python -STANDARD_DEP = [":standard_lib"] - -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1": STANDARD_DEP, - ":config2": STANDARD_DEP, - ":config3": STANDARD_DEP, - ":config4": [":special_lib"], - }), -) -``` - -This makes it easier to manage the dependency. But it still causes unnecessary -duplication. - -For more direct support, use one of the following: - -### `selects.with_or` - -The -[with_or](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectswith_or) -macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md) -module supports `OR`ing conditions directly inside a `select`: - -```python -load("@bazel_skylib//lib:selects.bzl", "selects") -``` - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = selects.with_or({ - (":config1", ":config2", ":config3"): [":standard_lib"], - ":config4": [":special_lib"], - }), -) -``` - -### `selects.config_setting_group` - - -The -[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group) -macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md) -module supports `OR`ing multiple `config_setting`s: - -```python -load("@bazel_skylib//lib:selects.bzl", "selects") -``` - - -```python -config_setting( - name = "config1", - values = {"cpu": "arm"}, -) -config_setting( - name = "config2", - values = {"compilation_mode": "dbg"}, -) -selects.config_setting_group( - name = "config1_or_2", - match_any = [":config1", ":config2"], -) -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1_or_2": [":standard_lib"], - "//conditions:default": [":other_lib"], - }), -) -``` - -Unlike `selects.with_or`, different targets can share `:config1_or_2` across -different attributes. - -It's an error for multiple conditions to match unless one is an unambiguous -"specialization" of the others or they all resolve to the same value. See [here](#configurable-build-example) for details. 
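-
-For example, the `:config1_or_2` group defined above can also drive a different
-attribute in another target, as an illustrative sketch (target and file names
-are hypothetical):
-
-```python
-sh_binary(
-    name = "another_target",
-    # The same OR group drives srcs here and deps in my_target above.
-    srcs = select({
-        ":config1_or_2": ["standard_main.sh"],
-        "//conditions:default": ["other_main.sh"],
-    }),
-)
-```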
-
-## AND chaining
-
-If you need a `select` branch to match when multiple conditions match, use the
-[Skylib](https://github.com/bazelbuild/bazel-skylib) macro
-[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group):
-
-```python
-load("@bazel_skylib//lib:selects.bzl", "selects")
-
-config_setting(
-    name = "config1",
-    values = {"cpu": "arm"},
-)
-config_setting(
-    name = "config2",
-    values = {"compilation_mode": "dbg"},
-)
-selects.config_setting_group(
-    name = "config1_and_2",
-    match_all = [":config1", ":config2"],
-)
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"],
-    deps = select({
-        ":config1_and_2": [":standard_lib"],
-        "//conditions:default": [":other_lib"],
-    }),
-)
-```
-
-Unlike OR chaining, existing `config_setting`s can't be directly `AND`ed
-inside a `select`. You have to explicitly wrap them in a `config_setting_group`.
-
-## Custom error messages
-
-By default, when no condition matches, the target the `select()` is attached to
-fails with the error:
-
-```sh
-ERROR: Configurable attribute "deps" doesn't match this configuration (would
-a default condition help?).
-Conditions checked:
- //tools/cc_target_os:darwin
- //tools/cc_target_os:android
-```
-
-This can be customized with the [`no_match_error`](/reference/be/functions#select)
-attribute:
-
-```python
-cc_library(
-    name = "my_lib",
-    deps = select(
-        {
-            "//tools/cc_target_os:android": [":android_deps"],
-            "//tools/cc_target_os:windows": [":windows_deps"],
-        },
-        no_match_error = "Please build with an Android or Windows toolchain",
-    ),
-)
-```
-
-```sh
-$ bazel build //myapp:my_lib
-ERROR: Configurable attribute "deps" doesn't match this configuration: Please
-build with an Android or Windows toolchain
-```
-
-## Rules compatibility
-
-Rule implementations receive the *resolved values* of configurable
-attributes. For example, given:
-
-```python
-# myapp/BUILD
-
-some_rule(
-    name = "my_target",
-    some_attr = select({
-        ":foo_mode": [":foo"],
-        ":bar_mode": [":bar"],
-    }),
-)
-```
-
-```sh
-$ bazel build //myapp:my_target --define mode=foo
-```
-
-Rule implementation code sees `ctx.attr.some_attr` as `[":foo"]`.
-
-Macros can accept `select()` clauses and pass them through to native
-rules. But *they cannot directly manipulate them*. For example, there's no way
-for a macro to convert
-
-```python
-select({"foo": "val"}, ...)
-```
-
-to
-
-```python
-select({"foo": "val_with_suffix"}, ...)
-```
-
-This is for two reasons.
-
-First, macros that need to know which path a `select` will choose *cannot work*
-because macros are evaluated in Bazel's [loading phase](/run/build#loading),
-which occurs before flag values are known.
-This is a core Bazel design restriction that's unlikely to change any time soon.
-
-Second, macros that just need to iterate over *all* `select` paths, while
-technically feasible, lack a coherent UI. Further design is necessary to change
-this.
-
-## Bazel query and cquery
-
-Bazel [`query`](/query/guide) operates over Bazel's
-[loading phase](/reference/glossary#loading-phase).
-This means it doesn't know what command line flags a target uses since those
-flags aren't evaluated until later in the build (in the
-[analysis phase](/reference/glossary#analysis-phase)).
-So it can't determine which `select()` branches are chosen.
-
-Bazel [`cquery`](/query/cquery) operates after Bazel's analysis phase, so it has
-all this information and can accurately resolve `select()`s.
- -Consider: - -```python -load("@bazel_skylib//rules:common_settings.bzl", "string_flag") -``` -```python -# myapp/BUILD - -string_flag( - name = "dog_type", - build_setting_default = "cat" -) - -cc_library( - name = "my_lib", - deps = select({ - ":long": [":foo_dep"], - ":short": [":bar_dep"], - }), -) - -config_setting( - name = "long", - flag_values = {":dog_type": "dachshund"}, -) - -config_setting( - name = "short", - flag_values = {":dog_type": "pug"}, -) -``` - -`query` overapproximates `:my_lib`'s dependencies: - -```sh -$ bazel query 'deps(//myapp:my_lib)' -//myapp:my_lib -//myapp:foo_dep -//myapp:bar_dep -``` - -while `cquery` shows its exact dependencies: - -```sh -$ bazel cquery 'deps(//myapp:my_lib)' --//myapp:dog_type=pug -//myapp:my_lib -//myapp:bar_dep -``` - -## FAQ - -### Why doesn't select() work in macros? - -select() *does* work in rules! See [Rules compatibility](#rules-compatibility) for -details. - -The key issue this question usually means is that select() doesn't work in -*macros*. These are different than *rules*. See the -documentation on [rules](/extending/rules) and [macros](/extending/macros) -to understand the difference. -Here's an end-to-end example: - -Define a rule and macro: - -```python -# myapp/defs.bzl - -# Rule implementation: when an attribute is read, all select()s have already -# been resolved. So it looks like a plain old attribute just like any other. -def _impl(ctx): - name = ctx.attr.name - allcaps = ctx.attr.my_config_string.upper() # This works fine on all values. - print("My name is " + name + " with custom message: " + allcaps) - -# Rule declaration: -my_custom_bazel_rule = rule( - implementation = _impl, - attrs = {"my_config_string": attr.string()}, -) - -# Macro declaration: -def my_custom_bazel_macro(name, my_config_string): - allcaps = my_config_string.upper() # This line won't work with select(s). - print("My name is " + name + " with custom message: " + allcaps) -``` - -Instantiate the rule and macro: - -```python -# myapp/BUILD - -load("//myapp:defs.bzl", "my_custom_bazel_rule") -load("//myapp:defs.bzl", "my_custom_bazel_macro") - -my_custom_bazel_rule( - name = "happy_rule", - my_config_string = select({ - "//tools/target_cpu:x86": "first string", - "//third_party/bazel_platforms/cpu:ppc": "second string", - }), -) - -my_custom_bazel_macro( - name = "happy_macro", - my_config_string = "fixed string", -) - -my_custom_bazel_macro( - name = "sad_macro", - my_config_string = select({ - "//tools/target_cpu:x86": "first string", - "//third_party/bazel_platforms/cpu:ppc": "other string", - }), -) -``` - -Building fails because `sad_macro` can't process the `select()`: - -```sh -$ bazel build //myapp:all -ERROR: /myworkspace/myapp/BUILD:17:1: Traceback - (most recent call last): -File "/myworkspace/myapp/BUILD", line 17 -my_custom_bazel_macro(name = "sad_macro", my_config_stri..."})) -File "/myworkspace/myapp/defs.bzl", line 4, in - my_custom_bazel_macro -my_config_string.upper() -type 'select' has no method upper(). -ERROR: error loading package 'myapp': Package 'myapp' contains errors. -``` - -Building succeeds when you comment out `sad_macro`: - -```sh -# Comment out sad_macro so it doesn't mess up the build. -$ bazel build //myapp:all -DEBUG: /myworkspace/myapp/defs.bzl:5:3: My name is happy_macro with custom message: FIXED STRING. -DEBUG: /myworkspace/myapp/hi.bzl:15:3: My name is happy_rule with custom message: FIRST STRING. 
-```
-
-This is impossible to change because *by definition* macros are evaluated before
-Bazel reads the build's command line flags. That means there isn't enough
-information to evaluate select()s.
-
-Macros can, however, pass `select()`s as opaque blobs to rules:
-
-```python
-# myapp/defs.bzl
-
-def my_custom_bazel_macro(name, my_config_string):
-    print("Invoking macro " + name)
-    my_custom_bazel_rule(
-        name = name + "_as_target",
-        my_config_string = my_config_string,
-    )
-```
-
-```sh
-$ bazel build //myapp:sad_macro_less_sad
-DEBUG: /myworkspace/myapp/defs.bzl:23:3: Invoking macro sad_macro_less_sad.
-DEBUG: /myworkspace/myapp/defs.bzl:15:3: My name is sad_macro_less_sad with custom message: FIRST STRING.
-```
-
-### Why does select() always return true?
-
-Because *macros* (but not rules) by definition
-[can't evaluate `select()`s](#faq-select-macro), any attempt to do so
-usually produces an error:
-
-```sh
-ERROR: /myworkspace/myapp/BUILD:17:1: Traceback
- (most recent call last):
-File "/myworkspace/myapp/BUILD", line 17
-my_custom_bazel_macro(name = "sad_macro", my_config_stri..."}))
-File "/myworkspace/myapp/defs.bzl", line 4, in
- my_custom_bazel_macro
-my_config_string.upper()
-type 'select' has no method upper().
-```
-
-Booleans are a special case that fail silently, so you should be particularly
-vigilant with them:
-
-```sh
-$ cat myapp/defs.bzl
-def my_boolean_macro(boolval):
-    print("TRUE" if boolval else "FALSE")
-
-$ cat myapp/BUILD
-load("//myapp:defs.bzl", "my_boolean_macro")
-my_boolean_macro(
-    boolval = select({
-        "//tools/target_cpu:x86": True,
-        "//third_party/bazel_platforms/cpu:ppc": False,
-    }),
-)
-
-$ bazel build //myapp:all --cpu=x86
-DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE.
-$ bazel build //myapp:all --cpu=ppc
-DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE.
-```
-
-This happens because macros don't understand the contents of `select()`.
-So what they're really evaluating is the `select()` object itself. Following
-[Python's truth-value semantics](https://docs.python.org/release/2.5.2/lib/truth.html),
-which Starlark shares, all objects aside from a very small number of exceptions
-are automatically true.
-
-### Can I read select() like a dict?
-
-Macros [can't](#faq-select-macro) evaluate `select()`s because macros evaluate before
-Bazel knows what the build's command line parameters are. Can they at least read
-the `select()`'s dictionary to, for example, add a suffix to each value?
-
-Conceptually this is possible, but it isn't yet a Bazel feature.
-What you *can* do today is prepare a straight dictionary, then feed it into a
-`select()`:
-
-```sh
-$ cat myapp/defs.bzl
-def selecty_genrule(name, select_cmd):
-    for key in select_cmd.keys():
-        select_cmd[key] += " WITH SUFFIX"
-    native.genrule(
-        name = name,
-        outs = [name + ".out"],
-        srcs = [],
-        cmd = "echo " + select(select_cmd | {"//conditions:default": "default"})
-              + " > $@"
-    )
-
-$ cat myapp/BUILD
-selecty_genrule(
-    name = "selecty",
-    select_cmd = {
-        "//tools/target_cpu:x86": "x86 mode",
-    },
-)
-
-$ bazel build //myapp:selecty --cpu=x86 && cat bazel-genfiles/myapp/selecty.out
-x86 mode WITH SUFFIX
-```
-
-If you'd like to support both `select()` and native types, you can do this:
-
-```sh
-$ cat myapp/defs.bzl
-def selecty_genrule(name, select_cmd):
-    cmd_suffix = ""
-    if type(select_cmd) == "string":
-        cmd_suffix = select_cmd + " WITH SUFFIX"
-    elif type(select_cmd) == "dict":
-        for key in select_cmd.keys():
-            select_cmd[key] += " WITH SUFFIX"
-        cmd_suffix = select(select_cmd | {"//conditions:default": "default"})
-
-    native.genrule(
-        name = name,
-        outs = [name + ".out"],
-        srcs = [],
-        cmd = "echo " + cmd_suffix + " > $@",
-    )
-```
-
-### Why doesn't select() work with bind()?
-
-First of all, do not use `bind()`. It is deprecated in favor of `alias()`.
-
-The technical answer is that [`bind()`](/reference/be/workspace#bind) is a repo
-rule, not a BUILD rule.
-
-Repo rules do not have a specific configuration, and aren't evaluated in
-the same way as BUILD rules. Therefore, a `select()` in a `bind()` can't
-actually evaluate to any specific branch.
-
-Instead, you should use [`alias()`](/reference/be/general#alias), with a `select()` in
-the `actual` attribute, to perform this type of run-time determination. This
-works correctly, since `alias()` is a BUILD rule, and is evaluated with a
-specific configuration.
-
-```sh
-$ cat WORKSPACE
-workspace(name = "myapp")
-bind(name = "openssl", actual = "//:ssl")
-http_archive(name = "alternative", ...)
-http_archive(name = "boringssl", ...)
-
-$ cat BUILD
-config_setting(
-    name = "alt_ssl",
-    define_values = {
-        "ssl_library": "alternative",
-    },
-)
-
-alias(
-    name = "ssl",
-    actual = select({
-        "//:alt_ssl": "@alternative//:ssl",
-        "//conditions:default": "@boringssl//:ssl",
-    }),
-)
-```
-
-With this setup, you can pass `--define ssl_library=alternative`, and any target
-that depends on either `//:ssl` or `//external:ssl` will see the alternative
-located at `@alternative//:ssl`.
-
-But really, stop using `bind()`.
-
-### Why doesn't my select() choose what I expect?
-
-If `//myapp:foo` has a `select()` that doesn't choose the condition you expect,
-use [cquery](/query/cquery) and `bazel config` to debug:
-
-If `//myapp:foo` is the top-level target you're building, run:
-
-```sh
-$ bazel cquery //myapp:foo
-//myapp:foo (12e23b9a2b534a)
-```
-
-If you're building some other target `//bar` that depends on
-`//myapp:foo` somewhere in its subgraph, run:
-
-```sh
-$ bazel cquery 'somepath(//bar, //myapp:foo)'
-//bar:bar (3ag3193fee94a2)
-//bar:intermediate_dep (12e23b9a2b534a)
-//myapp:foo (12e23b9a2b534a)
-```
-
-The `(12e23b9a2b534a)` next to `//myapp:foo` is a *hash* of the
-configuration that resolves `//myapp:foo`'s `select()`. You can inspect its
-values with `bazel config`:
-
-```sh
-$ bazel config 12e23b9a2b534a
-BuildConfigurationValue 12e23b9a2b534a
-Fragment com.google.devtools.build.lib.analysis.config.CoreOptions {
-  cpu: darwin
-  compilation_mode: fastbuild
-  ...
-}
-Fragment com.google.devtools.build.lib.rules.cpp.CppOptions {
-  linkopt: [-Dfoo=bar]
-  ...
-}
-...
-```
-
-Then compare this output against the settings expected by each `config_setting`.
-
-`//myapp:foo` may exist in different configurations in the same build. See the
-[cquery docs](/query/cquery) for guidance on using `somepath` to get the right
-one.
-
-Caution: To prevent restarting the Bazel server, invoke `bazel config` with the
-same command line flags as the `bazel cquery`. The `config` command relies on
-the configuration nodes from the still-running server of the previous command.
-
-### Why doesn't `select()` work with platforms?
-
-Bazel doesn't support configurable attributes checking whether a given platform
-is the target platform because the semantics are unclear.
-
-For example:
-
-```py
-platform(
-    name = "x86_linux_platform",
-    constraint_values = [
-        "@platforms//cpu:x86",
-        "@platforms//os:linux",
-    ],
-)
-
-cc_library(
-    name = "lib",
-    srcs = [...],
-    linkopts = select({
-        ":x86_linux_platform": ["--enable_x86_optimizations"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-In this `BUILD` file, which `select()` should be used if the target platform has both the
-`@platforms//cpu:x86` and `@platforms//os:linux` constraints, but is **not** the
-`:x86_linux_platform` defined here? The author of the `BUILD` file and the user
-who defined the separate platform may have different ideas.
-
-#### What should I do instead?
-
-Instead, define a `config_setting` that matches **any** platform with
-these constraints:
-
-```py
-config_setting(
-    name = "is_x86_linux",
-    constraint_values = [
-        "@platforms//cpu:x86",
-        "@platforms//os:linux",
-    ],
-)
-
-cc_library(
-    name = "lib",
-    srcs = [...],
-    linkopts = select({
-        ":is_x86_linux": ["--enable_x86_optimizations"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-This process defines specific semantics, making it clearer to users what
-platforms meet the desired conditions.
-
-#### What if I really, really want to `select` on the platform?
-
-If your build requirements specifically require checking the platform, you
-can match the value of the `--platforms` flag in a `config_setting`:
-
-```py
-config_setting(
-    name = "is_specific_x86_linux_platform",
-    values = {
-        "platforms": "//package:x86_linux_platform",
-    },
-)
-
-cc_library(
-    name = "lib",
-    srcs = [...],
-    linkopts = select({
-        ":is_specific_x86_linux_platform": ["--enable_x86_optimizations"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-The Bazel team doesn't endorse doing this; it overly constrains your build and
-confuses users when the expected condition does not match.
-
-[BuildSettings]: /extending/config#user-defined-build-settings
diff --git a/7.6.1/configure/best-practices.mdx b/7.6.1/configure/best-practices.mdx
deleted file mode 100644
index 03fdd96..0000000
--- a/7.6.1/configure/best-practices.mdx
+++ /dev/null
@@ -1,88 +0,0 @@
----
-title: 'Best Practices'
----
-
-
-
-This page assumes you are familiar with Bazel and provides guidelines and
-advice on structuring your projects to take full advantage of Bazel's features.
-
-The overall goals are:
-
-- To use fine-grained dependencies to allow parallelism and incrementality.
-- To keep dependencies well-encapsulated.
-- To make code well-structured and testable.
-- To create a build configuration that is easy to understand and maintain.
-
-These guidelines are not requirements: few projects will be able to adhere to
-all of them.
As the man page for lint says, "A special reward will be presented
-to the first person to produce a real program that produces no errors with
-strict checking." However, incorporating as many of these principles as possible
-should make a project more readable, less error-prone, and faster to build.
-
-This page uses the requirement levels described in
-[this RFC](https://www.ietf.org/rfc/rfc2119.txt).
-
-## Running builds and tests
-
-A project should always be able to run `bazel build //...` and
-`bazel test //...` successfully on its stable branch. Targets that are necessary
-but do not build under certain circumstances (such as targets that require
-specific build flags, don't build on a certain platform, or require license
-agreements) should be tagged as specifically as possible (for example,
-"`requires-osx`"). This tagging allows targets to be filtered at a more
-fine-grained level than the "manual" tag and allows someone inspecting the
-`BUILD` file to understand what a target's restrictions are.
-
-## Third-party dependencies
-
-You may declare third-party dependencies:
-
-* Either declare them as remote repositories in the `WORKSPACE` file.
-* Or put them in a directory called `third_party/` under your workspace directory.
-
-## Depending on binaries
-
-Everything should be built from source whenever possible. Generally this means
-that, instead of depending on a library `some-library.so`, you'd create a
-`BUILD` file and build `some-library.so` from its sources, then depend on that
-target.
-
-Always building from source ensures that a build is not using a library that
-was built with incompatible flags or a different architecture. There are also
-some features like coverage, static analysis, or dynamic analysis that only
-work on the source.
-
-## Versioning
-
-Prefer building all code from head whenever possible. When versions must be
-used, avoid including the version in the target name (for example, `//guava`,
-not `//guava-20.0`). This naming makes the library easier to update (only one
-target needs to be updated). It's also more resilient to diamond dependency
-issues: if one library depends on `guava-19.0` and one depends on `guava-20.0`,
-you could end up with a library that tries to depend on two different versions.
-If you created a misleading alias to point both targets to one `guava` library,
-then the `BUILD` files are misleading.
-
-## Using the `.bazelrc` file
-
-For project-specific options, use the configuration file
-`<workspace>/.bazelrc` (see [bazelrc format](/run/bazelrc)).
-
-If you want to support per-user options for your project that you **do not**
-want to check into source control, include the line:
-
-```
-try-import %workspace%/user.bazelrc
-```
-(or any other file name) in your `<workspace>/.bazelrc`
-and add `user.bazelrc` to your `.gitignore`.
-
-## Packages
-
-Every directory that contains buildable files should be a package. If a `BUILD`
-file refers to files in subdirectories (such as `srcs = ["a/b/C.java"]`) it's
-a sign that a `BUILD` file should be added to that subdirectory. The longer
-this structure exists, the more likely circular dependencies will be
-inadvertently created, a target's scope will creep, and an increasing number
-of reverse dependencies will have to be updated.
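-
-As an illustrative sketch of that refactoring (package and target names are
-hypothetical), instead of reaching into a subdirectory:
-
-```python
-# myproject/BUILD (BAD: reaches into a subdirectory)
-java_library(
-    name = "foo",
-    srcs = ["a/b/C.java"],
-)
-```
-
-give the subdirectory its own package and depend on it:
-
-```python
-# myproject/a/b/BUILD (GOOD: the subdirectory owns its sources)
-java_library(
-    name = "b",
-    srcs = ["C.java"],
-    visibility = ["//myproject:__pkg__"],
-)
-
-# myproject/BUILD then depends on the new target.
-java_library(
-    name = "foo",
-    deps = ["//myproject/a/b"],
-)
-```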
diff --git a/7.6.1/configure/coverage.mdx b/7.6.1/configure/coverage.mdx
deleted file mode 100644
index 3db3b8e..0000000
--- a/7.6.1/configure/coverage.mdx
+++ /dev/null
@@ -1,130 +0,0 @@
----
-title: 'Code coverage with Bazel'
----
-
-
-
-Bazel features a `coverage` sub-command to produce code coverage
-reports on repositories that can be tested with `bazel coverage`. Due
-to the idiosyncrasies of the various language ecosystems, it is not
-always trivial to make this work for a given project.
-
-This page documents the general process for creating and viewing
-coverage reports, and also features some language-specific notes for
-languages whose configuration is well-known. It is best read by first
-reading [the general section](#creating-a-coverage-report), and then
-reading about the requirements for a specific language. Note also the
-[remote execution section](#remote-execution), which requires some
-additional considerations.
-
-While a lot of customization is possible, this document focuses on
-producing and consuming [`lcov`][lcov] reports, which is currently the
-most well-supported route.
-
-## Creating a coverage report
-
-### Preparation
-
-The basic workflow for creating coverage reports requires the
-following:
-
-- A basic repository with test targets
-- A toolchain with the language-specific code coverage tools installed
-- A correct "instrumentation" configuration
-
-The first two are language-specific and mostly straightforward;
-however, the last can be more difficult for complex projects.
-
-"Instrumentation" in this case refers to the coverage tools that are
-used for a specific target. Bazel allows turning this on for a
-specific subset of files using the
-[`--instrumentation_filter`](/reference/command-line-reference#flag--instrumentation_filter)
-flag, which specifies a filter for targets that are tested with the
-instrumentation enabled. To enable instrumentation for tests, the
-[`--instrument_test_targets`](/reference/command-line-reference#flag--instrument_test_targets)
-flag is required.
-
-By default, Bazel tries to match the target package(s), and prints the
-relevant filter as an `INFO` message.
-
-### Running coverage
-
-To produce a coverage report, use [`bazel coverage
---combined_report=lcov
-[target]`](/reference/command-line-reference#coverage). This runs the
-tests for the target, generating coverage reports in the lcov format
-for each file.
-
-Once finished, Bazel runs an action that collects all the produced
-coverage files, and merges them into one, which is then finally
-created under `$(bazel info
-output_path)/_coverage/_coverage_report.dat`.
-
-Coverage reports are also produced if tests fail, though note that
-this does not extend to the failed tests - only passing tests are
-reported.
-
-### Viewing coverage
-
-The coverage report is only output in the non-human-readable `lcov`
-format. From this, we can use the `genhtml` utility (part of [the lcov
-project][lcov]) to produce a report that can be viewed in a web
-browser:
-
-```console
-genhtml --output genhtml "$(bazel info output_path)/_coverage/_coverage_report.dat"
-```
-
-Note that `genhtml` reads the source code as well, to annotate missing
-coverage in these files. For this to work, it is expected that
-`genhtml` is executed in the root of the Bazel project.
-
-To view the result, simply open the `index.html` file produced in the
-`genhtml` directory in any web browser.
-
-For further help and information around the `genhtml` tool, or the
-`lcov` coverage format, see [the lcov project][lcov].
-
-## Remote execution
-
-Running with remote test execution currently has a few caveats:
-
-- The report combination action cannot yet run remotely. This is
-  because Bazel does not consider the coverage output files as part of
-  its graph (see [this issue][remote_report_issue]), and can therefore
-  not correctly treat them as inputs to the combination action. To
-  work around this, use `--strategy=CoverageReport=local`.
-  - Note: It may be necessary to specify something like
-    `--strategy=CoverageReport=local,remote` instead, if Bazel is set
-    up to try `local,remote`, due to how Bazel resolves strategies.
-- `--remote_download_minimal` and similar flags cannot be used
-  either, as a consequence of the former.
-- Bazel will currently fail to create coverage information if tests
-  have been cached previously. To work around this,
-  `--nocache_test_results` can be set specifically for coverage runs,
-  although this of course incurs a heavy cost in terms of test times.
-- `--experimental_split_coverage_postprocessing` and
-  `--experimental_fetch_all_coverage_outputs`
-  - Usually coverage is run as part of the test action, so by
-    default we don't get all coverage back as outputs of the remote
-    execution. These flags override the default and obtain
-    the coverage data. See [this issue][split_coverage_issue] for more
-    details.
-
-## Language-specific configuration
-
-### Java
-
-Java should work out-of-the-box with the default configuration. The
-[bazel toolchains][bazel_toolchains] also contain everything necessary
-for remote execution, including JUnit.
-
-### Python
-
-See the [`rules_python` coverage docs](https://github.com/bazelbuild/rules_python/blob/main/docs/coverage.md)
-for additional steps needed to enable coverage support in Python.
-
-[lcov]: https://github.com/linux-test-project/lcov
-[bazel_toolchains]: https://github.com/bazelbuild/bazel-toolchains
-[remote_report_issue]: https://github.com/bazelbuild/bazel/issues/4685
-[split_coverage_issue]: https://github.com/bazelbuild/bazel/issues/4685
diff --git a/7.6.1/contribute/breaking-changes.mdx b/7.6.1/contribute/breaking-changes.mdx
deleted file mode 100644
index 5dda1b9..0000000
--- a/7.6.1/contribute/breaking-changes.mdx
+++ /dev/null
@@ -1,147 +0,0 @@
----
-title: 'Guide for rolling out breaking changes'
----
-
-
-
-It is inevitable that we will make breaking changes to Bazel. We will have to
-change our designs and fix the things that do not quite work. However, we need
-to make sure that the community and the Bazel ecosystem can follow along. To
-that end, the Bazel project has adopted a
-[backward compatibility policy](/release/backward-compatibility).
-This document describes the process for Bazel contributors to make a breaking
-change in Bazel to adhere to this policy.
-
-1. Follow the [design document policy](/contribute/design-documents).
-
-1. [File a GitHub issue.](#github-issue)
-
-1. [Implement the change.](#implementation)
-
-1. [Update labels.](#labels)
-
-1. [Update repositories.](#update-repos)
-
-1. [Flip the incompatible flag.](#flip-flag)
-
-## GitHub issue
-
-[File a GitHub issue](https://github.com/bazelbuild/bazel/issues)
-in the Bazel repository.
-[See example.](https://github.com/bazelbuild/bazel/issues/6611)
-
-We recommend that:
-
-* The title starts with the name of the flag (the flag name will start with
-  `incompatible_`).
-
-* You add the label
-  [`incompatible-change`](https://github.com/bazelbuild/bazel/labels/incompatible-change).
-
-* The description contains a description of the change and a link to relevant
-  design documents.
-
-* The description contains a migration recipe, to explain to users how they should
-  update their code. Ideally, when the change is mechanical, include a link to a
-  migration tool.
-
-* The description includes an example of the error message users will get if
-  they don't migrate. This will make the GitHub issue more discoverable from
-  search engines. Make sure that the error message is helpful and actionable.
-  When possible, the error message should include the name of the incompatible
-  flag.
-
-For the migration tool, consider contributing to
-[Buildifier](https://github.com/bazelbuild/buildtools/blob/master/buildifier/README.md).
-It is able to apply automated fixes to `BUILD`, `WORKSPACE`, and `.bzl` files.
-It may also report warnings.
-
-## Implementation
-
-Create a new flag in Bazel. The default value must be false. The help text
-should contain the URL of the GitHub issue. As the flag name starts with
-`incompatible_`, it needs metadata tags:
-
-```java
-      metadataTags = {
-        OptionMetadataTag.INCOMPATIBLE_CHANGE,
-      },
-```
-
-In the commit description, add a brief summary of the flag.
-Also add [`RELNOTES:`](release-notes.md) in the following form:
-`RELNOTES: --incompatible_name_of_flag has been added. See #xyz for details`
-
-The commit should also update the relevant documentation, so that there is no
-window of commits in which the code is inconsistent with the docs. Since our
-documentation is versioned, changes to the docs will not be inadvertently
-released prematurely.
-
-## Labels
-
-Once the commit is merged and the incompatible change is ready to be adopted, add the label
-[`migration-ready`](https://github.com/bazelbuild/bazel/labels/migration-ready)
-to the GitHub issue.
-
-If a problem is found with the flag and users are not expected to migrate yet,
-remove the `migration-ready` label.
-
-If you plan to flip the flag in the next major release, add the label
-`breaking-change-X.0` to the issue.
-
-## Updating repositories
-
-Bazel CI tests a list of important projects at
-[Bazel@HEAD + Downstream](https://buildkite.com/bazel/bazel-at-head-plus-downstream). Most of them are often
-dependencies of other Bazel projects; therefore, it's important to migrate them to unblock the migration for the broader community. To monitor the migration status of those projects, you can use the [`bazelisk-plus-incompatible-flags` pipeline](https://buildkite.com/bazel/bazelisk-plus-incompatible-flags).
-Check how this pipeline works [here](https://github.com/bazelbuild/continuous-integration/tree/master/buildkite#checking-incompatible-changes-status-for-downstream-projects).
-
-Our dev support team monitors the [`migration-ready`](https://github.com/bazelbuild/bazel/labels/migration-ready) label. Once you add this label to the GitHub issue, they will handle the following:
-
-1. Create a comment in the GitHub issue to track the list of failures and downstream projects that need to be migrated ([see example](https://github.com/bazelbuild/bazel/issues/17032#issuecomment-1353077469))
-
-1. File GitHub issues to notify the owners of every downstream project broken by your incompatible change ([see example](https://github.com/bazelbuild/intellij/issues/4208))
-
-1. 
Follow up to make sure all issues are addressed before the target release date - -Migrating projects in the downstream pipeline is NOT entirely the responsibility of the incompatible change author, but you can do the following to accelerate the migration and make life easier for both Bazel users and the Bazel Green Team. - -1. Send PRs to fix downstream projects. - -1. Reach out to the Bazel community for help on migration (e.g. [Bazel Rules Authors SIG](https://bazel-contrib.github.io/SIG-rules-authors/)). - -## Flipping the flag - -Before flipping the default value of the flag to true, please make sure that: - -* Core repositories in the ecosystem are migrated. - - On the [`bazelisk-plus-incompatible-flags` pipeline](https://buildkite.com/bazel/bazelisk-plus-incompatible-flags), - the flag should appear under `The following flags didn't break any passing Bazel team owned/co-owned projects`. - -* All issues in the checklist are marked as fixed/closed. - -* User concerns and questions have been resolved. - -When the flag is ready to flip in Bazel, but blocked on internal migration at Google, please consider setting the flag value to false in the internal `blazerc` file to unblock the flag flip. By doing this, we can ensure Bazel users depend on the new behaviour by default as early as possible. - -When changing the flag default to true, please: - -* Use `RELNOTES[INC]` in the commit description, with the - following format: - `RELNOTES[INC]: --incompatible_name_of_flag is flipped to true. See #xyz for - details` - You can include additional information in the rest of the commit description. -* Use `Fixes #xyz` in the description, so that the GitHub issue gets closed - when the commit is merged. -* Review and update documentation if needed. -* File a new issue `#abc` to track the removal of the flag. - -## Removing the flag - -After the flag is flipped at HEAD, it should be removed from Bazel eventually. -When you plan to remove the incompatible flag: - -* Consider leaving more time for users to migrate if it's a major incompatible change. - Ideally, the flag should be available in at least one major release. -* For the commit that removes the flag, use `Fixes #abc` in the description - so that the GitHub issue gets closed when the commit is merged. diff --git a/7.6.1/contribute/codebase.mdx b/7.6.1/contribute/codebase.mdx deleted file mode 100644 index d36a005..0000000 --- a/7.6.1/contribute/codebase.mdx +++ /dev/null @@ -1,1663 +0,0 @@ ---- -title: 'The Bazel codebase' ---- - - - -This document is a description of the codebase and how Bazel is structured. It -is intended for people willing to contribute to Bazel, not for end-users. - -## Introduction - -The codebase of Bazel is large (~350KLOC production code and ~260 KLOC test -code) and no one is familiar with the whole landscape: everyone knows their -particular valley very well, but few know what lies over the hills in every -direction. - -In order for people midway upon the journey not to find themselves within a -forest dark with the straightforward pathway being lost, this document tries to -give an overview of the codebase so that it's easier to get started with -working on it. - -The public version of the source code of Bazel lives on GitHub at -[github.com/bazelbuild/bazel](http://github.com/bazelbuild/bazel). This is not -the "source of truth"; it's derived from a Google-internal source tree that -contains additional functionality that is not useful outside Google. The -long-term goal is to make GitHub the source of truth. 
-
-Contributions are accepted through the regular GitHub pull request mechanism,
-and manually imported by a Googler into the internal source tree, then
-re-exported back out to GitHub.
-
-## Client/server architecture
-
-The bulk of Bazel resides in a server process that stays in RAM between builds.
-This allows Bazel to maintain state between builds.
-
-This is why the Bazel command line has two kinds of options: startup and
-command. In a command line like this:
-
-```
-    bazel --host_jvm_args=-Xmx8G build -c opt //foo:bar
-```
-
-Some options (`--host_jvm_args=`) are before the name of the command to be run
-and some are after (`-c opt`); the former kind is called a "startup option" and
-affects the server process as a whole, whereas the latter kind, the "command
-option", only affects a single command.
-
-Each server instance has a single associated workspace (collection of source
-trees known as "repositories") and each workspace usually has a single active
-server instance. This can be circumvented by specifying a custom output base
-(see the "Directory layout" section for more information).
-
-Bazel is distributed as a single ELF executable that is also a valid .zip file.
-When you type `bazel`, the above ELF executable implemented in C++ (the
-"client") gets control. It sets up an appropriate server process using the
-following steps:
-
-1. Checks whether it has already extracted itself. If not, it does that. This
-   is where the implementation of the server comes from.
-2. Checks whether there is an active server instance that works: it is running,
-   it has the right startup options and uses the right workspace directory. It
-   finds the running server by looking at the directory `$OUTPUT_BASE/server`
-   where there is a lock file with the port the server is listening on.
-3. If needed, kills the old server process.
-4. If needed, starts up a new server process.
-
-After a suitable server process is ready, the command that needs to be run is
-communicated to it over a gRPC interface, then the output of Bazel is piped back
-to the terminal. Only one command can be running at the same time. This is
-implemented using an elaborate locking mechanism with parts in C++ and parts in
-Java. There is some infrastructure for running multiple commands in parallel,
-since the inability to run `bazel version` in parallel with another command
-is somewhat embarrassing. The main blocker is the life cycle of `BlazeModule`s
-and some state in `BlazeRuntime`.
-
-At the end of a command, the Bazel server transmits the exit code the client
-should return. An interesting wrinkle is the implementation of `bazel run`: the
-job of this command is to run something Bazel just built, but it can't do that
-from the server process because it doesn't have a terminal. So instead it tells
-the client what binary it should `exec()` and with what arguments.
-
-When one presses Ctrl-C, the client translates it to a Cancel call on the gRPC
-connection, which tries to terminate the command as soon as possible. After the
-third Ctrl-C, the client sends a SIGKILL to the server instead.
-
-The source code of the client is under `src/main/cpp` and the protocol used to
-communicate with the server is in `src/main/protobuf/command_server.proto`.
-
-The main entry point of the server is `BlazeRuntime.main()` and the gRPC calls
-from the client are handled by `GrpcServerImpl.run()`.
-
-## Directory layout
-
-Bazel creates a somewhat complicated set of directories during a build.
A full
-description is available in [Output directory layout](/remote/output-directories).
-
-The "main repo" is the source tree Bazel is run in. It usually corresponds to
-something you checked out from source control. The root of this directory is
-known as the "workspace root".
-
-Bazel puts all of its data under the "output user root". This is usually
-`$HOME/.cache/bazel/_bazel_${USER}`, but can be overridden using the
-`--output_user_root` startup option.
-
-The "install base" is where Bazel is extracted to. This is done automatically
-and each Bazel version gets a subdirectory based on its checksum under the
-install base. It's at `$OUTPUT_USER_ROOT/install` by default and can be changed
-using the `--install_base` command line option.
-
-The "output base" is the place where the Bazel instance attached to a specific
-workspace writes to. Each output base has at most one Bazel server instance
-running at any time. It's usually at `$OUTPUT_USER_ROOT/`. It can be changed using the `--output_base` startup option,
-which is, among other things, useful for getting around the limitation that only
-one Bazel instance can be running in any workspace at any given time.
-
-The output directory contains, among other things:
-
-* The fetched external repositories at `$OUTPUT_BASE/external`.
-* The exec root, a directory that contains symlinks to all the source
-  code for the current build. It's located at `$OUTPUT_BASE/execroot`. During
-  the build, the working directory is `$EXECROOT/`. We are planning to change this to `$EXECROOT`, although it's a
-  long term plan because it's a very incompatible change.
-* Files built during the build.
-
-## The process of executing a command
-
-Once the Bazel server gets control and is informed about a command it needs to
-execute, the following sequence of events happens:
-
-1. `BlazeCommandDispatcher` is informed about the new request. It decides
-   whether the command needs a workspace to run in (almost every command except
-   for ones that don't have anything to do with source code, such as version or
-   help) and whether another command is running.
-
-2. The right command is found. Each command must implement the interface
-   `BlazeCommand` and must have the `@Command` annotation (this is a bit of an
-   antipattern; it would be nice if all the metadata a command needs were
-   described by methods on `BlazeCommand`).
-
-3. The command line options are parsed. Each command has different command line
-   options, which are described in the `@Command` annotation.
-
-4. An event bus is created. The event bus is a stream for events that happen
-   during the build. Some of these are exported to outside of Bazel under the
-   aegis of the Build Event Protocol in order to tell the world how the build
-   goes.
-
-5. The command gets control. The most interesting commands are those that run a
-   build: build, test, run, coverage, and so on; this functionality is
-   implemented by `BuildTool`.
-
-6. The set of target patterns on the command line is parsed and wildcards like
-   `//pkg:all` and `//pkg/...` are resolved. This is implemented in
-   `AnalysisPhaseRunner.evaluateTargetPatterns()` and reified in Skyframe as
-   `TargetPatternPhaseValue`.
-
-7. The loading/analysis phase is run to produce the action graph (a directed
-   acyclic graph of commands that need to be executed for the build).
-
-8. The execution phase is run. This means every action required to build the
-   top-level targets that are requested is run.
-
-## Command line options
-
-The command line options for a Bazel invocation are described in an
-`OptionsParsingResult` object, which in turn contains a map from "option
-classes" to the values of the options. An "option class" is a subclass of
-`OptionsBase` and groups together command line options that are related to each
-other. For example:
-
-1. Options related to a programming language (`CppOptions` or `JavaOptions`).
-   These should be a subclass of `FragmentOptions` and are eventually wrapped
-   into a `BuildOptions` object.
-2. Options related to the way Bazel executes actions (`ExecutionOptions`).
-
-These options are designed to be consumed in the analysis phase (either
-through `RuleContext.getFragment()` in Java or `ctx.fragments` in Starlark).
-Some of them (for example, whether to do C++ include scanning or not) are read
-in the execution phase, but that always requires explicit plumbing since
-`BuildConfiguration` is not available then. For more information, see the
-section "Configurations".
-
-**WARNING:** We like to pretend that `OptionsBase` instances are immutable and
-use them that way (such as a part of `SkyKeys`). This is not the case and
-modifying them is a really good way to break Bazel in subtle ways that are hard
-to debug. Unfortunately, making them actually immutable is a large endeavor.
-(Modifying a `FragmentOptions` immediately after construction before anyone else
-gets a chance to keep a reference to it and before `equals()` or `hashCode()` is
-called on it is okay.)
-
-Bazel learns about option classes in the following ways:
-
-1. Some are hard-wired into Bazel (`CommonCommandOptions`).
-2. From the `@Command` annotation on each Bazel command.
-3. From `ConfiguredRuleClassProvider` (these are command line options related
-   to individual programming languages).
-4. Starlark rules can also define their own options (see
-   [here](/extending/config)).
-
-Each option (excluding Starlark-defined options) is a member variable of a
-`FragmentOptions` subclass that has the `@Option` annotation, which specifies
-the name and the type of the command line option along with some help text.
-
-The Java type of the value of a command line option is usually something simple
-(a string, an integer, a Boolean, a label, etc.). However, we also support
-options of more complicated types; in this case, the job of converting from the
-command line string to the data type falls to an implementation of
-`com.google.devtools.common.options.Converter`.
-
-## The source tree, as seen by Bazel
-
-Bazel is in the business of building software, which happens by reading and
-interpreting the source code. The totality of the source code Bazel operates on
-is called "the workspace" and it is structured into repositories, packages and
-rules.
-
-### Repositories
-
-A "repository" is a source tree on which a developer works; it usually
-represents a single project. Bazel's ancestor, Blaze, operated on a monorepo,
-that is, a single source tree that contains all source code used to run the build.
-Bazel, in contrast, supports projects whose source code spans multiple
-repositories. The repository from which Bazel is invoked is called the "main
-repository", the others are called "external repositories".
-
-A repository is marked by a repo boundary file (`MODULE.bazel`, `REPO.bazel`, or
-in legacy contexts, `WORKSPACE` or `WORKSPACE.bazel`) in its root directory. The
-main repo is the source tree where you're invoking Bazel from.
External repos
-are defined in various ways; see [external dependencies
-overview](/external/overview) for more information.
-
-Code of external repositories is symlinked or downloaded under
-`$OUTPUT_BASE/external`.
-
-When running the build, the whole source tree needs to be pieced together; this
-is done by `SymlinkForest`, which symlinks every package in the main repository
-to `$EXECROOT` and every external repository to either `$EXECROOT/external` or
-`$EXECROOT/..`.
-
-### Packages
-
-Every repository is composed of packages, each a collection of related files and
-a specification of the dependencies. These are specified by a file called
-`BUILD` or `BUILD.bazel`. If both exist, Bazel prefers `BUILD.bazel`; the reason
-why `BUILD` files are still accepted is that Bazel's ancestor, Blaze, used this
-file name. However, `BUILD` turned out to be a commonly used path segment,
-especially on Windows, where file names are case-insensitive.
-
-Packages are independent of each other: changes to the `BUILD` file of a package
-cannot cause other packages to change. The addition or removal of `BUILD` files
-_can_ change other packages, since recursive globs stop at package boundaries
-and thus the presence of a `BUILD` file stops the recursion.
-
-The evaluation of a `BUILD` file is called "package loading". It's implemented
-in the class `PackageFactory`, works by calling the Starlark interpreter and
-requires knowledge of the set of available rule classes. The result of package
-loading is a `Package` object. It's mostly a map from a string (the name of a
-target) to the target itself.
-
-A large chunk of complexity during package loading is globbing: Bazel does not
-require every source file to be explicitly listed and instead can run globs
-(such as `glob(["**/*.java"])`). Unlike the shell, it supports recursive globs that
-descend into subdirectories (but not into subpackages). This requires access to
-the file system and since that can be slow, we implement all sorts of tricks to
-make it run in parallel and as efficiently as possible.
-
-Globbing is implemented in the following classes:
-
-* `LegacyGlobber`, a fast and blissfully Skyframe-unaware globber
-* `SkyframeHybridGlobber`, a version that uses Skyframe and falls back to
-  the legacy globber in order to avoid "Skyframe restarts" (described below)
-
-The `Package` class itself contains some members that are exclusively used to
-parse the "external" package (related to external dependencies) and which do not
-make sense for real packages. This is
-a design flaw because objects describing regular packages should not contain
-fields that describe something else. These include:
-
-* The repository mappings
-* The registered toolchains
-* The registered execution platforms
-
-Ideally, there would be more separation between parsing the "external" package
-and parsing regular packages so that `Package` does not need to cater for the
-needs of both. This is unfortunately difficult to do because the two are
-intertwined quite deeply.
-
-### Labels, Targets, and Rules
-
-Packages are composed of targets, which have the following types:
-
-1. **Files:** things that are either the input or the output of the build. In
-   Bazel parlance, we call them _artifacts_ (discussed elsewhere). Not all
-   files created during the build are targets; it's common for an output of
-   Bazel not to have an associated label.
-2. **Rules:** these describe steps to derive their outputs from their inputs.
They - are generally associated with a programming language (such as `cc_library`, - `java_library` or `py_library`), but there are some language-agnostic ones - (such as `genrule` or `filegroup`) -3. **Package groups:** discussed in the [Visibility](#visibility) section. - -The name of a target is called a _Label_. The syntax of labels is -`@repo//pac/kage:name`, where `repo` is the name of the repository the Label is -in, `pac/kage` is the directory its `BUILD` file is in and `name` is the path of -the file (if the label refers to a source file) relative to the directory of the -package. When referring to a target on the command line, some parts of the label -can be omitted: - -1. If the repository is omitted, the label is taken to be in the main - repository. -2. If the package part is omitted (such as `name` or `:name`), the label is taken - to be in the package of the current working directory (relative paths - containing uplevel references (..) are not allowed) - -A kind of a rule (such as "C++ library") is called a "rule class". Rule classes may -be implemented either in Starlark (the `rule()` function) or in Java (so called -"native rules", type `RuleClass`). In the long term, every language-specific -rule will be implemented in Starlark, but some legacy rule families (such as Java -or C++) are still in Java for the time being. - -Starlark rule classes need to be imported at the beginning of `BUILD` files -using the `load()` statement, whereas Java rule classes are "innately" known by -Bazel, by virtue of being registered with the `ConfiguredRuleClassProvider`. - -Rule classes contain information such as: - -1. Its attributes (such as `srcs`, `deps`): their types, default values, - constraints, etc. -2. The configuration transitions and aspects attached to each attribute, if any -3. The implementation of the rule -4. The transitive info providers the rule "usually" creates - -**Terminology note:** In the codebase, we often use "Rule" to mean the target -created by a rule class. But in Starlark and in user-facing documentation, -"Rule" should be used exclusively to refer to the rule class itself; the target -is just a "target". Also note that despite `RuleClass` having "class" in its -name, there is no Java inheritance relationship between a rule class and targets -of that type. - -## Skyframe - -The evaluation framework underlying Bazel is called Skyframe. Its model is that -everything that needs to be built during a build is organized into a directed -acyclic graph with edges pointing from any pieces of data to its dependencies, -that is, other pieces of data that need to be known to construct it. - -The nodes in the graph are called `SkyValue`s and their names are called -`SkyKey`s. Both are deeply immutable; only immutable objects should be -reachable from them. This invariant almost always holds, and in case it doesn't -(such as for the individual options classes `BuildOptions`, which is a member of -`BuildConfigurationValue` and its `SkyKey`) we try really hard not to change -them or to change them in only ways that are not observable from the outside. -From this it follows that everything that is computed within Skyframe (such as -configured targets) must also be immutable. - -The most convenient way to observe the Skyframe graph is to run `bazel dump ---skyframe=deps`, which dumps the graph, one `SkyValue` per line. It's best -to do it for tiny builds, since it can get pretty large. - -Skyframe lives in the `com.google.devtools.build.skyframe` package. 
-
-## Skyframe
-
-The evaluation framework underlying Bazel is called Skyframe. Its model is that
-everything that needs to be built during a build is organized into a directed
-acyclic graph with edges pointing from each piece of data to its dependencies,
-that is, other pieces of data that need to be known to construct it.
-
-The nodes in the graph are called `SkyValue`s and their names are called
-`SkyKey`s. Both are deeply immutable; only immutable objects should be
-reachable from them. This invariant almost always holds, and in case it doesn't
-(such as for the individual options classes `BuildOptions`, which is a member of
-`BuildConfigurationValue` and its `SkyKey`) we try really hard not to change
-them, or to change them only in ways that are not observable from the outside.
-From this it follows that everything that is computed within Skyframe (such as
-configured targets) must also be immutable.
-
-The most convenient way to observe the Skyframe graph is to run `bazel dump
---skyframe=deps`, which dumps the graph, one `SkyValue` per line. It's best
-to do it for tiny builds, since it can get pretty large.
-
-Skyframe lives in the `com.google.devtools.build.skyframe` package. The
-similarly-named package `com.google.devtools.build.lib.skyframe` contains the
-implementation of Bazel on top of Skyframe. More information about Skyframe is
-available [here](/reference/skyframe).
-
-To evaluate a given `SkyKey` into a `SkyValue`, Skyframe will invoke the
-`SkyFunction` corresponding to the type of the key. During the function's
-evaluation, it may request other dependencies from Skyframe by calling the
-various overloads of `SkyFunction.Environment.getValue()`. This has the
-side-effect of registering those dependencies into Skyframe's internal graph, so
-that Skyframe will know to re-evaluate the function when any of its dependencies
-change. In other words, Skyframe's caching and incremental computation work at
-the granularity of `SkyFunction`s and `SkyValue`s.
-
-Whenever a `SkyFunction` requests a dependency that is unavailable, `getValue()`
-will return null. The function should then yield control back to Skyframe by
-itself returning null. At some later point, Skyframe will evaluate the
-unavailable dependency, then restart the function from the beginning; only this
-time the `getValue()` call will succeed with a non-null result.
-
-A consequence of this is that any computation performed inside the `SkyFunction`
-prior to the restart must be repeated. But this does not include work done to
-evaluate dependency `SkyValues`, which are cached. Therefore, we commonly work
-around this issue by:
-
-1. Declaring dependencies in batches (by using `getValuesAndExceptions()`) to
-   limit the number of restarts.
-2. Breaking up a `SkyValue` into separate pieces computed by different
-   `SkyFunction`s, so that they can be computed and cached independently. This
-   should be done strategically, since it has the potential to increase memory
-   usage.
-3. Storing state between restarts, either using
-   `SkyFunction.Environment.getState()`, or keeping an ad hoc static cache
-   "behind the back of Skyframe".
-
-Fundamentally, we need these types of workarounds because we routinely have
-hundreds of thousands of in-flight Skyframe nodes, and Java doesn't support
-lightweight threads.
-
-## Starlark
-
-Starlark is the domain-specific language people use to configure and extend
-Bazel. It's conceived as a restricted subset of Python that has far fewer types,
-more restrictions on control flow, and most importantly, strong immutability
-guarantees to enable concurrent reads. It is not Turing-complete, which
-discourages some (but not all) users from trying to accomplish general
-programming tasks within the language.
-
-Starlark is implemented in the `net.starlark.java` package. It also has an
-independent Go implementation [here](https://github.com/google/starlark-go).
-The Java implementation used in Bazel is currently an interpreter.
-
-Starlark is used in several contexts, including:
-
-1. **`BUILD` files.** This is where new build targets are defined. Starlark
-   code running in this context only has access to the contents of the `BUILD`
-   file itself and `.bzl` files loaded by it.
-2. **The `MODULE.bazel` file.** This is where external dependencies are
-   defined. Starlark code running in this context only has very limited access
-   to a few predefined directives.
-3. **`.bzl` files.** This is where new build rules, repo rules, and module
-   extensions are defined. Starlark code here can define new functions and load
-   from other `.bzl` files.
-
-The dialects available for `BUILD` and `.bzl` files are slightly different
-because they express different things. A list of differences is available
-[here](/rules/language#differences-between-build-and-bzl-files).
-
-More information about Starlark is available [here](/rules/language).
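-
-As a minimal illustration of the `.bzl` context, here is a sketch of a Starlark
-rule definition (the file name, rule name and attributes are invented for this
-example; the API calls are the standard ones). A `BUILD` file would `load()`
-the `my_library` symbol and instantiate it as a target:
-
-```
-# my_rules.bzl (hypothetical file)
-def _my_library_impl(ctx):
-    # Declare and write a single output file; mark it as the default
-    # artifact this rule produces.
-    out = ctx.actions.declare_file(ctx.label.name + ".txt")
-    ctx.actions.write(out, "inputs: %s" % [f.path for f in ctx.files.srcs])
-    return [DefaultInfo(files = depset([out]))]
-
-my_library = rule(
-    implementation = _my_library_impl,
-    attrs = {
-        "srcs": attr.label_list(allow_files = True),
-    },
-)
-```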
-
-## The loading/analysis phase
-
-The loading/analysis phase is where Bazel determines what actions are needed to
-build a particular rule. Its basic unit is a "configured target", which is,
-quite sensibly, a (target, configuration) pair.
-
-It's called the "loading/analysis phase" because it can be split into two
-distinct parts, which used to be serialized, but can now overlap in time:
-
-1. Loading packages, that is, turning `BUILD` files into the `Package` objects
-   that represent them
-2. Analyzing configured targets, that is, running the implementation of the
-   rules to produce the action graph
-
-Each configured target in the transitive closure of the configured targets
-requested on the command line must be analyzed bottom-up; that is, leaf nodes
-first, then up to the ones on the command line. The inputs to the analysis of
-a single configured target are:
-
-1. **The configuration.** ("how" to build that rule; for example, the target
-   platform, but also things like command line options the user wants to be
-   passed to the C++ compiler)
-2. **The direct dependencies.** Their transitive info providers are available
-   to the rule being analyzed. They are called like that because they provide a
-   "roll-up" of the information in the transitive closure of the configured
-   target, such as all the .jar files on the classpath or all the .o files that
-   need to be linked into a C++ binary.
-3. **The target itself.** This is the result of loading the package the target
-   is in. For rules, this includes its attributes, which is usually what
-   matters.
-4. **The implementation of the configured target.** For rules, this can either
-   be in Starlark or in Java. All non-rule configured targets are implemented
-   in Java.
-
-The output of analyzing a configured target is:
-
-1. The transitive info providers that configured targets that depend on it can
-   access
-2. The artifacts it can create and the actions that produce them
-
-The API offered to Java rules is `RuleContext`, which is the equivalent of the
-`ctx` argument of Starlark rules. Its API is more powerful, but at the same
-time, it's easier to do Bad Things™, for example to write code whose time or
-space complexity is quadratic (or worse), to make the Bazel server crash with a
-Java exception or to violate invariants (such as by inadvertently modifying an
-`Options` instance or by making a configured target mutable).
-
-The algorithm that determines the direct dependencies of a configured target
-lives in `DependencyResolver.dependentNodeMap()`.
-
-### Configurations
-
-Configurations are the "how" of building a target: for what platform, with what
-command line options, etc.
-
-The same target can be built for multiple configurations in the same build.
-This is useful, for example, when the same code is used for a tool that's run
-during the build and for the target code and we are cross-compiling, or when we
-are building a fat Android app (one that contains native code for multiple CPU
-architectures).
-
-Conceptually, the configuration is a `BuildOptions` instance. However, in
-practice, `BuildOptions` is wrapped by `BuildConfiguration`, which provides
-additional sundry pieces of functionality. It propagates from the top of the
-dependency graph to the bottom. If it changes, the build needs to be
-re-analyzed.
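-
-In rule definitions, this tool-versus-target distinction commonly surfaces as
-an attribute that requests the exec configuration. The sketch below is
-hypothetical (the rule, attribute and label are invented), but `cfg = "exec"`
-is the real mechanism: the same `//tools:generator` target can be built for the
-execution platform here and for the target platform elsewhere in the same
-build:
-
-```
-def _my_codegen_impl(ctx):
-    # Implementation elided; it would run ctx.executable._generator.
-    return []
-
-my_codegen = rule(
-    implementation = _my_codegen_impl,
-    attrs = {
-        # The generator runs during the build, so it is built for the
-        # execution platform rather than the platform being targeted.
-        "_generator": attr.label(
-            default = "//tools:generator",
-            executable = True,
-            cfg = "exec",
-        ),
-    },
-)
-```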
-
-This top-down propagation results in anomalies like having to re-analyze the
-whole build if, for example, the number of requested test runs changes, even
-though that only affects test targets (we have plans to "trim" configurations
-so that this is not the case, but it's not ready yet).
-
-When a rule implementation needs part of the configuration, it needs to declare
-it in its definition using `RuleClass.Builder.requiresConfigurationFragments()`.
-This is both to avoid mistakes (such as Python rules using the Java fragment)
-and to facilitate configuration trimming, so that, for example, if Python
-options change, C++ targets don't need to be re-analyzed.
-
-The configuration of a rule is not necessarily the same as that of its "parent"
-rule. The process of changing the configuration in a dependency edge is called a
-"configuration transition". It can happen in two places:
-
-1. On a dependency edge. These transitions are specified in
-   `Attribute.Builder.cfg()` and are functions from a `Rule` (where the
-   transition happens) and a `BuildOptions` (the original configuration) to one
-   or more `BuildOptions` (the output configuration).
-2. On any incoming edge to a configured target. These are specified in
-   `RuleClass.Builder.cfg()`.
-
-The relevant classes are `TransitionFactory` and `ConfigurationTransition`.
-
-Configuration transitions are used, for example:
-
-1. To declare that a particular dependency is used during the build and it
-   should thus be built in the execution architecture
-2. To declare that a particular dependency must be built for multiple
-   architectures (such as for native code in fat Android APKs)
-
-If a configuration transition results in multiple configurations, it's called a
-_split transition_.
-
-Configuration transitions can also be implemented in Starlark (documentation
-[here](/extending/config)).
-
-### Transitive info providers
-
-Transitive info providers are a way (and the _only_ way) for configured targets
-to tell things about other configured targets that depend on them. The reason
-why "transitive" is in their name is that this is usually some sort of roll-up
-of the transitive closure of a configured target.
-
-There is generally a 1:1 correspondence between Java transitive info providers
-and Starlark ones (the exception is `DefaultInfo`, which is an amalgamation of
-`FileProvider`, `FilesToRunProvider` and `RunfilesProvider`, because that API
-was deemed to be more Starlark-ish than a direct transliteration of the Java
-one). Their key is one of the following things:
-
-1. A Java Class object. This is only available for providers that are not
-   accessible from Starlark. These providers are a subclass of
-   `TransitiveInfoProvider`.
-2. A string. This is legacy and heavily discouraged since it's susceptible to
-   name clashes. Such transitive info providers are direct subclasses of
-   `build.lib.packages.Info`.
-3. A provider symbol. This can be created from Starlark using the `provider()`
-   function and is the recommended way to create new providers. The symbol is
-   represented by a `Provider.Key` instance in Java.
-
-New providers implemented in Java should be implemented using `BuiltinProvider`.
-`NativeProvider` is deprecated (we haven't had time to remove it yet) and
-`TransitiveInfoProvider` subclasses cannot be accessed from Starlark.
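-
-For reference, here is what the recommended Starlark path looks like; the
-provider name, fields and rule are invented for this sketch. A rule
-implementation returns an instance of the provider, and dependent rules read it
-off their `deps`:
-
-```
-# Hypothetical provider rolling up transitive sources.
-FooInfo = provider(
-    doc = "Transitive information about foo sources.",
-    fields = {"transitive_sources": "depset of source Files"},
-)
-
-def _foo_library_impl(ctx):
-    sources = depset(
-        ctx.files.srcs,
-        transitive = [dep[FooInfo].transitive_sources for dep in ctx.attr.deps],
-    )
-    return [FooInfo(transitive_sources = sources)]
-
-foo_library = rule(
-    implementation = _foo_library_impl,
-    attrs = {
-        "srcs": attr.label_list(allow_files = True),
-        "deps": attr.label_list(providers = [FooInfo]),
-    },
-)
-```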
-
-### Configured targets
-
-Configured targets are implemented as `RuleConfiguredTargetFactory`. There is a
-subclass for each rule class implemented in Java. Starlark configured targets
-are created through `StarlarkRuleConfiguredTargetUtil.buildRule()`.
-
-Configured target factories should use `RuleConfiguredTargetBuilder` to
-construct their return value. It consists of the following things:
-
-1. Their `filesToBuild`, the hazy concept of "the set of files this rule
-   represents." These are the files that get built when the configured target
-   is on the command line or in the `srcs` of a genrule.
-2. Their runfiles, regular and data.
-3. Their output groups. These are various "other sets of files" the rule can
-   build. They can be accessed using the `output_group` attribute of the
-   filegroup rule in `BUILD` and using the `OutputGroupInfo` provider in Java.
-
-### Runfiles
-
-Some binaries need data files to run. A prominent example is tests that need
-input files. This is represented in Bazel by the concept of "runfiles". A
-"runfiles tree" is a directory tree of the data files for a particular binary.
-It is created in the file system as a symlink tree with individual symlinks
-pointing to the files in the source or output trees.
-
-A set of runfiles is represented as a `Runfiles` instance. It is conceptually a
-map from the path of a file in the runfiles tree to the `Artifact` instance that
-represents it. It's a little more complicated than a single `Map` for two
-reasons:
-
-* Most of the time, the runfiles path of a file is the same as its execpath.
-  We use this to save some RAM.
-* There are various legacy kinds of entries in runfiles trees, which also need
-  to be represented.
-
-Runfiles are collected using `RunfilesProvider`: an instance of this class
-represents the runfiles a configured target (such as a library) and its
-transitive closure needs, and they are gathered like a nested set (in fact,
-they are implemented using nested sets under the cover): each target unions the
-runfiles of its dependencies, adds some of its own, then sends the resulting
-set upwards in the dependency graph. A `RunfilesProvider` instance contains two
-`Runfiles` instances, one for when the rule is depended on through the "data"
-attribute and one for every other kind of incoming dependency. This is because
-a target sometimes presents different runfiles when depended on through a data
-attribute than otherwise. This is undesired legacy behavior that we haven't
-gotten around to removing yet.
-
-Runfiles of binaries are represented as an instance of `RunfilesSupport`. This
-is different from `Runfiles` because `RunfilesSupport` has the capability of
-actually being built (unlike `Runfiles`, which is just a mapping). This
-necessitates the following additional components:
-
-* **The input runfiles manifest.** This is a serialized description of the
-  runfiles tree. It is used as a proxy for the contents of the runfiles tree
-  and Bazel assumes that the runfiles tree changes if and only if the contents
-  of the manifest change.
-* **The output runfiles manifest.** This is used by runtime libraries that
-  handle runfiles trees, notably on Windows, which sometimes doesn't support
-  symbolic links.
-* **The runfiles middleman.** In order for a runfiles tree to exist, one needs
-  to build the symlink tree and the artifact the symlinks point to. In order
-  to decrease the number of dependency edges, the runfiles middleman can be
-  used to represent all these.
-* **Command line arguments** for running the binary whose runfiles the
-  `RunfilesSupport` object represents.
-
-### Aspects
-
-Aspects are a way to "propagate computation down the dependency graph". They
-are described for users of Bazel [here](/extending/aspects). A good motivating
-example is protocol buffers: a `proto_library` rule should not know about any
-particular language, but building the implementation of a protocol buffer
-message (the "basic unit" of protocol buffers) in any programming language
-should be coupled to the `proto_library` rule so that if two targets in the
-same language depend on the same protocol buffer, it gets built only once.
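-
-As a rough sketch of the shape of a Starlark aspect (the names are invented,
-and real aspects such as the Java proto aspect are considerably more involved),
-an aspect declares an implementation function and the attributes along which it
-propagates:
-
-```
-FileCountInfo = provider(fields = {"count": "number of source files"})
-
-def _file_count_aspect_impl(target, ctx):
-    # Count this target's sources plus everything already counted in
-    # its dependencies; the aspect has run on them already.
-    count = 0
-    if hasattr(ctx.rule.attr, "srcs"):
-        for src in ctx.rule.attr.srcs:
-            count += len(src.files.to_list())
-    if hasattr(ctx.rule.attr, "deps"):
-        for dep in ctx.rule.attr.deps:
-            count += dep[FileCountInfo].count
-    return [FileCountInfo(count = count)]
-
-file_count_aspect = aspect(
-    implementation = _file_count_aspect_impl,
-    # Propagate down every "deps" edge of the dependency graph.
-    attr_aspects = ["deps"],
-)
-```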
-
-Just like configured targets, aspects are represented in Skyframe as a
-`SkyValue` and the way they are constructed is very similar to how configured
-targets are built: they have a factory class called `ConfiguredAspectFactory`
-that has access to a `RuleContext`, but unlike configured target factories, it
-also knows about the configured target it is attached to and its providers.
-
-The set of aspects propagated down the dependency graph is specified for each
-attribute using the `Attribute.Builder.aspects()` function. There are a few
-confusingly-named classes that participate in the process:
-
-1. `AspectClass` is the implementation of the aspect. It can be either in Java
-   (in which case it's a subclass) or in Starlark (in which case it's an
-   instance of `StarlarkAspectClass`). It's analogous to
-   `RuleConfiguredTargetFactory`.
-2. `AspectDefinition` is the definition of the aspect; it includes the
-   providers it requires, the providers it provides and contains a reference to
-   its implementation, such as the appropriate `AspectClass` instance. It's
-   analogous to `RuleClass`.
-3. `AspectParameters` is a way to parametrize an aspect that is propagated down
-   the dependency graph. It's currently a string to string map. A good example
-   of why it's useful is protocol buffers: if a language has multiple APIs, the
-   information as to which API the protocol buffers should be built for should
-   be propagated down the dependency graph.
-4. `Aspect` represents all the data that's needed to compute an aspect that
-   propagates down the dependency graph. It consists of the aspect class, its
-   definition and its parameters.
-5. `RuleAspect` is the function that determines which aspects a particular rule
-   should propagate. It's a `Rule` -> `Aspect` function.
-
-A somewhat unexpected complication is that aspects can attach to other aspects;
-for example, an aspect collecting the classpath for a Java IDE will probably
-want to know about all the .jar files on the classpath, but some of them are
-protocol buffers. In that case, the IDE aspect will want to attach to the
-(`proto_library` rule + Java proto aspect) pair.
-
-The complexity of aspects on aspects is captured in the class
-`AspectCollection`.
-
-### Platforms and toolchains
-
-Bazel supports multi-platform builds, that is, builds where there may be
-multiple architectures where build actions run and multiple architectures for
-which code is built. These architectures are referred to as _platforms_ in
-Bazel parlance (full documentation [here](/extending/platforms)).
-
-A platform is described by a key-value mapping from _constraint settings_ (such
-as the concept of "CPU architecture") to _constraint values_ (such as a
-particular CPU like x86_64). We have a "dictionary" of the most commonly used
-constraint settings and values in the `@platforms` repository.
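-
-Concretely, a platform definition is just a target that lists constraint
-values. This example uses the real `@platforms` repository, though the platform
-name itself is arbitrary:
-
-```
-platform(
-    name = "linux_aarch64",
-    constraint_values = [
-        "@platforms//os:linux",
-        "@platforms//cpu:aarch64",
-    ],
-)
-```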
-
-The concept of _toolchain_ comes from the fact that depending on what platforms
-the build is running on and what platforms are targeted, one may need to use
-different compilers; for example, a particular C++ toolchain may run on a
-specific OS and be able to target some other OSes. Bazel must determine the C++
-compiler that is used based on the execution and target platforms
-(documentation for toolchains [here](/extending/toolchains)).
-
-In order to do this, toolchains are annotated with the set of execution and
-target platform constraints they support. To make this possible, the definition
-of a toolchain is split into two parts:
-
-1. A `toolchain()` rule that describes the set of execution and target
-   constraints a toolchain supports and tells what kind (such as C++ or Java)
-   of toolchain it is (the latter is represented by the `toolchain_type()`
-   rule)
-2. A language-specific rule that describes the actual toolchain (such as
-   `cc_toolchain()`)
-
-This is done in this way because we need to know the constraints for every
-toolchain in order to do toolchain resolution, and language-specific
-`*_toolchain()` rules contain much more information than that, so they take
-more time to load.
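-
-Putting the two parts together, a `toolchain()` declaration might look like the
-following sketch; the names and the toolchain type label are invented, but the
-attributes are the real ones described above:
-
-```
-toolchain(
-    name = "my_compiler_linux_aarch64",
-    # Constraints the machine running the build actions must satisfy.
-    exec_compatible_with = [
-        "@platforms//os:linux",
-        "@platforms//cpu:x86_64",
-    ],
-    # Constraints of the platform being targeted.
-    target_compatible_with = [
-        "@platforms//os:linux",
-        "@platforms//cpu:aarch64",
-    ],
-    # The language-specific rule carrying the actual toolchain definition.
-    toolchain = ":my_compiler_impl",
-    toolchain_type = "//tools:my_toolchain_type",
-)
-```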
-
-Execution platforms are specified in one of the following ways:
-
-1. In the MODULE.bazel file using the `register_execution_platforms()` function
-2. On the command line using the `--extra_execution_platforms` command line
-   option
-
-The set of available execution platforms is computed in
-`RegisteredExecutionPlatformsFunction`.
-
-The target platform for a configured target is determined by
-`PlatformOptions.computeTargetPlatform()`. It's a list of platforms because we
-eventually want to support multiple target platforms, but it's not implemented
-yet.
-
-The set of toolchains to be used for a configured target is determined by
-`ToolchainResolutionFunction`. It is a function of:
-
-* The set of registered toolchains (in the MODULE.bazel file and the
-  configuration)
-* The desired execution and target platforms (in the configuration)
-* The set of toolchain types that are required by the configured target (in
-  `UnloadedToolchainContextKey`)
-* The set of execution platform constraints of the configured target (the
-  `exec_compatible_with` attribute) and the configuration
-  (`--experimental_add_exec_constraints_to_targets`), in
-  `UnloadedToolchainContextKey`
-
-Its result is an `UnloadedToolchainContext`, which is essentially a map from
-toolchain type (represented as a `ToolchainTypeInfo` instance) to the label of
-the selected toolchain. It's called "unloaded" because it does not contain the
-toolchains themselves, only their labels.
-
-Then the toolchains are actually loaded using `ResolvedToolchainContext.load()`
-and used by the implementation of the configured target that requested them.
-
-We also have a legacy system that relies on there being one single "host"
-configuration and target configurations being represented by various
-configuration flags, such as `--cpu`. We are gradually transitioning to the
-above system. In order to handle cases where people rely on the legacy
-configuration values, we have implemented
-[platform mappings](https://docs.google.com/document/d/1Vg_tPgiZbSrvXcJ403vZVAGlsWhH9BUDrAxMOYnO0Ls)
-to translate between the legacy flags and the new-style platform constraints.
-Their code is in `PlatformMappingFunction` and uses a non-Starlark "little
-language".
-
-### Constraints
-
-Sometimes one wants to designate a target as being compatible with only a few
-platforms. Bazel has (unfortunately) multiple mechanisms to achieve this end:
-
-* Rule-specific constraints
-* `environment_group()` / `environment()`
-* Platform constraints
-
-Rule-specific constraints are mostly used within Google for Java rules; they
-are on their way out and they are not available in Bazel, but the source code
-may contain references to them. The attribute that governs this is called
-`constraints=`.
-
-#### environment_group() and environment()
-
-These rules are a legacy mechanism and are not widely used.
-
-All build rules can declare which "environments" they can be built for, where
-an "environment" is an instance of the `environment()` rule.
-
-There are various ways supported environments can be specified for a rule:
-
-1. Through the `restricted_to=` attribute. This is the most direct form of
-   specification; it declares the exact set of environments the rule supports
-   for this group.
-2. Through the `compatible_with=` attribute. This declares environments a rule
-   supports in addition to "standard" environments that are supported by
-   default.
-3. Through the package-level attributes `default_restricted_to=` and
-   `default_compatible_with=`.
-4. Through default specifications in `environment_group()` rules. Every
-   environment belongs to a group of thematically related peers (such as "CPU
-   architectures", "JDK versions" or "mobile operating systems"). The
-   definition of an environment group includes which of these environments
-   should be supported by "default" if not otherwise specified by the
-   `restricted_to=` / `compatible_with=` attributes. A rule with no such
-   attributes inherits all defaults.
-5. Through a rule class default. This overrides global defaults for all
-   instances of the given rule class. This can be used, for example, to make
-   all `*_test` rules testable without each instance having to explicitly
-   declare this capability.
-
-`environment()` is implemented as a regular rule, whereas `environment_group()`
-is both a subclass of `Target` but not `Rule` (`EnvironmentGroup`) and a
-function that is available by default from Starlark
-(`StarlarkLibrary.environmentGroup()`), which eventually creates an eponymous
-target. This is to avoid a cyclic dependency that would arise because each
-environment needs to declare the environment group it belongs to and each
-environment group needs to declare its default environments.
-
-A build can be restricted to a certain environment with the
-`--target_environment` command line option.
-
-The implementation of the constraint check is in
-`RuleContextConstraintSemantics` and `TopLevelConstraintSemantics`.
-
-#### Platform constraints
-
-The current "official" way to describe what platforms a target is compatible
-with is by using the same constraints used to describe toolchains and
-platforms. It's under review in pull request
-[#10945](https://github.com/bazelbuild/bazel/pull/10945).
-
-### Visibility
-
-If you work on a large codebase with a lot of developers (like at Google), you
-want to take care to prevent everyone else from arbitrarily depending on your
-code. Otherwise, as per [Hyrum's law](https://www.hyrumslaw.com/), people
-_will_ come to rely on behaviors that you considered to be implementation
-details.
-
-Bazel supports this by the mechanism called _visibility_: you can declare that
-a particular target can only be depended on using the
-[visibility](/reference/be/common-definitions#common-attributes) attribute.
-This attribute is a little special because, although it holds a list of labels,
-these labels may encode a pattern over package names rather than a pointer to
-any particular target. (Yes, this is a design flaw.)
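-
-For example, a hypothetical `BUILD` file restricting a library to an allowlist
-of packages (all names invented) looks like this; note that `:friends` and
-`//tools/debugging:__subpackages__` are package patterns, not real targets:
-
-```
-package_group(
-    name = "friends",
-    packages = [
-        "//vendor/acme/...",  # this package and everything beneath it
-    ],
-)
-
-cc_library(
-    name = "internal_lib",
-    srcs = ["internal_lib.cc"],
-    visibility = [
-        ":friends",
-        "//tools/debugging:__subpackages__",
-    ],
-)
-```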
-
-This is implemented in the following places:
-
-* The `RuleVisibility` interface represents a visibility declaration. It can
-  be either a constant (fully public or fully private) or a list of labels.
-* Labels can refer to either package groups (predefined list of packages), to
-  packages directly (`//pkg:__pkg__`) or subtrees of packages
-  (`//pkg:__subpackages__`). This is different from the command line syntax,
-  which uses `//pkg:*` or `//pkg/...`.
-* Package groups are implemented as their own target (`PackageGroup`) and
-  configured target (`PackageGroupConfiguredTarget`). We could probably
-  replace these with simple rules if we wanted to. Their logic is implemented
-  with the help of: `PackageSpecification`, which corresponds to a
-  single pattern like `//pkg/...`; `PackageGroupContents`, which corresponds
-  to a single `package_group`'s `packages` attribute; and
-  `PackageSpecificationProvider`, which aggregates over a `package_group` and
-  its transitive `includes`.
-* The conversion from visibility label lists to dependencies is done in
-  `DependencyResolver.visitTargetVisibility` and a few other miscellaneous
-  places.
-* The actual check is done in
-  `CommonPrerequisiteValidator.validateDirectPrerequisiteVisibility()`.
-
-### Nested sets
-
-Oftentimes, a configured target aggregates a set of files from its
-dependencies, adds its own, and wraps the aggregate set into a transitive info
-provider so that configured targets that depend on it can do the same.
-Examples:
-
-* The C++ header files used for a build
-* The object files that represent the transitive closure of a `cc_library`
-* The set of .jar files that need to be on the classpath for a Java rule to
-  compile or run
-* The set of Python files in the transitive closure of a Python rule
-
-If we did this the naive way by using, for example, `List` or `Set`, we'd end
-up with quadratic memory usage: if there is a chain of N rules and each rule
-adds a file, we'd have 1+2+...+N collection members.
-
-In order to get around this problem, we came up with the concept of a
-`NestedSet`. It's a data structure that is composed of other `NestedSet`
-instances and some members of its own, thereby forming a directed acyclic graph
-of sets. They are immutable and their members can be iterated over. We define
-multiple iteration orders (`NestedSet.Order`): preorder, postorder, topological
-(a node always comes after its ancestors) and "don't care, but it should be the
-same each time".
-
-The same data structure is called `depset` in Starlark.
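-
-A small Starlark sketch of the sharing this enables (the file names are made
-up): each `depset` stores only its direct members plus references to the
-transitive sets, so the chain below keeps one copy of each file instead of
-copying them down the chain:
-
-```
-d1 = depset(["liba.o"])
-d2 = depset(["libb.o"], transitive = [d1], order = "postorder")
-d3 = depset(["libc.o"], transitive = [d2], order = "postorder")
-
-# Flattening happens only on demand; with postorder, dependencies come
-# before the sets that include them.
-print(d3.to_list())  # ["liba.o", "libb.o", "libc.o"]
-```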
-
-### Artifacts and Actions
-
-The actual build consists of a set of commands that need to be run to produce
-the output the user wants. The commands are represented as instances of the
-class `Action` and the files are represented as instances of the class
-`Artifact`. They are arranged in a bipartite, directed, acyclic graph called
-the "action graph".
-
-Artifacts come in two kinds: source artifacts (ones that are available
-before Bazel starts executing) and derived artifacts (ones that need to be
-built). Derived artifacts can themselves be of multiple kinds:
-
-1. **Regular artifacts.** These are checked for up-to-dateness by computing
-   their checksum, with mtime as a shortcut; we don't checksum the file if its
-   ctime hasn't changed.
-2. **Unresolved symlink artifacts.** These are checked for up-to-dateness by
-   calling readlink(). Unlike regular artifacts, these can be dangling
-   symlinks. Usually used in cases where one then packs up some files into an
-   archive of some sort.
-3. **Tree artifacts.** These are not single files, but directory trees. They
-   are checked for up-to-dateness by checking the set of files in it and their
-   contents. They are represented as a `TreeArtifact`.
-4. **Constant metadata artifacts.** Changes to these artifacts don't trigger a
-   rebuild. This is used exclusively for build stamp information: we don't want
-   to do a rebuild just because the current time changed.
-
-There is no fundamental reason why source artifacts cannot be tree artifacts or
-unresolved symlink artifacts, it's just that we haven't implemented it yet (we
-should, though -- referencing a source directory in a `BUILD` file is one of the
-few known long-standing incorrectness issues with Bazel; we have an
-implementation that kind of works, which is enabled by the
-`BAZEL_TRACK_SOURCE_DIRECTORIES=1` JVM property).
-
-A notable kind of `Artifact` is the middleman. Middlemen are indicated by
-`Artifact` instances that are the outputs of `MiddlemanAction`. They are used
-to special-case some things:
-
-* Aggregating middlemen are used to group artifacts together. This is so that
-  if a lot of actions use the same large set of inputs, we don't have N\*M
-  dependency edges, only N+M (they are being replaced with nested sets)
-* Scheduling dependency middlemen ensure that an action runs before another.
-  They are mostly used for linting but also for C++ compilation (see
-  `CcCompilationContext.createMiddleman()` for an explanation)
-* Runfiles middlemen are used to ensure the presence of a runfiles tree so
-  that one does not separately need to depend on the output manifest and every
-  single artifact referenced by the runfiles tree.
-
-Actions are best understood as a command that needs to be run, the environment
-it needs and the set of outputs it produces. The following things are the main
-components of the description of an action:
-
-* The command line that needs to be run
-* The input artifacts it needs
-* The environment variables that need to be set
-* Annotations that describe the environment (such as platform) it needs to run
-  in
-
-There are also a few other special cases, like writing a file whose content is
-known to Bazel. They are a subclass of `AbstractAction`. Most of the actions
-are a `SpawnAction` or a `StarlarkAction` (the same; they should arguably not
-be separate classes), although Java and C++ have their own action types
-(`JavaCompileAction`, `CppCompileAction` and `CppLinkAction`).
-
-We eventually want to move everything to `SpawnAction`; `JavaCompileAction` is
-pretty close, but C++ is a bit of a special-case due to .d file parsing and
-include scanning.
-
-The action graph is mostly "embedded" into the Skyframe graph: conceptually,
-the execution of an action is represented as an invocation of
-`ActionExecutionFunction`. The mapping from an action graph dependency edge to
-a Skyframe dependency edge is described in
-`ActionExecutionFunction.getInputDeps()` and `Artifact.key()` and has a few
-optimizations in order to keep the number of Skyframe edges low:
-
-* Derived artifacts do not have their own `SkyValue`s. Instead,
-  `Artifact.getGeneratingActionKey()` is used to find out the key for the
-  action that generates it.
-* Nested sets have their own Skyframe key.
-
-### Shared actions
-
-Some actions are generated by multiple configured targets; Starlark rules are
-more limited since they are only allowed to put their derived actions into a
-directory determined by their configuration and their package (but even so,
-rules in the same package can conflict), but rules implemented in Java can put
-derived artifacts anywhere.
-
-This is considered to be a misfeature, but getting rid of it is really hard
-because it produces significant savings in execution time when, for example, a
-source file needs to be processed somehow and that file is referenced by
-multiple rules (handwave-handwave). This comes at the cost of some RAM: each
-instance of a shared action needs to be stored in memory separately.
-
-If two actions generate the same output file, they must be exactly the same:
-have the same inputs, the same outputs and run the same command line. This
-equivalence relation is implemented in `Actions.canBeShared()` and it is
-verified between the analysis and execution phases by looking at every Action.
-This is implemented in `SkyframeActionExecutor.findAndStoreArtifactConflicts()`
-and is one of the few places in Bazel that requires a "global" view of the
-build.
-
-## The execution phase
-
-This is when Bazel actually starts running build actions, such as commands that
-produce outputs.
-
-The first thing Bazel does after the analysis phase is to determine what
-Artifacts need to be built. The logic for this is encoded in
-`TopLevelArtifactHelper`; roughly speaking, it's the `filesToBuild` of the
-configured targets on the command line and the contents of a special output
-group for the explicit purpose of expressing "if this target is on the command
-line, build these artifacts".
-
-The next step is creating the execution root. Since Bazel has the option to
-read source packages from different locations in the file system
-(`--package_path`), it needs to provide locally executed actions with a full
-source tree. This is handled by the class `SymlinkForest` and works by taking
-note of every target used in the analysis phase and building up a single
-directory tree that symlinks every package with a used target from its actual
-location. An alternative would be to pass the correct paths to commands (taking
-`--package_path` into account). This is undesirable because:
-
-* It changes action command lines when a package is moved from a package path
-  entry to another (this used to be a common occurrence)
-* It results in different command lines if an action is run remotely than if
-  it's run locally
-* It requires a command line transformation specific to the tool in use
-  (consider the difference between, say, Java classpaths and C++ include
-  paths)
-* Changing the command line of an action invalidates its action cache entry
-* `--package_path` is slowly and steadily being deprecated
-
-Then, Bazel starts traversing the action graph (the bipartite, directed graph
-composed of actions and their input and output artifacts) and running actions.
-The execution of each action is represented by an instance of the `SkyValue`
-class `ActionExecutionValue`.
-
-Since running an action is expensive, we have a few layers of caching that can
-be hit behind Skyframe:
-
-* `ActionExecutionFunction.stateMap` contains data to make Skyframe restarts
-  of `ActionExecutionFunction` cheap
-* The local action cache contains data about the state of the file system
-* Remote execution systems usually also contain their own cache
-
-### The local action cache
-
-This cache is another layer that sits behind Skyframe; even if an action is
-re-executed in Skyframe, it can still be a hit in the local action cache. It
-represents the state of the local file system and it's serialized to disk,
-which means that when one starts up a new Bazel server, one can get local
-action cache hits even though the Skyframe graph is empty.
-
-This cache is checked for hits using the method
-`ActionCacheChecker.getTokenIfNeedToExecute()`.
-
-Contrary to its name, it's a map from the path of a derived artifact to the
-action that emitted it. The action is described as:
-
-1. The set of its input and output files and their checksum
-2. Its "action key", which is usually the command line that was executed, but
-   in general, represents everything that's not captured by the checksum of
-   the input files (for example, for `FileWriteAction`, it's the checksum of
-   the data that's written)
-
-There is also a highly experimental "top-down action cache" that is still
-under development, which uses transitive hashes to avoid going to the cache as
-many times.
-
-### Input discovery and input pruning
-
-Some actions are more complicated than just having a set of inputs. Changes to
-the set of inputs of an action come in two forms:
-
-* An action may discover new inputs before its execution or decide that some
-  of its inputs are not actually necessary. The canonical example is C++,
-  where it's better to make an educated guess about what header files a C++
-  file uses from its transitive closure so that we don't need to send every
-  file to remote executors; therefore, we have an option not to register every
-  header file as an "input", but scan the source file for transitively
-  included headers and only mark those header files as inputs that are
-  mentioned in `#include` statements (we overestimate so that we don't need to
-  implement a full C preprocessor). This option is currently hard-wired to
-  "false" in Bazel and is only used at Google.
-* An action may realize that some files were not used during its execution. In
-  C++, this is called ".d files": the compiler tells which header files were
-  used after the fact, and in order to avoid the embarrassment of having worse
-  incrementality than Make, Bazel makes use of this fact. This offers a better
-  estimate than the include scanner because it relies on the compiler.
-
-These are implemented using methods on Action:
-
-1. `Action.discoverInputs()` is called. It should return a nested set of
-   Artifacts that are determined to be required. These must be source artifacts
-   so that there are no dependency edges in the action graph that don't have an
-   equivalent in the configured target graph.
-2. The action is executed by calling `Action.execute()`.
-3. At the end of `Action.execute()`, the action can call
-   `Action.updateInputs()` to tell Bazel that not all of its inputs were
-   needed. This can result in incorrect incremental builds if a used input is
-   reported as unused.
-
-When an action cache returns a hit on a fresh Action instance (such as one
-created after a server restart), Bazel calls `updateInputs()` itself so that
-the set of inputs reflects the result of input discovery and pruning done
-before.
-
-Starlark actions can make use of the facility to declare some inputs as unused
-using the `unused_inputs_list=` argument of `ctx.actions.run()`.
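-
-A hedged sketch of what that looks like from the rule author's side (the rule,
-tool and file names are invented; `unused_inputs_list` is the real parameter):
-the tool writes the paths of the inputs it did not use, one per line, into the
-file passed as `unused_inputs_list`, and Bazel prunes them from subsequent
-dependency checks:
-
-```
-def _prune_impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name + ".out")
-    unused = ctx.actions.declare_file(ctx.label.name + ".unused")
-    ctx.actions.run(
-        executable = ctx.executable._tool,
-        arguments = [out.path, unused.path] + [f.path for f in ctx.files.srcs],
-        inputs = ctx.files.srcs,
-        outputs = [out, unused],
-        # The tool lists one unused input path per line in this file.
-        unused_inputs_list = unused,
-    )
-    return [DefaultInfo(files = depset([out]))]
-
-prune_rule = rule(
-    implementation = _prune_impl,
-    attrs = {
-        "srcs": attr.label_list(allow_files = True),
-        "_tool": attr.label(
-            default = "//tools:pruner",  # hypothetical tool
-            executable = True,
-            cfg = "exec",
-        ),
-    },
-)
-```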
-
-### Various ways to run actions: Strategies/ActionContexts
-
-Some actions can be run in different ways. For example, a command line can be
-executed locally, locally but in various kinds of sandboxes, or remotely. The
-concept that embodies this is called an `ActionContext` (or `Strategy`, since
-we successfully went only halfway with a rename...)
-
-The life cycle of an action context is as follows:
-
-1. When the execution phase is started, `BlazeModule` instances are asked what
-   action contexts they have. This happens in the constructor of
-   `ExecutionTool`. Action context types are identified by a Java `Class`
-   instance that refers to a sub-interface of `ActionContext`, which the
-   action context must implement.
-2. The appropriate action context is selected from the available ones and is
-   forwarded to `ActionExecutionContext` and `BlazeExecutor`.
-3. Actions request contexts using `ActionExecutionContext.getContext()` and
-   `BlazeExecutor.getStrategy()` (there should really be only one way to do
-   it…)
-
-Strategies are free to call other strategies to do their jobs; this is used,
-for example, in the dynamic strategy that starts actions both locally and
-remotely, then uses whichever finishes first.
-
-One notable strategy is the one that implements persistent worker processes
-(`WorkerSpawnStrategy`). The idea is that some tools have a long startup time
-and should therefore be reused between actions instead of starting one anew
-for every action. (This does represent a potential correctness issue, since
-Bazel relies on the promise of the worker process that it doesn't carry
-observable state between individual requests.)
-
-If the tool changes, the worker process needs to be restarted. Whether a
-worker can be reused is determined by computing a checksum for the tool used
-using `WorkerFilesHash`. It relies on knowing which inputs of the action
-represent part of the tool and which are regular inputs; this is determined by
-the creator of the Action: `Spawn.getToolFiles()` and the runfiles of the
-`Spawn` are counted as parts of the tool.
-
-More information about strategies (or action contexts!):
-
-* Information about various strategies for running actions is available
-  [here](https://jmmv.dev/2019/12/bazel-strategies.html).
-* Information about the dynamic strategy, one where we run an action both
-  locally and remotely to see whichever finishes first, is available
-  [here](https://jmmv.dev/series.html#Bazel%20dynamic%20execution).
-* Information about the intricacies of executing actions locally is available
-  [here](https://jmmv.dev/2019/11/bazel-process-wrapper.html).
-
-### The local resource manager
-
-Bazel _can_ run many actions in parallel. The number of local actions that
-_should_ be run in parallel differs from action to action: the more resources
-an action requires, the fewer instances should be running at the same time to
-avoid overloading the local machine.
-
-This is implemented in the class `ResourceManager`: each action has to be
-annotated with an estimate of the local resources it requires in the form of a
-`ResourceSet` instance (CPU and RAM). Then when action contexts do something
-that requires local resources, they call `ResourceManager.acquireResources()`
-and are blocked until the required resources are available.
-
-A more detailed description of local resource management is available
-[here](https://jmmv.dev/2019/12/bazel-local-resources.html).
-
-### The structure of the output directory
-
-Each action requires a separate place in the output directory where it places
-its outputs. The location of derived artifacts is usually as follows:
-
-```
-$EXECROOT/bazel-out/<configuration>/bin/<package>/<artifact name>
-```
-
-How is the name of the directory that is associated with a particular
-configuration determined? There are two conflicting desirable properties:
-
-1. If two configurations can occur in the same build, they should have
-   different directories so that both can have their own version of the same
-   action; otherwise, if the two configurations disagree about, say, the
-   command line of an action producing the same output file, Bazel doesn't
-   know which action to choose (an "action conflict")
-2. If two configurations represent "roughly" the same thing, they should have
-   the same name so that actions executed in one can be reused for the other
-   if the command lines match: for example, changes to the command line
-   options to the Java compiler should not result in C++ compile actions
-   being re-run.
-
-So far, we have not come up with a principled way of solving this problem,
-which has similarities to the problem of configuration trimming. A longer
-discussion of options is available
-[here](https://docs.google.com/document/d/1fZI7wHoaS-vJvZy9SBxaHPitIzXE_nL9v4sS4mErrG4/edit).
-The main problematic areas are Starlark rules (whose authors usually aren't
-intimately familiar with Bazel) and aspects, which add another dimension to
-the space of things that can produce the "same" output file.
-
-The current approach is that the path segment for the configuration is
-`<CPU>-<compilation mode>` with various suffixes added so that configuration
-transitions implemented in Java don't result in action conflicts. In addition,
-a checksum of the set of Starlark configuration transitions is added so that
-users can't cause action conflicts. It is far from perfect. This is
-implemented in `OutputDirectories.buildMnemonic()` and relies on each
-configuration fragment adding its own part to the name of the output
-directory.
-
-## Tests
-
-Bazel has rich support for running tests. It supports:
-
-* Running tests remotely (if a remote execution backend is available)
-* Running tests multiple times in parallel (for deflaking or gathering timing
-  data)
-* Sharding tests (splitting test cases in the same test over multiple
-  processes for speed)
-* Re-running flaky tests
-* Grouping tests into test suites
-
-Tests are regular configured targets that have a `TestProvider`, which
-describes how the test should be run:
-
-* The artifacts whose building results in the test being run. This is a "cache
-  status" file that contains a serialized `TestResultData` message
-* The number of times the test should be run
-* The number of shards the test should be split into
-* Some parameters about how the test should be run (such as the test timeout)
-
-### Determining which tests to run
-
-Determining which tests are run is an elaborate process.
-
-First, during target pattern parsing, test suites are recursively expanded.
-The expansion is implemented in `TestsForTargetPatternFunction`. A somewhat
-surprising wrinkle is that if a test suite declares no tests, it refers to
-_every_ test in its package. This is implemented in `Package.beforeBuild()` by
-adding an implicit attribute called `$implicit_tests` to test suite rules.
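-
-Under the expansion rule just described, the two (hypothetical) suites below
-behave quite differently: the first expands to exactly the listed test, while
-the second, having declared no tests, picks up every test in its package:
-
-```
-test_suite(
-    name = "smoke",
-    tests = ["//backend:api_test"],
-)
-
-# No "tests" attribute: expands to every test in this package.
-test_suite(
-    name = "all_tests",
-)
-```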
-
-Then, tests are filtered for size, tags, timeout and language according to the
-command line options. This is implemented in `TestFilter` and is called from
-`TargetPatternPhaseFunction.determineTests()` during target parsing and the
-result is put into `TargetPatternPhaseValue.getTestsToRunLabels()`. The reason
-why rule attributes which can be filtered for are not configurable is that
-this happens before the analysis phase, and therefore the configuration is not
-available.
-
-This is then processed further in `BuildView.createResult()`: targets whose
-analysis failed are filtered out and tests are split into exclusive and
-non-exclusive tests. It's then put into `AnalysisResult`, which is how
-`ExecutionTool` knows which tests to run.
-
-In order to lend some transparency to this elaborate process, the `tests()`
-query operator (implemented in `TestsFunction`) is available to tell which
-tests are run when a particular target is specified on the command line. It's
-unfortunately a reimplementation, so it probably deviates from the above in
-multiple subtle ways.
-
-### Running tests
-
-The way the tests are run is by requesting cache status artifacts. This then
-results in the execution of a `TestRunnerAction`, which eventually calls the
-`TestActionContext` chosen by the `--test_strategy` command line option that
-runs the test in the requested way.
-
-Tests are run according to an elaborate protocol that uses environment
-variables to tell tests what's expected from them. A detailed description of
-what Bazel expects from tests and what tests can expect from Bazel is
-available [here](/reference/test-encyclopedia). At the simplest, an exit code
-of 0 means success, anything else means failure.
-
-In addition to the cache status file, each test process emits a number of
-other files. They are put in the "test log directory", which is the
-subdirectory called `testlogs` of the output directory of the target
-configuration:
-
-* `test.xml`, a JUnit-style XML file detailing the individual test cases in
-  the test shard
-* `test.log`, the console output of the test. stdout and stderr are not
-  separated.
-* `test.outputs`, the "undeclared outputs directory"; this is used by tests
-  that want to output files in addition to what they print to the terminal.
-
-There are two things that can happen during test execution that cannot during
-building regular targets: exclusive test execution and output streaming.
-
-Some tests need to be executed in exclusive mode, that is, not in parallel
-with other tests. This can be elicited either by adding `tags=["exclusive"]`
-to the test rule or running the test with `--test_strategy=exclusive`. Each
-exclusive test is run by a separate Skyframe invocation requesting the
-execution of the test after the "main" build. This is implemented in
-`SkyframeExecutor.runExclusiveTest()`.
-
-Unlike regular actions, whose terminal output is dumped when the action
-finishes, the user can request the output of tests to be streamed so that they
-get informed about the progress of a long-running test. This is specified by
-the `--test_output=streamed` command line option and implies exclusive test
-execution so that outputs of different tests are not interspersed. This is
-implemented in the aptly-named `StreamedTestOutput` class and works by polling
-changes to the `test.log` file of the test in question and dumping new bytes
-to the terminal where Bazel runs.
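-
-For instance, a test that touches a shared local resource (a hypothetical
-database test) can opt into the exclusive mode described above directly in its
-`BUILD` file:
-
-```
-sh_test(
-    name = "database_test",
-    srcs = ["database_test.sh"],
-    # Never run concurrently with other tests in this invocation.
-    tags = ["exclusive"],
-)
-```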
-
-Results of the executed tests are available on the event bus by observing
-various events (such as `TestAttempt`, `TestResult` or `TestingCompleteEvent`).
-They are dumped to the Build Event Protocol and they are emitted to the console
-by `AggregatingTestListener`.
-
-### Coverage collection
-
-Coverage is reported by the tests in LCOV format in the files
-`bazel-testlogs/$PACKAGE/$TARGET/coverage.dat`.
-
-To collect coverage, each test execution is wrapped in a script called
-`collect_coverage.sh`.
-
-This script sets up the environment of the test to enable coverage collection
-and determines where the coverage files are written by the coverage
-runtime(s). It then runs the test. A test may itself run multiple subprocesses
-and consist of parts written in multiple different programming languages (with
-separate coverage collection runtimes). The wrapper script is responsible for
-converting the resulting files to LCOV format if necessary, and for merging
-them into a single file.
-
-The interposition of `collect_coverage.sh` is done by the test strategies and
-requires `collect_coverage.sh` to be on the inputs of the test. This is
-accomplished by the implicit attribute `:coverage_support`, which is resolved
-to the value of the configuration flag `--coverage_support` (see
-`TestConfiguration.TestOptions.coverageSupport`).
-
-Some languages do offline instrumentation, meaning that the coverage
-instrumentation is added at compile time (such as C++), and others do online
-instrumentation, meaning that coverage instrumentation is added at execution
-time.
-
-Another core concept is _baseline coverage_. This is the coverage of a library,
-binary, or test if no code in it was run. The problem it solves is that if you
-want to compute the test coverage for a binary, it is not enough to merge the
-coverage of all of the tests because there may be code in the binary that is
-not linked into any test. Therefore, what we do is to emit a coverage file for
-every binary which contains only the files we collect coverage for, with no
-covered lines. The baseline coverage file for a target is at
-`bazel-testlogs/$PACKAGE/$TARGET/baseline_coverage.dat`. It is also generated
-for binaries and libraries in addition to tests if you pass the
-`--nobuild_tests_only` flag to Bazel.
-
-Baseline coverage is currently broken.
-
-We track two groups of files for coverage collection for each rule: the set of
-instrumented files and the set of instrumentation metadata files.
-
-The set of instrumented files is just that, a set of files to instrument. For
-online coverage runtimes, this can be used at runtime to decide which files to
-instrument. It is also used to implement baseline coverage.
-
-The set of instrumentation metadata files is the set of extra files a test
-needs to generate the LCOV files Bazel requires from it. In practice, this
-consists of runtime-specific files; for example, gcc emits .gcno files during
-compilation. These are added to the set of inputs of test actions if coverage
-mode is enabled.
-
-Whether or not coverage is being collected is stored in the
-`BuildConfiguration`.
-This is handy because it is an easy way to change the test action and the
-action graph depending on this bit, but it also means that if this bit is
-flipped, all targets need to be re-analyzed (some languages, such as C++,
-require different compiler options to emit code that can collect coverage,
-which mitigates this issue somewhat, since then a re-analysis is needed
-anyway).
-
-The coverage support files are depended on through labels in an implicit
-dependency so that they can be overridden by the invocation policy, which
-allows them to differ between the different versions of Bazel. Ideally, these
-differences would be removed, and we would standardize on one of them.
-
-We also generate a "coverage report" which merges the coverage collected for
-every test in a Bazel invocation. This is handled by
-`CoverageReportActionFactory` and is called from `BuildView.createResult()`.
-It gets access to the tools it needs by looking at the
-`:coverage_report_generator` attribute of the first test that is executed.
-
-## The query engine
-
-Bazel has a [little language](/query/guide) used to ask it various things
-about various graphs. The following query kinds are provided:
-
-* `bazel query` is used to investigate the target graph
-* `bazel cquery` is used to investigate the configured target graph
-* `bazel aquery` is used to investigate the action graph
-
-Each of these is implemented by subclassing `AbstractBlazeQueryEnvironment`.
-Additional query functions can be added by subclassing `QueryFunction`. In
-order to allow streaming query results, instead of collecting them to some
-data structure, a `query2.engine.Callback` is passed to `QueryFunction`, which
-calls it for results it wants to return.
-
-The result of a query can be emitted in various ways: labels, labels and rule
-classes, XML, protobuf and so on. These are implemented as subclasses of
-`OutputFormatter`.
-
-A subtle requirement of some query output formats (proto, definitely) is that
-Bazel needs to emit _all_ the information that package loading provides so
-that one can diff the output and determine whether a particular target has
-changed. As a consequence, attribute values need to be serializable, which is
-why there are only so few attribute types and why no attributes can hold
-complex Starlark values. The usual workaround is to use a label, and attach
-the complex information to the rule with that label. It's not a very
-satisfying workaround and it would be very nice to lift this requirement.
-
-## The module system
-
-Bazel can be extended by adding modules to it. Each module must subclass
-`BlazeModule` (the name is a relic of the history of Bazel when it used to be
-called Blaze) and gets information about various events during the execution
-of a command.
-
-They are mostly used to implement various pieces of "non-core" functionality
-that only some versions of Bazel (such as the one we use at Google) need:
-
-* Interfaces to remote execution systems
-* New commands
-
-The set of extension points `BlazeModule` offers is somewhat haphazard. Don't
-use it as an example of good design principles.
-
-## The event bus
-
-The main way BlazeModules communicate with the rest of Bazel is by an event
-bus (`EventBus`): a new instance is created for every build, various parts of
-Bazel can post events to it and modules can register listeners for the events
-they are interested in.
-For example, the following things are represented as events:
-
-* The list of build targets to be built has been determined
-  (`TargetParsingCompleteEvent`)
-* The top-level configurations have been determined
-  (`BuildConfigurationEvent`)
-* A target was built, successfully or not (`TargetCompleteEvent`)
-* A test was run (`TestAttempt`, `TestSummary`)
-
-Some of these events are represented outside of Bazel in the
-[Build Event Protocol](/remote/bep) (they are `BuildEvent`s). This allows not
-only `BlazeModule`s, but also things outside the Bazel process to observe the
-build. They are accessible either as a file that contains protocol messages or
-Bazel can connect to a server (called the Build Event Service) to stream
-events.
-
-This is implemented in the `build.lib.buildeventservice` and
-`build.lib.buildeventstream` Java packages.
-
-## External repositories
-
-Note: The information in this section is out of date, as code in this area has
-undergone extensive change in the past couple of years. Please refer to
-[external dependencies overview](/external/overview) for more up-to-date
-information.
-
-Whereas Bazel was originally designed to be used in a monorepo (a single
-source tree containing everything one needs to build), Bazel lives in a world
-where this is not necessarily true. "External repositories" are an abstraction
-used to bridge these two worlds: they represent code that is necessary for the
-build but is not in the main source tree.
-
-### The WORKSPACE file
-
-The set of external repositories is determined by parsing the WORKSPACE file.
-For example, a declaration like this:
-
-```
- local_repository(name="foo", path="/foo/bar")
-```
-
-results in the repository called `@foo` being available. Where this gets
-complicated is that one can define new repository rules in Starlark files,
-which can then be used to load new Starlark code, which can be used to define
-new repository rules and so on…
-
-To handle this case, the parsing of the WORKSPACE file (in
-`WorkspaceFileFunction`) is split up into chunks delineated by `load()`
-statements. The chunk index is indicated by `WorkspaceFileKey.getIndex()` and
-computing `WorkspaceFileFunction` until index X means evaluating it until the
-Xth `load()` statement.
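-
-As a sketch of the Starlark side of this (the rule name and attributes are
-invented; `repository_rule` and the `repository_ctx` methods are the real
-API), a custom repository rule fetches an archive and lays down a `BUILD` file
-so that the result looks like a regular package:
-
-```
-def _my_archive_impl(repository_ctx):
-    repository_ctx.download_and_extract(
-        url = repository_ctx.attr.url,
-        sha256 = repository_ctx.attr.sha256,
-    )
-    # Make the downloaded files addressable as targets.
-    repository_ctx.file(
-        "BUILD",
-        'filegroup(name = "all", srcs = glob(["**"]))',
-    )
-
-my_archive = repository_rule(
-    implementation = _my_archive_impl,
-    attrs = {
-        "url": attr.string(mandatory = True),
-        "sha256": attr.string(),
-    },
-)
-```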
-   This requires the checksum to be available in the
-   WORKSPACE file, but that's good for hermeticity anyway. This is shared by
-   every Bazel server instance on the same workstation, regardless of which
-   workspace or output base they are running in.
-2. A "marker file" is written for each repository under `$OUTPUT_BASE/external`
-   that contains a checksum of the rule that was used to fetch it. If the Bazel
-   server restarts but the checksum does not change, it's not re-fetched. This
-   is implemented in `RepositoryDelegatorFunction.DigestWriter`.
-3. The `--distdir` command line option designates another cache that is used to
-   look up artifacts to be downloaded. This is useful in enterprise settings
-   where Bazel should not fetch random things from the Internet. This is
-   implemented by `DownloadManager`.
-
-Once a repository is downloaded, the artifacts in it are treated as source
-artifacts. This poses a problem because Bazel usually checks for up-to-dateness
-of source artifacts by calling `stat()` on them, and these artifacts are also
-invalidated when the definition of the repository they are in changes. Thus,
-`FileStateValue`s for an artifact in an external repository need to depend on
-their external repository. This is handled by `ExternalFilesHelper`.
-
-### Repository mappings
-
-It can happen that multiple repositories want to depend on the same repository,
-but in different versions (this is an instance of the "diamond dependency
-problem"). For example, if two binaries in separate repositories in the build
-want to depend on Guava, they will presumably both refer to Guava with labels
-starting `@guava//` and expect that to mean different versions of it.
-
-Therefore, Bazel allows one to re-map external repository labels so that the
-string `@guava//` can refer to one Guava repository (such as `@guava1//`) in the
-repository of one binary and another Guava repository (such as `@guava2//`) in
-the repository of the other.
-
-Alternatively, this can also be used to **join** diamonds. If a repository
-depends on `@guava1//`, and another depends on `@guava2//`, repository mapping
-allows one to re-map both repositories to use a canonical `@guava//` repository.
-
-The mapping is specified in the WORKSPACE file as the `repo_mapping` attribute
-of individual repository definitions. It then appears in Skyframe as a member of
-`WorkspaceFileValue`, where it is plumbed to:
-
-* `Package.Builder.repositoryMapping`, which is used to transform label-valued
-  attributes of rules in the package by
-  `RuleClass.populateRuleAttributeValues()`
-* `Package.repositoryMapping`, which is used in the analysis phase (for
-  resolving things like `$(location)` which are not parsed in the loading
-  phase)
-* `BzlLoadFunction`, for resolving labels in `load()` statements
-
-## JNI bits
-
-The server of Bazel is _mostly_ written in Java. The exceptions are the parts
-that Java cannot do by itself, or couldn't do by itself when we implemented
-them. This is mostly limited to interaction with the file system, process
-control and various other low-level things.
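-
-As an illustrative sketch of the general JNI pattern (the class, method, and
-library names here are hypothetical, not the actual Bazel ones):
-
-```
-// A Java class declares native methods; their implementations live in C++.
-final class NativeProcessUtils {
-  static {
-    // Loads the native library (for example, libdemo.so on Linux).
-    System.loadLibrary("demo");
-  }
-
-  // Implemented in C++ and registered through JNI.
-  static native int getProcessId();
-
-  private NativeProcessUtils() {}
-}
-```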
-
-The C++ code lives under `src/main/native` and the Java classes with native
-methods are:
-
-* `NativePosixFiles` and `NativePosixFileSystem`
-* `ProcessUtils`
-* `WindowsFileOperations` and `WindowsFileProcesses`
-* `com.google.devtools.build.lib.platform`
-
-## Console output
-
-Emitting console output seems like a simple thing, but the confluence of running
-multiple processes (sometimes remotely), fine-grained caching, the desire to
-have nice and colorful terminal output and having a long-running server makes
-it non-trivial.
-
-Right after the RPC call comes in from the client, two `RpcOutputStream`
-instances are created (for stdout and stderr) that forward the data printed into
-them to the client. These are then wrapped in an `OutErr` (an (stdout, stderr)
-pair). Anything that needs to be printed on the console goes through these
-streams. Then these streams are handed over to
-`BlazeCommandDispatcher.execExclusively()`.
-
-Output is by default printed with ANSI escape sequences. When these are not
-desired (`--color=no`), they are stripped by an `AnsiStrippingOutputStream`. In
-addition, `System.out` and `System.err` are redirected to these output streams.
-This is so that debugging information can be printed using
-`System.err.println()` and still end up in the terminal output of the client
-(which is different from that of the server). Care is taken that if a process
-produces binary output (such as `bazel query --output=proto`), no munging of
-stdout takes place.
-
-Short messages (errors, warnings and the like) are expressed through the
-`EventHandler` interface. Notably, these are different from what one posts to
-the `EventBus` (this is confusing). Each `Event` has an `EventKind` (error,
-warning, info, and a few others) and may have a `Location` (the place in
-the source code that caused the event to happen).
-
-Some `EventHandler` implementations store the events they received. This is used
-to replay information to the UI caused by various kinds of cached processing,
-for example, the warnings emitted by a cached configured target.
-
-Some `EventHandler`s also allow posting events that eventually find their way to
-the event bus (regular `Event`s do _not_ appear there). These are
-implementations of `ExtendedEventHandler` and their main use is to replay cached
-`EventBus` events. These `EventBus` events all implement `Postable`, but not
-everything that is posted to `EventBus` necessarily implements this interface;
-only those that are cached by an `ExtendedEventHandler` need to (it would be
-nice if everything did, and most things do, but it's not enforced).
-
-Terminal output is _mostly_ emitted through `UiEventHandler`, which is
-responsible for all the fancy output formatting and progress reporting Bazel
-does. It has two inputs:
-
-* The event bus
-* The event stream piped into it through `Reporter`
-
-The only direct connection the command execution machinery (essentially, the
-rest of Bazel) has to the RPC stream to the client is through
-`Reporter.getOutErr()`, which allows direct access to these streams. It's only
-used when a command needs to dump large amounts of possibly binary data (such
-as `bazel query`).
-
-## Profiling Bazel
-
-Bazel is fast. Bazel is also slow, because builds tend to grow until just the
-edge of what's bearable. For this reason, Bazel includes a profiler which can be
-used to profile builds and Bazel itself. It's implemented in a class that's
-aptly named `Profiler`.
-It's turned on by default, although it records only abridged data so that its
-overhead is tolerable. The command line option `--record_full_profiler_data`
-makes it record everything it can.
-
-It emits a profile in the Chrome profiler format; it's best viewed in Chrome.
-Its data model is that of task stacks: one can start tasks and end tasks, and
-they are supposed to be neatly nested within each other. Each Java thread gets
-its own task stack. **TODO:** How does this work with actions and
-continuation-passing style?
-
-The profiler is started and stopped in `BlazeRuntime.initProfiler()` and
-`BlazeRuntime.afterCommand()` respectively, and it attempts to be live for as
-long as possible so that we can profile everything. To add something to the
-profile, call `Profiler.instance().profile()`. It returns a `Closeable`, whose
-closure represents the end of the task. It's best used with try-with-resources
-statements.
-
-We also do rudimentary memory profiling in `MemoryProfiler`. It's also always on
-and it mostly records maximum heap sizes and GC behavior.
-
-## Testing Bazel
-
-Bazel has two main kinds of tests: ones that observe Bazel as a "black box" and
-ones that only run the analysis phase. We call the former "integration tests"
-and the latter "unit tests", although they are more like integration tests that
-are, well, less integrated. We also have some actual unit tests, where they are
-necessary.
-
-Of integration tests, we have two kinds:
-
-1. Ones implemented using a very elaborate bash test framework under
-   `src/test/shell`.
-2. Ones implemented in Java. These are implemented as subclasses of
-   `BuildIntegrationTestCase`.
-
-`BuildIntegrationTestCase` is the preferred integration testing framework as it
-is well-equipped for most testing scenarios. As it is a Java framework, it
-provides debuggability and seamless integration with many common development
-tools. There are many examples of `BuildIntegrationTestCase` classes in the
-Bazel repository.
-
-Analysis tests are implemented as subclasses of `BuildViewTestCase`. There is a
-scratch file system you can use to write `BUILD` files, then various helper
-methods can request configured targets, change the configuration and assert
-various things about the result of the analysis.
diff --git a/7.6.1/contribute/design-documents.mdx b/7.6.1/contribute/design-documents.mdx
deleted file mode 100644
index ec2cbec..0000000
--- a/7.6.1/contribute/design-documents.mdx
+++ /dev/null
@@ -1,254 +0,0 @@
----
-title: 'Design Documents'
----
-
-
-
-If you're planning to add, change, or remove a user-facing feature, or make a
-*significant architectural change* to Bazel, you **must** write a design
-document and have it reviewed before you can submit the change.
-
-Here are some examples of significant changes:
-
-* Addition or deletion of native build rules
-* Breaking changes to native rules
-* Changes to native build rule semantics that affect the behavior of more
-  than a single rule
-* Changes to Bazel's rule definition API
-* Changes to the APIs that Bazel uses to connect to other systems
-* Changes to the Starlark language, semantics, or APIs
-* Changes that could have a pervasive effect on Bazel performance or memory
-  usage (for better or for worse)
-* Changes to widely used internal APIs
-* Changes to flags and the command-line interface.
-
-## Reasons for design reviews
-
-When you write a design document, you can coordinate with other Bazel developers
-and seek guidance from Bazel's core team.
-For example, when a proposal adds,
-removes, or modifies any function or object available in BUILD, WORKSPACE, or
-bzl files, add the [Starlark team](maintainers-guide.md) as reviewers.
-Design documents are reviewed before submission because:
-
-* Bazel is a very complex system; seemingly innocuous local changes can have
-  significant global consequences.
-* The team gets many feature requests from users; such requests need to be
-  evaluated not only for technical feasibility but also for importance with
-  regard to other feature requests.
-* Bazel features are frequently implemented by people outside the core team;
-  such contributors have widely varying levels of Bazel expertise.
-* The Bazel team itself has varying levels of expertise; no single team member
-  has a complete understanding of every corner of Bazel.
-* Changes to Bazel must account for backward compatibility and avoid breaking
-  changes.
-
-Bazel's design review policy helps to maximize the likelihood that:
-
-* all feature requests get a baseline level of scrutiny.
-* the right people will weigh in on designs before we've invested in an
-  implementation that may not work.
-
-To help you get started, take a look at the design documents in the
-[Bazel Proposals Repository](https://github.com/bazelbuild/proposals).
-Designs are works in progress, so implementation details can change over time
-and with feedback. The published design documents capture the initial design,
-and *not* the ongoing changes as designs are implemented. Always go to the
-documentation for descriptions of current Bazel functionality.
-
-## Contributor Workflow
-
-As a contributor, you can write a design document, send pull requests, and
-request reviewers for your proposal.
-
-### Write the design document
-
-All design documents must have a header that includes:
-
-* author
-* date of last major change
-* list of reviewers, including one (and only one)
-  [lead reviewer](#lead-reviewer)
-* current status (_draft_, _in review_, _approved_, _rejected_,
-  _being implemented_, _implemented_)
-* link to discussion thread (_to be added after the announcement_)
-
-The document can be written either [as a world-readable Google Doc](#gdocs)
-or [using Markdown](#markdown). Read below for a
-[Markdown / Google Docs comparison](#markdown-versus-gdocs).
-
-Proposals that have a user-visible impact must have a section documenting the
-impact on backward compatibility (and a rollout plan if needed).
-
-### Create a Pull Request
-
-Share your design doc by creating a pull request (PR) to add the document to
-[the design index](https://github.com/bazelbuild/proposals). Add
-your markdown file or a document link to your PR.
-
-When possible, [choose a lead reviewer](#lead-reviewer)
-and cc other reviewers. If you don't choose a lead reviewer, a Bazel
-maintainer will assign one to your PR.
-
-After you create your PR, reviewers can make preliminary comments during the
-code review. For example, the lead reviewer can suggest extra reviewers, or
-point out missing information. The lead reviewer approves the PR when they
-believe the review process can start. This doesn't mean the proposal is perfect
-or will be approved; it means that the proposal contains enough information to
-start the discussion.
-
-### Announce the new proposal
-
-Send an announcement to
-[bazel-dev](https://groups.google.com/forum/#!forum/bazel-dev) when
-the PR is submitted.
-
-You may copy other groups (for example,
-[bazel-discuss](https://groups.google.com/forum/#!forum/bazel-discuss))
-to get feedback from Bazel end users.
-
-### Iterate with reviewers
-
-Anyone interested can comment on your proposal. Try to answer questions,
-clarify the proposal, and address concerns.
-
-Discussion should happen on the announcement thread. If the proposal is in a
-Google Doc, comments may be used instead (note that anonymous comments are
-allowed).
-
-### Update the status
-
-When iteration is complete, create a new PR to update the status of the
-proposal. Send the PR to the same lead reviewer and cc the other reviewers.
-
-To officially accept the proposal, the lead reviewer approves the PR after
-ensuring that the other reviewers agree with the decision.
-
-There must be at least 1 week between the first announcement and the approval of
-a proposal. This ensures that users have had enough time to read the document
-and share their concerns.
-
-Implementation can begin before the proposal is accepted, for example as a
-proof-of-concept or an experiment. However, you cannot submit the change
-before the review is complete.
-
-### Choosing a lead reviewer
-
-A lead reviewer should be a domain expert who is:
-
-* Knowledgeable about the relevant subsystems
-* Objective and capable of providing constructive feedback
-* Available for the entire review period to lead the process
-
-Consider checking the contacts for various [team
-labels](/contribute/maintainers-guide#team-labels).
-
-## Markdown vs Google Docs
-
-Decide what works best for you, since both are accepted.
-
-Benefits of using Google Docs:
-
-* Effective for brainstorming, since it is easy to get started with.
-* Collaborative editing.
-* Quick iteration.
-* Easy way to suggest edits.
-
-Benefits of using Markdown files:
-
-* Clean URLs for linking.
-* Explicit record of revisions.
-* No forgetting to set up access rights before publicizing a link.
-* Easily searchable with search engines.
-* Future-proof: Plain text is not at the mercy of any specific tool
-  and doesn't require an Internet connection.
-* It is possible to update them even if the author is not around anymore.
-* They can be processed automatically (update/detect dead links, fetch
-  list of authors, etc.).
-
-You can choose to first iterate on a Google Doc, and then convert it to
-Markdown for posterity.
-
-### Using Google Docs
-
-For consistency, use the [Bazel design doc template](
-https://docs.google.com/document/d/1cE5zrjrR40RXNg64XtRFewSv6FrLV6slGkkqxBumS1w/edit).
- -## Reviewer workflow - -A reviewer comments, reviews and approves design documents. - -### General reviewer responsibilities - -You're responsible for reviewing design documents, asking for additional -information if needed, and approving a design that passes the review process. - -#### When you receive a new proposal - -1. Take a quick look at the document. -1. Comment if critical information is missing, or if the design doesn't fit - with the goals of the project. -1. Suggest additional reviewers. -1. Approve the PR when it is ready for review. - -#### During the review process - -1. Engage in a dialogue with the design author about issues that are problematic - or require clarification. -1. If appropriate, invite comments from non-reviewers who should be aware of - the design. -1. Decide which comments must be addressed by the author as a prerequisite to - approval. -1. Write "LGTM" (_Looks Good To Me_) in the discussion thread when you are - happy with the current state of the proposal. - -Follow this process for all design review requests. Do not approve designs -affecting Bazel if they are not in the -[design index](https://github.com/bazelbuild/proposals). - -### Lead reviewer responsibilities - -You're responsible for making the go / no-go decision on implementation -of a pending design. If you're not able to do this, you should identify a -suitable delegate (reassign the PR to the delegate), or reassign the bug to a -Bazel manager for further disposition. - -#### During the review process - -1. Ensure that the comment and design iteration process moves forward - constructively. -1. Prior to approval, ensure that concerns from other reviewers have been - resolved. - -#### After approval by all reviewers - -1. Make sure there has been at least 1 week since the announcement on the - mailing list. -1. Make sure the PR updates the status. -1. Approve the PR sent by the proposal author. - -#### Rejecting designs - -1. Make sure the PR author sends a PR; or send them a PR. -1. The PR updates the status of the document. -1. Add a comment to the document explaining why the design can't be approved in - its current state, and outlining next steps, if any (such as "revisit invalid - assumptions and resubmit"). diff --git a/7.6.1/contribute/docs-style-guide.mdx b/7.6.1/contribute/docs-style-guide.mdx deleted file mode 100644 index f50c9eb..0000000 --- a/7.6.1/contribute/docs-style-guide.mdx +++ /dev/null @@ -1,217 +0,0 @@ ---- -title: 'Bazel docs style guide' ---- - - - -Thank you for contributing to Bazel's documentation. This serves as a quick -documentation style guide to get you started. For any style questions not -answered by this guide, follow the -[Google developer documentation style guide](https://developers.google.com/style). - -## Defining principles - -Bazel docs should uphold these principles: - -- **Concise.** Use as few words as possible. -- **Clear.** Use plain language. Write without jargon for a fifth-grade - reading level. -- **Consistent.** Use the same words or phrases for repeated concepts - throughout the docs. -- **Correct.** Write in a way where the content stays correct for as long as - possible by avoiding time-based information and promises for the future. - -## Writing - -This section contains basic writing tips. - -### Headings - -- Page-level headings start at H2. (H1 headings are used as page titles.) -- Make headers as short as is sensible. This way, they fit in the TOC - without wrapping. 
-
-  - Yes: Permissions
-  - No: A brief note on permissions
-
-- Use sentence case for headings.
-
-  - Yes: Set up your workspace
-  - No: Set Up Your Workspace
-
-- Try to make headings task-based or actionable. If a heading is conceptual,
-  it can be based on understanding, but write to what the user does.
-
-  - Yes: Preserving graph order
-  - No: On the preservation of graph order
-
-### Names
-
-- Capitalize proper nouns, such as Bazel and Starlark.
-
-  - Yes: At the end of the build, Bazel prints the requested targets.
-  - No: At the end of the build, bazel prints the requested targets.
-
-- Keep it consistent. Don't introduce new names for existing concepts. Where
-  applicable, use the term defined in the
-  [Glossary](/reference/glossary).
-
-  - For example, if you're writing about issuing commands on a
-    terminal, don't use both terminal and command line on the page.
-
-### Page scope
-
-- Each page should have one purpose, and that should be defined at the
-  beginning. This helps readers find what they need more quickly.
-
-  - Yes: This page covers how to install Bazel on Windows.
-  - No: (No introductory sentence.)
-
-- At the end of the page, tell the reader what to do next. For pages where
-  there is no clear action, you can include links to similar concepts,
-  examples, or other avenues for exploration.
-
-### Subject
-
-In Bazel documentation, the audience should primarily be users—the people using
-Bazel to build their software.
-
-- Address your reader as "you". (If for some reason you can't use "you",
-  use gender-neutral language, such as they.)
-  - Yes: To build Java code using Bazel,
-    you must install a JDK.
-  - Maybe: For users to build Java code with Bazel, they must install a JDK.
-  - No: For a user to build Java code with
-    Bazel, he or she must install a JDK.
-
-- If your audience is NOT general Bazel users, define the audience at the
-  beginning of the page or in the section. Other audiences can include
-  maintainers, contributors, migrators, or other roles.
-- Avoid "we". In user docs, there is no author; just tell people what's
-  possible.
-  - Yes: As Bazel evolves, you should update your code base to maintain
-    compatibility.
-  - No: Bazel is evolving, and we will make changes to Bazel that at
-    times will be incompatible and require some changes from Bazel users.
-
-### Temporal
-
-Where possible, avoid terms that orient things in time, such as referencing
-specific dates (Q2 2022) or saying "now", "currently", or "soon." These go
-stale quickly and could be incorrect if they're future projections. Instead,
-specify a version level, such as "Bazel X.x and higher supports
-\<feature\>", or link to a GitHub issue.
-
-- Yes: Bazel 0.10.0 or later supports
-  remote caching.
-- No: Bazel will soon support remote
-  caching, likely in October 2017.
-
-### Tense
-
-- Use present tense. Avoid past or future tense unless absolutely necessary
-  for clarity.
-  - Yes: Bazel issues an error when it
-    finds dependencies that don't conform to this rule.
-  - No: If Bazel finds a dependency that
-    does not conform to this rule, Bazel will issue an error.
-
-- Where possible, use active voice (where a subject acts upon an object) not
-  passive voice (where an object is acted upon by a subject). Generally,
-  active voice makes sentences clearer because it shows who is responsible. If
-  using active voice detracts from clarity, use passive voice.
-  - Yes: Bazel initiates X and uses the
-    output to build Y.
-  - No: X is initiated by Bazel and then
-    afterward Y will be built with the output.
-
-### Tone
-
-Write with a business-friendly tone.
-
-- Avoid colloquial language. It's harder to translate phrases that are
-  specific to English.
-  - Yes: Good rulesets
-  - No: So what is a good ruleset?
-
-- Avoid overly formal language. Write as though you're explaining the
-  concept to someone who is curious about tech, but doesn't know the details.
-
-## Formatting
-
-### File type
-
-For readability, wrap lines at 80 characters. Long links or code snippets
-may be longer, but should start on a new line.
-
-Note: Where possible, use Markdown instead of HTML in your files. Follow the
-[GitHub Markdown Syntax Guide](https://guides.github.com/features/mastering-markdown/#syntax)
-for recommended Markdown style.
-
-### Links
-
-- Use descriptive link text instead of "here" or "below". This practice
-  makes it easier to scan a doc and is better for screen readers.
-  - Yes: For more details, see [Installing Bazel].
-  - No: For more details, see [here].
-
-- End the sentence with the link, if possible.
-  - Yes: For more details, see [link].
-  - No: See [link] for more information.
-
-### Lists
-
-- Use an ordered list to describe how to accomplish a task with steps.
-- Use an unordered list to list things that aren't task-based. (There should
-  still be an order of sorts, such as alphabetical, importance, etc.)
-- Write with parallel structure. For example:
-  1. Make all the list items sentences.
-  1. Start with verbs that are the same tense.
-  1. Use an ordered list if there are steps to follow.
-
-### Placeholders
-
-- Use angle brackets to denote a variable that users should change.
-  In Markdown, escape the angle brackets with a backslash: `\<example\>`.
-  - Yes: `bazel help <command>`: Prints
-    help and options for `<command>`
-  - No: bazel help _command_: Prints help
-    and options for "command"
-
-- Especially for complicated code samples, use placeholders that make sense
-  in context.
-
-### Table of contents
-
-Use the auto-generated TOC supported by the site. Don't add a manual TOC.
-
-## Code
-
-Code samples are developers' best friends. You probably know how to write these
-already, but here are a few tips.
-
-If you're referencing a small snippet of code, you can embed it in a sentence.
-If you want the reader to use the code, such as copying a command, use a code
-block.
-
-### Code blocks
-
-- Keep it short. Eliminate all redundant or unnecessary text from a code
-  sample.
-- In Markdown, specify the type of code block by adding the sample's language:
-
-````
-```shell
-...
-```
-````
-
-- Separate commands and output into different code blocks.
-
-### Inline code formatting
-
-- Use code style for filenames, directories, paths, and small bits of code.
-- Use inline code styling instead of _italics_, "quotes," or **bolding**.
-  - Yes: `bazel help <command>`: Prints
-    help and options for `<command>`
-  - No: bazel help _command_: Prints help
-    and options for "command"
diff --git a/7.6.1/contribute/docs.mdx b/7.6.1/contribute/docs.mdx
deleted file mode 100644
index cc240cc..0000000
--- a/7.6.1/contribute/docs.mdx
+++ /dev/null
@@ -1,43 +0,0 @@
----
-title: 'Contribute to Bazel documentation'
----
-
-
-
-Thank you for contributing to Bazel's documentation! There are a few ways to
-help create better docs for our community.
-
-## Documentation types
-
-This site includes a few types of content.
-
- - *Narrative documentation*, which is written by technical writers and
-   engineers. Most of this site is narrative documentation that covers
-   conceptual and task-based guides.
- - *Reference documentation*, which is generated from code comments.
-   You can't make changes to the reference doc pages directly, but instead need
-   to change their source.
-
-## Documentation infrastructure
-
-Bazel documentation is served from Google and the source files are mirrored in
-Bazel's GitHub repository. You can make changes to the source files in GitHub.
-If approved, you can merge the changes and a Bazel maintainer will update the
-website source to publish your updates.
-
-
-## Small changes
-
-You can approach small changes, such as fixing errors or typos, in a couple of
-ways.
-
- - **Pull request**. You can create a pull request in GitHub with the
-   [web-based editor](https://docs.github.com/repositories/working-with-files/managing-files/editing-files) or on a branch.
- - **Bug**. You can file a bug with details and suggested changes and the Bazel
-   documentation owners will make the update.
-
-## Large changes
-
-If you want to make substantial changes to existing documentation or propose
-new documentation, you can either create a pull request or start with a Google
-doc and contact the Bazel Owners to collaborate.
diff --git a/7.6.1/contribute/index.mdx b/7.6.1/contribute/index.mdx
deleted file mode 100644
index ee66772..0000000
--- a/7.6.1/contribute/index.mdx
+++ /dev/null
@@ -1,82 +0,0 @@
----
-title: 'Contributing to Bazel'
----
-
-
-
-There are many ways to help the Bazel project and ecosystem.
-
-## Provide feedback
-
-As you use Bazel, you may find things that can be improved.
-You can help by [reporting issues](http://github.com/bazelbuild/bazel/issues)
-when:
-
- - Bazel crashes or you encounter a bug that can [only be resolved using `bazel
-   clean`](/run/build#correct-incremental-rebuilds).
- - The documentation is incomplete or unclear. You can also report issues
-   from the page you are viewing by using the "Create issue"
-   link at the top right corner of the page.
- - An error message could be improved.
-
-## Participate in the community
-
-You can engage with the Bazel community by:
-
- - Answering questions [on Stack Overflow](
-   https://stackoverflow.com/questions/tagged/bazel).
- - Helping other users [on Slack](https://slack.bazel.build).
- - Improving documentation or [contributing examples](
-   https://github.com/bazelbuild/examples).
- - Sharing your experience or your tips, for example, on a blog or social media.
-
-## Contribute code
-
-Bazel is a large project and making a change to the Bazel source code
-can be difficult.
-
-You can contribute to the Bazel ecosystem by:
-
- - Helping rules maintainers by contributing pull requests.
- - Creating new rules and open-sourcing them.
- - Contributing to Bazel-related tools, for example, migration tools.
- - Improving Bazel integration with other IDEs and tools.
-
-Before making a change, [create a GitHub
-issue](http://github.com/bazelbuild/bazel/issues)
-or email [bazel-discuss@](mailto:bazel-discuss@googlegroups.com).
-
-The most helpful contributions fix bugs or add features (as opposed
-to stylistic, refactoring, or "cleanup" changes). Your change should
-include tests and documentation, keeping in mind backward-compatibility,
-portability, and the impact on memory usage and performance.
-
-To learn about how to submit a change, see the
-[patch acceptance process](/contribute/patch-acceptance).
-
-## Bazel's code description
-
-Bazel has a large codebase with code in multiple locations. See the [codebase
-guide](/contribute/codebase) for more details.
-
-Bazel is organized as follows:
-
-* Client code is in `src/main/cpp` and provides the command-line interface.
-* Protocol buffers are in `src/main/protobuf`.
-* Server code is in `src/main/java` and `src/test/java`.
-  * Core code, which is mostly composed of [SkyFrame](/reference/skyframe)
-    and some utilities.
-  * Built-in rules are in `com.google.devtools.build.lib.rules` and in
-    `com.google.devtools.build.lib.bazel.rules`. You might want to read about
-    the [Challenges of Writing Rules](/rules/challenges) first.
-* Java native interfaces are in `src/main/native`.
-* Various tooling for language support is described in the list in the
-  [compiling Bazel](/install/compile-source) section.
-
-
-### Searching Bazel's source code
-
-To quickly search through Bazel's source code, use
-[Bazel Code Search](https://source.bazel.build/). You can navigate Bazel's
-repositories, branches, and files. You can also view history, diffs, and blame
-information. To learn more, see the
-[Bazel Code Search User Guide](/contribute/search).
diff --git a/7.6.1/contribute/maintainers-guide.mdx b/7.6.1/contribute/maintainers-guide.mdx
deleted file mode 100644
index 468bfc1..0000000
--- a/7.6.1/contribute/maintainers-guide.mdx
+++ /dev/null
@@ -1,214 +0,0 @@
----
-title: 'Guide for Bazel Maintainers'
----
-
-
-
-This is a guide for the maintainers of the Bazel open source project.
-
-If you are looking to contribute to Bazel, please read [Contributing to
-Bazel](/contribute) instead.
-
-The objectives of this page are to:
-
-1. Serve as the maintainers' source of truth for the project’s contribution
-   process.
-1. Set expectations between the community contributors and the project
-   maintainers.
-
-Bazel's [core group of contributors](/contribute/policy) has dedicated
-subteams to manage aspects of the open source project. These are:
-
-* **Release Process**: Manage Bazel's release process.
-* **Green Team**: Grow a healthy ecosystem of rules and tools.
-* **Developer Experience Gardeners**: Encourage external contributions, review
-  issues and pull requests, and make our development workflow more open.
-
-## Releases
-
-* [Release Playbook](https://github.com/bazelbuild/continuous-integration/blob/master/docs/release-playbook.md)
-* [Testing local changes with downstream projects](https://github.com/bazelbuild/continuous-integration/blob/master/docs/downstream-testing.md)
-
-## Continuous Integration
-
-Read the Green team's guide to Bazel's CI infrastructure on the
-[bazelbuild/continuous-integration](https://github.com/bazelbuild/continuous-integration/blob/master/buildkite/README.md)
-repository.
-
-## Lifecycle of an Issue
-
-1. A user creates an issue by choosing one of the
-   [issue templates](https://github.com/bazelbuild/bazel/issues/new/choose)
-   and it enters the pool of [unreviewed open
-   issues](https://github.com/bazelbuild/bazel/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+-label%3Auntriaged+-label%3Ap2+-label%3Ap1+-label%3Ap3+-label%3Ap4+-label%3Ateam-Starlark+-label%3Ateam-Rules-CPP+-label%3Ateam-Rules-Java+-label%3Ateam-XProduct+-label%3Ateam-Android+-label%3Ateam-Apple+-label%3Ateam-Configurability++-label%3Ateam-Performance+-label%3Ateam-Rules-Server+-label%3Ateam-Core+-label%3Ateam-Rules-Python+-label%3Ateam-Remote-Exec+-label%3Ateam-Local-Exec+-label%3Ateam-Bazel).
-1. A member on the Developer Experience (DevEx) subteam rotation reviews the
-   issue.
-    1. If the issue is **not a bug** or a **feature request**, the DevEx member
-       will usually close the issue and redirect the user to
-       [StackOverflow](https://stackoverflow.com/questions/tagged/bazel) and
-       [bazel-discuss](https://groups.google.com/forum/#!forum/bazel-discuss) for
-       higher visibility on the question.
-    1. If the issue belongs in one of the rules repositories owned by the
-       community, like [rules_apple](https://github.com/bazelbuild/rules_apple),
-       the DevEx member will [transfer this issue](https://docs.github.com/en/free-pro-team@latest/github/managing-your-work-on-github/transferring-an-issue-to-another-repository)
-       to the correct repository.
-    1. If the issue is vague or has missing information, the DevEx member will
-       assign the issue back to the user to request more information before
-       continuing. This usually occurs when the user does not choose the right
-       [issue template](https://github.com/bazelbuild/bazel/issues/new/choose)
-       or provides incomplete information.
-1. After reviewing the issue, the DevEx member decides if the issue requires
-   immediate attention. If it does, they will assign the **P0**
-   [priority](#priority) label and an owner from the list of team leads.
-1. The DevEx member assigns the `untriaged` label and exactly one [team
-   label](#team-labels) for routing.
-1. The DevEx member also assigns exactly one `type:` label, such as `type: bug`
-   or `type: feature request`, according to the type of the issue.
-1. For platform-specific issues, the DevEx member assigns one `platform:` label,
-   such as `platform:apple` for Mac-specific issues.
-1. If the issue is low priority and can be worked on by a new community
-   contributor, the DevEx member assigns the `good first issue` label.
-At this stage, the issue enters the pool of [untriaged open
-issues](https://github.com/bazelbuild/bazel/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged).
-
-Each Bazel subteam will triage all issues under labels they own, preferably on a
-weekly basis. The subteam will review and evaluate the issue and provide a
-resolution, if possible. If you are an owner of a team label, see [this section
-](#label-own) for more information.
-
-When an issue is resolved, it can be closed.
-
-## Lifecycle of a Pull Request
-
-1. A user creates a pull request.
-1. If you are a member of a Bazel team and are sending a PR against your own
-   area, you are responsible for assigning your team label and finding the best
-   reviewer.
-1. Otherwise, during daily triage, a DevEx member assigns one
-   [team label](#team-labels) and the team's technical lead (TL) for routing.
-    1. The TL may optionally assign someone else to review the PR.
-1. The assigned reviewer reviews the PR and works with the author until it is
-   approved or dropped.
-1. If approved, the reviewer **imports** the PR's commit(s) into Google's
-   internal version control system for further tests. As Bazel is the same build
-   system used internally at Google, we need to test all PR commits against the
-   internal test suite. This is the reason why we do not merge PRs directly.
-1. If the imported commit passes all internal tests, the commit will be squashed
-   and exported back out to GitHub.
-1. When the commit merges into master, GitHub automatically closes the PR.
-
-
-## My team owns a label. What should I do?
-
-Subteams need to triage all issues in the [labels they own](#team-labels),
-preferably on a weekly basis.
-
-### Issues
-
-1. Filter the list of issues by your team label **and** the `untriaged` label.
-1. Review the issue.
-1. Identify a [priority level](#priority) and assign the label.
-    1. The issue may have already been prioritized by the DevEx subteam if it's
-       a P0. Re-prioritize if needed.
-    1. Each issue needs to have exactly one [priority label](#priority). If an
-       issue is either P0 or P1, we assume that it is actively being worked on.
-1. Remove the `untriaged` label.
-
-Note that you need to be in the [bazelbuild
-organization](https://github.com/bazelbuild) to be able to add or remove labels.
-
-### Pull Requests
-
-1. Filter the list of pull requests by your team label.
-1. Review open pull requests.
-    1. **Optional**: If you are assigned for the review but are not the right
-       fit for it, re-assign the appropriate reviewer to perform a code review.
-1. Work with the pull request creator to complete a code review.
-1. Approve the PR.
-1. Ensure that all tests pass.
-1. Import the patch to the internal version control system and run the internal
-   presubmits.
-1. Submit the internal patch. If the patch submits and exports successfully, the
-   PR will be closed automatically by GitHub.
-
-## Priority
-
-The following definitions for priority will be used by the maintainers to triage
-issues.
-
-* [**P0**](https://github.com/bazelbuild/bazel/labels/P0) - Major broken
-  functionality that causes a Bazel release (minus release candidates) to be
-  unusable, or a downed service that severely impacts development of the Bazel
-  project. This includes regressions introduced in a new release that block a
-  significant number of users, or an incompatible breaking change that was not
-  compliant with the [Breaking
-  Change](https://docs.google.com/document/d/1q5GGRxKrF_mnwtaPKI487P8OdDRh2nN7jX6U-FXnHL0/edit?pli=1#heading=h.ceof6vpkb3ik)
-  policy. No practical workaround exists.
-* [**P1**](https://github.com/bazelbuild/bazel/labels/P1) - Critical defect or
-  feature which should be addressed in the next release, or a serious issue that
-  impacts many users (including the development of the Bazel project), but a
-  practical workaround exists. Typically does not require immediate action. In
-  high demand and planned in the current quarter's roadmap.
-* [**P2**](https://github.com/bazelbuild/bazel/labels/P2) - Defect or feature
-  that should be addressed but that we are not currently working on. A moderate
-  live issue in a released Bazel version that is inconvenient for a user and
-  needs to be addressed in a future release, or for which an easy workaround
-  exists.
-* [**P3**](https://github.com/bazelbuild/bazel/labels/P3) - Desirable minor bug
-  fix or enhancement with small impact. Not prioritized into Bazel roadmaps or
-  any imminent release; however, community contributions are encouraged.
-* [**P4**](https://github.com/bazelbuild/bazel/labels/P4) - Low priority defect
-  or feature request that is unlikely to get closed. Can also be kept open for a
-  potential re-prioritization if more users are impacted.
-* [**ice-box**](https://github.com/bazelbuild/bazel/issues?q=label%3Aice-box+is%3Aclosed)
-  - Issues that we currently don't have time to deal with, nor the time to
-  accept contributions for. We will close these issues to indicate that
-  nobody is working on them, but will continue to monitor their validity over
-  time and revive them if enough people are impacted and if we happen to have
-  resources to deal with them. As always, feel free to comment or add reactions
-  to these issues even when closed.
- -## Team labels - -* [`team-Android`](https://github.com/bazelbuild/bazel/labels/team-Android): Issues for Android team - * Contact: [ahumesky](https://github.com/ahumesky) -* [`team-Bazel`](https://github.com/bazelbuild/bazel/labels/team-Bazel): General Bazel product/strategy issues - * Contact: [sventiffe](https://github.com/sventiffe) -* [`team-CLI`](https://github.com/bazelbuild/bazel/labels/team-CLI): Console UI - * Contact: [meisterT](https://github.com/meisterT) -* [`team-Configurability`](https://github.com/bazelbuild/bazel/labels/team-Configurability): Issues for Configurability team. Includes: Core build configuration and transition system. Does *not* include: Changes to new or existing flags - * Contact: [gregestren](https://github.com/gregestren) -* [`team-Core`](https://github.com/bazelbuild/bazel/labels/team-Core): Skyframe, bazel query, BEP, options parsing, bazelrc - * Contact: [haxorz](https://github.com/haxorz) -* [`team-Documentation`](https://github.com/bazelbuild/bazel/labels/team-Documentation): Issues for Documentation team - * Contact: [philomathing](https://github.com/philomathing) -* [`team-ExternalDeps`](https://github.com/bazelbuild/bazel/labels/team-ExternalDeps): External dependency handling, Bzlmod, remote repositories, WORKSPACE file - * Contact: [meteorcloudy](https://github.com/meteorcloudy) -* [`team-Loading-API`](https://github.com/bazelbuild/bazel/labels/team-Loading-API): BUILD file and macro processing: labels, package(), visibility, glob - * Contact: [brandjon](https://github.com/brandjon) -* [`team-Local-Exec`](https://github.com/bazelbuild/bazel/labels/team-Local-Exec): Issues for Execution (Local) team - * Contact: [meisterT](https://github.com/meisterT) -* [`team-OSS`](https://github.com/bazelbuild/bazel/labels/team-OSS): Issues for Bazel OSS team: installation, release process, Bazel packaging, website, docs infrastructure - * Contact: [meteorcloudy](https://github.com/meteorcloudy) -* [`team-Performance`](https://github.com/bazelbuild/bazel/labels/team-Performance): Issues for Bazel Performance team - * Contact: [meisterT](https://github.com/meisterT) -* [`team-Remote-Exec`](https://github.com/bazelbuild/bazel/labels/team-Remote-Exec): Issues for Execution (Remote) team - * Contact: [coeuvre](https://github.com/coeuvre) -* [`team-Rules-API`](https://github.com/bazelbuild/bazel/labels/team-Rules-API): API for writing rules/aspects: providers, runfiles, actions, artifacts - * Contact: [comius](https://github.com/comius) -* [`team-Rules-CPP`](https://github.com/bazelbuild/bazel/labels/team-Rules-CPP) / [`team-Rules-ObjC`](https://github.com/bazelbuild/bazel/labels/team-Rules-ObjC): Issues for C++/Objective-C rules, including native Apple rule logic - * Contact: [oquenchil](https://github.com/oquenchil) -* [`team-Rules-Java`](https://github.com/bazelbuild/bazel/labels/team-Rules-Java): Issues for Java rules - * Contact: [hvadehra](https://github.com/hvadehra) -* [`team-Rules-Python`](https://github.com/bazelbuild/bazel/labels/team-Rules-Python): Issues for the native Python rules - * Contact: [rickeylev](https://github.com/rickeylev) -* [`team-Rules-Server`](https://github.com/bazelbuild/bazel/labels/team-Rules-Server): Issues for server-side rules included with Bazel - * Contact: [comius](https://github.com/comius) -* [`team-Starlark-Integration`](https://github.com/bazelbuild/bazel/labels/team-Starlark-Integration): Non-API Bazel + Starlark integration. 
-  Includes: how Bazel triggers the Starlark interpreter, Stardoc, builtins
-  injection, character encoding. Does *not* include: BUILD or .bzl language
-  issues.
-    * Contact: [brandjon](https://github.com/brandjon)
-* [`team-Starlark-Interpreter`](https://github.com/bazelbuild/bazel/labels/team-Starlark-Interpreter): Issues for the Starlark interpreter (anything in [java.net.starlark](https://github.com/bazelbuild/bazel/tree/master/src/main/java/net/starlark/java)). BUILD and .bzl API issues (which represent Bazel's *integration* with Starlark) go in `team-Build-Language`.
-    * Contact: [brandjon](https://github.com/brandjon)
-
-For new issues, we deprecated the `category: *` labels in favor of the team
-labels.
-
-See the full list of labels [here](https://github.com/bazelbuild/bazel/labels).
diff --git a/7.6.1/contribute/naming.mdx b/7.6.1/contribute/naming.mdx
deleted file mode 100644
index 144b08a..0000000
--- a/7.6.1/contribute/naming.mdx
+++ /dev/null
@@ -1,73 +0,0 @@
----
-title: 'Naming a Bazel related project'
----
-
-
-
-First, thank you for contributing to the Bazel ecosystem! Please reach out to
-the Bazel community on the
-[bazel-discuss mailing list](https://groups.google.com/forum/#!forum/bazel-discuss
-) to share your project and its suggested name.
-
-If you are building a Bazel-related tool or sharing your Starlark rules,
-we recommend following these guidelines for the name of your project:
-
-## Naming Starlark rules
-
-See [Deploying new Starlark rules](/rules/deploying)
-in the docs.
-
-## Naming other Bazel related tools
-
-This section applies if you are building a tool to enrich the Bazel ecosystem.
-For example, a new IDE plugin or a new build system migrator.
-
-Picking a good name for your tool can be hard. If we’re not careful and use too
-many codenames, the Bazel ecosystem could become very difficult to understand
-for newcomers.
-
-Follow these guidelines for naming Bazel tools:
-
-1. Prefer **not introducing a new brand name**: "*Bazel*" is already a new brand
-for our users; we should avoid confusing them with too many new names.
-
-2. Prefer **using a name that includes "Bazel"**: This helps to express that it
-is a Bazel-related tool, and it also helps people find it with a search engine.
-
-3. Prefer **using names that are descriptive about what the tool is doing**:
-Ideally, the name should not need a subtitle for users to have a good first
-guess at what the tool does. Using English words separated by spaces is a good
-way to achieve this.
-
-4. **It is not a requirement to use a floral or food theme**: Bazel evokes
-[basil](https://en.wikipedia.org/wiki/Basil), the plant. You do not need to
-look for a name that is a plant, a food, or that relates to "basil."
-
-5. **If your tool relates to another third party brand, use it only as a
-descriptor**: For example, use "Bazel migrator for CMake" instead of
-"CMake Bazel migrator".
-
-These guidelines also apply to the GitHub repository URL. Reading the repository
-URL should help people understand what the tool does. Of course, the repository
-name can be shorter, and it must use lowercase letters and dashes instead of
-spaces.
-
-
-Examples of good names:
-
-* *Bazel for Eclipse*: Users will understand that if they want to use Bazel
-  with Eclipse, this is where they should be looking. It uses a third party brand
-  as a descriptor.
-* *Bazel buildfarm*: A "buildfarm" is a
-  [compile farm](https://en.wikipedia.org/wiki/Compile_farm). Users
-  will understand that this project relates to building on servers.
-
-Examples of names to avoid:
-
-* *Ocimum*: The [scientific name of basil](https://en.wikipedia.org/wiki/Ocimum)
-  does not relate enough to the Bazel project.
-* *Bazelizer*: The tool behind this name could do a lot of things; this name is
-  not descriptive enough.
-
-Note that these recommendations are aligned with the
-[guidelines](https://opensource.google.com/docs/releasing/preparing/#name)
-Google uses when open sourcing a project.
diff --git a/7.6.1/contribute/patch-acceptance.mdx b/7.6.1/contribute/patch-acceptance.mdx
deleted file mode 100644
index 87376af..0000000
--- a/7.6.1/contribute/patch-acceptance.mdx
+++ /dev/null
@@ -1,52 +0,0 @@
----
-title: 'Patch Acceptance Process'
----
-
-
-
-This page outlines how contributors can propose and make changes to the Bazel
-code base.
-
-1. Read the [Bazel Contribution policy](/contribute/policy).
-1. Create a [GitHub issue](https://github.com/bazelbuild/bazel/) to
-   discuss your plan and design. Pull requests that change or add behavior
-   need a corresponding issue for tracking.
-1. If you're proposing significant changes, write a
-   [design document](/contribute/design-documents).
-1. Ensure you've signed a [Contributor License
-   Agreement](https://cla.developers.google.com).
-1. Prepare a git commit that implements the feature. Don't forget to add tests
-   and update the documentation. If your change has user-visible effects, please
-   [add release notes](/contribute/release-notes). If it is an incompatible change,
-   read the [guide for rolling out breaking changes](/contribute/breaking-changes).
-1. Create a pull request on
-   [GitHub](https://github.com/bazelbuild/bazel/pulls). If you're new to GitHub,
-   read [about pull
-   requests](https://help.github.com/articles/about-pull-requests/). Note that
-   we restrict permissions to create branches on the main Bazel repository, so
-   you will need to push your commit to [your own fork of the
-   repository](https://help.github.com/articles/working-with-forks/).
-1. A Bazel maintainer should assign you a reviewer within two business days
-   (excluding holidays in the USA and Germany). If you aren't assigned a
-   reviewer in that time, you can request one by emailing
-   [bazel-discuss@googlegroups.com](mailto:bazel-discuss@googlegroups.com).
-1. Work with the reviewer to complete a code review. For each change, create a
-   new commit and push it to make changes to your pull request. If the review
-   takes too long (for instance, if the reviewer is unresponsive), send an email to
-   [bazel-discuss@googlegroups.com](mailto:bazel-discuss@googlegroups.com).
-1. After your review is complete, a Bazel maintainer applies your patch to
-   Google's internal version control system.
-
-   This triggers internal presubmit checks
-   that may suggest more changes. If you haven't expressed a preference, the
-   maintainer submitting your change adds "trivial" changes (such as
-   [linting](https://en.wikipedia.org/wiki/Lint_(software))) that don't affect
-   design. If deeper changes are required or you'd prefer to apply
-   changes directly, you and the reviewer should communicate preferences
-   clearly in review comments.
-
-   After internal submission, the patch is exported as a Git commit,
-   at which point the GitHub pull request is closed. All final changes
-   are attributed to you.
diff --git a/7.6.1/contribute/policy.mdx b/7.6.1/contribute/policy.mdx
deleted file mode 100644
index 1bf0029..0000000
--- a/7.6.1/contribute/policy.mdx
+++ /dev/null
@@ -1,78 +0,0 @@
----
-translation: human
-page_type: lcat
-title: 'Contribution policy'
----
-
-
-
-This page covers Bazel's governance model and contribution policy.
-
-## Governance model
-
-The [Bazel project](https://github.com/bazelbuild) is led and managed by Google
-and has a large community of contributors outside of Google. Some Bazel
-components (such as specific rules repositories under the
-[bazelbuild](https://github.com/bazelbuild) organization) are led,
-maintained, and managed by members of the community. The Google Bazel team
-reviews suggestions to add community-owned repositories (such as rules) to the
-[bazelbuild](https://github.com/bazelbuild) GitHub organization.
-
-### Contributor roles
-
-Here is an outline of the roles in the Bazel project, including their
-responsibilities:
-
-* **Owners**: The Google Bazel team. Owners are responsible for:
-    * Strategy, maintenance, and leadership of the Bazel project.
-    * Building and maintaining Bazel's core functionality.
-    * Appointing Maintainers and approving new repositories.
-* **Maintainers**: The Google Bazel team and designated GitHub users.
-  Maintainers are responsible for:
-    * Building and maintaining the primary functionality of their repository.
-    * Reviewing and approving contributions to areas of the Bazel code base.
-    * Supporting users and contributors with timely and transparent issue
-      management, PR review, and documentation.
-    * Releasing, testing and collaborating with Bazel Owners.
-* **Contributors**: All users who contribute code or documentation to the
-  Bazel project.
-    * Creating well-written PRs to contribute to Bazel's codebase and
-      documentation.
-    * Using standard channels, such as GitHub Issues, to propose changes and
-      report issues.
-
-### Becoming a Maintainer
-
-Bazel Owners may appoint Maintainers to lead well-defined areas of code, such as
-rule sets. Contributors with a record of consistent, responsible past
-contributions who are planning major contributions in the future could be
-considered to become qualified Maintainers.
-
-## Contribution policy
-
-The Bazel project accepts contributions from external contributors. Here are the
-contribution policies for Google-managed and community-managed areas of code.
-
-* **Licensing**. All Maintainers and Contributors must sign
-  [Google's Contributor License Agreement](https://cla.developers.google.com/clas).
-* **Contributions**. Owners and Maintainers should make every effort to accept
-  worthwhile contributions. All contributions must be:
-    * Well written and well tested
-    * Discussed and approved by the Maintainers of the relevant area of code.
-      Discussions and approvals happen on GitHub Issues and in GitHub PRs.
-      Larger contributions require a
-      [design review](/contribute/design-documents).
-    * Added to Bazel's Continuous Integration system if not already present.
-    * Supportable and aligned with Bazel product direction
-* **Code review**. All changes in all `bazelbuild` repositories require
-  review:
-    * All PRs must be approved by an Owner or Maintainer.
-    * Only Owners and Maintainers can merge PRs.
-* **Compatibility**. Owners may need to reject or request modifications to PRs
-  in the unlikely event that the change requires substantial modifications to
-  internal Google systems.
-* **Documentation**.
-  Where relevant, feature contributions should include
-  documentation updates.
-
-For more details on contributing to Bazel, see our
-[contribution guidelines](/contribute/).
diff --git a/7.6.1/contribute/release-notes.mdx b/7.6.1/contribute/release-notes.mdx
deleted file mode 100644
index 83e1d75..0000000
--- a/7.6.1/contribute/release-notes.mdx
+++ /dev/null
@@ -1,78 +0,0 @@
----
-title: 'Writing release notes'
----
-
-
-
-This document is targeted at Bazel contributors.
-
-Commit descriptions in Bazel include a `RELNOTES:` tag followed by a release
-note. This is used by the Bazel team to track changes in each release and write
-the release announcement.
-
-## Overview
-
-* Is your change a bugfix? In that case, you don't need a release note. Please
-  include a reference to the GitHub issue.
-
-* If the change adds / removes / changes Bazel in a user-visible way, then it
-  may be advantageous to mention it.
-
-If the change is significant, follow the [design document
-policy](/contribute/design-documents) first.
-
-## Guidelines
-
-The release notes will be read by our users, so they should be short (ideally
-one sentence), avoid jargon (Bazel-internal terminology), and focus on what the
-change is about.
-
-* Include a link to the relevant documentation. Almost any release note should
-  contain a link. If the description mentions a flag, a feature, or a command
-  name, users will probably want to know more about it.
-
-* Use backquotes around code, symbols, flags, or any word containing an
-  underscore.
-
-* Do not just copy and paste bug descriptions. They are often cryptic, make
-  sense only to us, and leave the user scratching their head. Release notes are
-  meant to explain what has changed and why in user-understandable language.
-
-* Always use present tense and the format "Bazel now supports Y" or "X now does
-  Z." We don't want our release notes to sound like bug entries. All release
-  note entries should be informative and use a consistent style and language.
-
-* If something has been deprecated or removed, use "X has been deprecated" or "X
-  has been removed." Not "is removed" or "was removed."
-
-* If Bazel now does something differently, use "X now $newBehavior instead of
-  $oldBehavior" in present tense. This lets the user know in detail what to
-  expect when they use the new release.
-
-* If Bazel now supports or no longer supports something, use "Bazel now supports
-  / no longer supports X".
-
-* Explain why something has been removed / deprecated / changed. One sentence is
-  enough, but we want the user to be able to evaluate the impact on their
-  builds.
-
-* Do NOT make any promises about future functionality. Avoid "this flag will be
-  removed" or "this will be changed." It introduces uncertainty. The first thing
-  the user will wonder is "when?" and we don't want them to start worrying about
-  their current builds breaking at some unknown time.
-
-## Process
-
-As part of the [release
-process](https://github.com/bazelbuild/continuous-integration/blob/master/docs/release-playbook.md),
-we collect the `RELNOTES` tags of every commit. We copy everything in a [Google
-Doc](https://docs.google.com/document/d/1wDvulLlj4NAlPZamdlEVFORks3YXJonCjyuQMUQEmB0/edit)
-where we review, edit, and organize the notes.
-
-The release manager sends an email to the
-[bazel-dev](https://groups.google.com/forum/#!forum/bazel-dev) mailing list.
-Bazel contributors are invited to contribute to the document and make sure
-their changes are correctly reflected in the announcement.
-
-Later, the announcement will be submitted to the [Bazel
-blog](https://blog.bazel.build/), using the [bazel-blog
-repository](https://github.com/bazelbuild/bazel-blog/tree/master/_posts).
diff --git a/7.6.1/contribute/statemachine-guide.mdx b/7.6.1/contribute/statemachine-guide.mdx
deleted file mode 100644
index e98a96e..0000000
--- a/7.6.1/contribute/statemachine-guide.mdx
+++ /dev/null
@@ -1,1236 +0,0 @@
----
-title: 'A Guide to Skyframe `StateMachine`s'
----
-
-
-
-## Overview
-
-A Skyframe `StateMachine` is a *deconstructed* function-object that resides on
-the heap. It supports flexible evaluation without redundancy[^1] when
-required values are not immediately available but computed asynchronously. The
-`StateMachine` cannot tie up a thread resource while waiting, but instead has to
-be suspended and resumed. The deconstruction thus exposes explicit re-entry
-points so that prior computations can be skipped.
-
-`StateMachine`s can be used to express sequences, branching, structured logical
-concurrency, and are tailored specifically for Skyframe interaction.
-`StateMachine`s can be composed into larger `StateMachine`s and share
-sub-`StateMachine`s. Concurrency is always hierarchical by construction and
-purely logical. Every concurrent subtask runs in the single shared parent
-SkyFunction thread.
-
-## Introduction
-
-This section briefly motivates and introduces `StateMachine`s, found in the
-[`java.com.google.devtools.build.skyframe.state`](https://github.com/bazelbuild/bazel/tree/master/src/main/java/com/google/devtools/build/skyframe/state)
-package.
-
-### A brief introduction to Skyframe restarts
-
-Skyframe is a framework that performs parallel evaluation of dependency graphs.
-Each node in the graph corresponds to the evaluation of a SkyFunction, with a
-SkyKey specifying its parameters and a SkyValue specifying its result. The
-computational model is such that a SkyFunction may look up SkyValues by SkyKey,
-triggering recursive, parallel evaluation of additional SkyFunctions. When a
-requested SkyValue is not yet ready because some subgraph of computation is
-incomplete, the requesting SkyFunction does not block, which would tie up a
-thread. Instead, it observes a `null` `getValue` response and should return
-`null` instead of a SkyValue, signaling that it is incomplete due to missing
-inputs. Skyframe *restarts* the SkyFunctions when all previously requested
-SkyValues become available.
-
-Before the introduction of `SkyKeyComputeState`, the traditional way of handling
-a restart was to fully rerun the computation. Although this has quadratic
-complexity, functions written this way eventually complete because with each
-rerun, fewer lookups return `null`. With `SkyKeyComputeState` it is possible to
-associate hand-specified checkpoint data with a SkyFunction, saving significant
-recomputation.
-
-`StateMachine`s are objects that live inside `SkyKeyComputeState` and eliminate
-virtually all recomputation when a SkyFunction restarts (assuming that
-`SkyKeyComputeState` does not fall out of cache) by exposing suspend and resume
-execution hooks.
-
-### Stateful computations inside `SkyKeyComputeState`
-
-From an object-oriented design standpoint, it makes sense to consider storing
-computational objects inside `SkyKeyComputeState` instead of pure data values.
-In *Java*, the bare minimum description of a behavior-carrying object is a
-*functional interface*, and it turns out to be sufficient. A `StateMachine` has
-the following, curiously recursive, definition[^2].
-
-```
-@FunctionalInterface
-public interface StateMachine {
-  StateMachine step(Tasks tasks) throws InterruptedException;
-}
-```
-
-The `Tasks` interface is analogous to `SkyFunction.Environment`, but it is
-designed for asynchrony and adds support for logically concurrent subtasks[^3].
-
-The return value of `step` is another `StateMachine`, allowing the specification
-of a sequence of steps, inductively. `step` returns `DONE` when the
-`StateMachine` is done. For example:
-
-```
-class HelloWorld implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    System.out.println("hello");
-    return this::step2;  // The next step is HelloWorld.step2.
-  }
-
-  private StateMachine step2(Tasks tasks) {
-    System.out.println("world");
-    // DONE is a special value defined in the `StateMachine` interface signaling
-    // that the computation is done.
-    return DONE;
-  }
-}
-```
-
-describes a `StateMachine` with the following output.
-
-```
-hello
-world
-```
-
-Note that the method reference `this::step2` is also a `StateMachine` due to
-`step2` satisfying `StateMachine`'s functional interface definition. Method
-references are the most common way to specify the next state in a
-`StateMachine`.
-
-![Suspending and resuming](/contribute/images/suspend-resume.svg)
-
-Intuitively, breaking a computation down into `StateMachine` steps, instead of a
-monolithic function, provides the hooks needed to *suspend* and *resume* a
-computation. When `StateMachine.step` returns, there is an explicit *suspension*
-point. The continuation specified by the returned `StateMachine` value is an
-explicit *resume* point. Recomputation can thus be avoided because the
-computation can be picked up exactly where it left off.
-
-### Callbacks, continuations and asynchronous computation
-
-In technical terms, a `StateMachine` serves as a *continuation*, determining the
-subsequent computation to be executed. Instead of blocking, a `StateMachine` can
-voluntarily *suspend* by returning from the `step` function, which transfers
-control back to a [`Driver`](#drivers-and-bridging) instance. The `Driver` can
-then switch to a ready `StateMachine` or relinquish control back to Skyframe.
-
-Traditionally, *callbacks* and *continuations* are conflated into one concept.
-However, `StateMachine`s maintain a distinction between the two.
-
-* *Callback* - describes where to store the result of an asynchronous
-  computation.
-* *Continuation* - specifies the next execution state.
-
-Callbacks are required when invoking an asynchronous operation, which means that
-the actual operation doesn't occur immediately upon calling the method, as in
-the case of a SkyValue lookup. Callbacks should be kept as simple as possible.
-
-Caution: A common pitfall of callbacks is that the asynchronous computation must
-ensure the callback is called by the end of every reachable path. It's possible
-to overlook some branches, and the compiler doesn't warn about this.
-
-*Continuations* are the `StateMachine` values returned by `step` and
-encapsulate the complex execution that follows once all asynchronous
-computations resolve. This structured approach helps to keep the complexity of
-callbacks manageable.
-
-## Tasks
-
-The `Tasks` interface provides `StateMachine`s with an API to look up SkyValues
-by SkyKey and to schedule concurrent subtasks.
-
-```
-interface Tasks {
-  void enqueue(StateMachine subtask);
-
-  void lookUp(SkyKey key, Consumer<SkyValue> sink);
-
-  <E extends Exception>
-  void lookUp(SkyKey key, Class<E> exceptionClass, ValueOrExceptionSink<E> sink);
-
-  // lookUp overloads for 2 and 3 exception types exist, but are elided here.
-}
-```
-
-Tip: When any state uses the `Tasks` interface to perform lookups or create
-subtasks, those lookups and subtasks will complete before the next state begins.
-
-Tip: (Corollary) If subtasks are complex `StateMachine`s or recursively create
-subtasks, they all *transitively* complete before the next state begins.
-
-### SkyValue lookups
-
-`StateMachine`s use `Tasks.lookUp` overloads to look up SkyValues. They are
-analogous to `SkyFunction.Environment.getValue` and
-`SkyFunction.Environment.getValueOrThrow` and have similar exception handling
-semantics. The implementation does not immediately perform the lookup, but
-instead, batches[^4] as many lookups as possible before doing so. The value
-might not be immediately available, for example, requiring a Skyframe restart,
-so the caller specifies what to do with the resulting value using a callback.
-
-The `StateMachine` processor ([`Driver`s and bridging to
-SkyFunctions](#drivers-and-bridging)) guarantees that the value is available
-before the next state begins. An example follows.
-
-```
-class DoesLookup implements StateMachine, Consumer<SkyValue> {
-  private Value value;
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    tasks.lookUp(new Key(), (Consumer<SkyValue>) this);
-    return this::processValue;
-  }
-
-  // The `lookUp` call in `step` causes this to be called before `processValue`.
-  @Override  // Implementation of Consumer<SkyValue>.
-  public void accept(SkyValue value) {
-    this.value = (Value)value;
-  }
-
-  private StateMachine processValue(Tasks tasks) {
-    System.out.println(value);  // Prints the string representation of `value`.
-    return DONE;
-  }
-}
-```
-
-In the above example, the first step does a lookup for `new Key()`, passing
-`this` as the consumer. That is possible because `DoesLookup` implements
-`Consumer<SkyValue>`.
-
-Tip: When passing `this` as a value sink, it's helpful to readers to upcast it
-to the receiver type to narrow down the purpose of passing `this`. The example
-passes `(Consumer<SkyValue>) this`.
-
-By contract, before the next state `DoesLookup.processValue` begins, all the
-lookups of `DoesLookup.step` are complete. Therefore `value` is available when
-it is accessed in `processValue`.
-
-### Subtasks
-
-`Tasks.enqueue` requests the execution of logically concurrent subtasks.
-Subtasks are also `StateMachine`s and can do anything regular `StateMachine`s
-can do, including recursively creating more subtasks or looking up SkyValues.
-Much like `lookUp`, the state machine driver ensures that all subtasks are
-complete before proceeding to the next step. An example follows.
-
-```
-class Subtasks implements StateMachine {
-  private int i = 0;
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    tasks.enqueue(new Subtask1());
-    tasks.enqueue(new Subtask2());
-    // The next step is Subtasks.processResults. It won't be called until both
-    // Subtask1 and Subtask2 are complete.
-    return this::processResults;
-  }
-
-  private StateMachine processResults(Tasks tasks) {
-    System.out.println(i);  // Prints "3".
-    return DONE;  // Subtasks is done.
-  }
-
-  private class Subtask1 implements StateMachine {
-    @Override
-    public StateMachine step(Tasks tasks) {
-      i += 1;
-      return DONE;  // Subtask1 is done.
-    }
-  }
-
-  private class Subtask2 implements StateMachine {
-    @Override
-    public StateMachine step(Tasks tasks) {
-      i += 2;
-      return DONE;  // Subtask2 is done.
-    }
-  }
-}
-```
-
-Though `Subtask1` and `Subtask2` are logically concurrent, everything runs in a
-single thread, so the "concurrent" update of `i` does not need any
-synchronization.
-
-### Structured concurrency
-
-Since every `lookUp` and `enqueue` must resolve before advancing to the next
-state, concurrency is naturally limited to tree structures. It's
-possible to create hierarchical[^5] concurrency as shown in the following
-example.
-
-![Structured Concurrency](/contribute/images/structured-concurrency.svg)
-
-It's hard to tell from the *UML* that the concurrency structure forms a tree.
-There's an [alternate view](#concurrency-tree-diagram) that better shows the
-tree structure.
-
-![Unstructured Concurrency](/contribute/images/unstructured-concurrency.svg)
-
-Structured concurrency is much easier to reason about.
-
-## Composition and control flow patterns
-
-This section presents examples for how multiple `StateMachine`s can be composed
-and solutions to certain control flow problems.
-
-### Sequential states
-
-This is the most common and straightforward control flow pattern. An example of
-this is shown in [Stateful computations inside
-`SkyKeyComputeState`](#stateful-computations).
-
-### Branching
-
-Branching states in `StateMachine`s can be achieved by returning different
-values using regular *Java* control flow, as shown in the following example.
-
-```
-class Branch implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    // Returns different state machines, depending on condition.
-    if (shouldUseA()) {
-      return this::performA;
-    }
-    return this::performB;
-  }
-  …
-}
-```
-
-It’s very common for certain branches to return `DONE` for early completion.
-
-### Advanced sequential composition
-
-Since the `StateMachine` control structure is memoryless, sharing `StateMachine`
-definitions as subtasks can sometimes be awkward. Let *M1* and
-*M2* be `StateMachine` instances that share a `StateMachine`, *S*,
-with *M1* and *M2* being the sequences *<A, S, B>* and
-*<X, S, Y>*, respectively. The problem is that *S* doesn’t know whether to
-continue to *B* or *Y* after it completes and `StateMachine`s don't quite keep a
-call stack. This section reviews some techniques for addressing this.
-
-#### `StateMachine` as terminal sequence element
-
-This doesn’t solve the initial problem posed. It only demonstrates sequential
-composition when the shared `StateMachine` is terminal in the sequence.
-
-```
-// S is the shared state machine.
-class S implements StateMachine { … }
-
-class M1 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performA();
-    return new S();
-  }
-}
-
-class M2 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performX();
-    return new S();
-  }
-}
-```
-
-This works even if *S* is itself a complex state machine.
-
-#### Subtask for sequential composition
-
-Since enqueued subtasks are guaranteed to complete before the next state, it’s
-sometimes possible to slightly abuse[^6] the subtask mechanism.
-
-```
-class M1 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performA();
-    // S starts after `step` returns and by contract must complete before `doB`
-    // begins. It is effectively sequential, inducing the sequence < A, S, B >.
-    tasks.enqueue(new S());
-    return this::doB;
-  }
-
-  private StateMachine doB(Tasks tasks) {
-    performB();
-    return DONE;
-  }
-}
-
-class M2 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performX();
-    // Similarly, this induces the sequence < X, S, Y >.
-    tasks.enqueue(new S());
-    return this::doY;
-  }
-
-  private StateMachine doY(Tasks tasks) {
-    performY();
-    return DONE;
-  }
-}
-```
-
-#### `runAfter` injection
-
-Sometimes, abusing `Tasks.enqueue` is impossible because there are other
-parallel subtasks or `Tasks.lookUp` calls that must be completed before *S*
-executes. In this case, injecting a `runAfter` parameter into *S* can be used to
-inform *S* of what to do next.
-
-```
-class S implements StateMachine {
-  // Specifies what to run after S completes.
-  private final StateMachine runAfter;
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    … // Performs some computations.
-    return this::processResults;
-  }
-
-  @Nullable
-  private StateMachine processResults(Tasks tasks) {
-    … // Does some additional processing.
-
-    // Executes the state machine defined by `runAfter` after S completes.
-    return runAfter;
-  }
-}
-
-class M1 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performA();
-    // Passes `this::doB` as the `runAfter` parameter of S, resulting in the
-    // sequence < A, S, B >.
-    return new S(/* runAfter= */ this::doB);
-  }
-
-  private StateMachine doB(Tasks tasks) {
-    performB();
-    return DONE;
-  }
-}
-
-class M2 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performX();
-    // Passes `this::doY` as the `runAfter` parameter of S, resulting in the
-    // sequence < X, S, Y >.
-    return new S(/* runAfter= */ this::doY);
-  }
-
-  private StateMachine doY(Tasks tasks) {
-    performY();
-    return DONE;
-  }
-}
-```
-
-This approach is cleaner than abusing subtasks. However, applying this too
-liberally, for example, by nesting multiple `StateMachine`s with `runAfter`, is
-the road to [Callback Hell](#callback-hell). It’s better to break up sequential
-`runAfter`s with ordinary sequential states instead.
-
-```
-  return new S(/* runAfter= */ new T(/* runAfter= */ this::nextStep));
-```
-
-can be replaced with the following.
-
-```
-  private StateMachine step1(Tasks tasks) {
-    doStep1();
-    return new S(/* runAfter= */ this::intermediateStep);
-  }
-
-  private StateMachine intermediateStep(Tasks tasks) {
-    return new T(/* runAfter= */ this::nextStep);
-  }
-```
-
-Note: It's possible to pass `DONE` as the `runAfter` parameter when there's
-nothing to run afterwards.
-
-Tip: When using `runAfter`, always annotate the parameter with `/* runAfter= */`
-to let the reader know the meaning at the callsite.
-
-#### *Forbidden* alternative: `runAfterUnlessError`
-
-In an earlier draft, we considered a `runAfterUnlessError` that would abort
-early on errors. This was motivated by the fact that errors often end up getting
-checked twice, once by the `StateMachine` that has a `runAfter` reference and
-once by the `runAfter` machine itself.
-
-After some deliberation, we decided that uniformity of the code is more
-important than deduplicating the error checking. It would be confusing if the
-`runAfter` mechanism did not work in a consistent manner with the
-`tasks.enqueue` mechanism, which always requires error checking.
-
-Warning: When using `runAfter`, the machine that has the injected `runAfter`
-should invoke it unconditionally at completion, even on error, for consistency.
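-
-A minimal sketch of this convention, assuming hypothetical `getError()` and
-`handleError()` helpers on the machine:
-
-```
-class M implements StateMachine {
-  private final StateMachine runAfter;
-  …
-  private StateMachine complete(Tasks tasks) {
-    if (getError() != null) {
-      handleError();  // Records or reports the error…
-    }
-    // …but hands control to `runAfter` unconditionally, mirroring the
-    // error checking contract of the `tasks.enqueue` mechanism.
-    return runAfter;
-  }
-}
-```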
-
-### Direct delegation
-
-Each time there is a formal state transition, the main `Driver` loop advances.
-As per contract, advancing states means that all previously enqueued SkyValue
-lookups and subtasks resolve before the next state executes. Sometimes the logic
-of a delegate `StateMachine` makes a phase advance unnecessary or
-counterproductive. For example, if the first `step` of the delegate performs
-SkyKey lookups that could be parallelized with lookups of the delegating state,
-then a phase advance would make them sequential. It could make more sense to
-perform direct delegation, as shown in the example below.
-
-```
-class Parent implements StateMachine, Consumer<SkyValue> {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    tasks.lookUp(new Key1(), this);
-    // Directly delegates to `Delegate`.
-    //
-    // The (valid) alternative:
-    //   return new Delegate(this::afterDelegation);
-    // would cause `Delegate.step` to execute after `step` completes which would
-    // cause lookups of `Key1` and `Key2` to be sequential instead of parallel.
-    return new Delegate(this::afterDelegation).step(tasks);
-  }
-
-  private StateMachine afterDelegation(Tasks tasks) {
-    …
-  }
-}
-
-class Delegate implements StateMachine, Consumer<SkyValue> {
-  private final StateMachine runAfter;
-
-  Delegate(StateMachine runAfter) {
-    this.runAfter = runAfter;
-  }
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    tasks.lookUp(new Key2(), this);
-    return …;
-  }
-
-  // Rest of implementation.
-  …
-
-  private StateMachine complete(Tasks tasks) {
-    …
-    return runAfter;
-  }
-}
-```
-
-## Data flow
-
-The focus of the previous discussion has been on managing control flow. This
-section describes the propagation of data values.
-
-### Implementing `Tasks.lookUp` callbacks
-
-There’s an example of implementing a `Tasks.lookUp` callback in [SkyValue
-lookups](#skyvalue-lookups). This section provides rationale and suggests
-approaches for handling multiple SkyValues.
-
-#### `Tasks.lookUp` callbacks
-
-The `Tasks.lookUp` method takes a callback, `sink`, as a parameter.
-
-```
-  void lookUp(SkyKey key, Consumer<SkyValue> sink);
-```
-
-The idiomatic approach would be to use a *Java* lambda to implement this:
-
-```
-  tasks.lookUp(key, value -> myValue = (MyValueClass)value);
-```
-
-with `myValue` being a member variable of the `StateMachine` instance doing the
-lookup. However, the lambda requires an extra memory allocation compared to
-implementing the `Consumer<SkyValue>` interface in the `StateMachine`
-implementation. The lambda is still useful when there are multiple lookups that
-would otherwise be ambiguous in a single shared `accept` implementation.
-
-Note: Bikeshed warning. There is a noticeable difference of approximately 1%
-end-to-end CPU usage when implementing callbacks systematically in
-`StateMachine` implementations compared to using lambdas, which makes this
-recommendation debatable. To avoid unnecessary debates, it is advised to leave
-the decision up to the individual implementing the solution.
-
-There are also error handling overloads of `Tasks.lookUp` that are analogous to
-`SkyFunction.Environment.getValueOrThrow`.
-
-```
-  <E extends Exception>
-  void lookUp(
-      SkyKey key, Class<E> exceptionClass, ValueOrExceptionSink<E> sink);
-
-  interface ValueOrExceptionSink<E extends Exception> {
-    void acceptValueOrException(@Nullable SkyValue value, @Nullable E exception);
-  }
-```
-
-An example implementation is shown below.
-
-```
-class PerformLookupWithError
-    implements StateMachine, ValueOrExceptionSink<MyException> {
-  private MyValue value;
-  private MyException error;
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    tasks.lookUp(new MyKey(), MyException.class, (ValueOrExceptionSink<MyException>) this);
-    return this::processResult;
-  }
-
-  @Override
-  public void acceptValueOrException(@Nullable SkyValue value, @Nullable MyException exception) {
-    if (value != null) {
-      this.value = (MyValue)value;
-      return;
-    }
-    if (exception != null) {
-      this.error = exception;
-      return;
-    }
-    throw new IllegalArgumentException("Both parameters were unexpectedly null.");
-  }
-
-  private StateMachine processResult(Tasks tasks) {
-    if (error != null) {
-      // Handles the error.
-      …
-      return DONE;
-    }
-    // Processes `value`, which is non-null.
-    …
-  }
-}
-```
-
-As with lookups without error handling, having the `StateMachine` class directly
-implement the callback saves a memory allocation for the lambda.
-
-[Error handling](#error-handling) provides a bit more detail, but essentially,
-there's not much difference between the propagation of errors and normal values.
-
-#### Consuming multiple SkyValues
-
-Multiple SkyValue lookups are often required. An approach that works much of the
-time is to switch on the type of SkyValue. The following is an example that has
-been simplified from prototype production code.
-
-```
-  @Nullable
-  private StateMachine fetchConfigurationAndPackage(Tasks tasks) {
-    var configurationKey = configuredTarget.getConfigurationKey();
-    if (configurationKey != null) {
-      tasks.lookUp(configurationKey, (Consumer<SkyValue>) this);
-    }
-
-    var packageId = configuredTarget.getLabel().getPackageIdentifier();
-    tasks.lookUp(PackageValue.key(packageId), (Consumer<SkyValue>) this);
-
-    return this::constructResult;
-  }
-
-  @Override  // Implementation of `Consumer<SkyValue>`.
-  public void accept(SkyValue value) {
-    if (value instanceof BuildConfigurationValue) {
-      this.configurationValue = (BuildConfigurationValue) value;
-      return;
-    }
-    if (value instanceof PackageValue) {
-      this.pkg = ((PackageValue) value).getPackage();
-      return;
-    }
-    throw new IllegalArgumentException("unexpected value: " + value);
-  }
-```
-
-The `Consumer<SkyValue>` callback implementation can be shared unambiguously
-because the value types are different. When that’s not the case, falling back to
-lambda-based implementations or full inner-class instances that implement the
-appropriate callbacks is viable.
-
-### Propagating values between `StateMachine`s
-
-So far, this document has only explained how to arrange work in a subtask, but
-subtasks also need to report values back to the caller. Since subtasks are
-logically asynchronous, their results are communicated back to the caller using
-a *callback*. To make this work, the subtask defines a sink interface that is
-injected via its constructor.
-
-```
-class BarProducer implements StateMachine {
-  // Callers of BarProducer implement the following interface to accept its
-  // results. Exactly one of the two methods will be called by the time
-  // BarProducer completes.
-  interface ResultSink {
-    void acceptBarValue(Bar value);
-    void acceptBarError(BarException exception);
-  }
-
-  private final ResultSink sink;
-
-  BarProducer(ResultSink sink) {
-    this.sink = sink;
-  }
-
-  … // StateMachine steps that end with this::complete.
-
-  private StateMachine complete(Tasks tasks) {
-    if (hasError()) {
-      sink.acceptBarError(getError());
-      return DONE;
-    }
-    sink.acceptBarValue(getValue());
-    return DONE;
-  }
-}
-```
-
-Tip: It would be tempting to use the more concise signature `void accept(Bar
-value)` rather than the stuttery `void acceptBarValue(Bar value)` above.
-However, `accept` is already commonly overloaded, for example by
-`Consumer<SkyValue>`, so doing this often leads to violations of the
-[Overloads: never
-split](https://google.github.io/styleguide/javaguide.html#s3.4.2-ordering-class-contents)
-style-guide rule.
-
-Tip: Using a custom `ResultSink` type instead of a generic one from
-`java.util.function` makes it easy to find implementations in the code base,
-improving readability.
-
-A caller `StateMachine` would then look like the following.
-
-```
-class Caller implements StateMachine, BarProducer.ResultSink {
-  interface ResultSink {
-    void acceptCallerValue(Bar value);
-    void acceptCallerError(BarException error);
-  }
-
-  private final ResultSink sink;
-
-  private Bar value;
-
-  Caller(ResultSink sink) {
-    this.sink = sink;
-  }
-
-  @Override
-  @Nullable
-  public StateMachine step(Tasks tasks) {
-    tasks.enqueue(new BarProducer((BarProducer.ResultSink) this));
-    return this::processResult;
-  }
-
-  @Override
-  public void acceptBarValue(Bar value) {
-    this.value = value;
-  }
-
-  @Override
-  public void acceptBarError(BarException error) {
-    sink.acceptCallerError(error);
-  }
-
-  private StateMachine processResult(Tasks tasks) {
-    // Since all enqueued subtasks resolve before `processResult` starts, one of
-    // the `BarProducer.ResultSink` callbacks must have been called by this point.
-    if (value == null) {
-      return DONE;  // There was a previously reported error.
-    }
-    var finalResult = computeResult(value);
-    sink.acceptCallerValue(finalResult);
-    return DONE;
-  }
-}
-```
-
-The preceding example demonstrates a few things. `Caller` has to propagate its
-results back and defines its own `Caller.ResultSink`. `Caller` implements the
-`BarProducer.ResultSink` callbacks. Upon resumption, `processResult` checks if
-`value` is null to determine if an error occurred. This is a common behavior
-pattern after accepting output from either a subtask or SkyValue lookup.
-
-Note that the implementation of `acceptBarError` eagerly forwards the result to
-the `Caller.ResultSink`, as required by [Error bubbling](#error-bubbling).
-
-Alternatives for top-level `StateMachine`s are described in [`Driver`s and
-bridging to SkyFunctions](#drivers-and-bridging).
-
-### Error handling
-
-There are a couple of examples of error handling already in [`Tasks.lookUp`
-callbacks](#tasks-lookup-callbacks) and [Propagating values between
-`StateMachine`s](#propagating-values). Exceptions, other than
-`InterruptedException`, are not thrown, but instead passed around through
-callbacks as values. Such callbacks often have exclusive-or semantics, with
-exactly one of a value or error being passed.
-
-The next section describes a subtle but important interaction with Skyframe
-error handling.
-
-#### Error bubbling (--nokeep\_going)
-
-Warning: Errors need to be eagerly propagated all the way back to the
-SkyFunction for error bubbling to function correctly.
-
-During error bubbling, a SkyFunction may be restarted even if not all requested
-SkyValues are available. In such cases, the subsequent state will never be
-reached due to the `Tasks` API contract. However, the `StateMachine` should
-still propagate the exception.
-
-Since propagation must occur regardless of whether the next state is reached,
-the error handling callback must perform this task. For an inner `StateMachine`,
-this is achieved by invoking the parent callback.
-
-At the top-level `StateMachine`, which interfaces with the SkyFunction, this can
-be done by calling the `setException` method of `ValueOrExceptionProducer`.
-`ValueOrExceptionProducer.tryProduceValue` will then throw the exception, even
-if there are missing SkyValues.
-
-If a `Driver` is used directly, it is essential to check for
-propagated errors from the SkyFunction, even if the machine has not finished
-processing.
-
-### Event handling
-
-For SkyFunctions that need to emit events, a `StoredEventHandler` is injected
-into SkyKeyComputeState and further injected into `StateMachine`s that require
-it. Historically, the `StoredEventHandler` was needed due to Skyframe dropping
-certain events unless they were replayed, but this was subsequently fixed.
-`StoredEventHandler` injection is preserved because it simplifies the
-implementation of events emitted from error handling callbacks.
-
-## `Driver`s and bridging to SkyFunctions
-
-A `Driver` is responsible for managing the execution of `StateMachine`s,
-beginning with a specified root `StateMachine`. As `StateMachine`s can
-recursively enqueue subtask `StateMachine`s, a single `Driver` can manage
-numerous subtasks. These subtasks create a tree structure, a result of
-[Structured concurrency](#structured-concurrency). The `Driver` batches SkyValue
-lookups across subtasks for improved efficiency.
-
-There are a number of classes built around the `Driver`, with the following API.
-
-```
-public final class Driver {
-  public Driver(StateMachine root);
-  public boolean drive(SkyFunction.Environment env) throws InterruptedException;
-}
-```
-
-`Driver` takes a single root `StateMachine` as a parameter. Calling
-`Driver.drive` executes the `StateMachine` as far as it can go without a
-Skyframe restart. It returns `true` when the `StateMachine` completes and
-`false` otherwise, indicating that not all values were available.
-
-`Driver` maintains the concurrent state of the `StateMachine` and is well
-suited for embedding in `SkyKeyComputeState`.
-
-### Directly instantiating `Driver`
-
-`StateMachine` implementations conventionally communicate their results via
-callbacks. It's possible to directly instantiate a `Driver` as shown in the
-following example.
-
-The `Driver` is embedded in the `SkyKeyComputeState` implementation along with
-an implementation of the corresponding `ResultSink` to be defined a bit further
-down. At the top level, the `State` object is an appropriate receiver for the
-result of the computation as it is guaranteed to outlive `Driver`.
-
-```
-class State implements SkyKeyComputeState, ResultProducer.ResultSink {
-  // The `Driver` instance, containing the full tree of all `StateMachine`
-  // states. Responsible for calling `StateMachine.step` implementations when
-  // asynchronous values are available and performing batched SkyFrame lookups.
-  //
-  // Non-null while `result` is being computed.
-  private Driver resultProducer;
-
-  // Variable for storing the result of the `StateMachine`.
-  //
-  // Will be non-null after the computation completes.
-  //
-  private ResultType result;
-
-  // Implements `ResultProducer.ResultSink`.
-  //
-  // `ResultProducer` propagates its final value through a callback that is
-  // implemented here.
-  @Override
-  public void acceptResult(ResultType result) {
-    this.result = result;
-  }
-}
-```
-
-The code below sketches the `ResultProducer`.
-
-```
-class ResultProducer implements StateMachine {
-  interface ResultSink {
-    void acceptResult(ResultType value);
-  }
-
-  private final Parameters parameters;
-  private final ResultSink sink;
-
-  … // Other internal state.
-
-  ResultProducer(Parameters parameters, ResultSink sink) {
-    this.parameters = parameters;
-    this.sink = sink;
-  }
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    … // Implementation.
-    return this::complete;
-  }
-
-  private StateMachine complete(Tasks tasks) {
-    sink.acceptResult(getResult());
-    return DONE;
-  }
-}
-```
-
-Then the code for lazily computing the result could look like the following.
-
-```
-@Nullable
-private Result computeResult(State state, SkyFunction.Environment env)
-    throws InterruptedException {
-  if (state.result != null) {
-    return state.result;
-  }
-  if (state.resultProducer == null) {
-    state.resultProducer = new Driver(new ResultProducer(
-      new Parameters(), (ResultProducer.ResultSink)state));
-  }
-  if (state.resultProducer.drive(env)) {
-    // Clears the `Driver` instance as it is no longer needed.
-    state.resultProducer = null;
-  }
-  return state.result;
-}
-```
-
-### Embedding `Driver`
-
-If the `StateMachine` produces a value and raises no exceptions, embedding
-`Driver` is another possible implementation, as shown in the following example.
-
-```
-class ResultProducer implements StateMachine {
-  private final Parameters parameters;
-  private final Driver driver;
-
-  private ResultType result;
-
-  ResultProducer(Parameters parameters) {
-    this.parameters = parameters;
-    this.driver = new Driver(this);
-  }
-
-  @Nullable  // Null when a Skyframe restart is needed.
-  public ResultType tryProduceValue(SkyFunction.Environment env)
-      throws InterruptedException {
-    if (!driver.drive(env)) {
-      return null;
-    }
-    return result;
-  }
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    … // Implementation.
-  }
-}
-```
-
-The SkyFunction may have code that looks like the following (where `State` is
-the function-specific type of `SkyKeyComputeState`).
-
-```
-@Nullable  // Null when a Skyframe restart is needed.
-Result computeResult(SkyFunction.Environment env, State state)
-    throws InterruptedException {
-  if (state.result != null) {
-    return state.result;
-  }
-  if (state.resultProducer == null) {
-    state.resultProducer = new ResultProducer(new Parameters());
-  }
-  var result = state.resultProducer.tryProduceValue(env);
-  if (result == null) {
-    return null;
-  }
-  state.resultProducer = null;
-  return state.result = result;
-}
-```
-
-Embedding `Driver` in the `StateMachine` implementation is a better fit for
-Skyframe's synchronous coding style.
-
-### StateMachines that may produce exceptions
-
-Otherwise, there are `SkyKeyComputeState`-embeddable `ValueOrExceptionProducer`
-and `ValueOrException2Producer` classes that have synchronous APIs to match
-synchronous SkyFunction code.
-
-The `ValueOrExceptionProducer` abstract class includes the following methods.
-
-```
-public abstract class ValueOrExceptionProducer<V, E extends Exception>
-    implements StateMachine {
-  @Nullable
-  public final V tryProduceValue(Environment env)
-      throws InterruptedException, E {
-    … // Implementation.
-  }
-
-  protected final void setValue(V value) { … // Implementation. }
-  protected final void setException(E exception) { … // Implementation. }
-}
-```
-
-It includes an embedded `Driver` instance and closely resembles the
-`ResultProducer` class in [Embedding driver](#embedding-driver), and interfaces
-with the SkyFunction in a similar manner. Instead of defining a `ResultSink`,
-implementations call `setValue` or `setException` when either of those occurs.
-When both occur, the exception takes priority. The `tryProduceValue` method
-bridges the asynchronous callback code to synchronous code and throws an
-exception when one is set.
-
-As previously noted, during error bubbling, it's possible for an error to occur
-even if the machine is not yet done because not all inputs are available. To
-accommodate this, `tryProduceValue` throws any set exceptions, even before the
-machine is done.
-
-## Epilogue: Eventually removing callbacks
-
-`StateMachine`s are a highly efficient, but boilerplate-intensive way to perform
-asynchronous computation. Continuations (particularly in the form of `Runnable`s
-passed to `ListenableFuture`) are widespread in certain parts of *Bazel* code,
-but aren't prevalent in analysis SkyFunctions. Analysis is mostly CPU bound and
-there are no efficient asynchronous APIs for disk I/O. Eventually, it would be
-good to optimize away callbacks as they have a learning curve and impede
-readability.
-
-One of the most promising alternatives is *Java* virtual threads. Instead of
-having to write callbacks, everything is replaced with synchronous, blocking
-calls. This is possible because tying up a virtual thread resource, unlike a
-platform thread, is supposed to be cheap. However, even with virtual threads,
-replacing simple synchronous operations with thread creation and synchronization
-primitives is too expensive. We performed a migration from `StateMachine`s to
-*Java* virtual threads and they were orders of magnitude slower, leading to
-almost a 3x increase in end-to-end analysis latency. Since virtual threads are
-still a preview feature, it's possible that this migration can be performed at a
-later date when performance improves.
-
-Another approach to consider is waiting for *Loom* coroutines, if they ever
-become available. The advantage here is that it might be possible to reduce
-synchronization overhead by using cooperative multitasking.
-
-If all else fails, low-level bytecode rewriting could also be a viable
-alternative. With enough optimization, it might be possible to achieve
-performance that approaches hand-written callback code.
-
-## Appendix
-
-### Callback Hell
-
-Callback hell is an infamous problem in asynchronous code that uses callbacks.
-It stems from the fact that the continuation for a subsequent step is nested
-within the previous step. If there are many steps, this nesting can be extremely
-deep. If coupled with control flow, the code becomes unmanageable.
-
-```
-class CallbackHell implements StateMachine {
-  @Override
-  public StateMachine step(Tasks task) {
-    doA();
-    return t -> {
-      doB();
-      return t2 -> {
-        doC();
-        return DONE;
-      };
-    };
-  }
-}
-```
-
-One of the advantages of nested implementations is that the stack frame of the
-outer step can be preserved. In *Java*, captured lambda variables must be
-effectively final, so using such variables can be cumbersome. Deep nesting is
-avoided by returning method references as continuations instead of lambdas, as
-shown below.
-
-```
-class CallbackHellAvoided implements StateMachine {
-  @Override
-  public StateMachine step(Tasks task) {
-    doA();
-    return this::step2;
-  }
-
-  private StateMachine step2(Tasks tasks) {
-    doB();
-    return this::step3;
-  }
-
-  private StateMachine step3(Tasks tasks) {
-    doC();
-    return DONE;
-  }
-}
-```
-
-Callback hell may also occur if the [`runAfter` injection](#runafter-injection)
-pattern is used too densely, but this can be avoided by interspersing injections
-with sequential steps.
-
-#### Example: Chained SkyValue lookups
-
-It is often the case that the application logic requires dependent chains of
-SkyValue lookups, for example, if a second SkyKey depends on the first SkyValue.
-Approached naively, this would result in a complex, deeply nested callback
-structure.
-
-```
-private ValueType1 value1;
-private ValueType2 value2;
-
-private StateMachine step1(...) {
-  tasks.lookUp(key1, (Consumer<SkyValue>) this);  // key1 has type KeyType1.
-  return this::step2;
-}
-
-@Override
-public void accept(SkyValue value) {
-  this.value1 = (ValueType1) value;
-}
-
-private StateMachine step2(...) {
-  KeyType2 key2 = computeKey(value1);
-  tasks.lookUp(key2, this::acceptValueType2);
-  return this::step3;
-}
-
-private void acceptValueType2(SkyValue value) {
-  this.value2 = (ValueType2) value;
-}
-```
-
-However, since continuations are specified as method references, the code looks
-procedural across state transitions: `step2` follows `step1`. Note that here, a
-method reference (`this::acceptValueType2`) is used to assign `value2`. This
-makes the ordering of the code match the ordering of the computation from
-top-to-bottom.
-
-### Miscellaneous Tips
-
-#### Readability: Execution Ordering
-
-To improve readability, strive to keep the `StateMachine.step` implementations
-in execution order and callback implementations immediately following where they
-are passed in the code. This isn't always possible where the control flow
-branches. Additional comments might be helpful in such cases.
-
-In [Example: Chained SkyValue lookups](#chained-skyvalue-lookups), an
-intermediate method reference is created to achieve this. This trades a small
-amount of performance for readability, which is likely worthwhile here.
-
-#### Generational Hypothesis
-
-Medium-lived *Java* objects break the generational hypothesis of the *Java*
-garbage collector, which is designed to handle objects that live for a very
-short time or objects that live forever. By definition, objects in
-`SkyKeyComputeState` violate this hypothesis. Such objects, containing the
-constructed tree of all still-running `StateMachine`s, rooted at `Driver`, have
-an intermediate lifespan as they suspend, waiting for asynchronous computations
-to complete.
-
-It seems less bad in JDK19, but when using `StateMachine`s, it's sometimes
-possible to observe an increase in GC time, even with dramatic decreases in
-actual garbage generated. Since `StateMachine`s have an intermediate lifespan,
-they could be promoted to old gen, causing it to fill up more quickly, thus
-necessitating more expensive major or full GCs to clean up.
-
-The initial precaution is to minimize the use of `StateMachine` variables, but
-it is not always feasible, for example, if a value is needed across multiple
-states. Where it is possible, use local `step` variables; they are
-young-generation variables and are efficiently GC'd.
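-
-As an illustrative sketch of this precaution (the class and type names here
-are invented):
-
-```
-class Example implements StateMachine {
-  // Lives as long as the machine itself and may be promoted to the old
-  // generation while the machine is suspended.
-  private BigIntermediate keptAcrossStates;
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    // Needed only within this step: the local stays in the young generation
-    // and is collected cheaply after `step` returns.
-    BigIntermediate usedOnlyHere = computeIntermediate();
-    …
-    return DONE;
-  }
-}
-```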
-
-For `StateMachine` variables, breaking things down into subtasks and following
-the recommended pattern for [Propagating values between
-`StateMachine`s](#propagating-values) is also helpful. Observe that when
-following the pattern, only child `StateMachine`s have references to parent
-`StateMachine`s and not vice versa. This means that as children complete and
-update the parents using result callbacks, the children naturally fall out of
-scope and become eligible for GC.
-
-Finally, in some cases, a `StateMachine` variable is needed in earlier states
-but not in later states. It can be beneficial to null out references to large
-objects once it is known that they are no longer needed.
-
-#### Naming states
-
-When naming a method, it's usually possible to name it for the behavior
-that happens within that method. It's less clear how to do this in
-`StateMachine`s because there is no stack. For example, suppose method `foo`
-calls a sub-method `bar`. In a `StateMachine`, this could be translated into the
-state sequence `foo`, followed by `bar`. `foo` no longer includes the behavior
-`bar`. As a result, method names for states tend to be narrower in scope,
-potentially reflecting local behavior.
-
-### Concurrency tree diagram
-
-The following is an alternative view of the diagram in [Structured
-concurrency](#structured-concurrency) that better depicts the tree structure.
-The blocks form a small tree.
-
-![Structured Concurrency 3D](/contribute/images/structured-concurrency-3d.svg)
-
-[^1]: In contrast to Skyframe's convention of restarting from the beginning when
-    values are not available.
-[^2]: Note that `step` is permitted to throw `InterruptedException`, but the
-    examples omit this. There are a few low-level methods in *Bazel* code that
-    throw this exception and it propagates up to the `Driver`, to be described
-    later, that runs the `StateMachine`. It's fine to not declare it to be
-    thrown when unneeded.
-[^3]: Concurrent subtasks were motivated by the `ConfiguredTargetFunction`,
-    which performs *independent* work for each dependency. Instead of
-    manipulating complex data structures that process all the dependencies at
-    once, introducing inefficiencies, each dependency has its own independent
-    `StateMachine`.
-[^4]: Multiple `tasks.lookUp` calls within a single step are batched together.
-    Additional batching can be created by lookups occurring within concurrent
-    subtasks.
-[^5]: This is conceptually similar to Java’s structured concurrency
-    [jeps/428](https://openjdk.org/jeps/428).
-[^6]: Doing this is similar to spawning a thread and joining it to achieve
-    sequential composition.
diff --git a/7.6.1/contribute/windows-chocolatey-maintenance.mdx b/7.6.1/contribute/windows-chocolatey-maintenance.mdx
deleted file mode 100644
index c6aee8f..0000000
--- a/7.6.1/contribute/windows-chocolatey-maintenance.mdx
+++ /dev/null
@@ -1,72 +0,0 @@
----
-title: 'Maintaining Bazel Chocolatey package on Windows'
----
-
-
-
-Note: The Chocolatey package is experimental; please provide feedback
-(`@petemounce` in the issue tracker).
-
-## Prerequisites
-
-You need:
-
-* [chocolatey package manager](https://chocolatey.org) installed
-* (to publish) a chocolatey API key granting you permission to publish the
-  `bazel` package
-    * [@petemounce](https://github.com/petemounce) currently
-      maintains this unofficial package.
-* (to publish) to have set up that API key for the chocolatey source locally
-  via `choco apikey -k <your_api_key> -s https://chocolatey.org/`
-
-## Build
-
-Compile Bazel with the msys2 shell and `compile.sh`.
-
-```powershell
-pushd scripts/packages/chocolatey
-  ./build.ps1 -version 0.3.2 -mode local
-popd
-```
-
-This should result in `scripts/packages/chocolatey/bazel.<version>.nupkg` being
-created.
-
-The `build.ps1` script supports `mode` values `local`, `rc`, and `release`.
-
-## Test
-
-0. Build the package (with `-mode local`)
-
-    * run a webserver (`python -m SimpleHTTPServer` in
-      `scripts/packages/chocolatey` is convenient and starts one on
-      `http://localhost:8000`)
-
-0. Test the install
-
-    The `test.ps1` script should install the package cleanly (and error if it
-    did not install cleanly), then tell you what to do next.
-
-0. Test the uninstall
-
-    ```sh
-    choco uninstall bazel
-    # should remove bazel from the system
-    ```
-
-Chocolatey's moderation process automates checks here as well.
-
-## Release
-
-Modify `tools/parameters.json` for the new release's URI and checksum once the
-release has been published to GitHub releases.
-
-```powershell
-./build.ps1 -version <version> -isRelease
-./test.ps1 -version <version>
-# if the test.ps1 passes
-choco push bazel.x.y.z.nupkg --source https://chocolatey.org/
-```
-
-Chocolatey.org will then run automated checks and respond to the push via email
-to the maintainers.
diff --git a/7.6.1/contribute/windows-scoop-maintenance.mdx b/7.6.1/contribute/windows-scoop-maintenance.mdx
deleted file mode 100644
index 58e2a6c..0000000
--- a/7.6.1/contribute/windows-scoop-maintenance.mdx
+++ /dev/null
@@ -1,55 +0,0 @@
----
-title: 'Maintaining Bazel Scoop package on Windows'
----
-
-
-
-Note: The Scoop package is experimental. To provide feedback, contact
-`@excitoon` in the issue tracker.
-
-## Prerequisites
-
-You need:
-
-* [Scoop package manager](https://scoop.sh/) installed
-* a GitHub account in order to publish and create pull requests to
-  [scoopinstaller/scoop-main](https://github.com/scoopinstaller/scoop-main)
-    * [@excitoon](https://github.com/excitoon) currently maintains this
-      unofficial package. Feel free to ask questions by
-      [e-mail](mailto:vladimir.chebotarev@gmail.com) or
-      [Telegram](http://telegram.me/excitoon).
-
-## Release process
-
-Scoop packages are very easy to maintain. Once you have the URL of released
-Bazel, you need to make appropriate changes in
-[this file](https://github.com/scoopinstaller/scoop-main/blob/master/bucket/bazel.json):
-
-- update version
-- update dependencies if needed
-- update URL
-- update hash (`sha256` by default)
-
-A sketch of these fields appears at the end of this page.
-
-In your filesystem, `bazel.json` is located in the directory
-`%UserProfile%/scoop/buckets/main/bucket` by default. This directory belongs to
-your clone of a Git repository
-[scoopinstaller/scoop-main](https://github.com/scoopinstaller/scoop-main).
-
-Test the result:
-
-```
-scoop uninstall bazel
-scoop install bazel
-bazel version
-bazel something_else
-```
-
-The first time, make a fork of
-[scoopinstaller/scoop-main](https://github.com/scoopinstaller/scoop-main) and
-specify it as your own remote for `%UserProfile%/scoop/buckets/main`:
-
-```
-git remote add mine FORK_URL
-```
-
-Push your changes to your fork and create a pull request.
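-
-For orientation, the `bazel.json` fields updated during the release process
-above typically look like the following sketch (the version, URL, and hash
-values are placeholders, and other manifest fields are elided):
-
-```
-{
-    "version": "x.y.z",
-    "url": "https://github.com/bazelbuild/bazel/releases/download/x.y.z/bazel-x.y.z-windows-x86_64.zip",
-    "hash": "0000000000000000000000000000000000000000000000000000000000000000"
-}
-```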
diff --git a/7.6.1/docs/android-build-performance.mdx b/7.6.1/docs/android-build-performance.mdx
deleted file mode 100644
index 0d5edc7..0000000
--- a/7.6.1/docs/android-build-performance.mdx
+++ /dev/null
@@ -1,97 +0,0 @@
----
-title: 'Android Build Performance'
----
-
-
-
-This page contains information on optimizing build performance for Android
-apps specifically. For general build performance optimization with Bazel, see
-[Optimizing Performance](/rules/performance).
-
-## Recommended flags
-
-The flags are in the
-[`bazelrc` configuration syntax](/run/bazelrc#bazelrc-syntax-semantics), so
-they can be pasted directly into a `bazelrc` file and invoked with
-`--config=<config_name>` on the command line.
-
-**Profiling performance**
-
-Bazel writes a JSON trace profile by default to a file called
-`command.profile.gz` in Bazel's output base.
-See the [JSON Profile documentation](/rules/performance#performance-profiling) for
-how to read and interact with the profile.
-
-**Persistent workers for Android build actions**
-
-A subset of Android build actions has support for
-[persistent workers](https://blog.bazel.build/2015/12/10/java-workers.html).
-
-These actions' mnemonics are:
-
-* DexBuilder
-* Javac
-* Desugar
-* AaptPackage
-* AndroidResourceParser
-* AndroidResourceValidator
-* AndroidResourceCompiler
-* RClassGenerator
-* AndroidResourceLink
-* AndroidAapt2
-* AndroidAssetMerger
-* AndroidResourceMerger
-* AndroidCompiledResourceMerger
-
-Enabling workers can result in better build performance by saving on JVM
-startup costs from invoking each of these tools, but at the cost of increased
-memory usage on the system by persisting them.
-
-To enable workers for these actions, apply these flags with
-`--config=android_workers` on the command line:
-
-```
-build:android_workers --strategy=DexBuilder=worker
-build:android_workers --strategy=Javac=worker
-build:android_workers --strategy=Desugar=worker
-
-# A wrapper flag for these resource processing actions:
-# - AndroidResourceParser
-# - AndroidResourceValidator
-# - AndroidResourceCompiler
-# - RClassGenerator
-# - AndroidResourceLink
-# - AndroidAapt2
-# - AndroidAssetMerger
-# - AndroidResourceMerger
-# - AndroidCompiledResourceMerger
-build:android_workers --persistent_android_resource_processor
-```
-
-The default number of persistent workers created per action is `4`. We have
-[measured improved build performance](https://github.com/bazelbuild/bazel/issues/8586#issuecomment-500070549)
-by capping the number of instances for each action to `1` or `2`, although this
-may vary depending on the system Bazel is running on and the project being
-built.
-
-To cap the number of instances for an action, apply these flags:
-
-```
-build:android_workers --worker_max_instances=DexBuilder=2
-build:android_workers --worker_max_instances=Javac=2
-build:android_workers --worker_max_instances=Desugar=2
-build:android_workers --worker_max_instances=AaptPackage=2
-# .. and so on for each action you're interested in.
-```
-
-**Using AAPT2**
-
-[`aapt2`](https://developer.android.com/studio/command-line/aapt2) has improved
-performance over `aapt` and also creates smaller APKs. To use `aapt2`, use the
-`--android_aapt=aapt2` flag or set `aapt2` on the `aapt_version` attribute of
-`android_binary` and `android_local_test`.
-
-**SSD optimizations**
-
-The `--experimental_multi_threaded_digest` flag is useful for optimizing digest
-computation on SSDs.
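-
-For example, following the same `bazelrc` pattern as the worker flags above
-(the config name here is arbitrary):
-
-```
-build:android_ssd --experimental_multi_threaded_digest
-```
-
-Builds can then opt in with `--config=android_ssd`.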
diff --git a/7.6.1/docs/android-instrumentation-test.mdx b/7.6.1/docs/android-instrumentation-test.mdx
deleted file mode 100644
index bf0ff76..0000000
--- a/7.6.1/docs/android-instrumentation-test.mdx
+++ /dev/null
@@ -1,579 +0,0 @@
----
-title: 'Android Instrumentation Tests'
----
-
-
-
-_If you're new to Bazel, start with the [Building Android with
-Bazel](/start/android-app) tutorial._
-
-![Running Android instrumentation tests in parallel](/docs/images/android_test.gif "Android instrumentation test")
-
-**Figure 1.** Running parallel Android instrumentation tests.
-
-[`android_instrumentation_test`](/reference/be/android#android_instrumentation_test)
-allows developers to test their apps on Android emulators and devices.
-It utilizes real Android framework APIs and the Android Test Library.
-
-For hermeticity and reproducibility, Bazel creates and launches Android
-emulators in a sandbox, ensuring that tests always run from a clean state. Each
-test gets an isolated emulator instance, allowing tests to run in parallel
-without passing state between them.
-
-For more information on Android instrumentation tests, check out the [Android
-developer
-documentation](https://developer.android.com/training/testing/unit-testing/instrumented-unit-tests.html).
-
-Please file issues in the [GitHub issue tracker](https://github.com/bazelbuild/bazel/issues).
-
-## How it works
-
-When you run `bazel test` on an `android_instrumentation_test` target for the
-first time, Bazel performs the following steps:
-
-1. Builds the test APK, APK under test, and their transitive dependencies
-2. Creates, boots, and caches clean emulator states
-3. Starts the emulator
-4. Installs the APKs
-5. Runs tests utilizing the [Android Test Orchestrator](https://developer.android.com/training/testing/junit-runner.html#using-android-test-orchestrator)
-6. Shuts down the emulator
-7. Reports the results
-
-In subsequent test runs, Bazel boots the emulator from the clean, cached state
-created in step 2, so there is no leftover state from previous runs. Caching
-emulator state also speeds up test runs.
-
-## Prerequisites
-
-Ensure your environment satisfies the following prerequisites:
-
-- **Linux**. Tested on Ubuntu 16.04 and 18.04.
-
-- **Bazel 0.12.0** or later. Verify the version by running `bazel info release`.
-
-```posix-terminal
-bazel info release
-```
-This results in output similar to the following:
-
-```none {:.devsite-disable-click-to-copy}
-release 4.1.0
-```
-
-- **KVM**. Bazel requires emulators to have [hardware
-  acceleration](https://developer.android.com/studio/run/emulator-acceleration.html#accel-check)
-  with KVM on Linux. You can follow these
-  [installation instructions](https://help.ubuntu.com/community/KVM/Installation)
-  for Ubuntu.
-
-To verify that KVM has the correct configuration, run:
-
-```posix-terminal
-apt-get install cpu-checker && kvm-ok
-```
-
-If it prints the following message, you have the correct configuration:
-
-```none {:.devsite-disable-click-to-copy}
-INFO: /dev/kvm exists
-KVM acceleration can be used
-```
-
-- **Xvfb**. To run headless tests (for example, on CI servers), Bazel requires
-  the [X virtual framebuffer](https://www.x.org/archive/X11R7.6/doc/man/man1/Xvfb.1.xhtml).
-
-To install it, run:
-
-```posix-terminal
-apt-get install xvfb
-```
-Verify that `Xvfb` is installed correctly and resides at `/usr/bin/Xvfb`
-by running:
-
-```posix-terminal
-which Xvfb
-```
-The output is the following:
-
-```{:.devsite-disable-click-to-copy}
-/usr/bin/Xvfb
-```
-
-- **32-bit Libraries**. Some of the binaries used by the test infrastructure are
-  32-bit, so on 64-bit machines, ensure that 32-bit binaries can be run. For
-  Ubuntu, install these 32-bit libraries:
-
-```posix-terminal
-sudo apt-get install libc6:i386 libncurses5:i386 libstdc++6:i386 lib32z1 libbz2-1.0:i386
-```
-
-## Getting started
-
-Here is a typical target dependency graph of an `android_instrumentation_test`:
-
-![The target dependency graph on an Android instrumentation test](/docs/images/android_instrumentation_test.png "Target dependency graph")
-
-**Figure 2.** Target dependency graph of an `android_instrumentation_test`.
-
-
-### BUILD file
-
-The graph translates into a `BUILD` file like this:
-
-```python
-android_instrumentation_test(
-    name = "my_test",
-    test_app = ":my_test_app",
-    target_device = "@android_test_support//tools/android/emulated_devices/generic_phone:android_23_x86",
-)
-
-# Test app and library
-android_binary(
-    name = "my_test_app",
-    instruments = ":my_app",
-    manifest = "AndroidTestManifest.xml",
-    deps = [":my_test_lib"],
-    # ...
-)
-
-android_library(
-    name = "my_test_lib",
-    srcs = glob(["javatest/**/*.java"]),
-    deps = [
-        ":my_app_lib",
-        "@maven//:androidx_test_core",
-        "@maven//:androidx_test_runner",
-        "@maven//:androidx_test_espresso_espresso_core",
-    ],
-    # ...
-)
-
-# Target app and library under test
-android_binary(
-    name = "my_app",
-    manifest = "AndroidManifest.xml",
-    deps = [":my_app_lib"],
-    # ...
-)
-
-android_library(
-    name = "my_app_lib",
-    srcs = glob(["java/**/*.java"]),
-    deps = [
-        "@maven//:androidx_appcompat_appcompat",
-        "@maven//:androidx_annotation_annotation",
-    ]
-    # ...
-)
-```
-
-The main attributes of the rule `android_instrumentation_test` are:
-
-- `test_app`: An `android_binary` target. This target contains test code and
-  dependencies like Espresso and UIAutomator. The selected `android_binary`
-  target is required to specify an `instruments` attribute pointing to another
-  `android_binary`, which is the app under test.
-
-- `target_device`: An `android_device` target. This target describes the
-  specifications of the Android emulator which Bazel uses to create, launch and
-  run the tests. See the [section on choosing an Android
-  device](#android-device-target) for more information.
-
-The test app's `AndroidManifest.xml` must include [an `<instrumentation>`
-tag](https://developer.android.com/studio/test/#configure_instrumentation_manifest_settings).
-This tag must specify the attributes for the **package of the target app** and
-the **fully qualified class name of the instrumentation test runner**,
-`androidx.test.runner.AndroidJUnitRunner`.
-
-Here is an example `AndroidTestManifest.xml` for the test app:
-
-```xml
-<?xml version="1.0" encoding="UTF-8"?>
-<manifest xmlns:android="http://schemas.android.com/apk/res/android"
-    package="com.example.android.app.test">
-
-    <instrumentation
-        android:name="androidx.test.runner.AndroidJUnitRunner"
-        android:targetPackage="com.example.android.app" />
-
-    <application>
-        <uses-library android:name="android.test.runner" />
-    </application>
-</manifest>
-```
-
-### WORKSPACE dependencies
-
-In order to use this rule, your project needs to depend on these external
-repositories:
-
-- `@androidsdk`: The Android SDK. Download this through Android Studio.
-
-- `@android_test_support`: Hosts the test runner, emulator launcher, and
-  `android_device` targets. You can find the [latest release
-  here](https://github.com/android/android-test/releases).
- -Enable these dependencies by adding the following lines to your `WORKSPACE` -file: - -```python -# Android SDK -android_sdk_repository( - name = "androidsdk", - path = "/path/to/sdk", # or set ANDROID_HOME -) - -# Android Test Support -ATS_COMMIT = "$COMMIT_HASH" -http_archive( - name = "android_test_support", - strip_prefix = "android-test-%s" % ATS_COMMIT, - urls = ["https://github.com/android/android-test/archive/%s.tar.gz" % ATS_COMMIT], -) -load("@android_test_support//:repo.bzl", "android_test_repositories") -android_test_repositories() -``` - -## Maven dependencies - -For managing dependencies on Maven artifacts from repositories, such as [Google -Maven](https://maven.google.com) or [Maven Central](https://central.maven.org), -you should use a Maven resolver, such as -[`rules_jvm_external`](https://github.com/bazelbuild/rules_jvm_external). - -The rest of this page shows how to use `rules_jvm_external` to -resolve and fetch dependencies from Maven repositories. - -## Choosing an android_device target - -`android_instrumentation_test.target_device` specifies which Android device to -run the tests on. These `android_device` targets are defined in -[`@android_test_support`](https://github.com/google/android-testing-support-library/tree/master/tools/android/emulated_devices). - -For example, you can query for the sources for a particular target by running: - -```posix-terminal -bazel query --output=build @android_test_support//tools/android/emulated_devices/generic_phone:android_23_x86 -``` -Which results in output that looks similar to: - -```python -# .../external/android_test_support/tools/android/emulated_devices/generic_phone/BUILD:43:1 -android_device( - name = "android_23_x86", - visibility = ["//visibility:public"], - tags = ["requires-kvm"], - generator_name = "generic_phone", - generator_function = "make_device", - generator_location = "tools/android/emulated_devices/generic_phone/BUILD:43", - vertical_resolution = 800, - horizontal_resolution = 480, - ram = 2048, - screen_density = 240, - cache = 32, - vm_heap = 256, - system_image = "@android_test_support//tools/android/emulated_devices/generic_phone:android_23_x86_images", - default_properties = "@android_test_support//tools/android/emulated_devices/generic_phone:_android_23_x86_props", -) -``` - -The device target names use this template: - -``` -@android_test_support//tools/android/emulated_devices/{{ "" }}device_type{{ "" }}:{{ "" }}system{{ "" }}_{{ "" }}api_level{{ "" }}_x86_qemu2 -``` - -In order to launch an `android_device`, the `system_image` for the selected API -level is required. To download the system image, use Android SDK's -`tools/bin/sdkmanager`. For example, to download the system image for -`generic_phone:android_23_x86`, run `$sdk/tools/bin/sdkmanager -"system-images;android-23;default;x86"`. - -To see the full list of supported `android_device` targets in -`@android_test_support`, run the following command: - -```posix-terminal -bazel query 'filter("x86_qemu2$", kind(android_device, @android_test_support//tools/android/emulated_devices/...:*))' -``` - -Bazel currently supports x86-based emulators only. For better performance, use -`QEMU2` `android_device` targets instead of `QEMU` ones. - -## Running tests - -To run tests, add these lines to your project's -`{{ '' }}project root{{ '' }}:{{ '' }}/.bazelrc` file. 
- -``` -# Configurations for testing with Bazel -# Select a configuration by running -# `bazel test //my:target --config={headless, gui, local_device}` - -# Headless instrumentation tests (No GUI) -test:headless --test_arg=--enable_display=false - -# Graphical instrumentation tests. Ensure that $DISPLAY is set. -test:gui --test_env=DISPLAY -test:gui --test_arg=--enable_display=true - -# Testing with a local emulator or device. Ensure that `adb devices` lists the -# device. -# Run tests serially. -test:local_device --test_strategy=exclusive -# Use the local device broker type, as opposed to WRAPPED_EMULATOR. -test:local_device --test_arg=--device_broker_type=LOCAL_ADB_SERVER -# Uncomment and set $device_id if there is more than one connected device. -# test:local_device --test_arg=--device_serial_number=$device_id -``` - -Then, use one of the configurations to run tests: - -- `bazel test //my/test:target --config=gui` -- `bazel test //my/test:target --config=headless` -- `bazel test //my/test:target --config=local_device` - -Use __only one configuration__ or tests will fail. - -### Headless testing - -With `Xvfb`, it is possible to test with emulators without the graphical -interface, also known as headless testing. To disable the graphical interface -when running tests, pass the test argument `--enable_display=false` to Bazel: - -```posix-terminal -bazel test //my/test:target --test_arg=--enable_display=false -``` - -### GUI testing - -If the `$DISPLAY` environment variable is set, it's possible to enable the -graphical interface of the emulator while the test is running. To do this, pass -these test arguments to Bazel: - -```posix-terminal -bazel test //my/test:target --test_arg=--enable_display=true --test_env=DISPLAY -``` - -### Testing with a local emulator or device - -Bazel also supports testing directly on a locally launched emulator or connected -device. Pass the flags -`--test_strategy=exclusive` and -`--test_arg=--device_broker_type=LOCAL_ADB_SERVER` to enable local testing mode. -If there is more than one connected device, pass the flag -`--test_arg=--device_serial_number=$device_id` where `$device_id` is the id of -the device/emulator listed in `adb devices`. - -## Sample projects - -If you are looking for canonical project samples, see the [Android testing -samples](https://github.com/googlesamples/android-testing#experimental-bazel-support) -for projects using Espresso and UIAutomator. 
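-
-One more note on the test configurations above: if a target should always run
-headless, the display argument can be baked into the target itself through the
-standard `args` attribute that test rules accept. This is a minimal sketch, not
-from the original page — it assumes a hypothetical `:my_headless_test` target
-and that `args` reaches the test runner the same way `--test_arg` does:
-
-```python
-android_instrumentation_test(
-    name = "my_headless_test",
-    test_app = ":my_test_app",
-    target_device = "@android_test_support//tools/android/emulated_devices/generic_phone:android_23_x86",
-    # Equivalent (by assumption) to passing --test_arg=--enable_display=false
-    # on the command line; verify against your Bazel version.
-    args = ["--enable_display=false"],
-)
-```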
- -## Espresso setup - -If you write UI tests with [Espresso](https://developer.android.com/training/testing/espresso/) -(`androidx.test.espresso`), you can use the following snippets to set up your -Bazel workspace with the list of commonly used Espresso artifacts and their -dependencies: - -``` -androidx.test.espresso:espresso-core -androidx.test:rules -androidx.test:runner -javax.inject:javax.inject -org.hamcrest:java-hamcrest -junit:junit -``` - -One way to organize these dependencies is to create a `//:test_deps` shared -library in your `{{ "" }}project root{{ "" }}/BUILD.bazel` file: - -```python -java_library( - name = "test_deps", - visibility = ["//visibility:public"], - exports = [ - "@maven//:androidx_test_espresso_espresso_core", - "@maven//:androidx_test_rules", - "@maven//:androidx_test_runner", - "@maven//:javax_inject_javax_inject" - "@maven//:org_hamcrest_java_hamcrest", - "@maven//:junit_junit", - ], -) -``` - -Then, add the required dependencies in `{{ "" }}project root{{ "" }}/WORKSPACE`: - -```python -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") - -RULES_JVM_EXTERNAL_TAG = "2.8" -RULES_JVM_EXTERNAL_SHA = "79c9850690d7614ecdb72d68394f994fef7534b292c4867ce5e7dec0aa7bdfad" - -http_archive( - name = "rules_jvm_external", - strip_prefix = "rules_jvm_external-%s" % RULES_JVM_EXTERNAL_TAG, - sha256 = RULES_JVM_EXTERNAL_SHA, - url = "https://github.com/bazelbuild/rules_jvm_external/archive/%s.zip" % RULES_JVM_EXTERNAL_TAG, -) - -load("@rules_jvm_external//:defs.bzl", "maven_install") - -maven_install( - artifacts = [ - "junit:junit:4.12", - "javax.inject:javax.inject:1", - "org.hamcrest:java-hamcrest:2.0.0.0" - "androidx.test.espresso:espresso-core:3.1.1", - "androidx.test:rules:aar:1.1.1", - "androidx.test:runner:aar:1.1.1", - ], - repositories = [ - "https://maven.google.com", - "https://repo1.maven.org/maven2", - ], -) -``` - -Finally, in your test `android_binary` target, add the `//:test_deps` -dependency: - -```python -android_binary( - name = "my_test_app", - instruments = "//path/to:app", - deps = [ - "//:test_deps", - # ... - ], - # ... -) -``` - -## Tips - -### Reading test logs - -Use `--test_output=errors` to print logs for failing tests, or -`--test_output=all` to print all test output. If you're looking for an -individual test log, go to -`$PROJECT_ROOT/bazel-testlogs/path/to/InstrumentationTestTargetName`. - -For example, the test logs for `BasicSample` canonical project are in -`bazel-testlogs/ui/espresso/BasicSample/BasicSampleInstrumentationTest`, run: - -```posix-terminal -tree bazel-testlogs/ui/espresso/BasicSample/BasicSampleInstrumentationTest -``` -This results in the following output: - -```none - -$ tree bazel-testlogs/ui/espresso/BasicSample/BasicSampleInstrumentationTest -. 
-├── adb.409923.log -├── broker_logs -│   ├── aapt_binary.10.ok.txt -│   ├── aapt_binary.11.ok.txt -│   ├── adb.12.ok.txt -│   ├── adb.13.ok.txt -│   ├── adb.14.ok.txt -│   ├── adb.15.fail.txt -│   ├── adb.16.ok.txt -│   ├── adb.17.fail.txt -│   ├── adb.18.ok.txt -│   ├── adb.19.fail.txt -│   ├── adb.20.ok.txt -│   ├── adb.21.ok.txt -│   ├── adb.22.ok.txt -│   ├── adb.23.ok.txt -│   ├── adb.24.fail.txt -│   ├── adb.25.ok.txt -│   ├── adb.26.fail.txt -│   ├── adb.27.ok.txt -│   ├── adb.28.fail.txt -│   ├── adb.29.ok.txt -│   ├── adb.2.ok.txt -│   ├── adb.30.ok.txt -│   ├── adb.3.ok.txt -│   ├── adb.4.ok.txt -│   ├── adb.5.ok.txt -│   ├── adb.6.ok.txt -│   ├── adb.7.ok.txt -│   ├── adb.8.ok.txt -│   ├── adb.9.ok.txt -│   ├── android_23_x86.1.ok.txt -│   └── exec-1 -│   ├── adb-2.txt -│   ├── emulator-2.txt -│   └── mksdcard-1.txt -├── device_logcat -│   └── logcat1635880625641751077.txt -├── emulator_itCqtc.log -├── outputs.zip -├── pipe.log.txt -├── telnet_pipe.log.txt -└── tmpuRh4cy - ├── watchdog.err - └── watchdog.out - -4 directories, 41 files -``` - -### Reading emulator logs - -The emulator logs for `android_device` targets are stored in the `/tmp/` -directory with the name `emulator_xxxxx.log`, where `xxxxx` is a -randomly-generated sequence of characters. - -Use this command to find the latest emulator log: - -```posix-terminal -ls -1t /tmp/emulator_*.log | head -n 1 -``` - -### Testing against multiple API levels - -If you would like to test against multiple API levels, you can use a list -comprehension to create test targets for each API level. For example: - -```python -API_LEVELS = [ - "19", - "20", - "21", - "22", -] - -[android_instrumentation_test( - name = "my_test_%s" % API_LEVEL, - test_app = ":my_test_app", - target_device = "@android_test_support//tools/android/emulated_devices/generic_phone:android_%s_x86_qemu2" % API_LEVEL, -) for API_LEVEL in API_LEVELS] -``` - -## Known issues - -- [Forked adb server processes are not terminated after - tests](https://github.com/bazelbuild/bazel/issues/4853) -- While APK building works on all platforms (Linux, macOS, Windows), testing - only works on Linux. -- Even with `--config=local_adb`, users still need to specify - `android_instrumentation_test.target_device`. -- If using a local device or emulator, Bazel does not uninstall the APKs after - the test. Clean the packages by running this command: - -```posix-terminal -adb shell pm list -packages com.example.android.testing | cut -d ':' -f 2 | tr -d '\r' | xargs --L1 -t adb uninstall -``` diff --git a/7.6.1/docs/android-ndk.mdx b/7.6.1/docs/android-ndk.mdx deleted file mode 100644 index b89493c..0000000 --- a/7.6.1/docs/android-ndk.mdx +++ /dev/null @@ -1,425 +0,0 @@ ---- -title: 'Using the Android Native Development Kit with Bazel' ---- - - - -_If you're new to Bazel, please start with the [Building Android with -Bazel](/start/android-app ) tutorial._ - -## Overview - -Bazel can run in many different build configurations, including several that use -the Android Native Development Kit (NDK) toolchain. This means that normal -`cc_library` and `cc_binary` rules can be compiled for Android directly within -Bazel. Bazel accomplishes this by using the `android_ndk_repository` repository -rule. - -## Prerequisites - -Please ensure that you have installed the Android SDK and NDK. - -To set up the SDK and NDK, add the following snippet to your `WORKSPACE`: - -```python -android_sdk_repository( - name = "androidsdk", # Required. Name *must* be "androidsdk". 
- path = "/path/to/sdk", # Optional. Can be omitted if `ANDROID_HOME` environment variable is set. -) - -android_ndk_repository( - name = "androidndk", # Required. Name *must* be "androidndk". - path = "/path/to/ndk", # Optional. Can be omitted if `ANDROID_NDK_HOME` environment variable is set. -) -``` - -For more information about the `android_ndk_repository` rule, see the [Build -Encyclopedia entry](/reference/be/android#android_ndk_repository). - -If you're using a recent version of the Android NDK (r22 and beyond), use the -Starlark implementation of `android_ndk_repository`. -Follow the instructions in -[its README](https://github.com/bazelbuild/rules_android_ndk). - -## Quick start - -To build C++ for Android, simply add `cc_library` dependencies to your -`android_binary` or `android_library` rules. - -For example, given the following `BUILD` file for an Android app: - -```python -# In /app/src/main/BUILD.bazel - -cc_library( - name = "jni_lib", - srcs = ["cpp/native-lib.cpp"], -) - -android_library( - name = "lib", - srcs = ["java/com/example/android/bazel/MainActivity.java"], - resource_files = glob(["res/**/*"]), - custom_package = "com.example.android.bazel", - manifest = "LibraryManifest.xml", - deps = [":jni_lib"], -) - -android_binary( - name = "app", - deps = [":lib"], - manifest = "AndroidManifest.xml", -) -``` - -This `BUILD` file results in the following target graph: - -![Example results](/docs/images/android_ndk.png "Build graph results") - -**Figure 1.** Build graph of Android project with cc_library dependencies. - -To build the app, simply run: - -```posix-terminal -bazel build //app/src/main:app -``` - -The `bazel build` command compiles the Java files, Android resource files, and -`cc_library` rules, and packages everything into an APK: - -```posix-terminal -$ zipinfo -1 bazel-bin/app/src/main/app.apk -nativedeps -lib/armeabi-v7a/libapp.so -classes.dex -AndroidManifest.xml -... -res/... -... -META-INF/CERT.SF -META-INF/CERT.RSA -META-INF/MANIFEST.MF -``` - -Bazel compiles all of the cc_libraries into a single shared object (`.so`) file, -targeted for the `armeabi-v7a` ABI by default. To change this or build for -multiple ABIs at the same time, see the section on [configuring the target -ABI](#configuring-target-abi). - -## Example setup - -This example is available in the [Bazel examples -repository](https://github.com/bazelbuild/examples/tree/master/android/ndk). - -In the `BUILD.bazel` file, three targets are defined with the `android_binary`, -`android_library`, and `cc_library` rules. - -The `android_binary` top-level target builds the APK. - -The `cc_library` target contains a single C++ source file with a JNI function -implementation: - -```c++ -#include -#include - -extern "C" -JNIEXPORT jstring - -JNICALL -Java_com_example_android_bazel_MainActivity_stringFromJNI( - JNIEnv *env, - jobject /* this */) { - std::string hello = "Hello from C++"; - return env->NewStringUTF(hello.c_str()); -} -``` - -The `android_library` target specifies the Java sources, resource files, and the -dependency on a `cc_library` target. For this example, `MainActivity.java` loads -the shared object file `libapp.so`, and defines the method signature for the JNI -function: - -```java -public class MainActivity extends AppCompatActivity { - - static { - System.loadLibrary("app"); - } - - @Override - protected void onCreate(Bundle savedInstanceState) { - // ... 
- } - - public native String stringFromJNI(); - -} -``` - -Note: The name of the native library is derived from the name of the top -level `android_binary` target. In this example, it is `app`. - -## Configuring the STL - -To configure the C++ STL, use the flag `--android_crosstool_top`. - -```posix-terminal -bazel build //:app --android_crosstool_top={{ "" }}target label{{ "" }} -``` - -The available STLs in `@androidndk` are: - -| STL | Target label | -|---------|-----------------------------------------| -| STLport | `@androidndk//:toolchain-stlport` | -| libc++ | `@androidndk//:toolchain-libcpp` | -| gnustl | `@androidndk//:toolchain-gnu-libstdcpp` | - -For r16 and below, the default STL is `gnustl`. For r17 and above, it is -`libc++`. For convenience, the target `@androidndk//:default_crosstool` is -aliased to the respective default STLs. - -Please note that from r18 onwards, [STLport and gnustl will be -removed](https://android.googlesource.com/platform/ndk/+/master/docs/Roadmap.md#ndk-r18), -making `libc++` the only STL in the NDK. - -See the [NDK -documentation](https://developer.android.com/ndk/guides/cpp-support) -for more information on these STLs. - -## Configuring the target ABI - -To configure the target ABI, use the `--fat_apk_cpu` flag as follows: - -```posix-terminal -bazel build //:app --fat_apk_cpu={{ "" }}comma-separated list of ABIs{{ "" }} -``` - -By default, Bazel builds native Android code for `armeabi-v7a`. To build for x86 -(such as for emulators), pass `--fat_apk_cpu=x86`. To create a fat APK for multiple -architectures, you can specify multiple CPUs: `--fat_apk_cpu=armeabi-v7a,x86`. - -If more than one ABI is specified, Bazel will build an APK containing a shared -object for each ABI. - -Depending on the NDK revision and Android API level, the following ABIs are -available: - -| NDK revision | ABIs | -|--------------|-------------------------------------------------------------| -| 16 and lower | armeabi, armeabi-v7a, arm64-v8a, mips, mips64, x86, x86\_64 | -| 17 and above | armeabi-v7a, arm64-v8a, x86, x86\_64 | - -See [the NDK docs](https://developer.android.com/ndk/guides/abis.html) -for more information on these ABIs. - -Multi-ABI Fat APKs are not recommended for release builds since they increase -the size of the APK, but can be useful for development and QA builds. - -## Selecting a C++ standard - -Use the following flags to build according to a C++ standard: - -| C++ Standard | Flag | -|--------------|-------------------------| -| C++98 | Default, no flag needed | -| C++11 | `--cxxopt=-std=c++11` | -| C++14 | `--cxxopt=-std=c++14` | - -For example: - -```posix-terminal -bazel build //:app --cxxopt=-std=c++11 -``` - -Read more about passing compiler and linker flags with `--cxxopt`, `--copt`, and -`--linkopt` in the [User Manual](/docs/user-manual#cxxopt). - -Compiler and linker flags can also be specified as attributes in `cc_library` -using `copts` and `linkopts`. For example: - -```python -cc_library( - name = "jni_lib", - srcs = ["cpp/native-lib.cpp"], - copts = ["-std=c++11"], - linkopts = ["-ldl"], # link against libdl -) -``` - -## Integration with platforms and toolchains - -Bazel's configuration model is moving towards -[platforms](/extending/platforms) and -[toolchains](/extending/toolchains). If your -build uses the `--platforms` flag to select for the architecture or operating system -to build for, you will need to pass the `--extra_toolchains` flag to Bazel in -order to use the NDK. 
- -For example, to integrate with the `android_arm64_cgo` toolchain provided by -the Go rules, pass `--extra_toolchains=@androidndk//:all` in addition to the -`--platforms` flag. - -```posix-terminal -bazel build //my/cc:lib \ - --platforms=@io_bazel_rules_go//go/toolchain:android_arm64_cgo \ - --extra_toolchains=@androidndk//:all -``` - -You can also register them directly in the `WORKSPACE` file: - -```python -android_ndk_repository(name = "androidndk") -register_toolchains("@androidndk//:all") -``` - -Registering these toolchains tells Bazel to look for them in the NDK `BUILD` -file (for NDK 20) when resolving architecture and operating system constraints: - -```python -toolchain( - name = "x86-clang8.0.7-libcpp_toolchain", - toolchain_type = "@bazel_tools//tools/cpp:toolchain_type", - target_compatible_with = [ - "@platforms//os:android", - "@platforms//cpu:x86_32", - ], - toolchain = "@androidndk//:x86-clang8.0.7-libcpp", -) - -toolchain( - name = "x86_64-clang8.0.7-libcpp_toolchain", - toolchain_type = "@bazel_tools//tools/cpp:toolchain_type", - target_compatible_with = [ - "@platforms//os:android", - "@platforms//cpu:x86_64", - ], - toolchain = "@androidndk//:x86_64-clang8.0.7-libcpp", -) - -toolchain( - name = "arm-linux-androideabi-clang8.0.7-v7a-libcpp_toolchain", - toolchain_type = "@bazel_tools//tools/cpp:toolchain_type", - target_compatible_with = [ - "@platforms//os:android", - "@platforms//cpu:arm", - ], - toolchain = "@androidndk//:arm-linux-androideabi-clang8.0.7-v7a-libcpp", -) - -toolchain( - name = "aarch64-linux-android-clang8.0.7-libcpp_toolchain", - toolchain_type = "@bazel_tools//tools/cpp:toolchain_type", - target_compatible_with = [ - "@platforms//os:android", - "@platforms//cpu:aarch64", - ], - toolchain = "@androidndk//:aarch64-linux-android-clang8.0.7-libcpp", -) -``` - -## How it works: introducing Android configuration transitions - -The `android_binary` rule can explicitly ask Bazel to build its dependencies in -an Android-compatible configuration so that the Bazel build *just works* without -any special flags, except for `--fat_apk_cpu` and `--android_crosstool_top` for -ABI and STL configuration. - -Behind the scenes, this automatic configuration uses Android [configuration -transitions](/extending/rules#configurations). - -A compatible rule, like `android_binary`, automatically changes the -configuration of its dependencies to an Android configuration, so only -Android-specific subtrees of the build are affected. Other parts of the build -graph are processed using the top-level target configuration. It may even -process a single target in both configurations, if there are paths through the -build graph to support that. - -Once Bazel is in an Android-compatible configuration, either specified at the -top level or due to a higher-level transition point, additional transition -points encountered do not further modify the configuration. - -The only built-in location that triggers the transition to the Android -configuration is `android_binary`'s `deps` attribute. - -Note: The `data` attribute of `android_binary` intentionally does *not* -trigger the transition. Additionally, `android_local_test` and `android_library` -intentionally do *not* trigger the transition at all. 
- -For example, if you try to build an `android_library` target with a `cc_library` -dependency without any flags, you may encounter an error about a missing JNI -header: - -``` -ERROR: {{ "" }}project{{ "" }}/app/src/main/BUILD.bazel:16:1: C++ compilation of rule '//app/src/main:jni_lib' failed (Exit 1) -app/src/main/cpp/native-lib.cpp:1:10: fatal error: 'jni.h' file not found -#include - ^~~~~~~ -1 error generated. -Target //app/src/main:lib failed to build -Use --verbose_failures to see the command lines of failed build steps. -``` - -Ideally, these automatic transitions should make Bazel do the right thing in the -majority of cases. However, if the target on the Bazel command-line is already -below any of these transition rules, such as C++ developers testing a specific -`cc_library`, then a custom `--crosstool_top` must be used. - -## Building a `cc_library` for Android without using `android_binary` - -To build a standalone `cc_binary` or `cc_library` for Android without using an -`android_binary`, use the `--crosstool_top`, `--cpu` and `--host_crosstool_top` -flags. - -For example: - -```posix-terminal -bazel build //my/cc/jni:target \ - --crosstool_top=@androidndk//:default_crosstool \ - --cpu= \ - --host_crosstool_top=@bazel_tools//tools/cpp:toolchain -``` - -In this example, the top-level `cc_library` and `cc_binary` targets are built -using the NDK toolchain. However, this causes Bazel's own host tools to be built -with the NDK toolchain (and thus for Android), because the host toolchain is -copied from the target toolchain. To work around this, specify the value of -`--host_crosstool_top` to be `@bazel_tools//tools/cpp:toolchain` to -explicitly set the host's C++ toolchain. - -With this approach, the entire build tree is affected. - -Note: All of the targets on the command line must be compatible with -building for Android when specifying these flags, which may make it difficult to -use [Bazel wild-cards](/run/build#specifying-build-targets) like -`/...` and `:all`. - -These flags can be put into a `bazelrc` config (one for each ABI), in -`{{ "" }}project{{ "" }}/.bazelrc`: - -``` -common:android_x86 --crosstool_top=@androidndk//:default_crosstool -common:android_x86 --cpu=x86 -common:android_x86 --host_crosstool_top=@bazel_tools//tools/cpp:toolchain - -common:android_armeabi-v7a --crosstool_top=@androidndk//:default_crosstool -common:android_armeabi-v7a --cpu=armeabi-v7a -common:android_armeabi-v7a --host_crosstool_top=@bazel_tools//tools/cpp:toolchain - -# In general -common:android_ --crosstool_top=@androidndk//:default_crosstool -common:android_ --cpu= -common:android_ --host_crosstool_top=@bazel_tools//tools/cpp:toolchain -``` - -Then, to build a `cc_library` for `x86` for example, run: - -```posix-terminal -bazel build //my/cc/jni:target --config=android_x86 -``` - -In general, use this method for low-level targets (like `cc_library`) or when -you know exactly what you're building; rely on the automatic configuration -transitions from `android_binary` for high-level targets where you're expecting -to build a lot of targets you don't control. diff --git a/7.6.1/docs/bazel-and-android.mdx b/7.6.1/docs/bazel-and-android.mdx deleted file mode 100644 index bf3625c..0000000 --- a/7.6.1/docs/bazel-and-android.mdx +++ /dev/null @@ -1,45 +0,0 @@ ---- -title: 'Android and Bazel' ---- - - - -This page contains resources that help you use Bazel with Android projects. It -links to a tutorial, build rules, and other information specific to building -Android projects with Bazel. 
-
-## Getting started
-
-The following resources will help you work with Bazel on Android projects:
-
-* [Tutorial: Building an Android app](/start/android-app). This
-  tutorial is a good place to start learning about Bazel commands and concepts,
-  and how to build Android apps with Bazel.
-* [Codelab: Building Android Apps with Bazel](https://developer.android.com/codelabs/bazel-android-intro#0).
-  This codelab explains how to build Android apps with Bazel.
-
-## Features
-
-Bazel has Android rules for building and testing Android apps, integrating with
-the SDK/NDK, and creating emulator images. There are also Bazel plugins for
-Android Studio and IntelliJ.
-
-* [Android rules](/reference/be/android). The Build Encyclopedia describes the rules
-  for building and testing Android apps with Bazel.
-* [Integration with Android Studio](/install/ide). Bazel is compatible with
-  Android Studio using the [Android Studio with Bazel](https://ij.bazel.build/)
-  plugin.
-* [`mobile-install` for Android](/docs/mobile-install). Bazel's `mobile-install`
-  feature provides automated build-and-deploy functionality for building and
-  testing Android apps directly on Android devices and emulators.
-* [Android instrumentation testing](/docs/android-instrumentation-test) on
-  emulators and devices.
-* [Android NDK integration](/docs/android-ndk). Bazel supports compiling to
-  native code through direct NDK integration and the C++ rules.
-* [Android build performance](/docs/android-build-performance). This page
-  provides information on optimizing build performance for Android apps.
-
-## Further reading
-
-* Integrating with dependencies from Google Maven and Maven Central with [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external).
-* Learn [How Android Builds Work in Bazel](https://blog.bazel.build/2018/02/14/how-android-builds-work-in-bazel.html).
diff --git a/7.6.1/docs/bazel-and-apple.mdx b/7.6.1/docs/bazel-and-apple.mdx
deleted file mode 100644
index de33c9a..0000000
--- a/7.6.1/docs/bazel-and-apple.mdx
+++ /dev/null
@@ -1,86 +0,0 @@
----
-title: 'Apple Apps and Bazel'
----
-
-
-This page contains resources that help you use Bazel to build macOS and iOS
-projects. It links to a tutorial, build rules, and other information specific to
-using Bazel to build and test for those platforms.
-
-## Working with Bazel
-
-The following resources will help you work with Bazel on macOS and iOS projects:
-
-* [Tutorial: Building an iOS app](/start/ios-app)
-* [Objective-C build rules](/reference/be/objective-c)
-* [General Apple rules](https://github.com/bazelbuild/rules_apple)
-* [Integration with Xcode](/install/ide)
-
-## Migrating to Bazel
-
-If you currently build your macOS and iOS projects with Xcode, follow the steps
-in the migration guide to start building them with Bazel:
-
-* [Migrating from Xcode to Bazel](/migrate/xcode)
-
-## Apple apps and new rules
-
-**Note**: Creating new rules is for advanced build and test scenarios.
-You do not need it when getting started with Bazel.
- -The following modules, configuration fragments, and providers will help you -[extend Bazel's capabilities](/extending/concepts) -when building your macOS and iOS projects: - -* Modules: - - * [`apple_bitcode_mode`](/rules/lib/builtins/apple_bitcode_mode) - * [`apple_common`](/rules/lib/toplevel/apple_common) - * [`apple_platform`](/rules/lib/builtins/apple_platform) - * [`apple_platform_type`](/rules/lib/builtins/apple_platform_type) - * [`apple_toolchain`](/rules/lib/builtins/apple_toolchain) - -* Configuration fragments: - - * [`apple`](/rules/lib/fragments/apple) - -* Providers: - - * [`ObjcProvider`](/rules/lib/providers/ObjcProvider) - * [`XcodeVersionConfig`](/rules/lib/providers/XcodeVersionConfig) - -## Xcode selection - -If your build requires Xcode, Bazel will select an appropriate version based on -the `--xcode_config` and `--xcode_version` flags. The `--xcode_config` consumes -the set of available Xcode versions and sets a default version if -`--xcode_version` is not passed. This default is overridden by the -`--xcode_version` flag, as long as it is set to an Xcode version that is -represented in the `--xcode_config` target. - -If you do not pass `--xcode_config`, Bazel will use the autogenerated -[`XcodeVersionConfig`](/rules/lib/providers/XcodeVersionConfig) that represents the -Xcode versions available on your host machine. The default version is -the newest available Xcode version. This is appropriate for local execution. - -If you are performing remote builds, you should set `--xcode_config` to an -[`xcode_config`](/reference/be/objective-c#xcode_config) -target whose `versions` attribute is a list of remotely available -[`xcode_version`](/reference/be/objective-c#xcode_version) -targets, and whose `default` attribute is one of these -[`xcode_versions`](/reference/be/objective-c#xcode_version). - -If you are using dynamic execution, you should set `--xcode_config` to an -[`xcode_config`](/reference/be/objective-c#xcode_config) -target whose `remote_versions` attribute is an -[`available_xcodes`](/reference/be/workspace#available_xcodes) -target containing the remotely available Xcode versions, and whose -`local_versions` attribute is an -[`available_xcodes`](/reference/be/workspace#available_xcodes) -target containing the locally available Xcode versions. For `local_versions`, -you probably want to use the autogenerated -`@local_config_xcode//:host_available_xcodes`. The default Xcode version is the -newest mutually available version, if there is one, otherwise the default of the -`local_versions` target. If you prefer to use the `local_versions` default -as the default, you can pass `--experimental_prefer_mutual_default=false`. diff --git a/7.6.1/docs/bazel-and-cpp.mdx b/7.6.1/docs/bazel-and-cpp.mdx deleted file mode 100644 index 9ade384..0000000 --- a/7.6.1/docs/bazel-and-cpp.mdx +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: 'C++ and Bazel' ---- - - - -This page contains resources that help you use Bazel with C++ projects. It links -to a tutorial, build rules, and other information specific to building C++ -projects with Bazel. 
- -## Working with Bazel - -The following resources will help you work with Bazel on C++ projects: - -* [Tutorial: Building a C++ project](/start/cpp) -* [C++ common use cases](/tutorials/cpp-use-cases) -* [C/C++ rules](/reference/be/c-cpp) -* Essential Libraries - - [Abseil](https://abseil.io/docs/cpp/quickstart) - - [Boost](https://github.com/nelhage/rules_boost) - - [HTTPS Requests: CPR and libcurl](https://github.com/hedronvision/bazel-make-cc-https-easy) -* [C++ toolchain configuration](/docs/cc-toolchain-config-reference) -* [Tutorial: Configuring C++ toolchains](/tutorials/ccp-toolchain-config) -* [Integrating with C++ rules](/configure/integrate-cpp) - -## Best practices - -In addition to [general Bazel best practices](/configure/best-practices), below are -best practices specific to C++ projects. - -### BUILD files - -Follow the guidelines below when creating your BUILD files: - -* Each `BUILD` file should contain one [`cc_library`](/reference/be/c-cpp#cc_library) - rule target per compilation unit in the directory. - -* You should granularize your C++ libraries as much as - possible to maximize incrementality and parallelize the build. - -* If there is a single source file in `srcs`, name the library the same as - that C++ file's name. This library should contain C++ file(s), any matching - header file(s), and the library's direct dependencies. For example: - - ```python - cc_library( - name = "mylib", - srcs = ["mylib.cc"], - hdrs = ["mylib.h"], - deps = [":lower-level-lib"] - ) - ``` - -* Use one `cc_test` rule target per `cc_library` target in the file. Name the - target `[library-name]_test` and the source file `[library-name]_test.cc`. - For example, a test target for the `mylib` library target shown above would - look like this: - - ```python - cc_test( - name = "mylib_test", - srcs = ["mylib_test.cc"], - deps = [":mylib"] - ) - ``` - -### Include paths - -Follow these guidelines for include paths: - -* Make all include paths relative to the workspace directory. - -* Use quoted includes (`#include "foo/bar/baz.h"`) for non-system headers, not - angle-brackets (`#include `). - -* Avoid using UNIX directory shortcuts, such as `.` (current directory) or `..` - (parent directory). - -* For legacy or `third_party` code that requires includes pointing outside the - project repository, such as external repository includes requiring a prefix, - use the [`include_prefix`](/reference/be/c-cpp#cc_library.include_prefix) and - [`strip_include_prefix`](/reference/be/c-cpp#cc_library.strip_include_prefix) - arguments on the `cc_library` rule target. - -### Toolchain features - -The following optional [features](/docs/cc-toolchain-config-reference#features) -can improve the hygiene of a C++ project. They can be enabled using the -`--features` command-line flag or the `features` attribute of -[`repo`](/external/overview#repo.bazel), -[`package`](/reference/be/functions#package) or `cc_*` rules: - -* The `parse_headers` feature makes it so that the C++ compiler is used to parse - (but not compile) all header files in the built targets and their dependencies - when using the - [`--process_headers_in_dependencies`](/reference/command-line-reference#flag--process_headers_in_dependencies) - flag. This can help catch issues in header-only libraries and ensure that - headers are self-contained and independent of the order in which they are - included. -* The `layering_check` feature enforces that targets only include headers - provided by their direct dependencies. 
The default toolchain supports this
-  feature on Linux with `clang` as the compiler.
diff --git a/7.6.1/docs/bazel-and-java.mdx b/7.6.1/docs/bazel-and-java.mdx
deleted file mode 100644
index 78040c4..0000000
--- a/7.6.1/docs/bazel-and-java.mdx
+++ /dev/null
@@ -1,344 +0,0 @@
----
-title: 'Java and Bazel'
----
-
-
-This page contains resources that help you use Bazel with Java projects. It
-links to a tutorial, build rules, and other information specific to building
-Java projects with Bazel.
-
-## Working with Bazel
-
-The following resources will help you work with Bazel on Java projects:
-
-* [Tutorial: Building a Java Project](/start/java)
-* [Java rules](/reference/be/java)
-
-## Migrating to Bazel
-
-If you currently build your Java projects with Maven, follow the steps in the
-migration guide to start building your Maven projects with Bazel:
-
-* [Migrating from Maven to Bazel](/migrate/maven)
-
-## Java versions
-
-There are two relevant versions of Java that are set with configuration flags:
-
-* the version of the source files in the repository
-* the version of the Java runtime that is used to execute and test the code
-
-### Configuring the version of the source code in your repository
-
-Without additional configuration, Bazel assumes all Java source files in the
-repository are written in a single Java version. To specify the version of the
-sources in the repository, add `build --java_language_version={ver}` to your
-`.bazelrc` file, where `{ver}` is, for example, `11`. Bazel repository owners
-should set this flag so that Bazel and its users can reference the source code's
-Java version number. For more details, see the
-[Java language version flag](/docs/user-manual#java-language-version).
-
-### Configuring the JVM used to execute and test the code
-
-Bazel uses one JDK for compilation and another JVM to execute and test the code.
-
-By default, Bazel compiles the code using a JDK it downloads, and executes and
-tests the code with the JVM installed on the local machine. Bazel searches for
-the JVM using `JAVA_HOME` or the `PATH`.
-
-The resulting binaries link against the locally installed JVM's system
-libraries, which means the resulting binaries depend on what is installed on
-the machine.
-
-To configure the JVM used for execution and testing, use the
-`--java_runtime_version` flag. The default value is `local_jdk`.
-
-### Hermetic testing and compilation
-
-To create a hermetic compilation, use the command-line flag
-`--java_runtime_version=remotejdk_11`. The code is compiled for, executed, and
-tested on a JVM downloaded from a remote repository. For more details, see the
-[Java runtime version flag](/docs/user-manual#java_runtime_version).
-
-### Configuring compilation and execution of build tools in Java
-
-There is a second pair of JDK and JVM used to build and execute tools that are
-used in the build process but do not end up in the build results. That JDK and
-JVM are controlled using `--tool_java_language_version` and
-`--tool_java_runtime_version`. The default values are `11` and `remotejdk_11`,
-respectively.
-
-#### Compiling using locally installed JDK
-
-By default, Bazel compiles using a remote JDK, because it overrides the JDK's
-internals. Compilation toolchains using the locally installed JDK are
-configured but not used.
-
-To compile using the locally installed JDK — that is, to use the compilation
-toolchains for the local JDK — pass the additional flag
-`--extra_toolchains=@local_jdk//:all`; note, however, that this may not work
-for JDKs from arbitrary vendors.
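-
-If you want the local-JDK toolchains considered on every build without passing
-the flag each time, they can be registered in the `WORKSPACE` instead. A
-minimal sketch, assuming the standard autodetected `@local_jdk` repository:
-
-```python
-# WORKSPACE — equivalent in spirit to passing
-# --extra_toolchains=@local_jdk//:all on each invocation.
-register_toolchains("@local_jdk//:all")
-```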
- -For more details, see -[configuring Java toolchains](#config-java-toolchains). - -## Best practices - -In addition to [general Bazel best practices](/configure/best-practices), below are -best practices specific to Java projects. - -### Directory structure - -Prefer Maven's standard directory layout (sources under `src/main/java`, tests -under `src/test/java`). - -### BUILD files - -Follow these guidelines when creating your `BUILD` files: - -* Use one `BUILD` file per directory containing Java sources, because this - improves build performance. - -* Every `BUILD` file should contain one `java_library` rule that looks like - this: - - ```python - java_library( - name = "directory-name", - srcs = glob(["*.java"]), - deps = [...], - ) - ``` - -* The name of the library should be the name of the directory containing the - `BUILD` file. This makes the label of the library shorter, that is use - `"//package"` instead of `"//package:package"`. - -* The sources should be a non-recursive [`glob`](/reference/be/functions#glob) of - all Java files in the directory. - -* Tests should be in a matching directory under `src/test` and depend on this - library. - -## Creating new rules for advanced Java builds - -**Note**: Creating new rules is for advanced build and test scenarios. You do -not need it when getting started with Bazel. - -The following modules, configuration fragments, and providers will help you -[extend Bazel's capabilities](/extending/concepts) when building your Java -projects: - -* Main Java module: [`java_common`](/rules/lib/toplevel/java_common) -* Main Java provider: [`JavaInfo`](/rules/lib/providers/JavaInfo) -* Configuration fragment: [`java`](/rules/lib/fragments/java) -* Other modules: - - * [`java_annotation_processing`](/rules/lib/builtins/java_annotation_processing) - * [`java_compilation_info`](/rules/lib/providers/java_compilation_info) - * [`java_output`](/rules/lib/builtins/java_output) - * [`java_output_jars`](/rules/lib/providers/java_output_jars) - * [`JavaRuntimeInfo`](/rules/lib/providers/JavaRuntimeInfo) - * [`JavaToolchainInfo`](/rules/lib/providers/JavaToolchainInfo) - -## Configuring the Java toolchains - -Bazel uses two types of Java toolchains: -- execution, used to execute and test Java binaries, controlled with - `--java_runtime_version` flag -- compilation, used to compile Java sources, controlled with - `--java_language_version` flag - -### Configuring additional execution toolchains - -Execution toolchain is the JVM, either local or from a repository, with some -additional information about its version, operating system, and CPU -architecture. - -Java execution toolchains may added using `local_java_repository` or -`remote_java_repository` rules in the `WORKSPACE` file. Adding the rule makes -the JVM available using a flag. When multiple definitions for the same operating -system and CPU architecture are given, the first one is used. 
- -Example configuration of local JVM: - -```python -load("@bazel_tools//tools/jdk:local_java_repository.bzl", "local_java_repository") - -local_java_repository( - name = "additionaljdk", # Can be used with --java_runtime_version=additionaljdk, --java_runtime_version=11 or --java_runtime_version=additionaljdk_11 - version = 11, # Optional, if not set it is autodetected - java_home = "/usr/lib/jdk-15/", # Path to directory containing bin/java -) -``` - -Example configuration of remote JVM: - -```python -load("@bazel_tools//tools/jdk:remote_java_repository.bzl", "remote_java_repository") - -remote_java_repository( - name = "openjdk_canary_linux_arm", - prefix = "openjdk_canary", # Can be used with --java_runtime_version=openjdk_canary_11 - version = "11", # or --java_runtime_version=11 - target_compatible_with = [ # Specifies constraints this JVM is compatible with - "@platforms//cpu:arm", - "@platforms//os:linux", - ], - urls = ..., # Other parameters are from http_repository rule. - sha256 = ..., - strip_prefix = ... -) -``` - -### Configuring additional compilation toolchains - -Compilation toolchain is composed of JDK and multiple tools that Bazel uses -during the compilation and that provides additional features, such as: Error -Prone, strict Java dependencies, header compilation, Android desugaring, -coverage instrumentation, and genclass handling for IDEs. - -JavaBuilder is a Bazel-bundled tool that executes compilation, and provides the -aforementioned features. Actual compilation is executed using the internal -compiler by the JDK. The JDK used for compilation is specified by `java_runtime` -attribute of the toolchain. - -Bazel overrides some JDK internals. In case of JDK version > 9, -`java.compiler` and `jdk.compiler` modules are patched using JDK's flag -`--patch_module`. In case of JDK version 8, the Java compiler is patched using -`-Xbootclasspath` flag. - -VanillaJavaBuilder is a second implementation of JavaBuilder, -which does not modify JDK's internal compiler and does not have any of the -additional features. VanillaJavaBuilder is not used by any of the built-in -toolchains. - -In addition to JavaBuilder, Bazel uses several other tools during compilation. - -The `ijar` tool processes `jar` files to remove everything except call -signatures. Resulting jars are called header jars. They are used to improve the -compilation incrementality by only recompiling downstream dependents when the -body of a function changes. - -The `singlejar` tool packs together multiple `jar` files into a single one. - -The `genclass` tool post-processes the output of a Java compilation, and produces -a `jar` containing only the class files for sources that were generated by -annotation processors. - -The `JacocoRunner` tool runs Jacoco over instrumented files and outputs results in -LCOV format. - -The `TestRunner` tool executes JUnit 4 tests in a controlled environment. - -You can reconfigure the compilation by adding `default_java_toolchain` macro to -a `BUILD` file and registering it either by adding `register_toolchains` rule to -the `WORKSPACE` file or by using -[`--extra_toolchains`](/docs/user-manual#extra-toolchains) flag. - -The toolchain is only used when the `source_version` attribute matches the -value specified by `--java_language_version` flag. 
-
-Example toolchain configuration:
-
-```python
-load(
-    "@bazel_tools//tools/jdk:default_java_toolchain.bzl",
-    "default_java_toolchain", "DEFAULT_TOOLCHAIN_CONFIGURATION", "BASE_JDK9_JVM_OPTS", "DEFAULT_JAVACOPTS"
-)
-
-default_java_toolchain(
-    name = "repository_default_toolchain",
-    configuration = DEFAULT_TOOLCHAIN_CONFIGURATION, # One of the predefined configurations
-    # Other parameters are from the java_toolchain rule:
-    java_runtime = "@bazel_tools//tools/jdk:remote_jdk11", # JDK to use for compilation and for executing the toolchain's tools
-    jvm_opts = BASE_JDK9_JVM_OPTS + ["--enable_preview"], # Additional JDK options
-    javacopts = DEFAULT_JAVACOPTS + ["--enable_preview"], # Additional javac options
-    source_version = "9",
-)
-```
-
-This toolchain can be used via
-`--extra_toolchains=//:repository_default_toolchain_definition` or by adding
-`register_toolchains("//:repository_default_toolchain_definition")` to the
-workspace.
-
-Predefined configurations:
-
-- `DEFAULT_TOOLCHAIN_CONFIGURATION`: all features, supports JDK versions >= 9
-- `VANILLA_TOOLCHAIN_CONFIGURATION`: no additional features, supports JDKs of
-  arbitrary vendors.
-- `PREBUILT_TOOLCHAIN_CONFIGURATION`: same as default, but only uses prebuilt
-  tools (`ijar`, `singlejar`)
-- `NONPREBUILT_TOOLCHAIN_CONFIGURATION`: same as default, but all tools are
-  built from sources (this may be useful on operating systems with a different
-  libc)
-
-#### Configuring JVM and Java compiler flags
-
-You may configure JVM and javac flags either with command-line flags or with
-`default_java_toolchain` attributes.
-
-The relevant flags are `--jvmopt`, `--host_jvmopt`, `--javacopt`, and
-`--host_javacopt`.
-
-The relevant `default_java_toolchain` attributes are `javacopts`, `jvm_opts`,
-`javabuilder_jvm_opts`, and `turbine_jvm_opts`.
-
-#### Package-specific Java compiler flags configuration
-
-You can configure different Java compiler flags for specific source files using
-the `package_configuration` attribute of `default_java_toolchain`. Refer to the
-example below.
-
-```python
-load("@bazel_tools//tools/jdk:default_java_toolchain.bzl", "default_java_toolchain")
-
-# This is a convenience macro that inherits values from Bazel's default java_toolchain
-default_java_toolchain(
-    name = "toolchain",
-    package_configuration = [
-        ":error_prone",
-    ],
-    visibility = ["//visibility:public"],
-)
-
-# This associates a set of javac flags with a set of packages
-java_package_configuration(
-    name = "error_prone",
-    javacopts = [
-        "-Xep:MissingOverride:ERROR",
-    ],
-    packages = ["error_prone_packages"],
-)
-
-# This is a regular package_group, which is used to specify a set of packages to apply flags to
-package_group(
-    name = "error_prone_packages",
-    packages = [
-        "//foo/...",
-        "-//foo/bar/...", # this is an exclusion
-    ],
-)
-```
-
-#### Multiple versions of Java source code in a single repository
-
-Bazel only supports compiling a single version of Java sources in a build.
-This means that when building a Java test or an application, all dependencies
-are built against the same Java version.
-
-However, separate builds may be executed using different flags.
-
-To make the task of using different flags easier, sets of flags for a specific
-version may be grouped with `.bazelrc` configs:
-
-```
-build:java8 --java_language_version=8
-build:java8 --java_runtime_version=local_jdk_8
-build:java11 --java_language_version=11
-build:java11 --java_runtime_version=remotejdk_11
-```
-
-These configs can be used with the `--config` flag, for example
-`bazel test --config=java11 //:java11_test`.
diff --git a/7.6.1/docs/bazel-and-javascript.mdx b/7.6.1/docs/bazel-and-javascript.mdx
deleted file mode 100644
index 63d8018..0000000
--- a/7.6.1/docs/bazel-and-javascript.mdx
+++ /dev/null
@@ -1,24 +0,0 @@
----
-title: 'JavaScript and Bazel'
----
-
-
-This page contains resources that help you use Bazel with JavaScript projects.
-It links to build rules and other information specific to building JavaScript
-with Bazel.
-
-The following resources will help you work with Bazel on JavaScript projects:
-
-* [NodeJS toolchain](https://github.com/bazelbuild/rules_nodejs)
-* [rules_js](https://github.com/aspect-build/rules_js) - Bazel rules for building JavaScript programs
-* [rules_esbuild](https://github.com/aspect-build/rules_esbuild) - Bazel rules for the [esbuild](https://esbuild.github.io) JS bundler
-* [rules_terser](https://github.com/aspect-build/rules_terser) - Bazel rules for [Terser](https://terser.org) - a JavaScript minifier
-* [rules_swc](https://github.com/aspect-build/rules_swc) - Bazel rules for [swc](https://swc.rs)
-* [rules_ts](https://github.com/aspect-build/rules_ts) - Bazel rules for [TypeScript](http://typescriptlang.org)
-* [rules_webpack](https://github.com/aspect-build/rules_webpack) - Bazel rules for [Webpack](https://webpack.js.org)
-* [rules_rollup](https://github.com/aspect-build/rules_rollup) - Bazel rules for [Rollup](https://rollupjs.org) - a JavaScript bundler
-* [rules_jest](https://github.com/aspect-build/rules_jest) - Bazel rules to run tests using [Jest](https://jestjs.io)
-* [rules_jasmine](https://github.com/aspect-build/rules_jasmine) - Bazel rules to run tests using [Jasmine](https://jasmine.github.io/)
-* [rules_cypress](https://github.com/aspect-build/rules_cypress) - Bazel rules to run tests using [Cypress](https://cypress.io)
-* [rules_deno](https://github.com/aspect-build/rules_deno) - Bazel rules for [Deno](http://deno.land)
diff --git a/7.6.1/docs/configurable-attributes.mdx b/7.6.1/docs/configurable-attributes.mdx
deleted file mode 100644
index 3924eb4..0000000
--- a/7.6.1/docs/configurable-attributes.mdx
+++ /dev/null
@@ -1,1099 +0,0 @@
----
-title: 'Configurable Build Attributes'
----
-
-
-**_Configurable attributes_**, commonly known as [`select()`](
-/reference/be/functions#select), is a Bazel feature that lets users toggle the values
-of build rule attributes at the command line.
-
-This can be used, for example, for a multiplatform library that automatically
-chooses the appropriate implementation for the architecture, or for a
-feature-configurable binary that can be customized at build time.
- -## Example - -```python -# myapp/BUILD - -cc_binary( - name = "mybinary", - srcs = ["main.cc"], - deps = select({ - ":arm_build": [":arm_lib"], - ":x86_debug_build": [":x86_dev_lib"], - "//conditions:default": [":generic_lib"], - }), -) - -config_setting( - name = "arm_build", - values = {"cpu": "arm"}, -) - -config_setting( - name = "x86_debug_build", - values = { - "cpu": "x86", - "compilation_mode": "dbg", - }, -) -``` - -This declares a `cc_binary` that "chooses" its deps based on the flags at the -command line. Specifically, `deps` becomes: - - - - - - - - - - - - - - - - - - - - - - -
-Command | `deps =`
------- | ------
-`bazel build //myapp:mybinary --cpu=arm` | `[":arm_lib"]`
-`bazel build //myapp:mybinary -c dbg --cpu=x86` | `[":x86_dev_lib"]`
-`bazel build //myapp:mybinary --cpu=ppc` | `[":generic_lib"]`
-`bazel build //myapp:mybinary -c dbg --cpu=ppc` | `[":generic_lib"]`
- -`select()` serves as a placeholder for a value that will be chosen based on -*configuration conditions*, which are labels referencing [`config_setting`](/reference/be/general#config_setting) -targets. By using `select()` in a configurable attribute, the attribute -effectively adopts different values when different conditions hold. - -Matches must be unambiguous: if multiple conditions match then either -* They all resolve to the same value. For example, when running on linux x86, this is unambiguous - `{"@platforms//os:linux": "Hello", "@platforms//cpu:x86_64": "Hello"}` because both branches resolve to "hello". -* One's `values` is a strict superset of all others'. For example, `values = {"cpu": "x86", "compilation_mode": "dbg"}` - is an unambiguous specialization of `values = {"cpu": "x86"}`. - -The built-in condition [`//conditions:default`](#default-condition) automatically matches when -nothing else does. - -While this example uses `deps`, `select()` works just as well on `srcs`, -`resources`, `cmd`, and most other attributes. Only a small number of attributes -are *non-configurable*, and these are clearly annotated. For example, -`config_setting`'s own -[`values`](/reference/be/general#config_setting.values) attribute is non-configurable. - -## `select()` and dependencies - -Certain attributes change the build parameters for all transitive dependencies -under a target. For example, `genrule`'s `tools` changes `--cpu` to the CPU of -the machine running Bazel (which, thanks to cross-compilation, may be different -than the CPU the target is built for). This is known as a -[configuration transition](/reference/glossary#transition). - -Given - -```python -#myapp/BUILD - -config_setting( - name = "arm_cpu", - values = {"cpu": "arm"}, -) - -config_setting( - name = "x86_cpu", - values = {"cpu": "x86"}, -) - -genrule( - name = "my_genrule", - srcs = select({ - ":arm_cpu": ["g_arm.src"], - ":x86_cpu": ["g_x86.src"], - }), - tools = select({ - ":arm_cpu": [":tool1"], - ":x86_cpu": [":tool2"], - }), -) - -cc_binary( - name = "tool1", - srcs = select({ - ":arm_cpu": ["armtool.cc"], - ":x86_cpu": ["x86tool.cc"], - }), -) -``` - -running - -```sh -$ bazel build //myapp:my_genrule --cpu=arm -``` - -on an `x86` developer machine binds the build to `g_arm.src`, `tool1`, and -`x86tool.cc`. Both of the `select`s attached to `my_genrule` use `my_genrule`'s -build parameters, which include `--cpu=arm`. The `tools` attribute changes -`--cpu` to `x86` for `tool1` and its transitive dependencies. The `select` on -`tool1` uses `tool1`'s build parameters, which include `--cpu=x86`. - -## Configuration conditions - -Each key in a configurable attribute is a label reference to a -[`config_setting`](/reference/be/general#config_setting) or -[`constraint_value`](/reference/be/platforms-and-toolchains#constraint_value). - -`config_setting` is just a collection of -expected command line flag settings. By encapsulating these in a target, it's -easy to maintain "standard" conditions users can reference from multiple places. - -`constraint_value` provides support for [multi-platform behavior](#platforms). - -### Built-in flags - -Flags like `--cpu` are built into Bazel: the build tool natively understands -them for all builds in all projects. 
These are specified with -[`config_setting`](/reference/be/general#config_setting)'s -[`values`](/reference/be/general#config_setting.values) attribute: - -```python -config_setting( - name = "meaningful_condition_name", - values = { - "flag1": "value1", - "flag2": "value2", - ... - }, -) -``` - -`flagN` is a flag name (without `--`, so `"cpu"` instead of `"--cpu"`). `valueN` -is the expected value for that flag. `:meaningful_condition_name` matches if -*every* entry in `values` matches. Order is irrelevant. - -`valueN` is parsed as if it was set on the command line. This means: - -* `values = { "compilation_mode": "opt" }` matches `bazel build -c opt` -* `values = { "force_pic": "true" }` matches `bazel build --force_pic=1` -* `values = { "force_pic": "0" }` matches `bazel build --noforce_pic` - -`config_setting` only supports flags that affect target behavior. For example, -[`--show_progress`](/docs/user-manual#show-progress) isn't allowed because -it only affects how Bazel reports progress to the user. Targets can't use that -flag to construct their results. The exact set of supported flags isn't -documented. In practice, most flags that "make sense" work. - -### Custom flags - -You can model your own project-specific flags with -[Starlark build settings][BuildSettings]. Unlike built-in flags, these are -defined as build targets, so Bazel references them with target labels. - -These are triggered with [`config_setting`](/reference/be/general#config_setting)'s -[`flag_values`](/reference/be/general#config_setting.flag_values) -attribute: - -```python -config_setting( - name = "meaningful_condition_name", - flag_values = { - "//myflags:flag1": "value1", - "//myflags:flag2": "value2", - ... - }, -) -``` - -Behavior is the same as for [built-in flags](#built-in-flags). See [here](https://github.com/bazelbuild/examples/tree/HEAD/configurations/select_on_build_setting) -for a working example. - -[`--define`](/reference/command-line-reference#flag--define) -is an alternative legacy syntax for custom flags (for example -`--define foo=bar`). This can be expressed either in the -[values](/reference/be/general#config_setting.values) attribute -(`values = {"define": "foo=bar"}`) or the -[define_values](/reference/be/general#config_setting.define_values) attribute -(`define_values = {"foo": "bar"}`). `--define` is only supported for backwards -compatibility. Prefer Starlark build settings whenever possible. - -`values`, `flag_values`, and `define_values` evaluate independently. The -`config_setting` matches if all values across all of them match. - -## The default condition - -The built-in condition `//conditions:default` matches when no other condition -matches. - -Because of the "exactly one match" rule, a configurable attribute with no match -and no default condition emits a `"no matching conditions"` error. This can -protect against silent failures from unexpected settings: - -```python -# myapp/BUILD - -config_setting( - name = "x86_cpu", - values = {"cpu": "x86"}, -) - -cc_library( - name = "x86_only_lib", - srcs = select({ - ":x86_cpu": ["lib.cc"], - }), -) -``` - -```sh -$ bazel build //myapp:x86_only_lib --cpu=arm -ERROR: Configurable attribute "srcs" doesn't match this configuration (would -a default condition help?). -Conditions checked: - //myapp:x86_cpu -``` - -For even clearer errors, you can set custom messages with `select()`'s -[`no_match_error`](#custom-error-messages) attribute. 
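-
-As a minimal sketch (reusing `:x86_cpu` from the example above), the attribute
-replaces the generic error with a project-specific hint:
-
-```python
-cc_library(
-    name = "x86_only_lib",
-    srcs = select(
-        {":x86_cpu": ["lib.cc"]},
-        # Reported instead of the generic "no matching conditions" error.
-        no_match_error = "x86_only_lib only supports --cpu=x86.",
-    ),
-)
-```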
-
-## Platforms
-
-While the ability to specify multiple flags on the command line provides
-flexibility, it can also be burdensome to individually set each one every time
-you want to build a target. [Platforms](/extending/platforms)
-let you consolidate these into simple bundles.
-
-```python
-# myapp/BUILD
-
-sh_binary(
-    name = "my_rocks",
-    srcs = select({
-        ":basalt": ["pyroxene.sh"],
-        ":marble": ["calcite.sh"],
-        "//conditions:default": ["feldspar.sh"],
-    }),
-)
-
-config_setting(
-    name = "basalt",
-    constraint_values = [
-        ":black",
-        ":igneous",
-    ],
-)
-
-config_setting(
-    name = "marble",
-    constraint_values = [
-        ":white",
-        ":metamorphic",
-    ],
-)
-
-# constraint_setting acts as an enum type, and constraint_value as an enum value.
-constraint_setting(name = "color")
-constraint_value(name = "black", constraint_setting = "color")
-constraint_value(name = "white", constraint_setting = "color")
-constraint_setting(name = "texture")
-constraint_value(name = "smooth", constraint_setting = "texture")
-constraint_setting(name = "type")
-constraint_value(name = "igneous", constraint_setting = "type")
-constraint_value(name = "metamorphic", constraint_setting = "type")
-
-platform(
-    name = "basalt_platform",
-    constraint_values = [
-        ":black",
-        ":igneous",
-    ],
-)
-
-platform(
-    name = "marble_platform",
-    constraint_values = [
-        ":white",
-        ":smooth",
-        ":metamorphic",
-    ],
-)
-```
-
-The platform can be specified on the command line. It activates the
-`config_setting`s that contain a subset of the platform's `constraint_values`,
-allowing those `config_setting`s to match in `select()` expressions.
-
-For example, in order to set the `srcs` attribute of `my_rocks` to `calcite.sh`,
-you can simply run
-
-```sh
-bazel build //myapp:my_rocks --platforms=//myapp:marble_platform
-```
-
-Without platforms, this might look something like
-
-```sh
-bazel build //myapp:my_rocks --define color=white --define texture=smooth --define type=metamorphic
-```
-
-`select()` can also directly read `constraint_value`s:
-
-```python
-constraint_setting(name = "type")
-constraint_value(name = "igneous", constraint_setting = "type")
-constraint_value(name = "metamorphic", constraint_setting = "type")
-sh_binary(
-    name = "my_rocks",
-    srcs = select({
-        ":igneous": ["igneous.sh"],
-        ":metamorphic": ["metamorphic.sh"],
-    }),
-)
-```
-
-This saves the need for boilerplate `config_setting`s when you only need to
-check against single values.
-
-Platforms are still under development. See the
-[documentation](/concepts/platforms) for details.
-
-## Combining `select()`s
-
-`select` can appear multiple times in the same attribute:
-
-```python
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"] +
-           select({
-               ":armeabi_mode": ["armeabi_src.sh"],
-               ":x86_mode": ["x86_src.sh"],
-           }) +
-           select({
-               ":opt_mode": ["opt_extras.sh"],
-               ":dbg_mode": ["dbg_extras.sh"],
-           }),
-)
-```
-
-Note: Some restrictions apply to what can be combined in a `select`'s values:
- - Duplicate labels can appear in different paths of the same `select`.
- - Duplicate labels can *not* appear within the same path of a `select`.
- - Duplicate labels can *not* appear across multiple combined `select`s (no matter what path).
-
-`select` cannot appear inside another `select`. If you need to nest `select`s
-and your attribute takes other targets as values, use an intermediate target:
-
-```python
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"],
-    deps = select({
-        ":armeabi_mode": [":armeabi_lib"],
-        ...
- }), -) - -sh_library( - name = "armeabi_lib", - srcs = select({ - ":opt_mode": ["armeabi_with_opt.sh"], - ... - }), -) -``` - -If you need a `select` to match when multiple conditions match, consider [AND -chaining](#and-chaining). - -## OR chaining - -Consider the following: - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1": [":standard_lib"], - ":config2": [":standard_lib"], - ":config3": [":standard_lib"], - ":config4": [":special_lib"], - }), -) -``` - -Most conditions evaluate to the same dep. But this syntax is hard to read and -maintain. It would be nice to not have to repeat `[":standard_lib"]` multiple -times. - -One option is to predefine the value as a BUILD variable: - -```python -STANDARD_DEP = [":standard_lib"] - -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1": STANDARD_DEP, - ":config2": STANDARD_DEP, - ":config3": STANDARD_DEP, - ":config4": [":special_lib"], - }), -) -``` - -This makes it easier to manage the dependency. But it still causes unnecessary -duplication. - -For more direct support, use one of the following: - -### `selects.with_or` - -The -[with_or](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectswith_or) -macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md) -module supports `OR`ing conditions directly inside a `select`: - -```python -load("@bazel_skylib//lib:selects.bzl", "selects") -``` - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = selects.with_or({ - (":config1", ":config2", ":config3"): [":standard_lib"], - ":config4": [":special_lib"], - }), -) -``` - -### `selects.config_setting_group` - - -The -[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group) -macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md) -module supports `OR`ing multiple `config_setting`s: - -```python -load("@bazel_skylib//lib:selects.bzl", "selects") -``` - - -```python -config_setting( - name = "config1", - values = {"cpu": "arm"}, -) -config_setting( - name = "config2", - values = {"compilation_mode": "dbg"}, -) -selects.config_setting_group( - name = "config1_or_2", - match_any = [":config1", ":config2"], -) -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1_or_2": [":standard_lib"], - "//conditions:default": [":other_lib"], - }), -) -``` - -Unlike `selects.with_or`, different targets can share `:config1_or_2` across -different attributes. - -It's an error for multiple conditions to match unless one is an unambiguous -"specialization" of the others or they all resolve to the same value. See [here](#configurable-build-example) for details. 
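-
-Because the group is an ordinary target, any rule can reference it. A minimal
-sketch of a second target sharing `:config1_or_2` in a different attribute
-(the source file names are hypothetical):
-
-```python
-sh_library(
-    name = "my_lib",
-    srcs = select({
-        ":config1_or_2": ["special.sh"],
-        "//conditions:default": ["standard.sh"],
-    }),
-)
-```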
-
-## AND chaining
-
-If you need a `select` branch to match when multiple conditions match, use the
-[Skylib](https://github.com/bazelbuild/bazel-skylib) macro
-[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group):
-
-```python
-load("@bazel_skylib//lib:selects.bzl", "selects")
-
-config_setting(
-    name = "config1",
-    values = {"cpu": "arm"},
-)
-config_setting(
-    name = "config2",
-    values = {"compilation_mode": "dbg"},
-)
-selects.config_setting_group(
-    name = "config1_and_2",
-    match_all = [":config1", ":config2"],
-)
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"],
-    deps = select({
-        ":config1_and_2": [":standard_lib"],
-        "//conditions:default": [":other_lib"],
-    }),
-)
-```
-
-Unlike OR chaining, existing `config_setting`s can't be directly `AND`ed
-inside a `select`. You have to explicitly wrap them in a `config_setting_group`.
-
-## Custom error messages
-
-By default, when no condition matches, the target the `select()` is attached to
-fails with the error:
-
-```sh
-ERROR: Configurable attribute "deps" doesn't match this configuration (would
-a default condition help?).
-Conditions checked:
-  //tools/cc_target_os:darwin
-  //tools/cc_target_os:android
-```
-
-This can be customized with the [`no_match_error`](/reference/be/functions#select)
-attribute:
-
-```python
-cc_library(
-    name = "my_lib",
-    deps = select(
-        {
-            "//tools/cc_target_os:android": [":android_deps"],
-            "//tools/cc_target_os:windows": [":windows_deps"],
-        },
-        no_match_error = "Please build with an Android or Windows toolchain",
-    ),
-)
-```
-
-```sh
-$ bazel build //myapp:my_lib
-ERROR: Configurable attribute "deps" doesn't match this configuration: Please
-build with an Android or Windows toolchain
-```
-
-## Rules compatibility
-
-Rule implementations receive the *resolved values* of configurable
-attributes. For example, given:
-
-```python
-# myapp/BUILD
-
-some_rule(
-    name = "my_target",
-    some_attr = select({
-        ":foo_mode": [":foo"],
-        ":bar_mode": [":bar"],
-    }),
-)
-```
-
-```sh
-$ bazel build //myapp:my_target --define mode=foo
-```
-
-Rule implementation code sees `ctx.attr.some_attr` as `[":foo"]`.
-
-Macros can accept `select()` clauses and pass them through to native
-rules. But *they cannot directly manipulate them*. For example, there's no way
-for a macro to convert
-
-```python
-select({"foo": "val"}, ...)
-```
-
-to
-
-```python
-select({"foo": "val_with_suffix"}, ...)
-```
-
-This is for two reasons.
-
-First, macros that need to know which path a `select` will choose *cannot work*
-because macros are evaluated in Bazel's [loading phase](/run/build#loading),
-which occurs before flag values are known.
-This is a core Bazel design restriction that's unlikely to change any time soon.
-
-Second, macros that just need to iterate over *all* `select` paths, while
-technically feasible, lack a coherent UI. Further design is necessary to change
-this.
-
-## Bazel query and cquery
-
-Bazel [`query`](/query/guide) operates over Bazel's
-[loading phase](/reference/glossary#loading-phase).
-This means it doesn't know what command line flags a target uses since those
-flags aren't evaluated until later in the build (in the
-[analysis phase](/reference/glossary#analysis-phase)).
-So it can't determine which `select()` branches are chosen.
-
-Bazel [`cquery`](/query/cquery) operates after Bazel's analysis phase, so it has
-all this information and can accurately resolve `select()`s.
-
-Consider:
-
-```python
-load("@bazel_skylib//rules:common_settings.bzl", "string_flag")
-```
-```python
-# myapp/BUILD
-
-string_flag(
-    name = "dog_type",
-    build_setting_default = "cat",
-)
-
-cc_library(
-    name = "my_lib",
-    deps = select({
-        ":long": [":foo_dep"],
-        ":short": [":bar_dep"],
-    }),
-)
-
-config_setting(
-    name = "long",
-    flag_values = {":dog_type": "dachshund"},
-)
-
-config_setting(
-    name = "short",
-    flag_values = {":dog_type": "pug"},
-)
-```
-
-`query` overapproximates `:my_lib`'s dependencies:
-
-```sh
-$ bazel query 'deps(//myapp:my_lib)'
-//myapp:my_lib
-//myapp:foo_dep
-//myapp:bar_dep
-```
-
-while `cquery` shows its exact dependencies:
-
-```sh
-$ bazel cquery 'deps(//myapp:my_lib)' --//myapp:dog_type=pug
-//myapp:my_lib
-//myapp:bar_dep
-```
-
-## FAQ
-
-### Why doesn't select() work in macros?
-
-select() *does* work in rules! See [Rules compatibility](#rules-compatibility) for
-details.
-
-What this question usually means is that select() doesn't work in
-*macros*. These are different from *rules*. See the
-documentation on [rules](/extending/rules) and [macros](/extending/macros)
-to understand the difference.
-Here's an end-to-end example:
-
-Define a rule and macro:
-
-```python
-# myapp/defs.bzl
-
-# Rule implementation: when an attribute is read, all select()s have already
-# been resolved. So it looks like a plain old attribute just like any other.
-def _impl(ctx):
-    name = ctx.attr.name
-    allcaps = ctx.attr.my_config_string.upper()  # This works fine on all values.
-    print("My name is " + name + " with custom message: " + allcaps)
-
-# Rule declaration:
-my_custom_bazel_rule = rule(
-    implementation = _impl,
-    attrs = {"my_config_string": attr.string()},
-)
-
-# Macro declaration:
-def my_custom_bazel_macro(name, my_config_string):
-    allcaps = my_config_string.upper()  # This line won't work with select(s).
-    print("My name is " + name + " with custom message: " + allcaps)
-```
-
-Instantiate the rule and macro:
-
-```python
-# myapp/BUILD
-
-load("//myapp:defs.bzl", "my_custom_bazel_rule")
-load("//myapp:defs.bzl", "my_custom_bazel_macro")
-
-my_custom_bazel_rule(
-    name = "happy_rule",
-    my_config_string = select({
-        "//tools/target_cpu:x86": "first string",
-        "//third_party/bazel_platforms/cpu:ppc": "second string",
-    }),
-)
-
-my_custom_bazel_macro(
-    name = "happy_macro",
-    my_config_string = "fixed string",
-)
-
-my_custom_bazel_macro(
-    name = "sad_macro",
-    my_config_string = select({
-        "//tools/target_cpu:x86": "first string",
-        "//third_party/bazel_platforms/cpu:ppc": "other string",
-    }),
-)
-```
-
-Building fails because `sad_macro` can't process the `select()`:
-
-```sh
-$ bazel build //myapp:all
-ERROR: /myworkspace/myapp/BUILD:17:1: Traceback
-  (most recent call last):
-File "/myworkspace/myapp/BUILD", line 17
-my_custom_bazel_macro(name = "sad_macro", my_config_stri..."}))
-File "/myworkspace/myapp/defs.bzl", line 4, in
-  my_custom_bazel_macro
-my_config_string.upper()
-type 'select' has no method upper().
-ERROR: error loading package 'myapp': Package 'myapp' contains errors.
-```
-
-Building succeeds when you comment out `sad_macro`:
-
-```sh
-# Comment out sad_macro so it doesn't mess up the build.
-$ bazel build //myapp:all
-DEBUG: /myworkspace/myapp/defs.bzl:5:3: My name is happy_macro with custom message: FIXED STRING.
-DEBUG: /myworkspace/myapp/defs.bzl:15:3: My name is happy_rule with custom message: FIRST STRING.
-```
-
-This is impossible to change because *by definition* macros are evaluated before
-Bazel reads the build's command line flags. That means there isn't enough
-information to evaluate select()s.
-
-Macros can, however, pass `select()`s as opaque blobs to rules:
-
-```python
-# myapp/defs.bzl
-
-def my_custom_bazel_macro(name, my_config_string):
-    print("Invoking macro " + name)
-    my_custom_bazel_rule(
-        name = name + "_as_target",
-        my_config_string = my_config_string,
-    )
-```
-
-```sh
-$ bazel build //myapp:sad_macro_less_sad
-DEBUG: /myworkspace/myapp/defs.bzl:23:3: Invoking macro sad_macro_less_sad.
-DEBUG: /myworkspace/myapp/defs.bzl:15:3: My name is sad_macro_less_sad with custom message: FIRST STRING.
-```
-
-### Why does select() always return true?
-
-Because *macros* (but not rules) by definition
-[can't evaluate `select()`s](#faq-select-macro), any attempt to do so
-usually produces an error:
-
-```sh
-ERROR: /myworkspace/myapp/BUILD:17:1: Traceback
-  (most recent call last):
-File "/myworkspace/myapp/BUILD", line 17
-my_custom_bazel_macro(name = "sad_macro", my_config_stri..."}))
-File "/myworkspace/myapp/defs.bzl", line 4, in
-  my_custom_bazel_macro
-my_config_string.upper()
-type 'select' has no method upper().
-```
-
-Booleans are a special case that fails silently, so you should be particularly
-vigilant with them:
-
-```sh
-$ cat myapp/defs.bzl
-def my_boolean_macro(boolval):
-  print("TRUE" if boolval else "FALSE")
-
-$ cat myapp/BUILD
-load("//myapp:defs.bzl", "my_boolean_macro")
-my_boolean_macro(
-    boolval = select({
-        "//tools/target_cpu:x86": True,
-        "//third_party/bazel_platforms/cpu:ppc": False,
-    }),
-)
-
-$ bazel build //myapp:all --cpu=x86
-DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE.
-$ bazel build //myapp:all --cpu=ppc
-DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE.
-```
-
-This happens because macros don't understand the contents of `select()`.
-So what they're really evaluating is the `select()` object itself. According to
-[Pythonic](https://docs.python.org/release/2.5.2/lib/truth.html) design
-standards, all objects aside from a very small number of exceptions
-automatically evaluate to true.
-
-### Can I read select() like a dict?
-
-Macros [can't](#faq-select-macro) evaluate select(s) because macros evaluate before
-Bazel knows what the build's command line parameters are. Can they at least read
-the `select()`'s dictionary to, for example, add a suffix to each value?
-
-Conceptually this is possible, but [it isn't yet a Bazel feature](https://github.com/bazelbuild/bazel/issues/8419).
-What you *can* do today is prepare a straight dictionary, then feed it into a
-`select()`:
-
-```sh
-$ cat myapp/defs.bzl
-def selecty_genrule(name, select_cmd):
-    for key in select_cmd.keys():
-        select_cmd[key] += " WITH SUFFIX"
-    native.genrule(
-        name = name,
-        outs = [name + ".out"],
-        srcs = [],
-        cmd = "echo " + select(select_cmd | {"//conditions:default": "default"})
-            + " > $@",
-    )
-
-$ cat myapp/BUILD
-load("//myapp:defs.bzl", "selecty_genrule")
-selecty_genrule(
-    name = "selecty",
-    select_cmd = {
-        "//tools/target_cpu:x86": "x86 mode",
-    },
-)
-
-$ bazel build //myapp:selecty --cpu=x86 && cat bazel-genfiles/myapp/selecty.out
-x86 mode WITH SUFFIX
-```
-
-If you'd like to support both `select()` and native types, you can do this:
-
-```sh
-$ cat myapp/defs.bzl
-def selecty_genrule(name, select_cmd):
-    cmd_suffix = ""
-    if type(select_cmd) == "string":
-        cmd_suffix = select_cmd + " WITH SUFFIX"
-    elif type(select_cmd) == "dict":
-        for key in select_cmd.keys():
-            select_cmd[key] += " WITH SUFFIX"
-        cmd_suffix = select(select_cmd | {"//conditions:default": "default"})
-
-    native.genrule(
-        name = name,
-        outs = [name + ".out"],
-        srcs = [],
-        cmd = "echo " + cmd_suffix + " > $@",
-    )
-```
-
-### Why doesn't select() work with bind()?
-
-First of all, do not use `bind()`. It is deprecated in favor of `alias()`.
-
-The technical answer is that [`bind()`](/reference/be/workspace#bind) is a repo
-rule, not a BUILD rule.
-
-Repo rules do not have a specific configuration, and aren't evaluated in
-the same way as BUILD rules. Therefore, a `select()` in a `bind()` can't
-actually evaluate to any specific branch.
-
-Instead, you should use [`alias()`](/reference/be/general#alias), with a `select()` in
-the `actual` attribute, to perform this type of run-time determination. This
-works correctly, since `alias()` is a BUILD rule, and is evaluated with a
-specific configuration.
-
-You can even have a `bind()` target point to an `alias()`, if needed.
-
-```sh
-$ cat WORKSPACE
-workspace(name = "myapp")
-bind(name = "openssl", actual = "//:ssl")
-http_archive(name = "alternative", ...)
-http_archive(name = "boringssl", ...)
-
-$ cat BUILD
-config_setting(
-    name = "alt_ssl",
-    define_values = {
-        "ssl_library": "alternative",
-    },
-)
-
-alias(
-    name = "ssl",
-    actual = select({
-        "//:alt_ssl": "@alternative//:ssl",
-        "//conditions:default": "@boringssl//:ssl",
-    }),
-)
-```
-
-With this setup, you can pass `--define ssl_library=alternative`, and any target
-that depends on either `//:ssl` or `//external:ssl` will see the alternative
-located at `@alternative//:ssl`.
-
-But really, stop using `bind()`.
-
-### Why doesn't my select() choose what I expect?
-
-If `//myapp:foo` has a `select()` that doesn't choose the condition you expect,
-use [cquery](/query/cquery) and `bazel config` to debug:
-
-If `//myapp:foo` is the top-level target you're building, run:
-
-```sh
-$ bazel cquery //myapp:foo
-//myapp:foo (12e23b9a2b534a)
-```
-
-If you're building some other target `//bar` that depends on
-`//myapp:foo` somewhere in its subgraph, run:
-
-```sh
-$ bazel cquery 'somepath(//bar, //myapp:foo)'
-//bar:bar (3ag3193fee94a2)
-//bar:intermediate_dep (12e23b9a2b534a)
-//myapp:foo (12e23b9a2b534a)
-```
-
-The `(12e23b9a2b534a)` next to `//myapp:foo` is a *hash* of the
-configuration that resolves `//myapp:foo`'s `select()`. You can inspect its
-values with `bazel config`:
-
-```sh
-$ bazel config 12e23b9a2b534a
-BuildConfigurationValue 12e23b9a2b534a
-Fragment com.google.devtools.build.lib.analysis.config.CoreOptions {
-  cpu: darwin
-  compilation_mode: fastbuild
-  ...
-}
-Fragment com.google.devtools.build.lib.rules.cpp.CppOptions {
-  linkopt: [-Dfoo=bar]
-  ...
-}
-...
-```
-
-Then compare this output against the settings expected by each `config_setting`.
-
-`//myapp:foo` may exist in different configurations in the same build. See the
-[cquery docs](/query/cquery) for guidance on using `somepath` to get the right
-one.
-
-Caution: To prevent restarting the Bazel server, invoke `bazel config` with the
-same command line flags as the `bazel cquery`. The `config` command relies on
-the configuration nodes from the still-running server of the previous command.
-
-### Why doesn't `select()` work with platforms?
-
-Bazel doesn't support configurable attributes checking whether a given platform
-is the target platform because the semantics are unclear.
-
-For example:
-
-```py
-platform(
-    name = "x86_linux_platform",
-    constraint_values = [
-        "@platforms//cpu:x86",
-        "@platforms//os:linux",
-    ],
-)
-
-cc_library(
-    name = "lib",
-    srcs = [...],
-    linkopts = select({
-        ":x86_linux_platform": ["--enable_x86_optimizations"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-In this `BUILD` file, which `select()` should be used if the target platform has both the
-`@platforms//cpu:x86` and `@platforms//os:linux` constraints, but is **not** the
-`:x86_linux_platform` defined here? The author of the `BUILD` file and the user
-who defined the separate platform may have different ideas.
-
-#### What should I do instead?
-
-Instead, define a `config_setting` that matches **any** platform with
-these constraints:
-
-```py
-config_setting(
-    name = "is_x86_linux",
-    constraint_values = [
-        "@platforms//cpu:x86",
-        "@platforms//os:linux",
-    ],
-)
-
-cc_library(
-    name = "lib",
-    srcs = [...],
-    linkopts = select({
-        ":is_x86_linux": ["--enable_x86_optimizations"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-This process defines specific semantics, making it clearer to users what
-platforms meet the desired conditions.
-
-#### What if I really, really want to `select` on the platform?
-
-If your build requirements specifically require checking the platform, you
-can flip the value of the `--platforms` flag in a `config_setting`:
-
-```py
-config_setting(
-    name = "is_specific_x86_linux_platform",
-    values = {
-        "platforms": "//package:x86_linux_platform",
-    },
-)
-
-cc_library(
-    name = "lib",
-    srcs = [...],
-    linkopts = select({
-        ":is_specific_x86_linux_platform": ["--enable_x86_optimizations"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-The Bazel team doesn't endorse doing this; it overly constrains your build and
-confuses users when the expected condition does not match.
-
-[BuildSettings]: /extending/config#user-defined-build-settings
diff --git a/7.6.1/docs/sandboxing.mdx b/7.6.1/docs/sandboxing.mdx
deleted file mode 100644
index 5ea7889..0000000
--- a/7.6.1/docs/sandboxing.mdx
+++ /dev/null
@@ -1,217 +0,0 @@
----
-title: 'Sandboxing'
----
-
-
-
-This article covers sandboxing in Bazel, installing `sandboxfs`, and debugging
-your sandboxing environment.
-
-*Sandboxing* is a permission restricting strategy that isolates processes from
-each other or from resources in a system. For Bazel, this means restricting file
-system access. 
- -Bazel's file system sandbox runs processes in a working directory that only -contains known inputs, such that compilers and other tools don't see source -files they should not access, unless they know the absolute paths to them. - -Sandboxing doesn't hide the host environment in any way. Processes can freely -access all files on the file system. However, on platforms that support user -namespaces, processes can't modify any files outside their working directory. -This ensures that the build graph doesn't have hidden dependencies that could -affect the reproducibility of the build. - -More specifically, Bazel constructs an `execroot/` directory for each action, -which acts as the action's work directory at execution time. `execroot/` -contains all input files to the action and serves as the container for any -generated outputs. Bazel then uses an operating-system-provided technique, -containers on Linux and `sandbox-exec` on macOS, to constrain the action within -`execroot/`. - -## Reasons for sandboxing - -- Without action sandboxing, Bazel doesn't know if a tool uses undeclared - input files (files that are not explicitly listed in the dependencies of an - action). When one of the undeclared input files changes, Bazel still - believes that the build is up-to-date and won’t rebuild the action. This can - result in an incorrect incremental build. - -- Incorrect reuse of cache entries creates problems during remote caching. A - bad cache entry in a shared cache affects every developer on the project, - and wiping the entire remote cache is not a feasible solution. - -- Sandboxing mimics the behavior of remote execution — if a build works well - with sandboxing, it will likely also work with remote execution. By making - remote execution upload all necessary files (including local tools), you can - significantly reduce maintenance costs for compile clusters compared to - having to install the tools on every machine in the cluster every time you - want to try out a new compiler or make a change to an existing tool. - -## What sandbox strategy to use - -You can choose which kind of sandboxing to use, if any, with the -[strategy flags](user-manual.html#strategy-options). Using the `sandboxed` -strategy makes Bazel pick one of the sandbox implementations listed below, -preferring an OS-specific sandbox to the less hermetic generic one. -[Persistent workers](/remote/persistent) run in a generic sandbox if you pass -the `--worker_sandboxing` flag. - -The `local` (a.k.a. `standalone`) strategy does not do any kind of sandboxing. -It simply executes the action's command line with the working directory set to -the execroot of your workspace. - -`processwrapper-sandbox` is a sandboxing strategy that does not require any -"advanced" features - it should work on any POSIX system out of the box. It -builds a sandbox directory consisting of symlinks that point to the original -source files, executes the action's command line with the working directory set -to this directory instead of the execroot, then moves the known output artifacts -out of the sandbox into the execroot and deletes the sandbox. This prevents the -action from accidentally using any input files that are not declared and from -littering the execroot with unknown output files. - -`linux-sandbox` goes one step further and builds on top of the -`processwrapper-sandbox`. Similar to what Docker does under the hood, it uses -Linux Namespaces (User, Mount, PID, Network and IPC namespaces) to isolate the -action from the host. 
That is, it makes the entire filesystem read-only except -for the sandbox directory, so the action cannot accidentally modify anything on -the host filesystem. This prevents situations like a buggy test accidentally rm --rf'ing your $HOME directory. Optionally, you can also prevent the action from -accessing the network. `linux-sandbox` uses PID namespaces to prevent the action -from seeing any other processes and to reliably kill all processes (even daemons -spawned by the action) at the end. - -`darwin-sandbox` is similar, but for macOS. It uses Apple's `sandbox-exec` tool -to achieve roughly the same as the Linux sandbox. - -Both the `linux-sandbox` and the `darwin-sandbox` do not work in a "nested" -scenario due to restrictions in the mechanisms provided by the operating -systems. Because Docker also uses Linux namespaces for its container magic, you -cannot easily run `linux-sandbox` inside a Docker container, unless you use -`docker run --privileged`. On macOS, you cannot run `sandbox-exec` inside a -process that's already being sandboxed. Thus, in these cases, Bazel -automatically falls back to using `processwrapper-sandbox`. - -If you would rather get a build error — such as to not accidentally build with a -less strict execution strategy — explicitly modify the list of execution -strategies that Bazel tries to use (for example, `bazel build ---spawn_strategy=worker,linux-sandbox`). - -Dynamic execution usually requires sandboxing for local execution. To opt out, -pass the `--experimental_local_lockfree_output` flag. Dynamic execution silently -sandboxes [persistent workers](/remote/persistent). - -## Downsides to sandboxing - -- Sandboxing incurs extra setup and teardown cost. How big this cost is - depends on many factors, including the shape of the build and the - performance of the host OS. For Linux, sandboxed builds are rarely more than - a few percent slower. Setting `--reuse_sandbox_directories` can - mitigate the setup and teardown cost. - -- Sandboxing effectively disables any cache the tool may have. You can - mitigate this by using [persistent workers](/remote/persistent), at - the cost of weaker sandbox guarantees. - -- [Multiplex workers](/remote/multiplex) require explicit worker support - to be sandboxed. Workers that do not support multiplex sandboxing run as - singleplex workers under dynamic execution, which can cost extra memory. - -## sandboxfs - -`sandboxfs` is a FUSE file system that exposes an arbitrary view of the -underlying file system without time penalties. Bazel uses `sandboxfs` to -generate `execroot/` instantaneously for each action, avoiding the cost of -issuing thousands of system calls. Note that further I/O within `execroot/` may -be slower due to FUSE overhead. - -### Install sandboxfs - -Use the following steps to install `sandboxfs` and perform a Bazel build with -it: - -**Download** - -[Download and install](https://github.com/bazelbuild/sandboxfs/blob/master/INSTALL.md) -`sandboxfs` so that the `sandboxfs` binary ends up in your `PATH`. - -**Run `sandboxfs`** - -1. (macOS-only) [Install OSXFUSE](https://osxfuse.github.io/). -2. (macOS-only) Run: - - ```posix-terminal - sudo sysctl -w vfs.generic.osxfuse.tunables.allow_other=1 - ``` - - You will need to do this after installation and after every reboot to ensure - core macOS system services work through sandboxfs. - -3. Run a Bazel build with `--experimental_use_sandboxfs`. 
- - ```posix-terminal - bazel build {{ '' }}target{{ '' }} --experimental_use_sandboxfs - ``` - -**Troubleshooting** - -If you see `local` instead of `darwin-sandbox` or `linux-sandbox` as an -annotation for the actions that are executed, this may mean that sandboxing is -disabled. Pass `--genrule_strategy=sandboxed --spawn_strategy=sandboxed` to -enable it. - -## Debugging - -Follow the strategies below to debug issues with sandboxing. - -### Deactivated namespaces - -On some platforms, such as -[Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/) -cluster nodes or Debian, user namespaces are deactivated by default due to -security concerns. If the `/proc/sys/kernel/unprivileged_userns_clone` file -exists and contains a 0, you can activate user namespaces by running: - -```posix-terminal - sudo sysctl kernel.unprivileged_userns_clone=1 -``` - -### Rule execution failures - -The sandbox may fail to execute rules because of the system setup. If you see a -message like `namespace-sandbox.c:633: execvp(argv[0], argv): No such file or -directory`, try to deactivate the sandbox with `--strategy=Genrule=local` for -genrules, and `--spawn_strategy=local` for other rules. - -### Detailed debugging for build failures - -If your build failed, use `--verbose_failures` and `--sandbox_debug` to make -Bazel show the exact command it ran when your build failed, including the part -that sets up the sandbox. - -Example error message: - -``` -ERROR: path/to/your/project/BUILD:1:1: compilation of rule -'//path/to/your/project:all' failed: - -Sandboxed execution failed, which may be legitimate (such as a compiler error), -or due to missing dependencies. To enter the sandbox environment for easier -debugging, run the following command in parentheses. On command failure, a bash -shell running inside the sandbox will then automatically be spawned - -namespace-sandbox failed: error executing command - (cd /some/path && \ - exec env - \ - LANG=en_US \ - PATH=/some/path/bin:/bin:/usr/bin \ - PYTHONPATH=/usr/local/some/path \ - /some/path/namespace-sandbox @/sandbox/root/path/this-sandbox-name.params -- - /some/path/to/your/some-compiler --some-params some-target) -``` - -You can now inspect the generated sandbox directory and see which files Bazel -created and run the command again to see how it behaves. - -Note that Bazel does not delete the sandbox directory when you use -`--sandbox_debug`. Unless you are actively debugging, you should disable -`--sandbox_debug` because it fills up your disk over time. diff --git a/7.6.1/extending/aspects.mdx b/7.6.1/extending/aspects.mdx deleted file mode 100644 index 4e25125..0000000 --- a/7.6.1/extending/aspects.mdx +++ /dev/null @@ -1,412 +0,0 @@ ---- -title: 'Aspects' ---- - - - -This page explains the basics and benefits of using -[aspects](/rules/lib/globals/bzl#aspect) and provides simple and advanced -examples. - -Aspects allow augmenting build dependency graphs with additional information -and actions. Some typical scenarios when aspects can be useful: - -* IDEs that integrate Bazel can use aspects to collect information about the - project. -* Code generation tools can leverage aspects to execute on their inputs in - *target-agnostic* manner. As an example, `BUILD` files can specify a hierarchy - of [protobuf](https://developers.google.com/protocol-buffers/) library - definitions, and language-specific rules can use aspects to attach - actions generating protobuf support code for a particular language. 
- -## Aspect basics - -`BUILD` files provide a description of a project’s source code: what source -files are part of the project, what artifacts (_targets_) should be built from -those files, what the dependencies between those files are, etc. Bazel uses -this information to perform a build, that is, it figures out the set of actions -needed to produce the artifacts (such as running compiler or linker) and -executes those actions. Bazel accomplishes this by constructing a _dependency -graph_ between targets and visiting this graph to collect those actions. - -Consider the following `BUILD` file: - -```python -java_library(name = 'W', ...) -java_library(name = 'Y', deps = [':W'], ...) -java_library(name = 'Z', deps = [':W'], ...) -java_library(name = 'Q', ...) -java_library(name = 'T', deps = [':Q'], ...) -java_library(name = 'X', deps = [':Y',':Z'], runtime_deps = [':T'], ...) -``` - -This `BUILD` file defines a dependency graph shown in the following figure: - -![Build graph](/rules/build-graph.png "Build graph") - -**Figure 1.** `BUILD` file dependency graph. - -Bazel analyzes this dependency graph by calling an implementation function of -the corresponding [rule](/extending/rules) (in this case "java_library") for every -target in the above example. Rule implementation functions generate actions that -build artifacts, such as `.jar` files, and pass information, such as locations -and names of those artifacts, to the reverse dependencies of those targets in -[providers](/extending/rules#providers). - -Aspects are similar to rules in that they have an implementation function that -generates actions and returns providers. However, their power comes from -the way the dependency graph is built for them. An aspect has an implementation -and a list of all attributes it propagates along. Consider an aspect A that -propagates along attributes named "deps". This aspect can be applied to -a target X, yielding an aspect application node A(X). During its application, -aspect A is applied recursively to all targets that X refers to in its "deps" -attribute (all attributes in A's propagation list). - -Thus a single act of applying aspect A to a target X yields a "shadow graph" of -the original dependency graph of targets shown in the following figure: - -![Build Graph with Aspect](/rules/build-graph-aspects.png "Build graph with aspects") - -**Figure 2.** Build graph with aspects. - -The only edges that are shadowed are the edges along the attributes in -the propagation set, thus the `runtime_deps` edge is not shadowed in this -example. An aspect implementation function is then invoked on all nodes in -the shadow graph similar to how rule implementations are invoked on the nodes -of the original graph. - -## Simple example - -This example demonstrates how to recursively print the source files for a -rule and all of its dependencies that have a `deps` attribute. It shows -an aspect implementation, an aspect definition, and how to invoke the aspect -from the Bazel command line. - -```python -def _print_aspect_impl(target, ctx): - # Make sure the rule has a srcs attribute. - if hasattr(ctx.rule.attr, 'srcs'): - # Iterate through the files that make up the sources and - # print their paths. - for src in ctx.rule.attr.srcs: - for f in src.files.to_list(): - print(f.path) - return [] - -print_aspect = aspect( - implementation = _print_aspect_impl, - attr_aspects = ['deps'], -) -``` - -Let's break the example up into its parts and examine each one individually. 
-
-### Aspect definition
-
-```python
-print_aspect = aspect(
-    implementation = _print_aspect_impl,
-    attr_aspects = ['deps'],
-)
-```
-Aspect definitions are similar to rule definitions, and defined using
-the [`aspect`](/rules/lib/globals/bzl#aspect) function.
-
-Just like a rule, an aspect has an implementation function which in this case is
-``_print_aspect_impl``.
-
-``attr_aspects`` is a list of rule attributes along which the aspect propagates.
-In this case, the aspect will propagate along the ``deps`` attribute of the
-rules that it is applied to.
-
-Another common argument for `attr_aspects` is `['*']` which would propagate the
-aspect to all attributes of a rule.
-
-### Aspect implementation
-
-```python
-def _print_aspect_impl(target, ctx):
-    # Make sure the rule has a srcs attribute.
-    if hasattr(ctx.rule.attr, 'srcs'):
-        # Iterate through the files that make up the sources and
-        # print their paths.
-        for src in ctx.rule.attr.srcs:
-            for f in src.files.to_list():
-                print(f.path)
-    return []
-```
-
-Aspect implementation functions are similar to the rule implementation
-functions. They return [providers](/extending/rules#providers), can generate
-[actions](/extending/rules#actions), and take two arguments:
-
-* `target`: the [target](/rules/lib/builtins/Target) the aspect is being applied to.
-* `ctx`: [`ctx`](/rules/lib/builtins/ctx) object that can be used to access attributes
-  and generate outputs and actions.
-
-The implementation function can access the attributes of the target rule via
-[`ctx.rule.attr`](/rules/lib/builtins/ctx#rule). It can examine providers that are
-provided by the target to which it is applied (via the `target` argument).
-
-Aspects are required to return a list of providers. In this example, the aspect
-does not provide anything, so it returns an empty list.
-
-### Invoking the aspect using the command line
-
-The simplest way to apply an aspect is from the command line using the
-[`--aspects`](/reference/command-line-reference#flag--aspects)
-argument. Assuming the aspect above is defined in a file named `print.bzl`,
-this:
-
-```bash
-bazel build //MyExample:example --aspects print.bzl%print_aspect
-```
-
-would apply the `print_aspect` to the target `example` and all of the
-target rules that are accessible recursively via the `deps` attribute.
-
-The `--aspects` flag takes one argument, which is a specification of the aspect
-in the format `<extension file label>%<aspect name>`.
-
-## Advanced example
-
-The following example demonstrates using an aspect from a target rule
-that counts files in targets, potentially filtering them by extension.
-It shows how to use a provider to return values, how to use parameters to pass
-an argument into an aspect implementation, and how to invoke an aspect from a rule.
-
-Note: Aspects added in rules' attributes are called *rule-propagated aspects* as
-opposed to *command-line aspects* that are specified using the ``--aspects``
-flag.
-
-`file_count.bzl` file:
-
-```python
-FileCountInfo = provider(
-    fields = {
-        'count' : 'number of files'
-    }
-)
-
-def _file_count_aspect_impl(target, ctx):
-    count = 0
-    # Make sure the rule has a srcs attribute.
-    if hasattr(ctx.rule.attr, 'srcs'):
-        # Iterate through the sources counting files
-        for src in ctx.rule.attr.srcs:
-            for f in src.files.to_list():
-                if ctx.attr.extension == '*' or ctx.attr.extension == f.extension:
-                    count = count + 1
-    # Get the counts from our dependencies. 
- for dep in ctx.rule.attr.deps: - count = count + dep[FileCountInfo].count - return [FileCountInfo(count = count)] - -file_count_aspect = aspect( - implementation = _file_count_aspect_impl, - attr_aspects = ['deps'], - attrs = { - 'extension' : attr.string(values = ['*', 'h', 'cc']), - } -) - -def _file_count_rule_impl(ctx): - for dep in ctx.attr.deps: - print(dep[FileCountInfo].count) - -file_count_rule = rule( - implementation = _file_count_rule_impl, - attrs = { - 'deps' : attr.label_list(aspects = [file_count_aspect]), - 'extension' : attr.string(default = '*'), - }, -) -``` - -`BUILD.bazel` file: - -```python -load('//:file_count.bzl', 'file_count_rule') - -cc_library( - name = 'lib', - srcs = [ - 'lib.h', - 'lib.cc', - ], -) - -cc_binary( - name = 'app', - srcs = [ - 'app.h', - 'app.cc', - 'main.cc', - ], - deps = ['lib'], -) - -file_count_rule( - name = 'file_count', - deps = ['app'], - extension = 'h', -) -``` - -### Aspect definition - -```python -file_count_aspect = aspect( - implementation = _file_count_aspect_impl, - attr_aspects = ['deps'], - attrs = { - 'extension' : attr.string(values = ['*', 'h', 'cc']), - } -) -``` - -This example shows how the aspect propagates through the ``deps`` attribute. - -``attrs`` defines a set of attributes for an aspect. Public aspect attributes -define parameters and can only be of types ``bool``, ``int`` or ``string``. -For rule-propagated aspects, ``int`` and ``string`` parameters must have -``values`` specified on them. This example has a parameter called ``extension`` -that is allowed to have '``*``', '``h``', or '``cc``' as a value. - -For rule-propagated aspects, parameter values are taken from the rule requesting -the aspect, using the attribute of the rule that has the same name and type. -(see the definition of ``file_count_rule``). - -For command-line aspects, the parameters values can be passed using -[``--aspects_parameters``](/reference/command-line-reference#flag--aspects_parameters) -flag. The ``values`` restriction of ``int`` and ``string`` parameters may be -omitted. - -Aspects are also allowed to have private attributes of types ``label`` or -``label_list``. Private label attributes can be used to specify dependencies on -tools or libraries that are needed for actions generated by aspects. There is not -a private attribute defined in this example, but the following code snippet -demonstrates how you could pass in a tool to an aspect: - -```python -... - attrs = { - '_protoc' : attr.label( - default = Label('//tools:protoc'), - executable = True, - cfg = "exec" - ) - } -... -``` - -### Aspect implementation - -```python -FileCountInfo = provider( - fields = { - 'count' : 'number of files' - } -) - -def _file_count_aspect_impl(target, ctx): - count = 0 - # Make sure the rule has a srcs attribute. - if hasattr(ctx.rule.attr, 'srcs'): - # Iterate through the sources counting files - for src in ctx.rule.attr.srcs: - for f in src.files.to_list(): - if ctx.attr.extension == '*' or ctx.attr.extension == f.extension: - count = count + 1 - # Get the counts from our dependencies. - for dep in ctx.rule.attr.deps: - count = count + dep[FileCountInfo].count - return [FileCountInfo(count = count)] -``` - -Just like a rule implementation function, an aspect implementation function -returns a struct of providers that are accessible to its dependencies. - -In this example, the ``FileCountInfo`` is defined as a provider that has one -field ``count``. 
It is best practice to explicitly define the fields of a -provider using the ``fields`` attribute. - -The set of providers for an aspect application A(X) is the union of providers -that come from the implementation of a rule for target X and from the -implementation of aspect A. The providers that a rule implementation propagates -are created and frozen before aspects are applied and cannot be modified from an -aspect. It is an error if a target and an aspect that is applied to it each -provide a provider with the same type, with the exceptions of -[`OutputGroupInfo`](/rules/lib/providers/OutputGroupInfo) -(which is merged, so long as the -rule and aspect specify different output groups) and -[`InstrumentedFilesInfo`](/rules/lib/providers/InstrumentedFilesInfo) -(which is taken from the aspect). This means that aspect implementations may -never return [`DefaultInfo`](/rules/lib/providers/DefaultInfo). - -The parameters and private attributes are passed in the attributes of the -``ctx``. This example references the ``extension`` parameter and determines -what files to count. - -For returning providers, the values of attributes along which -the aspect is propagated (from the `attr_aspects` list) are replaced with -the results of an application of the aspect to them. For example, if target -X has Y and Z in its deps, `ctx.rule.attr.deps` for A(X) will be [A(Y), A(Z)]. -In this example, ``ctx.rule.attr.deps`` are Target objects that are the -results of applying the aspect to the 'deps' of the original target to which -the aspect has been applied. - -In the example, the aspect accesses the ``FileCountInfo`` provider from the -target's dependencies to accumulate the total transitive number of files. - -### Invoking the aspect from a rule - -```python -def _file_count_rule_impl(ctx): - for dep in ctx.attr.deps: - print(dep[FileCountInfo].count) - -file_count_rule = rule( - implementation = _file_count_rule_impl, - attrs = { - 'deps' : attr.label_list(aspects = [file_count_aspect]), - 'extension' : attr.string(default = '*'), - }, -) -``` - -The rule implementation demonstrates how to access the ``FileCountInfo`` -via the ``ctx.attr.deps``. - -The rule definition demonstrates how to define a parameter (``extension``) -and give it a default value (``*``). Note that having a default value that -was not one of '``cc``', '``h``', or '``*``' would be an error due to the -restrictions placed on the parameter in the aspect definition. - -### Invoking an aspect through a target rule - -```python -load('//:file_count.bzl', 'file_count_rule') - -cc_binary( - name = 'app', -... -) - -file_count_rule( - name = 'file_count', - deps = ['app'], - extension = 'h', -) -``` - -This demonstrates how to pass the ``extension`` parameter into the aspect -via the rule. Since the ``extension`` parameter has a default value in the -rule implementation, ``extension`` would be considered an optional parameter. - -When the ``file_count`` target is built, our aspect will be evaluated for -itself, and all of the targets accessible recursively via ``deps``. - -## References - -* [`aspect` API reference](/rules/lib/globals/bzl#aspect) diff --git a/7.6.1/extending/concepts.mdx b/7.6.1/extending/concepts.mdx deleted file mode 100644 index 3842dcd..0000000 --- a/7.6.1/extending/concepts.mdx +++ /dev/null @@ -1,104 +0,0 @@ ---- -title: 'Extension Overview' ---- - - - - -This page describes how to extend the BUILD language using macros -and rules. - -Bazel extensions are files ending in `.bzl`. 
Use a -[load statement](/concepts/build-files#load) to import a symbol from an extension. - -Before learning the more advanced concepts, first: - -* Read about the [Starlark language](/rules/language), used in both the - `BUILD` and `.bzl` files. - -* Learn how you can [share variables](/build/share-variables) - between two `BUILD` files. - -## Macros and rules - -A [macro](/extending/macros) is a function that instantiates rules. It is useful when a -`BUILD` file is getting too repetitive or too complex, as it allows you to reuse -some code. The function is evaluated as soon as the `BUILD` file is read. After -the evaluation of the `BUILD` file, Bazel has little information about macros: -if your macro generates a `genrule`, Bazel will behave as if you wrote the -`genrule`. As a result, `bazel query` will only list the generated `genrule`. - -A [rule](/extending/rules) is more powerful than a macro. It can access Bazel -internals and have full control over what is going on. It may for example pass -information to other rules. - -If you want to reuse simple logic, start with a macro. If a macro becomes -complex, it is often a good idea to make it a rule. Support for a new language -is typically done with a rule. Rules are for advanced users, and most -users will never have to write one; they will only load and call existing -rules. - -## Evaluation model - -A build consists of three phases. - -* **Loading phase**. First, load and evaluate all extensions and all `BUILD` - files that are needed for the build. The execution of the `BUILD` files simply - instantiates rules (each time a rule is called, it gets added to a graph). - This is where macros are evaluated. - -* **Analysis phase**. The code of the rules is executed (their `implementation` - function), and actions are instantiated. An action describes how to generate - a set of outputs from a set of inputs, such as "run gcc on hello.c and get - hello.o". You must list explicitly which files will be generated before - executing the actual commands. In other words, the analysis phase takes - the graph generated by the loading phase and generates an action graph. - -* **Execution phase**. Actions are executed, when at least one of their outputs is - required. If a file is missing or if a command fails to generate one output, - the build fails. Tests are also run during this phase. - -Bazel uses parallelism to read, parse and evaluate the `.bzl` files and `BUILD` -files. A file is read at most once per build and the result of the evaluation is -cached and reused. A file is evaluated only once all its dependencies (`load()` -statements) have been resolved. By design, loading a `.bzl` file has no visible -side-effect, it only defines values and functions. - -Bazel tries to be clever: it uses dependency analysis to know which files must -be loaded, which rules must be analyzed, and which actions must be executed. For -example, if a rule generates actions that you don't need for the current build, -they will not be executed. - -## Creating extensions - -* [Create your first macro](/rules/macro-tutorial) in order to - reuse some code. Then [learn more about macros](/extending/macros) and - [using them to create "custom verbs"](/rules/verbs-tutorial). - -* [Follow the rules tutorial](/rules/rules-tutorial) to get started with rules. - Next, you can read more about the [rules concepts](/extending/rules). - -The two links below will be very useful when writing your own extensions. 
Keep
-them within reach:
-
-* The [API reference](/rules/lib)
-
-* [Examples](https://github.com/bazelbuild/examples/tree/master/rules)
-
-## Going further
-
-In addition to [macros](/extending/macros) and [rules](/extending/rules), you may want to write
-[aspects](/extending/aspects) and [repository rules](/extending/repo).
-
-* Use [Buildifier](https://github.com/bazelbuild/buildtools)
-  consistently to format and lint your code.
-
-* Follow the [`.bzl` style guide](/rules/bzl-style).
-
-* [Test](/rules/testing) your code.
-
-* [Generate documentation](https://skydoc.bazel.build/) to help your users.
-
-* [Optimize the performance](/rules/performance) of your code.
-
-* [Deploy](/rules/deploying) your extensions to other people.
diff --git a/7.6.1/extending/depsets.mdx b/7.6.1/extending/depsets.mdx
deleted file mode 100644
index 2aa8a1f..0000000
--- a/7.6.1/extending/depsets.mdx
+++ /dev/null
@@ -1,346 +0,0 @@
----
-title: 'Depsets'
----
-
-
-
-[Depsets](/rules/lib/builtins/depset) are a specialized data structure for efficiently
-collecting data across a target’s transitive dependencies. They are an essential
-element of rule processing.
-
-The defining feature of depset is its time- and space-efficient union operation.
-The depset constructor accepts a list of elements ("direct") and a list of other
-depsets ("transitive"), and returns a depset representing a set containing all the
-direct elements and the union of all the transitive sets. Conceptually, the
-constructor creates a new graph node that has the direct and transitive nodes
-as its successors. Depsets have a well-defined ordering semantics, based on
-traversal of this graph.
-
-Example uses of depsets include:
-
-* Storing the paths of all object files for a program’s libraries, which can
-  then be passed to a linker action through a provider.
-
-* For an interpreted language, storing the transitive source files that are
-  included in an executable's runfiles.
-
-## Description and operations
-
-Conceptually, a depset is a directed acyclic graph (DAG) that typically looks
-similar to the target graph. It is constructed from the leaves up to the root.
-Each target in a dependency chain can add its own contents on top of the
-previous without having to read or copy them.
-
-Each node in the DAG holds a list of direct elements and a list of child nodes.
-The contents of the depset are the transitive elements, that is, the direct
-elements of all the nodes. A new depset can be created using the
-[depset](/rules/lib/globals/bzl#depset) constructor: it accepts a list of direct
-elements and another list of child nodes.
-
-```python
-s = depset(["a", "b", "c"])
-t = depset(["d", "e"], transitive = [s])
-
-print(s)    # depset(["a", "b", "c"])
-print(t)    # depset(["d", "e", "a", "b", "c"])
-```
-
-To retrieve the contents of a depset, use the
-[to_list()](/rules/lib/builtins/depset#to_list) method. It returns a list of all transitive
-elements, not including duplicates. There is no way to directly inspect the
-precise structure of the DAG, although this structure does affect the order in
-which the elements are returned.
-
-```python
-s = depset(["a", "b", "c"])
-
-print("c" in s.to_list())              # True
-print(s.to_list() == ["a", "b", "c"])  # True
-```
-
-The allowed items in a depset are restricted, just as the allowed keys in
-dictionaries are restricted. In particular, depset contents may not be mutable.
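-
-For example, strings and other immutable values work as elements, while a
-mutable value such as a list is rejected. A minimal sketch:
-
-```python
-s = depset(["a", "b"])  # OK: strings are immutable.
-
-# Fails: lists are mutable, so they can't be depset elements.
-# t = depset([["a", "b"]])
-```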
- -Depsets use reference equality: a depset is equal to itself, but unequal to any -other depset, even if they have the same contents and same internal structure. - -```python -s = depset(["a", "b", "c"]) -t = s -print(s == t) # True - -t = depset(["a", "b", "c"]) -print(s == t) # False - -d = {} -d[s] = None -d[t] = None -print(len(d)) # 2 -``` - -To compare depsets by their contents, convert them to sorted lists. - -```python -s = depset(["a", "b", "c"]) -t = depset(["c", "b", "a"]) -print(sorted(s.to_list()) == sorted(t.to_list())) # True -``` - -There is no ability to remove elements from a depset. If this is needed, you -must read out the entire contents of the depset, filter the elements you want to -remove, and reconstruct a new depset. This is not particularly efficient. - -```python -s = depset(["a", "b", "c"]) -t = depset(["b", "c"]) - -# Compute set difference s - t. Precompute t.to_list() so it's not done -# in a loop, and convert it to a dictionary for fast membership tests. -t_items = {e: None for e in t.to_list()} -diff_items = [x for x in s.to_list() if x not in t_items] -# Convert back to depset if it's still going to be used for union operations. -s = depset(diff_items) -print(s) # depset(["a"]) -``` - -### Order - -The `to_list` operation performs a traversal over the DAG. The kind of traversal -depends on the *order* that was specified at the time the depset was -constructed. It is useful for Bazel to support multiple orders because sometimes -tools care about the order of their inputs. For example, a linker action may -need to ensure that if `B` depends on `A`, then `A.o` comes before `B.o` on the -linker’s command line. Other tools might have the opposite requirement. - -Three traversal orders are supported: `postorder`, `preorder`, and -`topological`. The first two work exactly like [tree -traversals](https://en.wikipedia.org/wiki/Tree_traversal#Depth-first_search) -except that they operate on DAGs and skip already visited nodes. The third order -works as a topological sort from root to leaves, essentially the same as -preorder except that shared children are listed only after all of their parents. -Preorder and postorder operate as left-to-right traversals, but note that within -each node direct elements have no order relative to children. For topological -order, there is no left-to-right guarantee, and even the -all-parents-before-child guarantee does not apply in the case that there are -duplicate elements in different nodes of the DAG. - -```python -# This demonstrates different traversal orders. - -def create(order): - cd = depset(["c", "d"], order = order) - gh = depset(["g", "h"], order = order) - return depset(["a", "b", "e", "f"], transitive = [cd, gh], order = order) - -print(create("postorder").to_list()) # ["c", "d", "g", "h", "a", "b", "e", "f"] -print(create("preorder").to_list()) # ["a", "b", "e", "f", "c", "d", "g", "h"] -``` - -```python -# This demonstrates different orders on a diamond graph. 
-
-def create(order):
-  a = depset(["a"], order = order)
-  b = depset(["b"], transitive = [a], order = order)
-  c = depset(["c"], transitive = [a], order = order)
-  d = depset(["d"], transitive = [b, c], order = order)
-  return d
-
-print(create("postorder").to_list()) # ["a", "b", "c", "d"]
-print(create("preorder").to_list()) # ["d", "b", "a", "c"]
-print(create("topological").to_list()) # ["d", "b", "c", "a"]
-```
-
-Due to how traversals are implemented, the order must be specified at the time
-the depset is created with the constructor’s `order` keyword argument. If this
-argument is omitted, the depset has the special `default` order, in which case
-there are no guarantees about the order of any of its elements (except that it
-is deterministic).
-
-## Full example
-
-This example is available at
-[https://github.com/bazelbuild/examples/tree/main/rules/depsets](https://github.com/bazelbuild/examples/tree/main/rules/depsets).
-
-Suppose there is a hypothetical interpreted language Foo. In order to build
-each `foo_binary` you need to know all the `*.foo` files that it directly or
-indirectly depends on.
-
-```python
-# //depsets:BUILD
-
-load(":foo.bzl", "foo_library", "foo_binary")
-
-# Our hypothetical Foo compiler.
-py_binary(
-    name = "foocc",
-    srcs = ["foocc.py"],
-)
-
-foo_library(
-    name = "a",
-    srcs = ["a.foo", "a_impl.foo"],
-)
-
-foo_library(
-    name = "b",
-    srcs = ["b.foo", "b_impl.foo"],
-    deps = [":a"],
-)
-
-foo_library(
-    name = "c",
-    srcs = ["c.foo", "c_impl.foo"],
-    deps = [":a"],
-)
-
-foo_binary(
-    name = "d",
-    srcs = ["d.foo"],
-    deps = [":b", ":c"],
-)
-```
-
-```python
-# //depsets:foocc.py
-
-# "Foo compiler" that just concatenates its inputs to form its output.
-import sys
-
-if __name__ == "__main__":
-  assert len(sys.argv) >= 2
-  output = open(sys.argv[1], "wt")
-  for path in sys.argv[2:]:
-    input = open(path, "rt")
-    output.write(input.read())
-```
-
-Here, the transitive sources of the binary `d` are all of the `*.foo` files in
-the `srcs` fields of `a`, `b`, `c`, and `d`. In order for the `foo_binary`
-target to know about any file besides `d.foo`, the `foo_library` targets need to
-pass them along in a provider. Each library receives the providers from its own
-dependencies, adds its own immediate sources, and passes on a new provider with
-the augmented contents. The `foo_binary` rule does the same, except that instead
-of returning a provider, it uses the complete list of sources to construct a
-command line for an action.
-
-Here’s a complete implementation of the `foo_library` and `foo_binary` rules.
-
-```python
-# //depsets/foo.bzl
-
-# A provider with one field, transitive_sources.
-FooFiles = provider(fields = ["transitive_sources"])
-
-def get_transitive_srcs(srcs, deps):
-  """Obtain the source files for a target and its transitive dependencies.
- - Args: - srcs: a list of source files - deps: a list of targets that are direct dependencies - Returns: - a collection of the transitive sources - """ - return depset( - srcs, - transitive = [dep[FooFiles].transitive_sources for dep in deps]) - -def _foo_library_impl(ctx): - trans_srcs = get_transitive_srcs(ctx.files.srcs, ctx.attr.deps) - return [FooFiles(transitive_sources=trans_srcs)] - -foo_library = rule( - implementation = _foo_library_impl, - attrs = { - "srcs": attr.label_list(allow_files=True), - "deps": attr.label_list(), - }, -) - -def _foo_binary_impl(ctx): - foocc = ctx.executable._foocc - out = ctx.outputs.out - trans_srcs = get_transitive_srcs(ctx.files.srcs, ctx.attr.deps) - srcs_list = trans_srcs.to_list() - ctx.actions.run(executable = foocc, - arguments = [out.path] + [src.path for src in srcs_list], - inputs = srcs_list + [foocc], - outputs = [out]) - -foo_binary = rule( - implementation = _foo_binary_impl, - attrs = { - "srcs": attr.label_list(allow_files=True), - "deps": attr.label_list(), - "_foocc": attr.label(default=Label("//depsets:foocc"), - allow_files=True, executable=True, cfg="host") - }, - outputs = {"out": "%{name}.out"}, -) -``` - -You can test this by copying these files into a fresh package, renaming the -labels appropriately, creating the source `*.foo` files with dummy content, and -building the `d` target. - - -## Performance - -To see the motivation for using depsets, consider what would happen if -`get_transitive_srcs()` collected its sources in a list. - -```python -def get_transitive_srcs(srcs, deps): - trans_srcs = [] - for dep in deps: - trans_srcs += dep[FooFiles].transitive_sources - trans_srcs += srcs - return trans_srcs -``` - -This does not take into account duplicates, so the source files for `a` -will appear twice on the command line and twice in the contents of the output -file. - -An alternative is using a general set, which can be simulated by a -dictionary where the keys are the elements and all the keys map to `True`. - -```python -def get_transitive_srcs(srcs, deps): - trans_srcs = {} - for dep in deps: - for file in dep[FooFiles].transitive_sources: - trans_srcs[file] = True - for file in srcs: - trans_srcs[file] = True - return trans_srcs -``` - -This gets rid of the duplicates, but it makes the order of the command line -arguments (and therefore the contents of the files) unspecified, although still -deterministic. - -Moreover, both approaches are asymptotically worse than the depset-based -approach. Consider the case where there is a long chain of dependencies on -Foo libraries. Processing every rule requires copying all of the transitive -sources that came before it into a new data structure. This means that the -time and space cost for analyzing an individual library or binary target -is proportional to its own height in the chain. For a chain of length n, -foolib_1 ← foolib_2 ← … ← foolib_n, the overall cost is effectively O(n^2). - -Generally speaking, depsets should be used whenever you are accumulating -information through your transitive dependencies. This helps ensure that -your build scales well as your target graph grows deeper. - -Finally, it’s important to not retrieve the contents of the depset -unnecessarily in rule implementations. One call to `to_list()` -at the end in a binary rule is fine, since the overall cost is just O(n). It’s -when many non-terminal targets try to call `to_list()` that quadratic behavior -occurs. 
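-
-One way to avoid the unnecessary retrieval described above is to hand the depset
-to [`Args`](/rules/lib/builtins/Args) instead of flattening it yourself. The
-following sketch (reusing `get_transitive_srcs` and the `_foocc` attribute from
-the full example; only the argument handling differs) defers all flattening to
-execution time:
-
-```python
-def _foo_binary_impl(ctx):
-  foocc = ctx.executable._foocc
-  out = ctx.outputs.out
-  trans_srcs = get_transitive_srcs(ctx.files.srcs, ctx.attr.deps)
-  args = ctx.actions.args()
-  args.add(out.path)
-  args.add_all(trans_srcs)  # flattened by Bazel at execution time
-  ctx.actions.run(
-      executable = foocc,
-      arguments = [args],
-      inputs = trans_srcs,  # a depset can be passed to inputs directly
-      tools = [foocc],
-      outputs = [out],
-  )
-```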
-
-For more information about using depsets efficiently, see the [performance](/rules/performance) page.
-
-## API Reference
-
-See the [depset API reference](/rules/lib/builtins/depset) for more details.
-
diff --git a/7.6.1/extending/exec-groups.mdx b/7.6.1/extending/exec-groups.mdx
deleted file mode 100644
index ba145e5..0000000
--- a/7.6.1/extending/exec-groups.mdx
+++ /dev/null
@@ -1,167 +0,0 @@
----
-title: 'Execution Groups'
----
-
-
-
-Execution groups allow for multiple execution platforms within a single target.
-Each execution group has its own [toolchain](/extending/toolchains) dependencies and
-performs its own [toolchain resolution](/extending/toolchains#toolchain-resolution).
-
-## Background
-
-Execution groups allow the rule author to define sets of actions, each with a
-potentially different execution platform. Multiple execution platforms can allow
-actions to execute differently, for example compiling an iOS app on a remote
-(Linux) worker and then linking/code signing on a local Mac worker.
-
-Being able to define groups of actions also helps alleviate the usage of action
-mnemonics as a proxy for specifying actions. Mnemonics are not guaranteed to be
-unique and can only reference a single action. This is especially helpful in
-allocating extra resources to specific memory- and processing-intensive actions
-like linking in C++ builds without over-allocating to less demanding tasks.
-
-## Defining execution groups
-
-During rule definition, rule authors can
-[declare](/rules/lib/globals/bzl#exec_group)
-a set of execution groups. On each execution group, the rule author can specify
-everything needed to select an execution platform for that execution group,
-namely any constraints via `exec_compatible_with` and toolchain types via
-`toolchains`.
-
-```python
-# foo.bzl
-my_rule = rule(
-    _impl,
-    exec_groups = {
-        "link": exec_group(
-            exec_compatible_with = ["@platforms//os:linux"],
-            toolchains = ["//foo:toolchain_type"],
-        ),
-        "test": exec_group(
-            toolchains = ["//foo_tools:toolchain_type"],
-        ),
-    },
-    attrs = {
-        "_compiler": attr.label(cfg = config.exec("link")),
-    },
-)
-```
-
-In the code snippet above, you can see that tool dependencies can also specify
-a transition for an exec group using the
-[`cfg`](/rules/lib/toplevel/attr#label)
-attribute param and the
-[`config`](/rules/lib/toplevel/config)
-module. The module exposes an `exec` function which takes a single string
-parameter: the name of the exec group for which the dependency should be
-built.
-
-As on native rules, the `test` execution group is present by default on Starlark
-test rules.
-
-## Accessing execution groups
-
-In the rule implementation, you can declare that actions should be run on the
-execution platform of an execution group. You can do this by using the `exec_group`
-param of action-generating methods, specifically
-[`ctx.actions.run`](/rules/lib/builtins/actions#run) and
-[`ctx.actions.run_shell`](/rules/lib/builtins/actions#run_shell).
-
-```python
-# foo.bzl
-def _impl(ctx):
-  ctx.actions.run(
-     inputs = [ctx.executable._some_tool, ctx.files.srcs[0]],
-     exec_group = "compile",
-     # ...
-  )
-```
-
-Rule authors can also access the [resolved toolchains](/extending/toolchains#toolchain-resolution)
-of execution groups, similarly to how you
-can access the resolved toolchain of a target:
-
-```python
-# foo.bzl
-def _impl(ctx):
-  foo_info = ctx.exec_groups["link"].toolchains["//foo:toolchain_type"].fooinfo
-  ctx.actions.run(
-    inputs = [foo_info, ctx.files.srcs[0]],
-    exec_group = "link",
-    # ...
- ) -``` - -Note: If an action uses a toolchain from an execution group, but doesn't specify -that execution group in the action declaration, that may potentially cause -issues. A mismatch like this may not immediately cause failures, but is a latent -problem. - -## Using execution groups to set execution properties - -Execution groups are integrated with the -[`exec_properties`](/reference/be/common-definitions#common-attributes) -attribute that exists on every rule and allows the target writer to specify a -string dict of properties that is then passed to the execution machinery. For -example, if you wanted to set some property, say memory, for the target and give -certain actions a higher memory allocation, you would write an `exec_properties` -entry with an execution-group-augmented key, such as: - -```python -# BUILD -my_rule( - name = 'my_target', - exec_properties = { - 'mem': '12g', - 'link.mem': '16g' - } - … -) -``` - -All actions with `exec_group = "link"` would see the exec properties -dictionary as `{"mem": "16g"}`. As you see here, execution-group-level -settings override target-level settings. - -### Execution groups for native rules - -The following execution groups are available for actions defined by native rules: - -* `test`: Test runner actions. -* `cpp_link`: C++ linking actions. - -### Execution groups and platform execution properties - -It is possible to define `exec_properties` for arbitrary execution groups on -platform targets (unlike `exec_properties` set directly on a target, where -properties for unknown execution groups are rejected). Targets then inherit the -execution platform's `exec_properties` that affect the default execution group -and any other relevant execution groups. - -For example, suppose running a C++ test requires some resource to be available, -but it isn't required for compiling and linking; this can be modelled as -follows: - -```python -constraint_setting(name = "resource") -constraint_value(name = "has_resource", constraint_setting = ":resource") - -platform( - name = "platform_with_resource", - constraint_values = [":has_resource"], - exec_properties = { - "test.resource": "...", - }, -) - -cc_test( - name = "my_test", - srcs = ["my_test.cc"], - exec_compatible_with = [":has_resource"], -) -``` - -`exec_properties` defined directly on targets take precedence over those that -are inherited from the execution platform. diff --git a/7.6.1/extending/platforms.mdx b/7.6.1/extending/platforms.mdx deleted file mode 100644 index 94e6290..0000000 --- a/7.6.1/extending/platforms.mdx +++ /dev/null @@ -1,273 +0,0 @@ ---- -title: 'Platforms' ---- - - - -Bazel can build and test code on a variety of hardware, operating systems, and -system configurations, using many different versions of build tools such as -linkers and compilers. To help manage this complexity, Bazel has a concept of -*constraints* and *platforms*. A constraint is a dimension in which build or -production environments may differ, such as CPU architecture, the presence or -absence of a GPU, or the version of a system-installed compiler. A platform is a -named collection of choices for these constraints, representing the particular -resources that are available in some environment. - -Modeling the environment as a platform helps Bazel to automatically select the -appropriate -[toolchains](/extending/toolchains) -for build actions. 
Platforms can also be used in combination with the
-[config_setting](/reference/be/general#config_setting)
-rule to write [configurable attributes](/docs/configurable-attributes).
-
-Bazel recognizes three roles that a platform may serve:
-
-* **Host** - the platform on which Bazel itself runs.
-* **Execution** - a platform on which build tools execute build actions to
-  produce intermediate and final outputs.
-* **Target** - a platform on which a final output resides and executes.
-
-Bazel supports the following build scenarios regarding platforms:
-
-* **Single-platform builds** (default) - host, execution, and target platforms
-  are the same. For example, building a Linux executable on Ubuntu running on
-  an Intel x64 CPU.
-
-* **Cross-compilation builds** - host and execution platforms are the same, but
-  the target platform is different. For example, building an iOS app on macOS
-  running on a MacBook Pro.
-
-* **Multi-platform builds** - host, execution, and target platforms are all
-  different.
-
-Tip: for detailed instructions on migrating your project to platforms, see
-[Migrating to Platforms](/concepts/platforms).
-
-## Defining constraints and platforms
-
-The space of possible choices for platforms is defined by using the
-[`constraint_setting`][constraint_setting] and
-[`constraint_value`][constraint_value] rules within `BUILD` files.
-`constraint_setting` creates a new dimension, while
-`constraint_value` creates a new value for a given dimension; together they
-effectively define an enum and its possible values. For example, the following
-snippet of a `BUILD` file introduces a constraint for the system's glibc version
-with two possible values.
-
-[constraint_setting]: /reference/be/platforms-and-toolchains#constraint_setting
-[constraint_value]: /reference/be/platforms-and-toolchains#constraint_value
-
-```python
-constraint_setting(name = "glibc_version")
-
-constraint_value(
-    name = "glibc_2_25",
-    constraint_setting = ":glibc_version",
-)
-
-constraint_value(
-    name = "glibc_2_26",
-    constraint_setting = ":glibc_version",
-)
-```
-
-Constraints and their values may be defined across different packages in the
-workspace. They are referenced by label and subject to the usual visibility
-controls. If visibility allows, you can extend an existing constraint setting by
-defining your own value for it.
-
-The [`platform`](/reference/be/platforms-and-toolchains#platform) rule introduces a new platform with
-certain choices of constraint values. The
-following creates a platform named `linux_x86`, and says that it describes any
-environment that runs a Linux operating system on an x86_64 architecture with a
-glibc version of 2.25. (See below for more on Bazel's built-in constraints.)
-
-```python
-platform(
-    name = "linux_x86",
-    constraint_values = [
-        "@platforms//os:linux",
-        "@platforms//cpu:x86_64",
-        ":glibc_2_25",
-    ],
-)
-```
-
-Note: It is an error for a platform to specify more than one value of the
-same constraint setting, such as `@platforms//cpu:x86_64` and
-`@platforms//cpu:arm` for `@platforms//cpu:cpu`.
-
-## Generally useful constraints and platforms
-
-To keep the ecosystem consistent, the Bazel team maintains a repository with
-constraint definitions for the most popular CPU architectures and operating
-systems. These are all located in
-[https://github.com/bazelbuild/platforms](https://github.com/bazelbuild/platforms).
-
-Bazel ships with the following special platform definition:
-`@platforms//host` (aliased as `@bazel_tools//tools:host_platform`).
This is the
-autodetected host platform value; it represents the platform autodetected for
-the system Bazel is running on.
-
-## Specifying a platform for a build
-
-You can specify the host and target platforms for a build using the following
-command-line flags:
-
-* `--host_platform` - defaults to `@bazel_tools//tools:host_platform`
-  * This target is aliased to `@platforms//host`, which is backed by a repo
-    rule that detects the host OS and CPU and writes the platform target.
-  * There's also `@platforms//host:constraints.bzl`, which exposes
-    a list called `HOST_CONSTRAINTS`, which can be used in other BUILD and
-    Starlark files.
-* `--platforms` - defaults to the host platform
-  * This means that when no other flags are set,
-    `@platforms//host` is the target platform.
-  * If `--host_platform` is set and not `--platforms`, the value of
-    `--host_platform` is both the host and target platform.
-
-## Skipping incompatible targets
-
-When building for a specific target platform, it is often desirable to skip
-targets that will never work on that platform. For example, your Windows device
-driver is likely going to generate lots of compiler errors when building on a
-Linux machine with `//...`. Use the
-[`target_compatible_with`](/reference/be/common-definitions#common.target_compatible_with)
-attribute to tell Bazel what target platform constraints your code has.
-
-The simplest use of this attribute restricts a target to a single platform.
-The target will not be built for any platform that doesn't satisfy all of the
-constraints. The following example restricts `win_driver_lib.cc` to 64-bit
-Windows.
-
-```python
-cc_library(
-    name = "win_driver_lib",
-    srcs = ["win_driver_lib.cc"],
-    target_compatible_with = [
-        "@platforms//cpu:x86_64",
-        "@platforms//os:windows",
-    ],
-)
-```
-
-`:win_driver_lib` is *only* compatible with building for 64-bit Windows and
-incompatible with everything else. Incompatibility is transitive. Any targets
-that transitively depend on an incompatible target are themselves considered
-incompatible.
-
-### When are targets skipped?
-
-Targets are skipped when they are considered incompatible and included in the
-build as part of a target pattern expansion. For example, the following two
-invocations skip any incompatible targets found in a target pattern expansion.
-
-```console
-$ bazel build --platforms=//:myplatform //...
-```
-
-```console
-$ bazel build --platforms=//:myplatform //:all
-```
-
-Incompatible tests in a [`test_suite`](/reference/be/general#test_suite) are
-similarly skipped if the `test_suite` is specified on the command line with
-[`--expand_test_suites`](/reference/command-line-reference#flag--expand_test_suites).
-In other words, `test_suite` targets on the command line behave like `:all` and
-`...`. Using `--noexpand_test_suites` prevents expansion and causes
-`test_suite` targets with incompatible tests to also be incompatible.
-
-Explicitly specifying an incompatible target on the command line results in an
-error message and a failed build.
-
-```console
-$ bazel build --platforms=//:myplatform //:target_incompatible_with_myplatform
-...
-ERROR: Target //:target_incompatible_with_myplatform is incompatible and cannot be built, but was explicitly requested.
-...
-FAILED: Build did NOT complete successfully
-```
-
-Incompatible explicit targets are silently skipped if
-`--skip_incompatible_explicit_targets` is enabled.
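-
-For example, re-running the failing invocation above with that flag makes the
-build succeed and simply skip the incompatible target:
-
-```console
-$ bazel build --platforms=//:myplatform --skip_incompatible_explicit_targets //:target_incompatible_with_myplatform
-```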
-
-### More expressive constraints
-
-For more flexibility in expressing constraints, use the
-`@platforms//:incompatible`
-[`constraint_value`](/reference/be/platforms-and-toolchains#constraint_value)
-that no platform satisfies.
-
-Use [`select()`](/reference/be/functions#select) in combination with
-`@platforms//:incompatible` to express more complicated restrictions. For
-example, use it to implement basic OR logic. The following marks a library
-compatible with macOS and Linux, but no other platforms.
-
-Note: An empty constraints list is equivalent to "compatible with everything".
-
-```python
-cc_library(
-    name = "unixish_lib",
-    srcs = ["unixish_lib.cc"],
-    target_compatible_with = select({
-        "@platforms//os:osx": [],
-        "@platforms//os:linux": [],
-        "//conditions:default": ["@platforms//:incompatible"],
-    }),
-)
-```
-
-The above can be interpreted as follows:
-
-1. When targeting macOS, the target has no constraints.
-2. When targeting Linux, the target has no constraints.
-3. Otherwise, the target has the `@platforms//:incompatible` constraint. Because
-   `@platforms//:incompatible` is not part of any platform, the target is
-   deemed incompatible.
-
-To make your constraints more readable, use
-[skylib](https://github.com/bazelbuild/bazel-skylib)'s
-[`selects.with_or()`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectswith_or).
-
-You can express inverse compatibility in a similar way. The following example
-describes a library that is compatible with everything _except_ for ARM.
-
-```python
-cc_library(
-    name = "non_arm_lib",
-    srcs = ["non_arm_lib.cc"],
-    target_compatible_with = select({
-        "@platforms//cpu:arm": ["@platforms//:incompatible"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-### Detecting incompatible targets using `bazel cquery`
-
-You can use the
-[`IncompatiblePlatformProvider`](/rules/lib/providers/IncompatiblePlatformProvider)
-in `bazel cquery`'s [Starlark output
-format](/query/cquery#output-format-definition) to distinguish
-incompatible targets from compatible ones.
-
-This can be used to filter out incompatible targets. The example below will
-only print the labels for targets that are compatible. Incompatible targets are
-not printed.
-
-```console
-$ cat example.cquery
-
-def format(target):
-  if "IncompatiblePlatformProvider" not in providers(target):
-    return target.label
-  return ""
-
-
-$ bazel cquery //... --output=starlark --starlark:file=example.cquery
-```
-
-### Known Issues
-
-Incompatible targets [ignore visibility
-restrictions](https://github.com/bazelbuild/bazel/issues/16044).
diff --git a/7.6.1/extending/repo.mdx b/7.6.1/extending/repo.mdx
deleted file mode 100644
index b878f03..0000000
--- a/7.6.1/extending/repo.mdx
+++ /dev/null
@@ -1,161 +0,0 @@
----
-title: 'Repository Rules'
----
-
-
-
-This page covers how to define repository rules and provides examples that
-illustrate the details.
-
-An [external repository](/external/overview#repository) is a directory tree,
-containing source files usable in a Bazel build, which is generated on demand by
-running its corresponding **repo rule**. Repos can be defined in a multitude of
-ways, but ultimately, each repo is defined by invoking a repo rule, just as
-build targets are defined by invoking build rules. They can be used to depend on
-third-party libraries (such as Maven packaged libraries) but also to generate
-`BUILD` files specific to the host Bazel is running on.
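-
-To make the invocation pattern concrete, here is a minimal sketch (the extension
-and repo names are hypothetical; `http_archive` is the standard repo rule from
-`@bazel_tools`) of defining a repo by invoking a repo rule from a module
-extension:
-
-```python
-# extensions.bzl
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-
-def _my_deps_impl(mctx):
-  # Invoking the repo rule defines the repo; it is only fetched on demand.
-  http_archive(
-      name = "my_dep",
-      urls = ["https://example.com/my_dep-1.0.tar.gz"],
-      sha256 = "...",  # placeholder checksum
-  )
-
-my_deps = module_extension(implementation = _my_deps_impl)
-```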
-
-## Repository rule definition
-
-In a `.bzl` file, use the
-[repository_rule](/rules/lib/globals/bzl#repository_rule) function to define a
-new repo rule and store it in a global variable. After a repo rule is defined,
-it can be invoked as a function to define repos. This invocation is usually
-performed from inside a [module extension](/external/extension) implementation
-function.
-
-The two major components of a repo rule definition are its attribute schema and
-implementation function. The attribute schema determines the names and types of
-attributes passed to a repo rule invocation, and the implementation function is
-run when the repo needs to be fetched.
-
-## Attributes
-
-Attributes are arguments passed to the repo rule invocation. The schema of
-attributes accepted by a repo rule is specified using the `attrs` argument when
-the repo rule is defined with a call to `repository_rule`. An example defining
-`url` and `sha256` attributes as strings:
-
-```python
-http_archive = repository_rule(
-    implementation=_impl,
-    attrs={
-        "url": attr.string(mandatory=True),
-        "sha256": attr.string(mandatory=True),
-    }
-)
-```
-
-To access an attribute within the implementation function, use
-`repository_ctx.attr.<name>`:
-
-```python
-def _impl(repository_ctx):
-  url = repository_ctx.attr.url
-  checksum = repository_ctx.attr.sha256
-```
-
-All `repository_rule`s have the implicitly defined attribute `name`. This is a
-string attribute that behaves somewhat magically: when specified as an input to
-a repo rule invocation, it takes an apparent repo name; but when read from the
-repo rule's implementation function using `repository_ctx.attr.name`, it returns
-the canonical repo name.
-
-## Implementation function
-
-Every repo rule requires an `implementation` function. It contains the actual
-logic of the rule and is executed strictly in the loading phase.
-
-The function has exactly one input parameter, `repository_ctx`. The function
-returns either `None` to signify that the rule is reproducible given the
-specified parameters, or a dict with a set of parameters for that rule that
-would turn that rule into a reproducible one generating the same repo. For
-example, for a rule tracking a git repository that would mean returning a
-specific commit identifier instead of a floating branch that was originally
-specified.
-
-The input parameter `repository_ctx` can be used to access attribute values, and
-non-hermetic functions (finding a binary, executing a binary, creating a file in
-the repository or downloading a file from the Internet). See [the API
-docs](/rules/lib/builtins/repository_ctx) for more context. Example:
-
-```python
-def _impl(repository_ctx):
-  repository_ctx.symlink(repository_ctx.attr.path, "")
-
-local_repository = repository_rule(
-  implementation=_impl,
-  ...)
-```
-
-## When is the implementation function executed?
-
-The implementation function of a repo rule is executed when Bazel needs a target
-from that repository, for example when another target (in another repo) depends
-on it or if it is mentioned on the command line. The implementation function is
-then expected to create the repo in the file system. This is called "fetching"
-the repo.
-
-In contrast to regular targets, repos are not necessarily re-fetched when
-something changes that would cause the repo to be different. This is because
-there are things that Bazel either cannot detect changes to or it would cause
-too much overhead on every build (for example, things that are fetched from the
-network).
Therefore, repos are re-fetched only if one of the following things
-changes:
-
-* The attributes passed to the repo rule invocation.
-* The Starlark code comprising the implementation of the repo rule.
-* The value of any environment variable passed to `repository_ctx`'s
-  `getenv()` method or declared with the `environ` attribute of the
-  [`repository_rule`](/rules/lib/globals/bzl#repository_rule). The values of
-  these environment variables can be hard-wired on the command line with the
-  [`--repo_env`](/reference/command-line-reference#flag--repo_env) flag.
-* The existence, contents, and type of any paths being
-  [`watch`ed](/rules/lib/builtins/repository_ctx#watch) in the implementation
-  function of the repo rule.
-  * Certain other methods of `repository_ctx` with a `watch` parameter, such
-    as `read()`, `execute()`, and `extract()`, can also cause paths to be
-    watched.
-  * Similarly, [`repository_ctx.watch_tree`](/rules/lib/builtins/repository_ctx#watch_tree)
-    and [`path.readdir`](/rules/lib/builtins/path#readdir) can cause paths
-    to be watched in other ways.
-* When `bazel fetch --force` is executed.
-
-There are two parameters of `repository_rule` that control when the repositories
-are re-fetched:
-
-* If the `configure` flag is set, the repository is re-fetched on `bazel
-  fetch --force --configure` (non-`configure` repositories are not
-  re-fetched).
-* If the `local` flag is set, in addition to the above cases, the repo is also
-  re-fetched when the Bazel server restarts.
-
-## Forcing refetch of external repos
-
-Sometimes, an external repo can become outdated without any change to its
-definition or dependencies. For example, a repo fetching sources might follow a
-particular branch of a third-party repository, and new commits are available on
-that branch. In this case, you can ask Bazel to refetch all external repos
-unconditionally by calling `bazel fetch --force --all`.
-
-Moreover, some repo rules inspect the local machine and might become outdated if
-the local machine was upgraded. To refetch only those external repos whose
-[`repository_rule`](/rules/lib/globals#repository_rule)
-definition has the `configure` attribute set, use `bazel fetch --force
---configure`.
-
-## Examples
-
-- [C++ auto-configured
-  toolchain](https://cs.opensource.google/bazel/bazel/+/master:tools/cpp/cc_configure.bzl;drc=644b7d41748e09eff9e47cbab2be2263bb71f29a;l=176):
-  it uses a repo rule to automatically create the C++ configuration files for
-  Bazel by looking for the local C++ compiler, the environment and the flags
-  the C++ compiler supports.
-
-- [Go repositories](https://github.com/bazelbuild/rules_go/blob/67bc217b6210a0922d76d252472b87e9a6118fdf/go/private/go_repositories.bzl#L195)
-  uses several `repository_rule`s to define the list of dependencies needed to
-  use the Go rules.
-
-- [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external)
-  creates an external repository called `@maven` by default that generates
-  build targets for every Maven artifact in the transitive dependency tree.
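-
-As one more (hypothetical) sketch, tying the re-fetch triggers above to code:
-the following repo rule is re-fetched whenever the `MY_TOOL` environment
-variable changes, or when the file it points to changes.
-
-```python
-def _my_tool_repo_impl(rctx):
-  tool_path = rctx.getenv("MY_TOOL", "/usr/bin/my-tool")  # change triggers re-fetch
-  rctx.watch(tool_path)  # so does a change to the file itself
-  rctx.symlink(tool_path, "my-tool")
-  rctx.file("BUILD", 'exports_files(["my-tool"])')
-
-my_tool_repo = repository_rule(implementation = _my_tool_repo_impl)
-```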
diff --git a/7.6.1/extending/rules.mdx b/7.6.1/extending/rules.mdx deleted file mode 100644 index 3af9674..0000000 --- a/7.6.1/extending/rules.mdx +++ /dev/null @@ -1,1247 +0,0 @@ ---- -title: 'Rules' ---- - - - -A **rule** defines a series of [**actions**](#actions) that Bazel performs on -inputs to produce a set of outputs, which are referenced in -[**providers**](#providers) returned by the rule's -[**implementation function**](#implementation_function). For example, a C++ -binary rule might: - -1. Take a set of `.cpp` source files (inputs). -2. Run `g++` on the source files (action). -3. Return the `DefaultInfo` provider with the executable output and other files - to make available at runtime. -4. Return the `CcInfo` provider with C++-specific information gathered from the - target and its dependencies. - -From Bazel's perspective, `g++` and the standard C++ libraries are also inputs -to this rule. As a rule writer, you must consider not only the user-provided -inputs to a rule, but also all of the tools and libraries required to execute -the actions. - -Before creating or modifying any rule, ensure you are familiar with Bazel's -[build phases](/extending/concepts). It is important to understand the three -phases of a build (loading, analysis, and execution). It is also useful to -learn about [macros](/extending/macros) to understand the difference between rules and -macros. To get started, first review the [Rules Tutorial](/rules/rules-tutorial). -Then, use this page as a reference. - -A few rules are built into Bazel itself. These *native rules*, such as -`cc_library` and `java_binary`, provide some core support for certain languages. -By defining your own rules, you can add similar support for languages and tools -that Bazel does not support natively. - -Bazel provides an extensibility model for writing rules using the -[Starlark](/rules/language) language. These rules are written in `.bzl` files, which -can be loaded directly from `BUILD` files. - -When defining your own rule, you get to decide what attributes it supports and -how it generates its outputs. - -The rule's `implementation` function defines its exact behavior during the -[analysis phase](/extending/concepts#evaluation-model). This function does not run any -external commands. Rather, it registers [actions](#actions) that will be used -later during the execution phase to build the rule's outputs, if they are -needed. - -## Rule creation - -In a `.bzl` file, use the [rule](/rules/lib/globals/bzl#rule) function to define a new -rule, and store the result in a global variable. The call to `rule` specifies -[attributes](#attributes) and an -[implementation function](#implementation_function): - -```python -example_library = rule( - implementation = _example_library_impl, - attrs = { - "deps": attr.label_list(), - ... - }, -) -``` - -This defines a [kind of rule](/query/language#kind) named `example_library`. - -The call to `rule` also must specify if the rule creates an -[executable](#executable-rules) output (with `executable=True`), or specifically -a test executable (with `test=True`). If the latter, the rule is a *test rule*, -and the name of the rule must end in `_test`. - -## Target instantiation - -Rules can be [loaded](/concepts/build-files#load) and called in `BUILD` files: - -```python -load('//some/pkg:rules.bzl', 'example_library') - -example_library( - name = "example_target", - deps = [":another_target"], - ... 
-) -``` - -Each call to a build rule returns no value, but has the side effect of defining -a target. This is called *instantiating* the rule. This specifies a name for the -new target and values for the target's [attributes](#attributes). - -Rules can also be called from Starlark functions and loaded in `.bzl` files. -Starlark functions that call rules are called [Starlark macros](/extending/macros). -Starlark macros must ultimately be called from `BUILD` files, and can only be -called during the [loading phase](/extending/concepts#evaluation-model), when `BUILD` -files are evaluated to instantiate targets. - -## Attributes - -An *attribute* is a rule argument. Attributes can provide specific values to a -target's [implementation](#implementation_function), or they can refer to other -targets, creating a graph of dependencies. - -Rule-specific attributes, such as `srcs` or `deps`, are defined by passing a map -from attribute names to schemas (created using the [`attr`](/rules/lib/toplevel/attr) -module) to the `attrs` parameter of `rule`. -[Common attributes](/reference/be/common-definitions#common-attributes), such as -`name` and `visibility`, are implicitly added to all rules. Additional -attributes are implicitly added to -[executable and test rules](#executable-rules) specifically. Attributes which -are implicitly added to a rule cannot be included in the dictionary passed to -`attrs`. - -### Dependency attributes - -Rules that process source code usually define the following attributes to handle -various [types of dependencies](/concepts/dependencies#types_of_dependencies): - -* `srcs` specifies source files processed by a target's actions. Often, the - attribute schema specifies which file extensions are expected for the sort - of source file the rule processes. Rules for languages with header files - generally specify a separate `hdrs` attribute for headers processed by a - target and its consumers. -* `deps` specifies code dependencies for a target. The attribute schema should - specify which [providers](#providers) those dependencies must provide. (For - example, `cc_library` provides `CcInfo`.) -* `data` specifies files to be made available at runtime to any executable - which depends on a target. That should allow arbitrary files to be - specified. - -```python -example_library = rule( - implementation = _example_library_impl, - attrs = { - "srcs": attr.label_list(allow_files = [".example"]), - "hdrs": attr.label_list(allow_files = [".header"]), - "deps": attr.label_list(providers = [ExampleInfo]), - "data": attr.label_list(allow_files = True), - ... - }, -) -``` - -These are examples of *dependency attributes*. Any attribute that specifies -an input label (those defined with -[`attr.label_list`](/rules/lib/toplevel/attr#label_list), -[`attr.label`](/rules/lib/toplevel/attr#label), or -[`attr.label_keyed_string_dict`](/rules/lib/toplevel/attr#label_keyed_string_dict)) -specifies dependencies of a certain type -between a target and the targets whose labels (or the corresponding -[`Label`](/rules/lib/builtins/Label) objects) are listed in that attribute when the target -is defined. The repository, and possibly the path, for these labels is resolved -relative to the defined target. - -```python -example_library( - name = "my_target", - deps = [":other_target"], -) - -example_library( - name = "other_target", - ... -) -``` - -In this example, `other_target` is a dependency of `my_target`, and therefore -`other_target` is analyzed first. 
It is an error if there is a cycle in the -dependency graph of targets. - - - -### Private attributes and implicit dependencies - -A dependency attribute with a default value creates an *implicit dependency*. It -is implicit because it's a part of the target graph that the user does not -specify in a `BUILD` file. Implicit dependencies are useful for hard-coding a -relationship between a rule and a *tool* (a build-time dependency, such as a -compiler), since most of the time a user is not interested in specifying what -tool the rule uses. Inside the rule's implementation function, this is treated -the same as other dependencies. - -If you want to provide an implicit dependency without allowing the user to -override that value, you can make the attribute *private* by giving it a name -that begins with an underscore (`_`). Private attributes must have default -values. It generally only makes sense to use private attributes for implicit -dependencies. - -```python -example_library = rule( - implementation = _example_library_impl, - attrs = { - ... - "_compiler": attr.label( - default = Label("//tools:example_compiler"), - allow_single_file = True, - executable = True, - cfg = "exec", - ), - }, -) -``` - -In this example, every target of type `example_library` has an implicit -dependency on the compiler `//tools:example_compiler`. This allows -`example_library`'s implementation function to generate actions that invoke the -compiler, even though the user did not pass its label as an input. Since -`_compiler` is a private attribute, it follows that `ctx.attr._compiler` -will always point to `//tools:example_compiler` in all targets of this rule -type. Alternatively, you can name the attribute `compiler` without the -underscore and keep the default value. This allows users to substitute a -different compiler if necessary, but it requires no awareness of the compiler's -label. - -Implicit dependencies are generally used for tools that reside in the same -repository as the rule implementation. If the tool comes from the -[execution platform](/extending/platforms) or a different repository instead, the -rule should obtain that tool from a [toolchain](/extending/toolchains). - -### Output attributes - -*Output attributes*, such as [`attr.output`](/rules/lib/toplevel/attr#output) and -[`attr.output_list`](/rules/lib/toplevel/attr#output_list), declare an output file that the -target generates. These differ from dependency attributes in two ways: - -* They define output file targets instead of referring to targets defined - elsewhere. -* The output file targets depend on the instantiated rule target, instead of - the other way around. - -Typically, output attributes are only used when a rule needs to create outputs -with user-defined names which cannot be based on the target name. If a rule has -one output attribute, it is typically named `out` or `outs`. - -Output attributes are the preferred way of creating *predeclared outputs*, which -can be specifically depended upon or -[requested at the command line](#requesting_output_files). - -## Implementation function - -Every rule requires an `implementation` function. These functions are executed -strictly in the [analysis phase](/extending/concepts#evaluation-model) and transform the -graph of targets generated in the loading phase into a graph of -[actions](#actions) to be performed during the execution phase. As such, -implementation functions can not actually read or write files. 
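-
-To make this concrete, here is a minimal sketch (with illustrative names): the
-implementation function only *registers* an action and returns providers; the
-file itself is written later, during the execution phase, and only if needed.
-
-```python
-def _hello_impl(ctx):
-  out = ctx.actions.declare_file(ctx.label.name + ".txt")
-  # Registered during analysis, executed (at most once) during execution.
-  ctx.actions.write(output = out, content = "Hello!\n")
-  return [DefaultInfo(files = depset([out]))]
-
-hello = rule(implementation = _hello_impl)
-```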
-
-Rule implementation functions are usually private (named with a leading
-underscore). Conventionally, they are named the same as their rule, but suffixed
-with `_impl`.
-
-Implementation functions take exactly one parameter: a
-[rule context](/rules/lib/builtins/ctx), conventionally named `ctx`. They return a list of
-[providers](#providers).
-
-### Targets
-
-Dependencies are represented at analysis time as [`Target`](/rules/lib/builtins/Target)
-objects. These objects contain the [providers](#providers) generated when the
-target's implementation function was executed.
-
-[`ctx.attr`](/rules/lib/builtins/ctx#attr) has fields corresponding to the names of each
-dependency attribute, containing `Target` objects representing each direct
-dependency via that attribute. For `label_list` attributes, this is a list of
-`Targets`. For `label` attributes, this is a single `Target` or `None`.
-
-A list of provider objects is returned by a target's implementation function:
-
-```python
-return [ExampleInfo(headers = depset(...))]
-```
-
-Those can be accessed using index notation (`[]`), with the type of provider as
-a key. These can be [custom providers](#custom_providers) defined in Starlark or
-[providers for native rules](/rules/lib/providers) available as Starlark
-global variables.
-
-For example, if a rule takes header files via a `hdrs` attribute and provides
-them to the compilation actions of the target and its consumers, it could
-collect them like so:
-
-```python
-def _example_library_impl(ctx):
-    ...
-    transitive_headers = [hdr[ExampleInfo].headers for hdr in ctx.attr.hdrs]
-```
-
-For the legacy style in which a [`struct`](/rules/lib/builtins/struct) is returned from a
-target's implementation function instead of a list of provider objects:
-
-```python
-return struct(example_info = struct(headers = depset(...)))
-```
-
-Providers can be retrieved from the corresponding field of the `Target` object:
-
-```python
-transitive_headers = [hdr.example_info.headers for hdr in ctx.attr.hdrs]
-```
-
-This style is strongly discouraged and rules should be
-[migrated away from it](#migrating_from_legacy_providers).
-
-### Files
-
-Files are represented by [`File`](/rules/lib/builtins/File) objects. Since Bazel does not
-perform file I/O during the analysis phase, these objects cannot be used to
-directly read or write file content. Rather, they are passed to action-emitting
-functions (see [`ctx.actions`](/rules/lib/builtins/actions)) to construct pieces of the
-action graph.
-
-A `File` can either be a source file or a generated file. Each generated file
-must be an output of exactly one action. Source files cannot be the output of
-any action.
-
-For each dependency attribute, the corresponding field of
-[`ctx.files`](/rules/lib/builtins/ctx#files) contains a list of the default outputs of all
-dependencies via that attribute:
-
-```python
-def _example_library_impl(ctx):
-    ...
-    headers = depset(ctx.files.hdrs, transitive=transitive_headers)
-    srcs = ctx.files.srcs
-    ...
-```
-
-[`ctx.file`](/rules/lib/builtins/ctx#file) contains a single `File` or `None` for
-dependency attributes whose specs set `allow_single_file=True`.
-[`ctx.executable`](/rules/lib/builtins/ctx#executable) behaves the same as `ctx.file`, but only
-contains fields for dependency attributes whose specs set `executable=True`.
-
-### Declaring outputs
-
-During the analysis phase, a rule's implementation function can create outputs.
-Since all labels have to be known during the loading phase, these additional -outputs have no labels. `File` objects for outputs can be created using -[`ctx.actions.declare_file`](/rules/lib/builtins/actions#declare_file) and -[`ctx.actions.declare_directory`](/rules/lib/builtins/actions#declare_directory). Often, -the names of outputs are based on the target's name, -[`ctx.label.name`](/rules/lib/builtins/ctx#label): - -```python -def _example_library_impl(ctx): - ... - output_file = ctx.actions.declare_file(ctx.label.name + ".output") - ... -``` - -For *predeclared outputs*, like those created for -[output attributes](#output_attributes), `File` objects instead can be retrieved -from the corresponding fields of [`ctx.outputs`](/rules/lib/builtins/ctx#outputs). - -### Actions - -An action describes how to generate a set of outputs from a set of inputs, for -example "run gcc on hello.c and get hello.o". When an action is created, Bazel -doesn't run the command immediately. It registers it in a graph of dependencies, -because an action can depend on the output of another action. For example, in C, -the linker must be called after the compiler. - -General-purpose functions that create actions are defined in -[`ctx.actions`](/rules/lib/builtins/actions): - -* [`ctx.actions.run`](/rules/lib/builtins/actions#run), to run an executable. -* [`ctx.actions.run_shell`](/rules/lib/builtins/actions#run_shell), to run a shell - command. -* [`ctx.actions.write`](/rules/lib/builtins/actions#write), to write a string to a file. -* [`ctx.actions.expand_template`](/rules/lib/builtins/actions#expand_template), to - generate a file from a template. - -[`ctx.actions.args`](/rules/lib/builtins/actions#args) can be used to efficiently -accumulate the arguments for actions. It avoids flattening depsets until -execution time: - -```python -def _example_library_impl(ctx): - ... - - transitive_headers = [dep[ExampleInfo].headers for dep in ctx.attr.deps] - headers = depset(ctx.files.hdrs, transitive=transitive_headers) - srcs = ctx.files.srcs - inputs = depset(srcs, transitive=[headers]) - output_file = ctx.actions.declare_file(ctx.label.name + ".output") - - args = ctx.actions.args() - args.add_joined("-h", headers, join_with=",") - args.add_joined("-s", srcs, join_with=",") - args.add("-o", output_file) - - ctx.actions.run( - mnemonic = "ExampleCompile", - executable = ctx.executable._compiler, - arguments = [args], - inputs = inputs, - outputs = [output_file], - ) - ... -``` - -Actions take a list or depset of input files and generate a (non-empty) list of -output files. The set of input and output files must be known during the -[analysis phase](/extending/concepts#evaluation-model). It might depend on the value of -attributes, including providers from dependencies, but it cannot depend on the -result of the execution. For example, if your action runs the unzip command, you -must specify which files you expect to be inflated (before running unzip). -Actions which create a variable number of files internally can wrap those in a -single file (such as a zip, tar, or other archive format). - -Actions must list all of their inputs. Listing inputs that are not used is -permitted, but inefficient. - -Actions must create all of their outputs. They may write other files, but -anything not in outputs will not be available to consumers. All declared outputs -must be written by some action. 
-
-Actions are comparable to pure functions: They should depend only on the
-provided inputs, and avoid accessing computer information, username, clock,
-network, or I/O devices (except for reading inputs and writing outputs). This is
-important because the output will be cached and reused.
-
-Dependencies are resolved by Bazel, which will decide which actions are
-executed. It is an error if there is a cycle in the dependency graph. Creating
-an action does not guarantee that it will be executed; that depends on whether
-its outputs are needed for the build.
-
-### Providers
-
-Providers are pieces of information that a rule exposes to other rules that
-depend on it. This data can include output files, libraries, parameters to pass
-on a tool's command line, or anything else a target's consumers should know
-about.
-
-Since a rule's implementation function can only read providers from the
-instantiated target's immediate dependencies, rules need to forward any
-information from a target's dependencies that needs to be known by a target's
-consumers, generally by accumulating that into a [`depset`](/rules/lib/builtins/depset).
-
-A target's providers are specified by a list of `Provider` objects returned by
-the implementation function.
-
-Old implementation functions can also be written in a legacy style where the
-implementation function returns a [`struct`](/rules/lib/builtins/struct) instead of a list of
-provider objects. This style is strongly discouraged and rules should be
-[migrated away from it](#migrating_from_legacy_providers).
-
-#### Default outputs
-
-A target's *default outputs* are the outputs that are requested by default when
-the target is requested for build at the command line. For example, a
-`java_library` target `//pkg:foo` has `foo.jar` as a default output, so that
-will be built by the command `bazel build //pkg:foo`.
-
-Default outputs are specified by the `files` parameter of
-[`DefaultInfo`](/rules/lib/providers/DefaultInfo):
-
-```python
-def _example_library_impl(ctx):
-    ...
-    return [
-        DefaultInfo(files = depset([output_file]), ...),
-        ...
-    ]
-```
-
-If `DefaultInfo` is not returned by a rule implementation or the `files`
-parameter is not specified, `DefaultInfo.files` defaults to all
-*predeclared outputs* (generally, those created by [output
-attributes](#output_attributes)).
-
-Rules that perform actions should provide default outputs, even if those outputs
-are not expected to be directly used. Actions that are not in the graph of the
-requested outputs are pruned. If an output is only used by a target's consumers,
-those actions will not be performed when the target is built in isolation. This
-makes debugging more difficult because rebuilding just the failing target won't
-reproduce the failure.
-
-#### Runfiles
-
-Runfiles are a set of files used by a target at runtime (as opposed to build
-time). During the [execution phase](/extending/concepts#evaluation-model), Bazel creates
-a directory tree containing symlinks pointing to the runfiles. This stages the
-environment for the binary so it can access the runfiles during runtime.
-
-Runfiles can be added manually during rule creation.
-[`runfiles`](/rules/lib/builtins/runfiles) objects can be created by the `runfiles` method
-on the rule context, [`ctx.runfiles`](/rules/lib/builtins/ctx#runfiles) and passed to the
-`runfiles` parameter on `DefaultInfo`. The executable output of
-[executable rules](#executable-rules) is implicitly added to the runfiles.
-
-Some rules specify attributes, generally named
-[`data`](/reference/be/common-definitions#common.data), whose outputs are added to
-a target's runfiles. Runfiles should also be merged in from `data`, as well as
-from any attributes which might provide code for eventual execution, generally
-`srcs` (which might contain `filegroup` targets with associated `data`) and
-`deps`.
-
-```python
-def _example_library_impl(ctx):
-    ...
-    runfiles = ctx.runfiles(files = ctx.files.data)
-    transitive_runfiles = []
-    for runfiles_attr in (
-        ctx.attr.srcs,
-        ctx.attr.hdrs,
-        ctx.attr.deps,
-        ctx.attr.data,
-    ):
-        for target in runfiles_attr:
-            transitive_runfiles.append(target[DefaultInfo].default_runfiles)
-    runfiles = runfiles.merge_all(transitive_runfiles)
-    return [
-        DefaultInfo(..., runfiles = runfiles),
-        ...
-    ]
-```
-
-#### Custom providers
-
-Providers can be defined using the [`provider`](/rules/lib/globals/bzl#provider)
-function to convey rule-specific information:
-
-```python
-ExampleInfo = provider(
-    "Info needed to compile/link Example code.",
-    fields={
-        "headers": "depset of header Files from transitive dependencies.",
-        "files_to_link": "depset of Files from compilation.",
-    })
-```
-
-Rule implementation functions can then construct and return provider instances:
-
-```python
-def _example_library_impl(ctx):
-  ...
-  return [
-      ...
-      ExampleInfo(
-          headers = headers,
-          files_to_link = depset(
-              [output_file],
-              transitive = [
-                  dep[ExampleInfo].files_to_link for dep in ctx.attr.deps
-              ],
-          ),
-      )
-  ]
-```
-
-##### Custom initialization of providers
-
-It's possible to guard the instantiation of a provider with custom
-preprocessing and validation logic. This can be used to ensure that all
-provider instances obey certain invariants, or to give users a cleaner API for
-obtaining an instance.
-
-This is done by passing an `init` callback to the
-[`provider`](/rules/lib/globals/bzl.html#provider) function. If this callback is given, the
-return type of `provider()` changes to be a tuple of two values: the provider
-symbol that is the ordinary return value when `init` is not used, and a "raw
-constructor".
-
-In this case, when the provider symbol is called, instead of directly returning
-a new instance, it will forward the arguments along to the `init` callback. The
-callback's return value must be a dict mapping field names (strings) to values;
-this is used to initialize the fields of the new instance. Note that the
-callback may have any signature, and if the arguments do not match the signature
-an error is reported as if the callback were invoked directly.
-
-The raw constructor, by contrast, will bypass the `init` callback.
-
-The following example uses `init` to preprocess and validate its arguments:
-
-```python
-# //pkg:exampleinfo.bzl
-
-_core_headers = [...]  # private constant representing standard library files
-
-# It's possible to define an init accepting positional arguments, but
-# keyword-only arguments are preferred.
-def _exampleinfo_init(*, files_to_link, headers = None, allow_empty_files_to_link = False):
-    if not files_to_link and not allow_empty_files_to_link:
-        fail("files_to_link may not be empty")
-    all_headers = depset(_core_headers, transitive = headers)
-    return {'files_to_link': files_to_link, 'headers': all_headers}
-
-ExampleInfo, _new_exampleinfo = provider(
-    ...
-
-    init = _exampleinfo_init)
-```
-
-A rule implementation may then instantiate the provider as follows:
-
-```python
-ExampleInfo(
-    files_to_link = my_files_to_link,  # may not be empty
-    headers = my_headers,  # will automatically include the core headers
-)
-```
-
-The raw constructor can be used to define alternative public factory functions
-that do not go through the `init` logic. For example, in exampleinfo.bzl we
-could define:
-
-```python
-def make_barebones_exampleinfo(headers):
-    """Returns an ExampleInfo with no files_to_link and only the specified headers."""
-    return _new_exampleinfo(files_to_link = depset(), headers = headers)
-```
-
-Typically, the raw constructor is bound to a variable whose name begins with an
-underscore (`_new_exampleinfo` above), so that user code cannot load it and
-generate arbitrary provider instances.
-
-Another use for `init` is to simply prevent the user from calling the provider
-symbol altogether, and force them to use a factory function instead:
-
-```python
-def _exampleinfo_init_banned(*args, **kwargs):
-    fail("Do not call ExampleInfo(). Use make_exampleinfo() instead.")
-
-ExampleInfo, _new_exampleinfo = provider(
-    ...
-    init = _exampleinfo_init_banned)
-
-def make_exampleinfo(...):
-    ...
-    return _new_exampleinfo(...)
-```
-
-
-
-## Executable rules and test rules
-
-Executable rules define targets that can be invoked by a `bazel run` command.
-Test rules are a special kind of executable rule whose targets can also be
-invoked by a `bazel test` command. Executable and test rules are created by
-setting the respective [`executable`](/rules/lib/globals/bzl#rule.executable) or
-[`test`](/rules/lib/globals/bzl#rule.test) argument to `True` in the call to `rule`:
-
-```python
-example_binary = rule(
-    implementation = _example_binary_impl,
-    executable = True,
-    ...
-)
-
-example_test = rule(
-    implementation = _example_binary_impl,
-    test = True,
-    ...
-)
-```
-
-Test rules must have names that end in `_test`. (Test *target* names also often
-end in `_test` by convention, but this is not required.) Non-test rules must not
-have this suffix.
-
-Both kinds of rules must produce an executable output file (which may or may not
-be predeclared) that will be invoked by the `run` or `test` commands. To tell
-Bazel which of a rule's outputs to use as this executable, pass it as the
-`executable` argument of a returned [`DefaultInfo`](/rules/lib/providers/DefaultInfo)
-provider. That `executable` is added to the default outputs of the rule (so you
-don't need to pass that to both `executable` and `files`). It's also implicitly
-added to the [runfiles](#runfiles):
-
-```python
-def _example_binary_impl(ctx):
-    executable = ctx.actions.declare_file(ctx.label.name)
-    ...
-    return [
-        DefaultInfo(executable = executable, ...),
-        ...
-    ]
-```
-
-The action that generates this file must set the executable bit on the file. For
-a [`ctx.actions.run`](/rules/lib/builtins/actions#run) or
-[`ctx.actions.run_shell`](/rules/lib/builtins/actions#run_shell) action this should be done
-by the underlying tool that is invoked by the action. For a
-[`ctx.actions.write`](/rules/lib/builtins/actions#write) action, pass `is_executable=True`.
-
-As [legacy behavior](#deprecated_predeclared_outputs), executable rules have a
-special `ctx.outputs.executable` predeclared output. This file serves as the
-default executable if you do not specify one using `DefaultInfo`; it must not be
-used otherwise.
This output mechanism is deprecated because it does not support -customizing the executable file's name at analysis time. - -See examples of an -[executable rule](https://github.com/bazelbuild/examples/blob/main/rules/executable/fortune.bzl) -and a -[test rule](https://github.com/bazelbuild/examples/blob/main/rules/test_rule/line_length.bzl). - -[Executable rules](/reference/be/common-definitions#common-attributes-binaries) and -[test rules](/reference/be/common-definitions#common-attributes-tests) have additional -attributes implicitly defined, in addition to those added for -[all rules](/reference/be/common-definitions#common-attributes). The defaults of -implicitly-added attributes cannot be changed, though this can be worked around -by wrapping a private rule in a [Starlark macro](/extending/macros) which alters the -default: - -```python -def example_test(size="small", **kwargs): - _example_test(size=size, **kwargs) - -_example_test = rule( - ... -) -``` - -### Runfiles location - -When an executable target is run with `bazel run` (or `test`), the root of the -runfiles directory is adjacent to the executable. The paths relate as follows: - -```python -# Given launcher_path and runfile_file: -runfiles_root = launcher_path.path + ".runfiles" -workspace_name = ctx.workspace_name -runfile_path = runfile_file.short_path -execution_root_relative_path = "%s/%s/%s" % ( - runfiles_root, workspace_name, runfile_path) -``` - -The path to a `File` under the runfiles directory corresponds to -[`File.short_path`](/rules/lib/builtins/File#short_path). - -The binary executed directly by `bazel` is adjacent to the root of the -`runfiles` directory. However, binaries called *from* the runfiles can't make -the same assumption. To mitigate this, each binary should provide a way to -accept its runfiles root as a parameter using an environment or command line -argument/flag. This allows binaries to pass the correct canonical runfiles root -to the binaries it calls. If that's not set, a binary can guess that it was the -first binary called and look for an adjacent runfiles directory. - -## Advanced topics - -### Requesting output files - -A single target can have several output files. When a `bazel build` command is -run, some of the outputs of the targets given to the command are considered to -be *requested*. Bazel only builds these requested files and the files that they -directly or indirectly depend on. (In terms of the action graph, Bazel only -executes the actions that are reachable as transitive dependencies of the -requested files.) - -In addition to [default outputs](#default_outputs), any *predeclared output* can -be explicitly requested on the command line. Rules can specify predeclared -outputs via [output attributes](#output_attributes). In that case, the user -explicitly chooses labels for outputs when they instantiate the rule. To obtain -[`File`](/rules/lib/builtins/File) objects for output attributes, use the corresponding -attribute of [`ctx.outputs`](/rules/lib/builtins/ctx#outputs). Rules can -[implicitly define predeclared outputs](#deprecated_predeclared_outputs) based -on the target name as well, but this feature is deprecated. - -In addition to default outputs, there are *output groups*, which are collections -of output files that may be requested together. These can be requested with -[`--output_groups`](/reference/command-line-reference#flag--output_groups). 
For -example, if a target `//pkg:mytarget` is of a rule type that has a `debug_files` -output group, these files can be built by running `bazel build //pkg:mytarget ---output_groups=debug_files`. Since non-predeclared outputs don't have labels, -they can only be requested by appearing in the default outputs or an output -group. - -Output groups can be specified with the -[`OutputGroupInfo`](/rules/lib/providers/OutputGroupInfo) provider. Note that unlike many -built-in providers, `OutputGroupInfo` can take parameters with arbitrary names -to define output groups with that name: - -```python -def _example_library_impl(ctx): - ... - debug_file = ctx.actions.declare_file(name + ".pdb") - ... - return [ - DefaultInfo(files = depset([output_file]), ...), - OutputGroupInfo( - debug_files = depset([debug_file]), - all_files = depset([output_file, debug_file]), - ), - ... - ] -``` - -Also unlike most providers, `OutputGroupInfo` can be returned by both an -[aspect](/extending/aspects) and the rule target to which that aspect is applied, as -long as they do not define the same output groups. In that case, the resulting -providers are merged. - -Note that `OutputGroupInfo` generally shouldn't be used to convey specific sorts -of files from a target to the actions of its consumers. Define -[rule-specific providers](#custom_providers) for that instead. - -### Configurations - -Imagine that you want to build a C++ binary for a different architecture. The -build can be complex and involve multiple steps. Some of the intermediate -binaries, like compilers and code generators, have to run on -[the execution platform](/extending/platforms#overview) (which could be your host, -or a remote executor). Some binaries like the final output must be built for the -target architecture. - -For this reason, Bazel has a concept of "configurations" and transitions. The -topmost targets (the ones requested on the command line) are built in the -"target" configuration, while tools that should run on the execution platform -are built in an "exec" configuration. Rules may generate different actions based -on the configuration, for instance to change the cpu architecture that is passed -to the compiler. In some cases, the same library may be needed for different -configurations. If this happens, it will be analyzed and potentially built -multiple times. - -By default, Bazel builds a target's dependencies in the same configuration as -the target itself, in other words without transitions. When a dependency is a -tool that's needed to help build the target, the corresponding attribute should -specify a transition to an exec configuration. This causes the tool and all its -dependencies to build for the execution platform. - -For each dependency attribute, you can use `cfg` to decide if dependencies -should build in the same configuration or transition to an exec configuration. -If a dependency attribute has the flag `executable=True`, `cfg` must be set -explicitly. This is to guard against accidentally building a tool for the wrong -configuration. -[See example](https://github.com/bazelbuild/examples/blob/main/rules/actions_run/execute.bzl) - -In general, sources, dependent libraries, and executables that will be needed at -runtime can use the same configuration. - -Tools that are executed as part of the build (such as compilers or code generators) -should be built for an exec configuration. In this case, specify `cfg="exec"` in -the attribute. 
-
-Otherwise, executables that are used at runtime (such as part of a test) should
-be built for the target configuration. In this case, specify `cfg="target"` in
-the attribute.
-
-`cfg="target"` doesn't actually do anything: it's purely a convenience value to
-help rule designers be explicit about their intentions. When `executable=False`,
-which means `cfg` is optional, only set this when it truly helps readability.
-
-You can also use `cfg=my_transition` to use
-[user-defined transitions](/extending/config#user-defined-transitions), which allow
-rule authors a great deal of flexibility in changing configurations, with the
-drawback of
-[making the build graph larger and less comprehensible](/extending/config#memory-and-performance-considerations).
-
-**Note**: Historically, Bazel didn't have the concept of execution platforms,
-and instead all build actions were considered to run on the host machine. Bazel
-versions before 6.0 created a distinct "host" configuration to represent this.
-If you see references to "host" in code or old documentation, that's what this
-refers to. We recommend using Bazel 6.0 or newer to avoid this extra conceptual
-overhead.
-
-### Configuration fragments
-
-Rules may access
-[configuration fragments](/rules/lib/fragments) such as
-`cpp`, `java` and `jvm`. However, all required fragments must be declared in
-order to avoid access errors:
-
-```python
-def _impl(ctx):
-    # Using ctx.fragments.cpp leads to an error since it was not declared.
-    x = ctx.fragments.java
-    ...
-
-my_rule = rule(
-    implementation = _impl,
-    fragments = ["java"],       # Required fragments of the target configuration
-    host_fragments = ["java"],  # Required fragments of the host configuration
-    ...
-)
-```
-
-### Runfiles symlinks
-
-Normally, the relative path of a file in the runfiles tree is the same as the
-relative path of that file in the source tree or generated output tree. If these
-need to be different for some reason, you can specify the `root_symlinks` or
-`symlinks` arguments. The `root_symlinks` is a dictionary mapping paths to
-files, where the paths are relative to the root of the runfiles directory. The
-`symlinks` dictionary is the same, but paths are implicitly prefixed with the
-name of the main workspace (*not* the name of the repository containing the
-current target).
-
-```python
-    ...
-    runfiles = ctx.runfiles(
-        root_symlinks = {"some/path/here.foo": ctx.file.some_data_file2},
-        symlinks = {"some/path/here.bar": ctx.file.some_data_file3},
-    )
-    # Creates something like:
-    # sometarget.runfiles/
-    #     some/
-    #         path/
-    #             here.foo -> some_data_file2
-    #     <workspace_name>/
-    #         some/
-    #             path/
-    #                 here.bar -> some_data_file3
-```
-
-If `symlinks` or `root_symlinks` is used, be careful not to map two different
-files to the same path in the runfiles tree. This will cause the build to fail
-with an error describing the conflict. To fix, you will need to modify your
-`ctx.runfiles` arguments to remove the collision. This checking will be done for
-any targets using your rule, as well as targets of any kind that depend on those
-targets. This is especially risky if your tool is likely to be used transitively
-by another tool; symlink names must be unique across the runfiles of a tool and
-all of its dependencies.
-
-### Code coverage
-
-When the [`coverage`](/reference/command-line-reference#coverage) command is run,
-the build may need to add coverage instrumentation for certain targets. The
-build also gathers the list of source files that are instrumented. The subset of
-targets that are considered is controlled by the flag
-[`--instrumentation_filter`](/reference/command-line-reference#flag--instrumentation_filter).
-Test targets are excluded, unless
-[`--instrument_test_targets`](/reference/command-line-reference#flag--instrument_test_targets)
-is specified.
-
-If a rule implementation adds coverage instrumentation at build time, it needs
-to account for that in its implementation function.
-[`ctx.coverage_instrumented`](/rules/lib/builtins/ctx#coverage_instrumented) returns true in
-coverage mode if a target's sources should be instrumented:
-
-```python
-# Are this rule's sources instrumented?
-if ctx.coverage_instrumented():
-    # Do something to turn on coverage for this compile action
-```
-
-Logic that always needs to be on in coverage mode (whether a target's sources
-specifically are instrumented or not) can be conditioned on
-[`ctx.configuration.coverage_enabled`](/rules/lib/builtins/configuration#coverage_enabled).
-
-If the rule directly includes sources from its dependencies before compilation
-(such as header files), it may also need to turn on compile-time instrumentation if
-the dependencies' sources should be instrumented:
-
-```python
-# Are this rule's sources or any of the sources for its direct dependencies
-# in deps instrumented?
-if (ctx.configuration.coverage_enabled and
-    (ctx.coverage_instrumented() or
-     any([ctx.coverage_instrumented(dep) for dep in ctx.attr.deps]))):
-    # Do something to turn on coverage for this compile action
-```
-
-Rules should also provide information about which attributes are relevant for
-coverage with the `InstrumentedFilesInfo` provider, constructed using
-[`coverage_common.instrumented_files_info`](/rules/lib/toplevel/coverage_common#instrumented_files_info).
-The `dependency_attributes` parameter of `instrumented_files_info` should list
-all runtime dependency attributes, including code dependencies like `deps` and
-data dependencies like `data`. The `source_attributes` parameter should list the
-rule's source files attributes if coverage instrumentation might be added:
-
-```python
-def _example_library_impl(ctx):
-    ...
-    return [
-        ...
-        coverage_common.instrumented_files_info(
-            ctx,
-            dependency_attributes = ["deps", "data"],
-            # Omitted if coverage is not supported for this rule:
-            source_attributes = ["srcs", "hdrs"],
-        )
-        ...
-    ]
-```
-
-If `InstrumentedFilesInfo` is not returned, a default one is created with each
-non-tool [dependency attribute](#dependency_attributes) (that doesn't set
-[`cfg`](#configuration) to `"host"` or `"exec"` in the attribute schema) in
-`dependency_attributes`. (This isn't ideal behavior, since it puts attributes
-like `srcs` in `dependency_attributes` instead of `source_attributes`, but it
-avoids the need for explicit coverage configuration for all rules in the
-dependency chain.)
-
-### Validation Actions
-
-Sometimes you need to validate something about the build, and the
-information required to do that validation is available only in artifacts
-(source files or generated files). Because this information is in artifacts,
-rules cannot do this validation at analysis time because rules cannot read
-files. Instead, actions must do this validation at execution time. When
-validation fails, the action will fail, and hence so will the build.
-
-Examples of validations that might be run are static analysis, linting,
-dependency and consistency checks, and style checks.
-
-Validation actions can also help to improve build performance by moving parts
-of actions that are not required for building artifacts into separate actions.
-For example, if a single action that does compilation and linting can be
-separated into a compilation action and a linting action, then the linting
-action can be run as a validation action and run in parallel with other actions.
-
-These "validation actions" often don't produce anything that is used elsewhere
-in the build, since they only need to assert things about their inputs. This
-presents a problem though: If a validation action does not produce anything that
-is used elsewhere in the build, how does a rule get the action to run?
-Historically, the approach was to have the validation action output an empty
-file, and artificially add that output to the inputs of some other important
-action in the build.
-
-This works because Bazel will always run the validation action when the compile
-action is run, but this has significant drawbacks:
-
-1. The validation action is in the critical path of the build. Because Bazel
-thinks the empty output is required to run the compile action, it will run the
-validation action first, even though the compile action will ignore the input.
-This reduces parallelism and slows down builds.
-
-2. If other actions in the build might run instead of the
-compile action, then the empty outputs of validation actions need to be added to
-those actions as well (`java_library`'s source jar output, for example). This is
-also a problem if new actions that might run instead of the compile action are
-added later, and the empty validation output is accidentally left off.
-
-The solution to these problems is to use the Validations Output Group.
-
-#### Validations Output Group
-
-The Validations Output Group is an output group designed to hold the otherwise
-unused outputs of validation actions, so that they don't need to be artificially
-added to the inputs of other actions.
-
-This group is special in that its outputs are always requested, regardless of
-the value of the `--output_groups` flag, and regardless of how the target is
-depended upon (for example, on the command line, as a dependency, or through
-implicit outputs of the target). Note that normal caching and incrementality
-still apply: if the inputs to the validation action have not changed and the
-validation action previously succeeded, then the validation action will not be
-run.
-
-Using this output group still requires that validation actions output some file,
-even an empty one. This might require wrapping some tools that normally don't
-create outputs so that a file is created.
-
-A target's validation actions are not run in three cases:
-
-* When the target is depended upon as a tool
-* When the target is depended upon as an implicit dependency (for example, an
-  attribute that starts with "_")
-* When the target is built in the host or exec configuration
-
-It is assumed that these targets have their own separate builds and tests that
-would uncover any validation failures.
- -#### Using the Validations Output Group - -The Validations Output Group is named `_validation` and is used like any other -output group: - -```python -def _rule_with_validation_impl(ctx): - - ctx.actions.write(ctx.outputs.main, "main output\n") - - ctx.actions.write(ctx.outputs.implicit, "implicit output\n") - - validation_output = ctx.actions.declare_file(ctx.attr.name + ".validation") - ctx.actions.run( - outputs = [validation_output], - executable = ctx.executable._validation_tool, - arguments = [validation_output.path]) - - return [ - DefaultInfo(files = depset([ctx.outputs.main])), - OutputGroupInfo(_validation = depset([validation_output])), - ] - - -rule_with_validation = rule( - implementation = _rule_with_validation_impl, - outputs = { - "main": "%{name}.main", - "implicit": "%{name}.implicit", - }, - attrs = { - "_validation_tool": attr.label( - default = Label("//validation_actions:validation_tool"), - executable = True, - cfg = "exec"), - } -) -``` - -Notice that the validation output file is not added to the `DefaultInfo` or the -inputs to any other action. The validation action for a target of this rule kind -will still run if the target is depended upon by label, or any of the target's -implicit outputs are directly or indirectly depended upon. - -It is usually important that the outputs of validation actions only go into the -validation output group, and are not added to the inputs of other actions, as -this could defeat parallelism gains. Note however that Bazel does not currently -have any special checking to enforce this. Therefore, you should test -that validation action outputs are not added to the inputs of any actions in the -tests for Starlark rules. For example: - -```python -load("@bazel_skylib//lib:unittest.bzl", "analysistest") - -def _validation_outputs_test_impl(ctx): - env = analysistest.begin(ctx) - - actions = analysistest.target_actions(env) - target = analysistest.target_under_test(env) - validation_outputs = target.output_groups._validation.to_list() - for action in actions: - for validation_output in validation_outputs: - if validation_output in action.inputs.to_list(): - analysistest.fail(env, - "%s is a validation action output, but is an input to action %s" % ( - validation_output, action)) - - return analysistest.end(env) - -validation_outputs_test = analysistest.make(_validation_outputs_test_impl) -``` - -#### Validation Actions Flag - -Running validation actions is controlled by the `--run_validations` command line -flag, which defaults to true. - -## Deprecated features - -### Deprecated predeclared outputs - -There are two **deprecated** ways of using predeclared outputs: - -* The [`outputs`](/rules/lib/globals/bzl#rule.outputs) parameter of `rule` specifies - a mapping between output attribute names and string templates for generating - predeclared output labels. Prefer using non-predeclared outputs and - explicitly adding outputs to `DefaultInfo.files`. Use the rule target's - label as input for rules which consume the output instead of a predeclared - output's label. - -* For [executable rules](#executable-rules), `ctx.outputs.executable` refers - to a predeclared executable output with the same name as the rule target. - Prefer declaring the output explicitly, for example with - `ctx.actions.declare_file(ctx.label.name)`, and ensure that the command that - generates the executable sets its permissions to allow execution. Explicitly - pass the executable output to the `executable` parameter of `DefaultInfo`. 
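-
-For example, a minimal sketch of that modern pattern (the rule name and the
-script contents here are hypothetical):
-
-```python
-def _my_binary_impl(ctx):
-    # Explicitly declare the executable output instead of relying on the
-    # deprecated ctx.outputs.executable.
-    executable = ctx.actions.declare_file(ctx.label.name)
-    ctx.actions.write(
-        output = executable,
-        content = "#!/usr/bin/env bash\necho 'Hello'\n",
-        is_executable = True,  # sets the executable bit
-    )
-    return [DefaultInfo(executable = executable)]
-
-my_binary = rule(
-    implementation = _my_binary_impl,
-    executable = True,
-)
-```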
- -### Runfiles features to avoid - -[`ctx.runfiles`](/rules/lib/builtins/ctx#runfiles) and the [`runfiles`](/rules/lib/builtins/runfiles) -type have a complex set of features, many of which are kept for legacy reasons. -The following recommendations help reduce complexity: - -* **Avoid** use of the `collect_data` and `collect_default` modes of - [`ctx.runfiles`](/rules/lib/builtins/ctx#runfiles). These modes implicitly collect - runfiles across certain hardcoded dependency edges in confusing ways. - Instead, add files using the `files` or `transitive_files` parameters of - `ctx.runfiles`, or by merging in runfiles from dependencies with - `runfiles = runfiles.merge(dep[DefaultInfo].default_runfiles)`. - -* **Avoid** use of the `data_runfiles` and `default_runfiles` of the - `DefaultInfo` constructor. Specify `DefaultInfo(runfiles = ...)` instead. - The distinction between "default" and "data" runfiles is maintained for - legacy reasons. For example, some rules put their default outputs in - `data_runfiles`, but not `default_runfiles`. Instead of using - `data_runfiles`, rules should *both* include default outputs and merge in - `default_runfiles` from attributes which provide runfiles (often - [`data`](/reference/be/common-definitions#common-attributes.data)). - -* When retrieving `runfiles` from `DefaultInfo` (generally only for merging - runfiles between the current rule and its dependencies), use - `DefaultInfo.default_runfiles`, **not** `DefaultInfo.data_runfiles`. - -### Migrating from legacy providers - -Historically, Bazel providers were simple fields on the `Target` object. They -were accessed using the dot operator, and they were created by putting the field -in a struct returned by the rule's implementation function. - -*This style is deprecated and should not be used in new code;* see below for -information that may help you migrate. The new provider mechanism avoids name -clashes. It also supports data hiding, by requiring any code accessing a -provider instance to retrieve it using the provider symbol. - -For the moment, legacy providers are still supported. A rule can return both -legacy and modern providers as follows: - -```python -def _old_rule_impl(ctx): - ... - legacy_data = struct(x="foo", ...) - modern_data = MyInfo(y="bar", ...) - # When any legacy providers are returned, the top-level returned value is a - # struct. - return struct( - # One key = value entry for each legacy provider. - legacy_info = legacy_data, - ... - # Additional modern providers: - providers = [modern_data, ...]) -``` - -If `dep` is the resulting `Target` object for an instance of this rule, the -providers and their contents can be retrieved as `dep.legacy_info.x` and -`dep[MyInfo].y`. - -In addition to `providers`, the returned struct can also take several other -fields that have special meaning (and thus do not create a corresponding legacy -provider): - -* The fields `files`, `runfiles`, `data_runfiles`, `default_runfiles`, and - `executable` correspond to the same-named fields of - [`DefaultInfo`](/rules/lib/providers/DefaultInfo). It is not allowed to specify any of - these fields while also returning a `DefaultInfo` provider. - -* The field `output_groups` takes a struct value and corresponds to an - [`OutputGroupInfo`](/rules/lib/providers/OutputGroupInfo). 
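-
-For example, a minimal sketch of these special fields in the legacy style (the
-output names are hypothetical):
-
-```python
-def _legacy_impl(ctx):
-    ...
-    return struct(
-        # Corresponds to DefaultInfo(files = ...).
-        files = depset([main_output]),
-        # Corresponds to OutputGroupInfo(debug_files = ...).
-        output_groups = struct(
-            debug_files = depset([debug_output]),
-        ),
-    )
-```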
- -In [`provides`](/rules/lib/globals/bzl#rule.provides) declarations of rules, and in -[`providers`](/rules/lib/toplevel/attr#label_list.providers) declarations of dependency -attributes, legacy providers are passed in as strings and modern providers are -passed in by their `*Info` symbol. Be sure to change from strings to symbols -when migrating. For complex or large rule sets where it is difficult to update -all rules atomically, you may have an easier time if you follow this sequence of -steps: - -1. Modify the rules that produce the legacy provider to produce both the legacy - and modern providers, using the above syntax. For rules that declare they - return the legacy provider, update that declaration to include both the - legacy and modern providers. - -2. Modify the rules that consume the legacy provider to instead consume the - modern provider. If any attribute declarations require the legacy provider, - also update them to instead require the modern provider. Optionally, you can - interleave this work with step 1 by having consumers accept/require either - provider: Test for the presence of the legacy provider using - `hasattr(target, 'foo')`, or the new provider using `FooInfo in target`. - -3. Fully remove the legacy provider from all rules. diff --git a/7.6.1/extending/toolchains.mdx b/7.6.1/extending/toolchains.mdx deleted file mode 100644 index 972fc94..0000000 --- a/7.6.1/extending/toolchains.mdx +++ /dev/null @@ -1,597 +0,0 @@ ---- -title: 'Toolchains' ---- - - - -This page describes the toolchain framework, which is a way for rule authors to -decouple their rule logic from platform-based selection of tools. It is -recommended to read the [rules](/extending/rules) and [platforms](/extending/platforms) -pages before continuing. This page covers why toolchains are needed, how to -define and use them, and how Bazel selects an appropriate toolchain based on -platform constraints. - -## Motivation - -Let's first look at the problem toolchains are designed to solve. Suppose you -are writing rules to support the "bar" programming language. Your `bar_binary` -rule would compile `*.bar` files using the `barc` compiler, a tool that itself -is built as another target in your workspace. Since users who write `bar_binary` -targets shouldn't have to specify a dependency on the compiler, you make it an -implicit dependency by adding it to the rule definition as a private attribute. - -```python -bar_binary = rule( - implementation = _bar_binary_impl, - attrs = { - "srcs": attr.label_list(allow_files = True), - ... - "_compiler": attr.label( - default = "//bar_tools:barc_linux", # the compiler running on linux - providers = [BarcInfo], - ), - }, -) -``` - -`//bar_tools:barc_linux` is now a dependency of every `bar_binary` target, so -it'll be built before any `bar_binary` target. It can be accessed by the rule's -implementation function just like any other attribute: - -```python -BarcInfo = provider( - doc = "Information about how to invoke the barc compiler.", - # In the real world, compiler_path and system_lib might hold File objects, - # but for simplicity they are strings for this example. arch_flags is a list - # of strings. - fields = ["compiler_path", "system_lib", "arch_flags"], -) - -def _bar_binary_impl(ctx): - ... - info = ctx.attr._compiler[BarcInfo] - command = "%s -l %s %s" % ( - info.compiler_path, - info.system_lib, - " ".join(info.arch_flags), - ) - ... 
-``` - -The issue here is that the compiler's label is hardcoded into `bar_binary`, yet -different targets may need different compilers depending on what platform they -are being built for and what platform they are being built on -- called the -*target platform* and *execution platform*, respectively. Furthermore, the rule -author does not necessarily even know all the available tools and platforms, so -it is not feasible to hardcode them in the rule's definition. - -A less-than-ideal solution would be to shift the burden onto users, by making -the `_compiler` attribute non-private. Then individual targets could be -hardcoded to build for one platform or another. - -```python -bar_binary( - name = "myprog_on_linux", - srcs = ["mysrc.bar"], - compiler = "//bar_tools:barc_linux", -) - -bar_binary( - name = "myprog_on_windows", - srcs = ["mysrc.bar"], - compiler = "//bar_tools:barc_windows", -) -``` - -You can improve on this solution by using `select` to choose the `compiler` -[based on the platform](/docs/configurable-attributes): - -```python -config_setting( - name = "on_linux", - constraint_values = [ - "@platforms//os:linux", - ], -) - -config_setting( - name = "on_windows", - constraint_values = [ - "@platforms//os:windows", - ], -) - -bar_binary( - name = "myprog", - srcs = ["mysrc.bar"], - compiler = select({ - ":on_linux": "//bar_tools:barc_linux", - ":on_windows": "//bar_tools:barc_windows", - }), -) -``` - -But this is tedious and a bit much to ask of every single `bar_binary` user. -If this style is not used consistently throughout the workspace, it leads to -builds that work fine on a single platform but fail when extended to -multi-platform scenarios. It also does not address the problem of adding support -for new platforms and compilers without modifying existing rules or targets. - -The toolchain framework solves this problem by adding an extra level of -indirection. Essentially, you declare that your rule has an abstract dependency -on *some* member of a family of targets (a toolchain type), and Bazel -automatically resolves this to a particular target (a toolchain) based on the -applicable platform constraints. Neither the rule author nor the target author -need know the complete set of available platforms and toolchains. - -## Writing rules that use toolchains - -Under the toolchain framework, instead of having rules depend directly on tools, -they instead depend on *toolchain types*. A toolchain type is a simple target -that represents a class of tools that serve the same role for different -platforms. For instance, you can declare a type that represents the bar -compiler: - -```python -# By convention, toolchain_type targets are named "toolchain_type" and -# distinguished by their package path. So the full path for this would be -# //bar_tools:toolchain_type. -toolchain_type(name = "toolchain_type") -``` - -The rule definition in the previous section is modified so that instead of -taking in the compiler as an attribute, it declares that it consumes a -`//bar_tools:toolchain_type` toolchain. - -```python -bar_binary = rule( - implementation = _bar_binary_impl, - attrs = { - "srcs": attr.label_list(allow_files = True), - ... - # No `_compiler` attribute anymore. - }, - toolchains = ["//bar_tools:toolchain_type"], -) -``` - -The implementation function now accesses this dependency under `ctx.toolchains` -instead of `ctx.attr`, using the toolchain type as the key. - -```python -def _bar_binary_impl(ctx): - ... 
-    info = ctx.toolchains["//bar_tools:toolchain_type"].barcinfo
-    # The rest is unchanged.
-    command = "%s -l %s %s" % (
-        info.compiler_path,
-        info.system_lib,
-        " ".join(info.arch_flags),
-    )
-    ...
-```
-
-`ctx.toolchains["//bar_tools:toolchain_type"]` returns the
-[`ToolchainInfo` provider](/rules/lib/toplevel/platform_common#ToolchainInfo)
-of whatever target Bazel resolved the toolchain dependency to. The fields of the
-`ToolchainInfo` object are set by the underlying tool's rule; in the next
-section, this rule is defined such that there is a `barcinfo` field that wraps
-a `BarcInfo` object.
-
-Bazel's procedure for resolving toolchains to targets is described
-[below](#toolchain-resolution). Only the resolved toolchain target is actually
-made a dependency of the `bar_binary` target, not the whole space of candidate
-toolchains.
-
-### Mandatory and Optional Toolchains
-
-By default, when a rule expresses a toolchain type dependency using a bare label
-(as shown above), the toolchain type is considered to be **mandatory**. If Bazel
-is unable to find a matching toolchain (see
-[Toolchain resolution](#toolchain-resolution) below) for a mandatory toolchain
-type, this is an error and analysis halts.
-
-It is possible instead to declare an **optional** toolchain type dependency, as
-follows:
-
-```python
-bar_binary = rule(
-    ...
-    toolchains = [
-        config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = False),
-    ],
-)
-```
-
-When an optional toolchain type cannot be resolved, analysis continues, and the
-result of `ctx.toolchains["//bar_tools:toolchain_type"]` is `None`.
-
-The [`config_common.toolchain_type`](/rules/lib/toplevel/config_common#toolchain_type)
-function defaults to mandatory.
-
-The following forms can be used:
-
-- Mandatory toolchain types:
-  - `toolchains = ["//bar_tools:toolchain_type"]`
-  - `toolchains = [config_common.toolchain_type("//bar_tools:toolchain_type")]`
-  - `toolchains = [config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = True)]`
-- Optional toolchain types:
-  - `toolchains = [config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = False)]`
-
-You can also mix and match forms in the same rule:
-
-```python
-bar_binary = rule(
-    ...
-    toolchains = [
-        "//foo_tools:toolchain_type",
-        config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = False),
-    ],
-)
-```
-
-However, if the same toolchain type is listed multiple times, the most strict
-form wins, where mandatory is stricter than optional.
-
-### Writing aspects that use toolchains
-
-Aspects have access to the same toolchain API as rules: you can define required
-toolchain types, access toolchains via the context, and use them to generate new
-actions.
-
-```python
-bar_aspect = aspect(
-    implementation = _bar_aspect_impl,
-    attrs = {},
-    toolchains = ['//bar_tools:toolchain_type'],
-)
-
-def _bar_aspect_impl(target, ctx):
-    toolchain = ctx.toolchains['//bar_tools:toolchain_type']
-    # Use the toolchain provider like in a rule.
-    return []
-```
-
-## Defining toolchains
-
-To define some toolchains for a given toolchain type, you need three things:
-
-1. A language-specific rule representing the kind of tool or tool suite. By
-   convention this rule's name is suffixed with "\_toolchain".
-
-   1. **Note:** The `\_toolchain` rule cannot create any build actions.
-      Rather, it collects artifacts from other rules and forwards them to the
-      rule that uses the toolchain. That rule is responsible for creating all
-      build actions.
-
-2. Several targets of this rule type, representing versions of the tool or tool
-   suite for different platforms.
-
-3. For each such target, an associated target of the generic
-   [`toolchain`](/reference/be/platforms-and-toolchains#toolchain)
-   rule, to provide metadata used by the toolchain framework. This `toolchain`
-   target also refers to the `toolchain_type` associated with this toolchain.
-   This means that a given `_toolchain` rule could be associated with any
-   `toolchain_type`, and that the rule only becomes associated with a
-   `toolchain_type` through a `toolchain` instance that uses it.
-
-For our running example, here's a definition for a `bar_toolchain` rule. Our
-example has only a compiler, but other tools such as a linker could also be
-grouped underneath it.
-
-```python
-def _bar_toolchain_impl(ctx):
-    toolchain_info = platform_common.ToolchainInfo(
-        barcinfo = BarcInfo(
-            compiler_path = ctx.attr.compiler_path,
-            system_lib = ctx.attr.system_lib,
-            arch_flags = ctx.attr.arch_flags,
-        ),
-    )
-    return [toolchain_info]
-
-bar_toolchain = rule(
-    implementation = _bar_toolchain_impl,
-    attrs = {
-        "compiler_path": attr.string(),
-        "system_lib": attr.string(),
-        "arch_flags": attr.string_list(),
-    },
-)
-```
-
-The rule must return a `ToolchainInfo` provider, which becomes the object that
-the consuming rule retrieves using `ctx.toolchains` and the label of the
-toolchain type. `ToolchainInfo`, like `struct`, can hold arbitrary field-value
-pairs. The specification of exactly what fields are added to the `ToolchainInfo`
-should be clearly documented at the toolchain type. In this example, the values
-are returned wrapped in a `BarcInfo` object to reuse the schema defined above;
-this style may be useful for validation and code reuse.
-
-Now you can define targets for specific `barc` compilers.
-
-```python
-bar_toolchain(
-    name = "barc_linux",
-    arch_flags = [
-        "--arch=Linux",
-        "--debug_everything",
-    ],
-    compiler_path = "/path/to/barc/on/linux",
-    system_lib = "/usr/lib/libbarc.so",
-)
-
-bar_toolchain(
-    name = "barc_windows",
-    arch_flags = [
-        "--arch=Windows",
-        # Different flags, no debug support on windows.
-    ],
-    compiler_path = "C:\\path\\on\\windows\\barc.exe",
-    system_lib = "C:\\path\\on\\windows\\barclib.dll",
-)
-```
-
-Finally, you create `toolchain` definitions for the two `bar_toolchain` targets.
-These definitions link the language-specific targets to the toolchain type and
-provide the constraint information that tells Bazel when the toolchain is
-appropriate for a given platform.
-
-```python
-toolchain(
-    name = "barc_linux_toolchain",
-    exec_compatible_with = [
-        "@platforms//os:linux",
-        "@platforms//cpu:x86_64",
-    ],
-    target_compatible_with = [
-        "@platforms//os:linux",
-        "@platforms//cpu:x86_64",
-    ],
-    toolchain = ":barc_linux",
-    toolchain_type = ":toolchain_type",
-)
-
-toolchain(
-    name = "barc_windows_toolchain",
-    exec_compatible_with = [
-        "@platforms//os:windows",
-        "@platforms//cpu:x86_64",
-    ],
-    target_compatible_with = [
-        "@platforms//os:windows",
-        "@platforms//cpu:x86_64",
-    ],
-    toolchain = ":barc_windows",
-    toolchain_type = ":toolchain_type",
-)
-```
-
-The use of relative path syntax above suggests these definitions are all in the
-same package, but there's no reason the toolchain type, language-specific
-toolchain targets, and `toolchain` definition targets can't all be in separate
-packages.
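-
-For example, a sketch of the same `toolchain` definition referencing the other
-pieces by absolute label from a separate package (the package paths here are
-hypothetical):
-
-```python
-# //bar_toolchain_defs/BUILD: the definition lives apart from the type and tools.
-toolchain(
-    name = "barc_linux_toolchain",
-    exec_compatible_with = ["@platforms//os:linux"],
-    target_compatible_with = ["@platforms//os:linux"],
-    toolchain = "//bar_tools:barc_linux",
-    toolchain_type = "//bar_types:toolchain_type",
-)
-```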
-
-See the [`go_toolchain`](https://github.com/bazelbuild/rules_go/blob/master/go/private/go_toolchain.bzl)
-for a real-world example.
-
-### Toolchains and configurations
-
-An important question for rule authors is, when a `bar_toolchain` target is
-analyzed, what [configuration](/reference/glossary#configuration) does it see, and what transitions
-should be used for dependencies? The example above uses string attributes, but
-what would happen for a more complicated toolchain that depends on other targets
-in the Bazel repository?
-
-Let's see a more complex version of `bar_toolchain`:
-
-```python
-def _bar_toolchain_impl(ctx):
-    # The implementation is mostly the same as above, so it is omitted here.
-    pass
-
-bar_toolchain = rule(
-    implementation = _bar_toolchain_impl,
-    attrs = {
-        "compiler": attr.label(
-            executable = True,
-            mandatory = True,
-            cfg = "exec",
-        ),
-        "system_lib": attr.label(
-            mandatory = True,
-            cfg = "target",
-        ),
-        "arch_flags": attr.string_list(),
-    },
-)
-```
-
-The use of [`attr.label`](/rules/lib/toplevel/attr#label) is the same as for a standard rule,
-but the meaning of the `cfg` parameter is slightly different.
-
-The dependency from a target (called the "parent") to a toolchain via toolchain
-resolution uses a special configuration transition called the "toolchain
-transition". The toolchain transition keeps the configuration the same, except
-that it forces the execution platform to be the same for the toolchain as for
-the parent (otherwise, toolchain resolution for the toolchain could pick any
-execution platform, and wouldn't necessarily be the same as for the parent). This
-allows any `exec` dependencies of the toolchain to also be executable for the
-parent's build actions. Any of the toolchain's dependencies which use `cfg =
-"target"` (or which don't specify `cfg`, since "target" is the default) are
-built for the same target platform as the parent. This allows toolchain rules to
-contribute both libraries (the `system_lib` attribute above) and tools (the
-`compiler` attribute) to the build rules which need them. The system libraries
-are linked into the final artifact, and so need to be built for the same
-platform, whereas the compiler is a tool invoked during the build, and needs to
-be able to run on the execution platform.
-
-## Registering and building with toolchains
-
-At this point all the building blocks are assembled, and you just need to make
-the toolchains available to Bazel's resolution procedure. This is done by
-registering the toolchain, either in a `MODULE.bazel` file using
-`register_toolchains()`, or by passing the toolchains' labels on the command
-line using the `--extra_toolchains` flag.
-
-```python
-register_toolchains(
-    "//bar_tools:barc_linux_toolchain",
-    "//bar_tools:barc_windows_toolchain",
-    # Target patterns are also permitted, so you could have also written:
-    # "//bar_tools:all",
-    # or even
-    # "//bar_tools/...",
-)
-```
-
-When using target patterns to register toolchains, the order in which the
-individual toolchains are registered is determined by the following rules:
-
-* The toolchains defined in a subpackage of a package are registered before the
-  toolchains defined in the package itself.
-* Within a package, toolchains are registered in the lexicographical order of
-  their names.
-
-Now when you build a target that depends on a toolchain type, an appropriate
-toolchain will be selected based on the target and execution platforms.
-
-```python
-# my_pkg/BUILD
-
-platform(
-    name = "my_target_platform",
-    constraint_values = [
-        "@platforms//os:linux",
-    ],
-)
-
-bar_binary(
-    name = "my_bar_binary",
-    ...
-)
-```
-
-```sh
-bazel build //my_pkg:my_bar_binary --platforms=//my_pkg:my_target_platform
-```
-
-Bazel will see that `//my_pkg:my_bar_binary` is being built with a platform that
-has `@platforms//os:linux` and therefore resolve the
-`//bar_tools:toolchain_type` reference to `//bar_tools:barc_linux_toolchain`.
-This will end up building `//bar_tools:barc_linux` but not
-`//bar_tools:barc_windows`.
-
-## Toolchain resolution
-
-Note: [Some Bazel rules](/concepts/platforms#status) do not yet support
-toolchain resolution.
-
-For each target that uses toolchains, Bazel's toolchain resolution procedure
-determines the target's concrete toolchain dependencies. The procedure takes as
-input a set of required toolchain types, the target platform, the list of
-available execution platforms, and the list of available toolchains. Its outputs
-are a selected toolchain for each toolchain type as well as a selected execution
-platform for the current target.
-
-The available execution platforms and toolchains are gathered from the
-external dependency graph via
-[`register_execution_platforms`](/rules/lib/globals/module#register_execution_platforms)
-and
-[`register_toolchains`](/rules/lib/globals/module#register_toolchains) calls in
-`MODULE.bazel` files.
-Additional execution platforms and toolchains may also be specified on the
-command line via
-[`--extra_execution_platforms`](/reference/command-line-reference#flag--extra_execution_platforms)
-and
-[`--extra_toolchains`](/reference/command-line-reference#flag--extra_toolchains).
-The host platform is automatically included as an available execution platform.
-Available platforms and toolchains are tracked as ordered lists for determinism,
-with preference given to earlier items in the list.
-
-The set of available toolchains, in priority order, is created from
-`--extra_toolchains` and `register_toolchains`:
-
-1. Toolchains registered using `--extra_toolchains` are added first. (Within
-   these, the **last** toolchain has highest priority.)
-2. Toolchains registered using `register_toolchains` in the transitive external
-   dependency graph, in the following order: (Within these, the **first**
-   mentioned toolchain has highest priority.)
-   1. Toolchains registered by the root module (as in, the `MODULE.bazel` at the
-      workspace root);
-   2. Toolchains registered in the user's `WORKSPACE` file, including in any
-      macros invoked from there;
-   3. Toolchains registered by non-root modules (as in, dependencies specified by
-      the root module, and their dependencies, and so forth);
-   4. Toolchains registered in the "WORKSPACE suffix"; this is only used by
-      certain native rules bundled with the Bazel installation.
-
-**NOTE:** [Pseudo-targets like `:all`, `:*`, and
-`/...`](/run/build#specifying-build-targets) are ordered by Bazel's package
-loading mechanism, which uses a lexicographic ordering.
-
-The resolution steps are as follows.
-
-1. A `target_compatible_with` or `exec_compatible_with` clause *matches* a
-   platform if, for each `constraint_value` in its list, the platform also has
-   that `constraint_value` (either explicitly or as a default).
-
-   If the platform has `constraint_value`s from `constraint_setting`s not
-   referenced by the clause, these do not affect matching.
-
-1. If the target being built specifies the
-   [`exec_compatible_with` attribute](/reference/be/common-definitions#common.exec_compatible_with)
-   (or its rule definition specifies the
-   [`exec_compatible_with` argument](/rules/lib/globals/bzl#rule.exec_compatible_with)),
-   the list of available execution platforms is filtered to remove
-   any that do not match the execution constraints.
-
-1. For each available execution platform, you associate each toolchain type with
-   the first available toolchain, if any, that is compatible with this execution
-   platform and the target platform.
-
-1. Any execution platform that failed to find a compatible mandatory toolchain
-   for one of its toolchain types is ruled out. Of the remaining platforms, the
-   first one becomes the current target's execution platform, and its associated
-   toolchains (if any) become dependencies of the target.
-
-The chosen execution platform is used to run all actions that the target
-generates.
-
-In cases where the same target can be built in multiple configurations (such as
-for different CPUs) within the same build, the resolution procedure is applied
-independently to each version of the target.
-
-If the rule uses [execution groups](/extending/exec-groups), each execution
-group performs toolchain resolution separately, and each has its own execution
-platform and toolchains.
-
-## Debugging toolchains
-
-If you are adding toolchain support to an existing rule, use the
-`--toolchain_resolution_debug=regex` flag. During toolchain resolution, the flag
-provides verbose output for toolchain types or target names that match the
-regex. You can use `.*` to output all information. Bazel will output names of
-toolchains it checks and skips during the resolution process.
-
-If you'd like to see which [`cquery`](/query/cquery) dependencies are from toolchain
-resolution, use `cquery`'s [`--transitions`](/query/cquery#transitions) flag:
-
-```
-# Find all direct dependencies of //cc:my_cc_lib. This includes explicitly
-# declared dependencies, implicit dependencies, and toolchain dependencies.
-$ bazel cquery 'deps(//cc:my_cc_lib, 1)'
-//cc:my_cc_lib (96d6638)
-@bazel_tools//tools/cpp:toolchain (96d6638)
-@bazel_tools//tools/def_parser:def_parser (HOST)
-//cc:my_cc_dep (96d6638)
-@local_config_platform//:host (96d6638)
-@bazel_tools//tools/cpp:toolchain_type (96d6638)
-//:default_host_platform (96d6638)
-@local_config_cc//:cc-compiler-k8 (HOST)
-//cc:my_cc_lib.cc (null)
-@bazel_tools//tools/cpp:grep-includes (HOST)
-
-# Which of these are from toolchain resolution?
-$ bazel cquery 'deps(//cc:my_cc_lib, 1)' --transitions=lite | grep "toolchain dependency"
-  [toolchain dependency]#@local_config_cc//:cc-compiler-k8#HostTransition -> b6df211
-```
diff --git a/7.6.1/external/advanced.mdx b/7.6.1/external/advanced.mdx
deleted file mode 100644
index 26ece4d..0000000
--- a/7.6.1/external/advanced.mdx
+++ /dev/null
@@ -1,183 +0,0 @@
----
-title: 'Advanced topics on external dependencies'
----
-
-## Shadowing dependencies in WORKSPACE
-
-Note: This section applies to the [WORKSPACE
-system](/external/overview#workspace-system) only. For
-[Bzlmod](/external/overview#bzlmod), use a [multiple-version
-override](/external/module#multiple-version_override).
-
-Whenever possible, have a single version policy in your project, which is
-required for dependencies that you compile against and end up in your final
-binary. For other cases, you can shadow dependencies:
-
-myproject/WORKSPACE
-
-```python
-workspace(name = "myproject")
-
-local_repository(
-    name = "A",
-    path = "../A",
-)
-local_repository(
-    name = "B",
-    path = "../B",
-)
-```
-
-A/WORKSPACE
-
-```python
-workspace(name = "A")
-
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-http_archive(
-    name = "testrunner",
-    urls = ["https://github.com/testrunner/v1.zip"],
-    sha256 = "...",
-)
-```
-
-B/WORKSPACE
-
-```python
-workspace(name = "B")
-
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-http_archive(
-    name = "testrunner",
-    urls = ["https://github.com/testrunner/v2.zip"],
-    sha256 = "...",
-)
-```
-
-Both dependencies `A` and `B` depend on different versions of `testrunner`.
-Include both in `myproject` without conflict by giving them distinct names in
-`myproject/WORKSPACE`:
-
-```python
-workspace(name = "myproject")
-
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-http_archive(
-    name = "testrunner-v1",
-    urls = ["https://github.com/testrunner/v1.zip"],
-    sha256 = "...",
-)
-http_archive(
-    name = "testrunner-v2",
-    urls = ["https://github.com/testrunner/v2.zip"],
-    sha256 = "...",
-)
-local_repository(
-    name = "A",
-    path = "../A",
-    repo_mapping = {"@testrunner" : "@testrunner-v1"},
-)
-local_repository(
-    name = "B",
-    path = "../B",
-    repo_mapping = {"@testrunner" : "@testrunner-v2"},
-)
-```
-
-You can also use this mechanism to join diamonds. For example, if `A` and `B`
-have the same dependency but call it by different names, join those dependencies
-in `myproject/WORKSPACE`.
-
-## Overriding repositories from the command line
-
-To override a declared repository with a local repository from the command line,
-use the
-[`--override_repository`](/reference/command-line-reference#flag--override_repository)
-flag. Using this flag changes the contents of external repositories without
-changing your source code.
-
-For example, to override `@foo` to the local directory `/path/to/local/foo`,
-pass the `--override_repository=foo=/path/to/local/foo` flag.
-
-Use cases include:
-
-* Debugging issues. For example, to override an `http_archive` repository to a
-  local directory where you can make changes more easily.
-* Vendoring. If you are in an environment where you cannot make network calls,
-  override the network-based repository rules to point to local directories
-  instead.
-
-Note: With [Bzlmod](/external/overview#bzlmod), remember to use canonical repo
-names here. Alternatively, use the
-[`--override_module`](/reference/command-line-reference#flag--override_module)
-flag to override a module to a local directory, similar to the
-[`local_path_override`](/rules/lib/globals/module#local_path_override) directive in
-`MODULE.bazel`.
-
-## Using proxies
-
-Bazel picks up proxy addresses from the `HTTPS_PROXY` and `HTTP_PROXY`
-environment variables and uses these to download `HTTP` and `HTTPS` files (if
-specified).
-
-## Support for IPv6
-
-On IPv6-only machines, Bazel can download dependencies with no changes. However,
-on dual-stack IPv4/IPv6 machines Bazel follows the same convention as Java,
-preferring IPv4 if enabled. In some situations, for example when the IPv4
-network cannot resolve/reach external addresses, this can cause `Network
-unreachable` exceptions and build failures. In these cases, you can override
-Bazel's behavior to prefer IPv6 by using the
-[`java.net.preferIPv6Addresses=true` system
-property](https://docs.oracle.com/javase/8/docs/api/java/net/doc-files/net-properties.html).
-Specifically:
-
-* Use the `--host_jvm_args=-Djava.net.preferIPv6Addresses=true` [startup
-  option](/docs/user-manual#startup-options), for example by adding the
-  following line in your [`.bazelrc` file](/run/bazelrc):
-
-  `startup --host_jvm_args=-Djava.net.preferIPv6Addresses=true`
-
-* When running Java build targets that need to connect to the internet (such
-  as for integration tests), use the
-  `--jvmopt=-Djava.net.preferIPv6Addresses=true` [tool
-  flag](/docs/user-manual#jvmopt). For example, include in your [`.bazelrc`
-  file](/run/bazelrc):
-
-  `build --jvmopt=-Djava.net.preferIPv6Addresses=true`
-
-* If you are using [`rules_jvm_external`](https://github.com/bazelbuild/rules_jvm_external)
-  for dependency version resolution, also add
-  `-Djava.net.preferIPv6Addresses=true` to the `COURSIER_OPTS` environment
-  variable to [provide JVM options for
-  Coursier](https://github.com/bazelbuild/rules_jvm_external#provide-jvm-options-for-coursier-with-coursier_opts).
-
-## Offline builds
-
-Sometimes you may wish to run a build offline, such as when traveling on an
-airplane. For such simple use cases, prefetch the needed repositories with
-`bazel fetch` or `bazel sync`. To disable fetching further repositories during
-the build, use the option `--nofetch`.
-
-For true offline builds, where a different entity supplies all needed files,
-Bazel supports the option `--distdir`. This flag tells Bazel to look first into
-the directories specified by that option when a repository rule asks Bazel to
-fetch a file with [`ctx.download`](/rules/lib/builtins/repository_ctx#download) or
-[`ctx.download_and_extract`](/rules/lib/builtins/repository_ctx#download_and_extract). When
-a hash of the needed file is provided, Bazel looks for a file matching the
-basename of the first URL and uses the local copy if the hash matches.
-
-Bazel itself uses this technique to bootstrap offline from the [distribution
-artifact](https://github.com/bazelbuild/bazel-website/blob/master/designs/_posts/2016-10-11-distribution-artifact.md).
-It does so by [collecting all the needed external
-dependencies](https://github.com/bazelbuild/bazel/blob/5cfa0303d6ac3b5bd031ff60272ce80a704af8c2/WORKSPACE#L116)
-in an internal
-[`distdir_tar`](https://github.com/bazelbuild/bazel/blob/5cfa0303d6ac3b5bd031ff60272ce80a704af8c2/distdir.bzl#L44).
-
-Bazel allows execution of arbitrary commands in repository rules without knowing
-if they call out to the network, and so cannot enforce fully offline builds. To
-test if a build works correctly offline, manually block off the network (as
-Bazel does in its [bootstrap
-test](https://cs.opensource.google/bazel/bazel/+/master:src/test/shell/bazel/BUILD;l=1073;drc=88c426e73cc0eb0a41c0d7995e36acd94e7c9a48)).
diff --git a/7.6.1/external/lockfile.mdx b/7.6.1/external/lockfile.mdx
deleted file mode 100644
index f2a75b2..0000000
--- a/7.6.1/external/lockfile.mdx
+++ /dev/null
@@ -1,277 +0,0 @@
----
-keywords: product:Bazel,lockfile,Bzlmod
----
-title: 'Bazel Lockfile'
----
-
-The lockfile feature in Bazel enables the recording of specific versions or
-dependencies of software libraries or packages required by a project. It
-achieves this by storing the result of module resolution and extension
-evaluation. 
The lockfile promotes reproducible builds, ensuring consistent -development environments. Additionally, it enhances build efficiency by allowing -Bazel to skip the parts of the resolution process that are unaffected by changes -in project dependencies. Furthermore, the lockfile improves stability by -preventing unexpected updates or breaking changes in external libraries, thereby -reducing the risk of introducing bugs. - -## Lockfile Generation - -The lockfile is generated under the workspace root with the name -`MODULE.bazel.lock`. It is created or updated during the build process, -specifically after module resolution and extension evaluation. Importantly, it -only includes dependencies that are included in the current invocation of the -build. - -When changes occur in the project that affect its dependencies, the lockfile is -automatically updated to reflect the new state. This ensures that the lockfile -remains focused on the specific set of dependencies required for the current -build, providing an accurate representation of the project's resolved -dependencies. - -## Lockfile Usage - -The lockfile can be controlled by the flag -[`--lockfile_mode`](/reference/command-line-reference#flag--lockfile_mode) to -customize the behavior of Bazel when the project state differs from the -lockfile. The available modes are: - -* `update` (Default): Use the information that is present in the lockfile to - skip downloads of known registry files and to avoid re-evaluating extensions - whose results are still up-to-date. If information is missing, it will - be added to the lockfile. In this mode, Bazel also avoids refreshing - mutable information, such as yanked versions, for dependencies that haven't - changed. -* `refresh`: Like `update`, but mutable information is always refreshed when - switching to this mode and roughly every hour while in this mode. -* `error`: Like `update`, but if any information is missing or out-of-date, - Bazel will fail with an error. This mode never changes the lockfile or - performs network requests during resolution. Module extensions that marked - themselves as `reproducible` may still perform network requests, but are - expected to always produce the same result. -* `off`: The lockfile is neither checked nor updated. - -## Lockfile Benefits - -The lockfile offers several benefits and can be utilized in various ways: - -- **Reproducible builds.** By capturing the specific versions or dependencies - of software libraries, the lockfile ensures that builds are reproducible - across different environments and over time. Developers can rely on - consistent and predictable results when building their projects. - -- **Fast incremental resolutions.** The lockfile enables Bazel to avoid - downloading registry files that were already used in a previous build. - This significantly improves build efficiency, especially in scenarios where - resolution can be time-consuming. - -- **Stability and risk reduction.** The lockfile helps maintain stability by - preventing unexpected updates or breaking changes in external libraries. By - locking the dependencies to specific versions, the risk of introducing bugs - due to incompatible or untested updates is reduced. - -## Lockfile Contents - -The lockfile contains all the necessary information to determine whether the -project state has changed. It also includes the result of building the project -in the current state. The lockfile consists of two main parts: - -1. Hashes of all remote files that are inputs to module resolution. -2. 
For each module extension, the lockfile includes inputs that affect it,
-   represented by `bzlTransitiveDigest`, `usagesDigest`, and other fields, as
-   well as the output of running that extension, referred to as
-   `generatedRepoSpecs`.
-
-Here is an example that demonstrates the structure of the lockfile, along with
-explanations for each section:
-
-```json
-{
-  "lockFileVersion": 10,
-  "registryFileHashes": {
-    "https://bcr.bazel.build/bazel_registry.json": "8a28e4af...5d5b3497",
-    "https://bcr.bazel.build/modules/foo/1.0/MODULE.bazel": "7cd0312e...5c96ace2",
-    "https://bcr.bazel.build/modules/foo/2.0/MODULE.bazel": "70390338...9fc57589",
-    "https://bcr.bazel.build/modules/foo/2.0/source.json": "7e3a9adf...170d94ad",
-    "https://registry.mycorp.com/modules/foo/1.0/MODULE.bazel": "not found",
-    ...
-  },
-  "selectedYankedVersions": {
-    "foo@2.0": "Yanked for demo purposes"
-  },
-  "moduleExtensions": {
-    "//:extension.bzl%lockfile_ext": {
-      "general": {
-        "bzlTransitiveDigest": "oWDzxG/aLnyY6Ubrfy....+Jp6maQvEPxn0pBM=",
-        "usagesDigest": "aLmqbvowmHkkBPve05yyDNGN7oh7QE9kBADr3QIZTZs=",
-        ...,
-        "generatedRepoSpecs": {
-          "hello": {
-            "bzlFile": "@@//:extension.bzl",
-            ...
-          }
-        }
-      }
-    },
-    "//:extension.bzl%lockfile_ext2": {
-      "os:macos": {
-        "bzlTransitiveDigest": "oWDzxG/aLnyY6Ubrfy....+Jp6maQvEPxn0pBM=",
-        "usagesDigest": "aLmqbvowmHkkBPve05y....yDNGN7oh7r3QIZTZs=",
-        ...,
-        "generatedRepoSpecs": {
-          "hello": {
-            "bzlFile": "@@//:extension.bzl",
-            ...
-          }
-        }
-      },
-      "os:linux": {
-        "bzlTransitiveDigest": "eWDzxG/aLsyY3Ubrto....+Jp4maQvEPxn0pLK=",
-        "usagesDigest": "aLmqbvowmHkkBPve05y....yDNGN7oh7r3QIZTZs=",
-        ...,
-        "generatedRepoSpecs": {
-          "hello": {
-            "bzlFile": "@@//:extension.bzl",
-            ...
-          }
-        }
-      }
-    }
-  }
-}
-```
-
-### Registry File Hashes
-
-The `registryFileHashes` section contains the hashes of all files from
-remote registries accessed during module resolution. Since the resolution
-algorithm is fully deterministic when given the same inputs and all remote
-inputs are hashed, this ensures a fully reproducible resolution result while
-avoiding excessive duplication of remote information in the lockfile. Note that
-this also requires recording when a particular registry didn't contain a certain
-module, but a registry with lower precedence did (see the "not found" entry in
-the example). This inherently mutable information can be updated via
-`bazel mod deps --lockfile_mode=refresh`.
-
-Bazel uses the hashes from the lockfile to look up registry files in the
-repository cache before downloading them, which speeds up subsequent
-resolutions.
-
-### Selected Yanked Versions
-
-The `selectedYankedVersions` section contains the yanked versions of modules
-that were selected by module resolution. Since this usually results in an error
-when trying to build, this section is only non-empty when yanked versions are
-explicitly allowed via `--allow_yanked_versions` or
-`BZLMOD_ALLOW_YANKED_VERSIONS`.
-
-This field is needed since, compared to module files, yanked version information
-is inherently mutable and thus can't be referenced by a hash. This information
-can be updated via `bazel mod deps --lockfile_mode=refresh`.
-
-### Module Extensions
-
-The `moduleExtensions` section is a map that includes only the extensions used
-in the current invocation or previously invoked, while excluding any extensions
-that are no longer utilized. In other words, if an extension is not being used
-anymore across the dependency graph, it is removed from the `moduleExtensions`
-map.
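Since the lockfile is plain JSON, you can inspect this section directly. For example, a `jq` query like the following lists the extensions currently recorded (an illustrative convenience, not an official interface):

```bash
# List the module extensions currently recorded in the lockfile.
jq '.moduleExtensions | keys' MODULE.bazel.lock
```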
- -If an extension is independent of the operating system or architecture type, -this section features only a single "general" entry. Otherwise, multiple -entries are included, named after the OS, architecture, or both, with each -corresponding to the result of evaluating the extension on those specifics. - -Each entry in the extension map corresponds to a used extension and is -identified by its containing file and name. The corresponding value for each -entry contains the relevant information associated with that extension: - -1. The `bzlTransitiveDigest` is the digest of the extension implementation - and the .bzl files transitively loaded by it. -2. The `usagesDigest` is the digest of the _usages_ of the extension in the - dependency graph, which includes all tags. -3. Further unspecified fields that track other inputs to the extension, - such as contents of files or directories it reads or environment - variables it uses. -4. The `generatedRepoSpecs` encode the repositories created by the - extension with the current input. -5. The optional `moduleExtensionMetadata` field contains metadata provided by - the extension such as whether certain repositories it created should be - imported via `use_repo` by the root module. This information powers the - `bazel mod tidy` command. - -Module extensions can opt out of being included in the lockfile by setting the -returning metadata with `reproducible = True`. By doing so, they promise that -they will always create the same repositories when given the same inputs. - -## Best Practices - -To maximize the benefits of the lockfile feature, consider the following best -practices: - -* Regularly update the lockfile to reflect changes in project dependencies or - configuration. This ensures that subsequent builds are based on the most - up-to-date and accurate set of dependencies. To lock down all extensions - at once, run `bazel mod deps --lockfile_mode=update`. - -* Include the lockfile in version control to facilitate collaboration and - ensure that all team members have access to the same lockfile, promoting - consistent development environments across the project. - -* Use [`bazelisk`](/install/bazelisk) to run Bazel, and include a - `.bazelversion` file in version control that specifies the Bazel version - corresponding to the lockfile. Because Bazel itself is a dependency of - your build, the lockfile is specific to the Bazel version, and will - change even between [backwards compatible](/release/backward-compatibility) - Bazel releases. Using `bazelisk` ensures that all developers are using - a Bazel version that matches the lockfile. - -By following these best practices, you can effectively utilize the lockfile -feature in Bazel, leading to more efficient, reliable, and collaborative -software development workflows. - -## Merge Conflicts - -The lockfile format is designed to minimize merge conflicts, but they can still -happen. - -### Automatic Resolution - -Bazel provides a custom -[git merge driver](https://git-scm.com/docs/gitattributes#_defining_a_custom_merge_driver) -to help resolve these conflicts automatically. - -Set up the driver by adding this line to a `.gitattributes` file in the root of -your git repository: - -```gitattributes -# A custom merge driver for the Bazel lockfile. -# https://bazel.build/external/lockfile#automatic-resolution -MODULE.bazel.lock merge=bazel-lockfile-merge -``` - -Then each developer who wants to use the driver has to register it once by -following these steps: - -1. 
Install [jq](https://jqlang.github.io/jq/download/) (1.5 or higher).
-2. Run the following commands:
-
-```bash
-jq_script=$(curl https://raw.githubusercontent.com/bazelbuild/bazel/master/scripts/bazel-lockfile-merge.jq)
-printf '%s\n' "${jq_script}" | less # to optionally inspect the jq script
-git config --global merge.bazel-lockfile-merge.name "Merge driver for the Bazel lockfile (MODULE.bazel.lock)"
-git config --global merge.bazel-lockfile-merge.driver "jq -s '${jq_script}' -- %O %A %B > %A.jq_tmp && mv %A.jq_tmp %A"
-```
-
-### Manual Resolution
-
-Simple merge conflicts in the `registryFileHashes` and `selectedYankedVersions`
-fields can be safely resolved by keeping all the entries from both sides of the
-conflict.
-
-Other types of merge conflicts should not be resolved manually. Instead:
-
-1. Restore the previous state of the lockfile
-   via `git reset MODULE.bazel.lock && git checkout MODULE.bazel.lock`.
-2. Resolve any conflicts in the `MODULE.bazel` file.
-3. Run `bazel mod deps` to update the lockfile.
diff --git a/7.6.1/external/module.mdx b/7.6.1/external/module.mdx
deleted file mode 100644
index c5757fc..0000000
--- a/7.6.1/external/module.mdx
+++ /dev/null
@@ -1,223 +0,0 @@
----
-title: 'Bazel modules'
----
-
-
-
-A Bazel **module** is a Bazel project that can have multiple versions, each of
-which publishes metadata about other modules that it depends on. This is
-analogous to familiar concepts in other dependency management systems, such as a
-Maven *artifact*, an npm *package*, a Go *module*, or a Cargo *crate*.
-
-A module must have a `MODULE.bazel` file at its repo root. This file is the
-module's manifest, declaring its name, version, list of direct dependencies, and
-other information. For a basic example:
-
-```python
-module(name = "my-module", version = "1.0")
-
-bazel_dep(name = "rules_cc", version = "0.0.1")
-bazel_dep(name = "protobuf", version = "3.19.0")
-```
-
-See the [full list](/rules/lib/globals/module) of directives available in
-`MODULE.bazel` files.
-
-To perform module resolution, Bazel starts by reading the root module's
-`MODULE.bazel` file, and then repeatedly requests any dependency's
-`MODULE.bazel` file from a [Bazel registry](/external/registry) until it
-discovers the entire dependency graph.
-
-By default, Bazel then [selects](#version-selection) one version of each module
-to use. Bazel represents each module with a repo, and consults the registry
-again to learn how to define each of the repos.
-
-## Version format
-
-Bazel has a diverse ecosystem and projects use various versioning schemes. The
-most popular by far is [SemVer](https://semver.org), but there are
-also prominent projects using different schemes, such as
-[Abseil](https://github.com/abseil/abseil-cpp/releases), whose
-versions are date-based (for example, `20210324.2`).
-
-For this reason, Bzlmod adopts a more relaxed version of the SemVer spec. The
-differences include:
-
-* SemVer prescribes that the "release" part of the version must consist of 3
-  segments: `MAJOR.MINOR.PATCH`. In Bazel, this requirement is loosened so
-  that any number of segments is allowed (see the example after this list).
-* In SemVer, each of the segments in the "release" part must be digits only.
-  In Bazel, this is loosened to allow letters too, and the comparison
-  semantics match the "identifiers" in the "prerelease" part.
-* Additionally, the semantics of major, minor, and patch version increases are
-  not enforced. However, see [compatibility level](#compatibility_level) for
-  details on how we denote backwards compatibility.
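To make the relaxed scheme concrete, here is a sketch of a `MODULE.bazel` declaration using a date-based version like Abseil's (the module name is hypothetical):

```python
# Valid as a Bazel module version, but not valid SemVer:
# the "release" part has two segments instead of three.
module(
    name = "my-datelib",  # hypothetical module name
    version = "20210324.2",
)
```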
- -Any valid SemVer version is a valid Bazel module version. Additionally, two -SemVer versions `a` and `b` compare `a < b` if and only if the same holds when -they're compared as Bazel module versions. - -## Version selection - -Consider the diamond dependency problem, a staple in the versioned dependency -management space. Suppose you have the dependency graph: - -``` - A 1.0 - / \ - B 1.0 C 1.1 - | | - D 1.0 D 1.1 -``` - -Which version of `D` should be used? To resolve this question, Bzlmod uses the -[Minimal Version Selection](https://research.swtch.com/vgo-mvs) -(MVS) algorithm introduced in the Go module system. MVS assumes that all new -versions of a module are backwards compatible, and so picks the highest version -specified by any dependent (`D 1.1` in our example). It's called "minimal" -because `D 1.1` is the earliest version that could satisfy our requirements — -even if `D 1.2` or newer exists, we don't select them. Using MVS creates a -version selection process that is *high-fidelity* and *reproducible*. - -### Yanked versions - -The registry can declare certain versions as *yanked* if they should be avoided -(such as for security vulnerabilities). Bazel throws an error when selecting a -yanked version of a module. To fix this error, either upgrade to a newer, -non-yanked version, or use the -[`--allow_yanked_versions`](/reference/command-line-reference#flag--allow_yanked_versions) -flag to explicitly allow the yanked version. - -## Compatibility level - -In Go, MVS's assumption about backwards compatibility works because it treats -backwards incompatible versions of a module as a separate module. In terms of -SemVer, that means `A 1.x` and `A 2.x` are considered distinct modules, and can -coexist in the resolved dependency graph. This is, in turn, made possible by -encoding the major version in the package path in Go, so there aren't any -compile-time or linking-time conflicts. - -Bazel, however, cannot provide such guarantees, so it needs the "major version" -number in order to detect backwards incompatible versions. This number is called -the *compatibility level*, and is specified by each module version in its -`module()` directive. With this information, Bazel can throw an error when it -detects that versions of the same module with different compatibility levels -exist in the resolved dependency graph. - -## Overrides - -Specify overrides in the `MODULE.bazel` file to alter the behavior of Bazel -module resolution. Only the root module's overrides take effect — if a module is -used as a dependency, its overrides are ignored. - -Each override is specified for a certain module name, affecting all of its -versions in the dependency graph. Although only the root module's overrides take -effect, they can be for transitive dependencies that the root module does not -directly depend on. - -### Single-version override - -The [`single_version_override`](/rules/lib/globals/module#single_version_override) -serves multiple purposes: - -* With the `version` attribute, you can pin a dependency to a specific - version, regardless of which versions of the dependency are requested in the - dependency graph. -* With the `registry` attribute, you can force this dependency to come from a - specific registry, instead of following the normal [registry - selection](/external/registry#selecting_registries) process. -* With the `patch*` attributes, you can specify a set of patches to apply to - the downloaded module. 
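For illustration, a root module could combine these attributes as follows (the version, registry URL, and patch label below are placeholders, not prescribed values):

```python
single_version_override(
    module_name = "protobuf",
    # Pin to this version regardless of what the dependency graph requests.
    version = "3.19.0",
    # Fetch this module from a specific registry (hypothetical URL).
    registry = "https://registry.mycorp.com",
    # Apply a local patch to the downloaded module (hypothetical label).
    patches = ["//third_party/patches:protobuf.patch"],
)
```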
- -These attributes are all optional and can be mixed and matched with each other. - -### Multiple-version override - -A [`multiple_version_override`](/rules/lib/globals/module#multiple_version_override) -can be specified to allow multiple versions of the same module to coexist in the -resolved dependency graph. - -You can specify an explicit list of allowed versions for the module, which must -all be present in the dependency graph before resolution — there must exist -*some* transitive dependency depending on each allowed version. After -resolution, only the allowed versions of the module remain, while Bazel upgrades -other versions of the module to the nearest higher allowed version at the same -compatibility level. If no higher allowed version at the same compatibility -level exists, Bazel throws an error. - -For example, if versions `1.1`, `1.3`, `1.5`, `1.7`, and `2.0` exist in the -dependency graph before resolution and the major version is the compatibility -level: - -* A multiple-version override allowing `1.3`, `1.7`, and `2.0` results in - `1.1` being upgraded to `1.3`, `1.5` being upgraded to `1.7`, and other - versions remaining the same. -* A multiple-version override allowing `1.5` and `2.0` results in an error, as - `1.7` has no higher version at the same compatibility level to upgrade to. -* A multiple-version override allowing `1.9` and `2.0` results in an error, as - `1.9` is not present in the dependency graph before resolution. - -Additionally, users can also override the registry using the `registry` -attribute, similarly to single-version overrides. - -### Non-registry overrides - -Non-registry overrides completely remove a module from version resolution. Bazel -does not request these `MODULE.bazel` files from a registry, but instead from -the repo itself. - -Bazel supports the following non-registry overrides: - -* [`archive_override`](/rules/lib/globals/module#archive_override) -* [`git_override`](/rules/lib/globals/module#git_override) -* [`local_path_override`](/rules/lib/globals/module#local_path_override) - -## Define repos that don't represent Bazel modules - -With `bazel_dep`, you can define repos that represent other Bazel modules. -Sometimes there is a need to define a repo that does _not_ represent a Bazel -module; for example, one that contains a plain JSON file to be read as data. - -In this case, you could use the [`use_repo_rule` -directive](/rules/lib/globals/module#use_repo_rule) to directly define a repo -by invoking a repo rule. This repo will only be visible to the module it's -defined in. - -Under the hood, this is implemented using the same mechanism as [module -extensions](/external/extension), which lets you define repos with more -flexibility. - -## Repository names and strict deps - -The [apparent name](/external/overview#apparent-repo-name) of a repo backing a -module to its direct dependents defaults to its module name, unless the -`repo_name` attribute of the [`bazel_dep`](/rules/lib/globals/module#bazel_dep) -directive says otherwise. Note that this means a module can only find its direct -dependencies. This helps prevent accidental breakages due to changes in -transitive dependencies. 
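For example, a module could rename the repo it sees for one of its direct dependencies like this (a sketch; the alternative name is arbitrary):

```python
# The dependency is then referenced as @cc_rules instead of @rules_cc
# within this module only.
bazel_dep(name = "rules_cc", version = "0.0.1", repo_name = "cc_rules")
```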
-
-The [canonical name](/external/overview#canonical-repo-name) of a repo backing a
-module is either `module_name~version` (for example, `bazel_skylib~1.0.3`) or
-`module_name~` (for example, `bazel_features~`), depending on whether there are
-multiple versions of the module in the entire dependency graph (see
-[`multiple_version_override`](/rules/lib/globals/module#multiple_version_override)).
-Note that **the canonical name format** is not an API you should depend on and
-**is subject to change at any time**. Instead of hard-coding the canonical name,
-use a supported way to get it directly from Bazel:
-
-* In BUILD and `.bzl` files, use
-  [`Label.repo_name`](/rules/lib/builtins/Label#repo_name) on a `Label` instance
-  constructed from a label string given by the apparent name of the repo, e.g.,
-  `Label("@bazel_skylib").repo_name`.
-* When looking up runfiles, use
-  [`$(rlocationpath ...)`](https://bazel.build/reference/be/make-variables#predefined_label_variables)
-  or one of the runfiles libraries in
-  `@bazel_tools//tools/{bash,cpp,java}/runfiles` or, for a ruleset `rules_foo`,
-  in `@rules_foo//foo/runfiles`.
-* When interacting with Bazel from an external tool such as an IDE or language
-  server, use the `bazel mod dump_repo_mapping` command to get the mapping from
-  apparent names to canonical names for a given set of repositories.
-
-[Module extensions](/external/extension) can also introduce additional repos
-into the visible scope of a module.
diff --git a/7.6.1/help.mdx b/7.6.1/help.mdx
deleted file mode 100644
index a48d2b7..0000000
--- a/7.6.1/help.mdx
+++ /dev/null
@@ -1,52 +0,0 @@
----
-title: 'Getting Help'
----
-
-
-
-This page lists Bazel resources beyond the documentation and covers how to get
-support from the Bazel team and community.
- -## Search existing material - -In addition to the documentation, you can find helpful information by searching: - -* [Bazel user group](https://groups.google.com/g/bazel-discuss) -* [Bazel GitHub Discussions](https://github.com/bazelbuild/bazel/discussions) -* [Bazel blog](https://blog.bazel.build/) -* [Stack Overflow](https://stackoverflow.com/questions/tagged/bazel) -* [`awesome-bazel` resources](https://github.com/jin/awesome-bazel) - -## Watch videos - -There are recordings of Bazel talks at various conferences, such as: - -* Bazel’s annual conference, BazelCon: - * [BazelCon 2022](https://youtube.com/playlist?list=PLxNYxgaZ8RsdH4GCIZ69dzxQCOPyuNlpF) - * [BazelCon 2021](https://www.youtube.com/playlist?list=PLxNYxgaZ8Rsc3auKhtfIB4qXAYf7whEux) - * [BazelCon 2020](https://www.youtube.com/playlist?list=PLxNYxgaZ8RseRybXNbopHRv6-wGmFr04n) - * [BazelCon 2019](https://youtu.be/eymphDN7No4?t=PLxNYxgaZ8Rsf-7g43Z8LyXct9ax6egdSj) - * [BazelCon 2018](https://youtu.be/DVYRg6b2UBo?t=PLxNYxgaZ8Rsd3Nmvl1W1B4I6nK1674ezp) - * [BazelCon 2017](https://youtu.be/3eFllvz8_0k?t=PLxNYxgaZ8RseY0KmkXQSt0StE71E7yizG) -* Bazel day on [Google Open Source Live](https://opensourcelive.withgoogle.com/events/bazel) - - -## Ask the Bazel community - -If there are no existing answers, you can ask the community by: - -* Emailing the [Bazel user group](https://groups.google.com/g/bazel-discuss) -* Starting a discussion on [GitHub](https://github.com/bazelbuild/bazel/discussions) -* Asking a question on [Stack Overflow](https://stackoverflow.com/questions/tagged/bazel) -* Chatting with other Bazel contributors on [Slack](https://slack.bazel.build/) -* Consulting a [Bazel community expert](/community/experts) - -## Understand Bazel's support level - -Please read the [release page](/release) to understand Bazel's release model and -what level of support Bazel provides. - -## File a bug - -If you encounter a bug or want to request a feature, file a [GitHub -Issue](https://github.com/bazelbuild/bazel/issues). diff --git a/7.6.1/install/bazelisk.mdx b/7.6.1/install/bazelisk.mdx deleted file mode 100644 index a3189cb..0000000 --- a/7.6.1/install/bazelisk.mdx +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: 'Installing / Updating Bazel using Bazelisk' ---- - - - -## Installing Bazel - -[Bazelisk](https://github.com/bazelbuild/bazelisk) is the -recommended way to install Bazel on Ubuntu, Windows, and macOS. It automatically -downloads and installs the appropriate version of Bazel. Use Bazelisk if you -need to switch between different versions of Bazel depending on the current -working directory, or to always keep Bazel updated to the latest release. - -For more details, see -[the official README](https://github.com/bazelbuild/bazelisk/blob/master/README.md). - -## Updating Bazel - -Bazel has a [backward compatibility policy](/release/backward-compatibility) -(see [guidance for rolling out incompatible -changes](/contribute/breaking-changes) if you -are the author of one). That page summarizes best practices on how to test and -migrate your project with upcoming incompatible changes and how to provide -feedback to the incompatible change authors. - -### Managing Bazel versions with Bazelisk - -[Bazelisk](https://github.com/bazelbuild/bazelisk) helps you manage -Bazel versions. - -Bazelisk can: - -* Auto-update Bazel to the latest LTS or rolling release. -* Build the project with a Bazel version specified in the .bazelversion - file. Check in that file into your version control to ensure reproducibility - of your builds. 
-* Help migrate your project for incompatible changes (see above) -* Easily try release candidates - -### Recommended migration process - -Within minor updates to any LTS release, any -project can be prepared for the next release without breaking -compatibility with the current release. However, there may be -backward-incompatible changes between major LTS versions. - -Follow this process to migrate from one major version to another: - -1. Read the release notes to get advice on how to migrate to the next version. -1. Major incompatible changes should have an associated `--incompatible_*` flag - and a corresponding GitHub issue: - * Migration guidance is available in the associated GitHub issue. - * Tooling is available for some of incompatible changes migration. For - example, [buildifier](https://github.com/bazelbuild/buildtools/releases). - * Report migration problems by commenting on the associated GitHub issue. - -After migration, you can continue to build your projects without worrying about -backward-compatibility until the next major release. diff --git a/7.6.1/install/compile-source.mdx b/7.6.1/install/compile-source.mdx deleted file mode 100644 index 4b51f17..0000000 --- a/7.6.1/install/compile-source.mdx +++ /dev/null @@ -1,293 +0,0 @@ ---- -title: 'Compiling Bazel from Source' ---- - - - -This page describes how to install Bazel from source and provides -troubleshooting tips for common issues. - -To build Bazel from source, you can do one of the following: - -* Build it [using an existing Bazel binary](#build-bazel-using-bazel) - -* Build it [without an existing Bazel binary](#bootstrap-bazel) which is known - as _bootstrapping_. - -## Build Bazel using Bazel - -### Summary - -1. Get the latest Bazel release from the - [GitHub release page](https://github.com/bazelbuild/bazel/releases) or with - [Bazelisk](https://github.com/bazelbuild/bazelisk). - -2. [Download Bazel's sources from GitHub](https://github.com/bazelbuild/bazel/archive/master.zip) - and extract somewhere. - Alternatively you can git clone the source tree from https://github.com/bazelbuild/bazel - -3. Install the same prerequisites as for bootstrapping (see - [for Unix-like systems](#bootstrap-unix-prereq) or - [for Windows](#bootstrap-windows-prereq)) - -4. Build a development build of Bazel using Bazel: - `bazel build //src:bazel-dev` (or `bazel build //src:bazel-dev.exe` on - Windows) - -5. The resulting binary is at `bazel-bin/src/bazel-dev` - (or `bazel-bin\src\bazel-dev.exe` on Windows). You can copy it wherever you - like and use immediately without further installation. - -Detailed instructions follow below. - -### Step 1: Get the latest Bazel release - -**Goal**: Install or download a release version of Bazel. Make sure you can run -it by typing `bazel` in a terminal. - -**Reason**: To build Bazel from a GitHub source tree, you need a pre-existing -Bazel binary. You can install one from a package manager or download one from -GitHub. See [Installing Bazel](/install). (Or you can [build from -scratch (bootstrap)](#bootstrap-bazel).) - -**Troubleshooting**: - -* If you cannot run Bazel by typing `bazel` in a terminal: - - * Maybe your Bazel binary's directory is not on the PATH. - - This is not a big problem. Instead of typing `bazel`, you will need to - type the full path. - - * Maybe the Bazel binary itself is not called `bazel` (on Unixes) or - `bazel.exe` (on Windows). - - This is not a big problem. You can either rename the binary, or type the - binary's name instead of `bazel`. 
- - * Maybe the binary is not executable (on Unixes). - - You must make the binary executable by running `chmod +x /path/to/bazel`. - -### Step 2: Download Bazel's sources from GitHub - -If you are familiar with Git, then just git clone https://github.com/bazelbuild/bazel - -Otherwise: - -1. Download the - [latest sources as a zip file](https://github.com/bazelbuild/bazel/archive/master.zip). - -2. Extract the contents somewhere. - - For example create a `bazel-src` directory under your home directory and - extract there. - -### Step 3: Install prerequisites - -Install the same prerequisites as for bootstrapping (see below) -- JDK, C++ -compiler, MSYS2 (if you are building on Windows), etc. - -### Step 4a: Build Bazel on Ubuntu Linux, macOS, and other Unix-like systems - -For instructions for Windows, see [Build Bazel on Windows](#build-bazel-on-windows). - -**Goal**: Run Bazel to build a custom Bazel binary (`bazel-bin/src/bazel-dev`). - -**Instructions**: - -1. Start a Bash terminal - -2. `cd` into the directory where you extracted (or cloned) Bazel's sources. - - For example if you extracted the sources under your home directory, run: - - cd ~/bazel-src - -3. Build Bazel from source: - - bazel build //src:bazel-dev - - Alternatively you can run `bazel build //src:bazel --compilation_mode=opt` - to yield a smaller binary but it's slower to build. - -4. The output will be at `bazel-bin/src/bazel-dev` (or `bazel-bin/src/bazel`). - -### Step 4b: Build Bazel on Windows - -For instructions for Unix-like systems, see -[Ubuntu Linux, macOS, and other Unix-like systems](#build-bazel-on-unixes). - -**Goal**: Run Bazel to build a custom Bazel binary -(`bazel-bin\src\bazel-dev.exe`). - -**Instructions**: - -1. Start Command Prompt (Start Menu > Run > "cmd.exe") - -2. `cd` into the directory where you extracted (or cloned) Bazel's sources. - - For example if you extracted the sources under your home directory, run: - - cd %USERPROFILE%\bazel-src - -3. Build Bazel from source: - - bazel build //src:bazel-dev.exe - - Alternatively you can run `bazel build //src:bazel.exe - --compilation_mode=opt` to yield a smaller binary but it's slower to build. - -4. The output will be at `bazel-bin\src\bazel-dev.exe` (or - `bazel-bin\src\bazel.exe`). - -### Step 5: Install the built binary - -Actually, there's nothing to install. - -The output of the previous step is a self-contained Bazel binary. You can copy -it to any directory and use immediately. (It's useful if that directory is on -your PATH so that you can run "bazel" everywhere.) - ---- - -## Build Bazel from scratch (bootstrapping) - -You can also build Bazel from scratch, without using an existing Bazel binary. - -### Step 1: Download Bazel's sources (distribution archive) - -(This step is the same for all platforms.) - -1. Download `bazel--dist.zip` from - [GitHub](https://github.com/bazelbuild/bazel/releases), for example - `bazel-0.28.1-dist.zip`. - - **Attention**: - - - There is a **single, architecture-independent** distribution archive. - There are no architecture-specific or OS-specific distribution archives. - - These sources are **not the same as the GitHub source tree**. You - have to use the distribution archive to bootstrap Bazel. You cannot - use a source tree cloned from GitHub. (The distribution archive contains - generated source files that are required for bootstrapping and are not part - of the normal Git source tree.) - -2. Unpack the distribution archive somewhere on disk. 
-
-   You should verify the signature made by Bazel's
-   [release key](https://bazel.build/bazel-release.pub.gpg) 3D5919B448457EE0.
-
-### Step 2a: Bootstrap Bazel on Ubuntu Linux, macOS, and other Unix-like systems
-
-For instructions for Windows, see [Bootstrap Bazel on Windows](#bootstrap-windows).
-
-#### 2.1. Install the prerequisites
-
-* **Bash**
-
-* **zip, unzip**
-
-* **C++ build toolchain**
-
-* **JDK.** Version 21 is required.
-
-* **Python**. Versions 2 and 3 are supported, installing one of them is
-  enough.
-
-For example on Ubuntu Linux you can install these requirements using the
-following command:
-
-```sh
-sudo apt-get install build-essential openjdk-21-jdk python zip unzip
-```
-
-#### 2.2. Bootstrap Bazel on Unix
-
-1. Open a shell or Terminal window.
-
-2. `cd` to the directory where you unpacked the distribution archive.
-
-3. Run the compilation script: `env EXTRA_BAZEL_ARGS="--tool_java_runtime_version=local_jdk" bash ./compile.sh`.
-
-The compiled output is placed into `output/bazel`. This is a self-contained
-Bazel binary, without an embedded JDK. You can copy it anywhere or use it
-in-place. For convenience, copy this binary to a directory that's on your
-`PATH` (such as `/usr/local/bin` on Linux).
-
-To build the `bazel` binary in a reproducible way, also set
-[`SOURCE_DATE_EPOCH`](https://reproducible-builds.org/specs/source-date-epoch/)
-in the "Run the compilation script" step.
-
-### Step 2b: Bootstrap Bazel on Windows
-
-For instructions for Unix-like systems, see
-[Bootstrap Bazel on Ubuntu Linux, macOS, and other Unix-like systems](#bootstrap-unix).
-
-#### 2.1. Install the prerequisites
-
-* [MSYS2 shell](https://msys2.github.io/)
-
-* **The MSYS2 packages for zip and unzip.** Run the following command in the MSYS2 shell:
-
-  ```
-  pacman -S zip unzip patch
-  ```
-
-* **The Visual C++ compiler.** Install the Visual C++ compiler either as part
-  of Visual Studio 2015 or newer, or by installing the latest [Build Tools
-  for Visual Studio 2017](https://aka.ms/BuildTools).
-
-* **JDK.** Version 21 is required.
-
-* **Python**. Versions 2 and 3 are supported, installing one of them is
-  enough. You need the Windows-native version (downloadable from
-  [https://www.python.org](https://www.python.org)). Versions installed via
-  pacman in MSYS2 will not work.
-
-#### 2.2. Bootstrap Bazel on Windows
-
-1. Open the MSYS2 shell.
-
-2. Set the following environment variables:
-   * Either `BAZEL_VS` or `BAZEL_VC` (they are *not* the same): Set to the
-     path to the Visual Studio directory (BAZEL\_VS) or to the Visual
-     C++ directory (BAZEL\_VC). Setting one of them is enough.
-   * `BAZEL_SH`: Path of the MSYS2 `bash.exe`. See the command in the
-     examples below.
-
-     Do not set this to `C:\Windows\System32\bash.exe`. (You have that file
-     if you installed Windows Subsystem for Linux.) Bazel does not support
-     this version of `bash.exe`.
-   * `PATH`: Add the Python directory.
-   * `JAVA_HOME`: Set to the JDK directory.
-
-   **Example** (using BAZEL\_VS):
-
-       export BAZEL_VS="C:/Program Files (x86)/Microsoft Visual Studio/2017/BuildTools"
-       export BAZEL_SH="$(cygpath -m $(realpath $(which bash)))"
-       export PATH="/c/python27:$PATH"
-       export JAVA_HOME="C:/Program Files/Java/jdk-21"
-
-   or (using BAZEL\_VC):
-
-       export BAZEL_VC="C:/Program Files (x86)/Microsoft Visual Studio/2017/BuildTools/VC"
-       export BAZEL_SH="$(cygpath -m $(realpath $(which bash)))"
-       export PATH="/c/python27:$PATH"
-       export JAVA_HOME="C:/Program Files/Java/jdk-21"
`cd` to the directory where you unpacked the distribution archive. - -4. Run the compilation script: `env EXTRA_BAZEL_ARGS="--tool_java_runtime_version=local_jdk" ./compile.sh` - -The compiled output is placed into `output/bazel.exe`. This is a self-contained -Bazel binary, without an embedded JDK. You can copy it anywhere or use it -in-place. For convenience, copy this binary to a directory that's on -your `PATH`. - -To build the `bazel.exe` binary in a reproducible way, also set -[`SOURCE_DATE_EPOCH`](https://reproducible-builds.org/specs/source-date-epoch/) -in the "Run the compilation script" step. - -You don't need to run Bazel from the MSYS2 shell. You can run Bazel from the -Command Prompt (`cmd.exe`) or PowerShell. diff --git a/7.6.1/install/completion.mdx b/7.6.1/install/completion.mdx deleted file mode 100644 index 856784c..0000000 --- a/7.6.1/install/completion.mdx +++ /dev/null @@ -1,129 +0,0 @@ ---- -title: 'Command-Line Completion' ---- - - - -You can enable command-line completion (also known as tab-completion) in Bash -and Zsh. This lets you tab-complete command names, flags names and flag values, -and target names. - -## Bash - -Bazel comes with a Bash completion script. - -If you installed Bazel: - -* From the APT repository, then you're done -- the Bash completion script is - already installed in `/etc/bash_completion.d`. - -* From Homebrew, then you're done -- the Bash completion script is - already installed in `$(brew --prefix)/etc/bash_completion.d`. - -* From the installer downloaded from GitHub, then: - 1. Locate the absolute path of the completion file. The installer copied it - to the `bin` directory. - - Example: if you ran the installer with `--user`, this will be - `$HOME/.bazel/bin`. If you ran the installer as root, this will be - `/usr/local/lib/bazel/bin`. - 2. Do one of the following: - * Either copy this file to your completion directory (if you have - one). - - Example: on Ubuntu this is the `/etc/bash_completion.d` directory. - * Or source the completion file from Bash's RC file. - - Add a line similar to the one below to your `~/.bashrc` (on Ubuntu) - or `~/.bash_profile` (on macOS), using the path to your completion - file's absolute path: - - ``` - source /path/to/bazel-complete.bash - ``` - -* Via [bootstrapping](/install/compile-source), then: - 1. Build the completion script: - - ``` - bazel build //scripts:bazel-complete.bash - ``` - 2. The completion file is built under - `bazel-bin/scripts/bazel-complete.bash`. - - Do one of the following: - * Copy this file to your completion directory, if you have - one. - - Example: on Ubuntu this is the `/etc/bash_completion.d` directory - * Copy it somewhere on your local disk, such as to `$HOME`, and - source the completion file from Bash's RC file. - - Add a line similar to the one below to your `~/.bashrc` (on Ubuntu) - or `~/.bash_profile` (on macOS), using the path to your completion - file's absolute path: - - ``` - source /path/to/bazel-complete.bash - ``` - -## Zsh - -Bazel comes with a Zsh completion script. - -If you installed Bazel: - -* From the APT repository, then you're done -- the Zsh completion script is - already installed in `/usr/share/zsh/vendor-completions`. 
- - > If you have a heavily customized `.zshrc` and the autocomplete - > does not function, try one of the following solutions: - > - > Add the following to your `.zshrc`: - > - > ``` - > zstyle :compinstall filename '/home/tradical/.zshrc' - > - > autoload -Uz compinit - > compinit - > ``` - > - > or - > - > Follow the instructions - > [here](https://stackoverflow.com/questions/58331977/bazel-tab-auto-complete-in-zsh-not-working) - > - > If you are using `oh-my-zsh`, you may want to install and enable - > the `zsh-autocomplete` plugin. If you'd prefer not to, use one of the - > solutions described above. - -* From Homebrew, then you're done -- the Zsh completion script is - already installed in `$(brew --prefix)/share/zsh/site-functions`. - -* From the installer downloaded from GitHub, then: - 1. Locate the absolute path of the completion file. The installer copied it - to the `bin` directory. - - Example: if you ran the installer with `--user`, this will be - `$HOME/.bazel/bin`. If you ran the installer as root, this will be - `/usr/local/lib/bazel/bin`. - - 2. Add this script to a directory on your `$fpath`: - - ``` - fpath[1,0]=~/.zsh/completion/ - mkdir -p ~/.zsh/completion/ - cp /path/from/above/step/_bazel ~/.zsh/completion - ``` - - You may have to call `rm -f ~/.zcompdump; compinit` - the first time to make it work. - - 3. Optionally, add the following to your .zshrc. - - ``` - # This way the completion script does not have to parse Bazel's options - # repeatedly. The directory in cache-path must be created manually. - zstyle ':completion:*' use-cache on - zstyle ':completion:*' cache-path ~/.zsh/cache - ``` diff --git a/7.6.1/install/docker-container.mdx b/7.6.1/install/docker-container.mdx deleted file mode 100644 index 4ffb977..0000000 --- a/7.6.1/install/docker-container.mdx +++ /dev/null @@ -1,133 +0,0 @@ ---- -title: 'Getting Started with Bazel Docker Container' ---- - - - -This page provides details on the contents of the Bazel container, how to build -the [abseil-cpp](https://github.com/abseil/abseil-cpp) project using Bazel -inside the Bazel container, and how to build this project directly -from the host machine using the Bazel container with directory mounting. - -## Build Abseil project from your host machine with directory mounting - -The instructions in this section allow you to build using the Bazel container -with the sources checked out in your host environment. A container is started up -for each build command you execute. Build results are cached in your host -environment so they can be reused across builds. - -Clone the project to a directory in your host machine. - -```posix-terminal -git clone https://github.com/abseil/abseil-cpp.git /src/workspace -``` - -Create a folder that will have cached results to be shared across builds. - -```posix-terminal -mkdir -p /tmp/build_output/ -``` - -Use the Bazel container to build the project and make the build -outputs available in the output folder in your host machine. - -```posix-terminal -docker run \ - -e USER="$(id -u)" \ - -u="$(id -u)" \ - -v /src/workspace:/src/workspace \ - -v /tmp/build_output:/tmp/build_output \ - -w /src/workspace \ - gcr.io/bazel-public/bazel:latest \ - --output_user_root=/tmp/build_output \ - build //absl/... -``` - -Build the project with sanitizers by adding the `--config={{ "" }}asan{{ "" }}|{{ "" }}tsan{{ "" }}|{{ "" }}msan{{ "" }}` build -flag to select AddressSanitizer (asan), ThreadSanitizer (tsan) or -MemorySanitizer (msan) accordingly. 
-
-```posix-terminal
-docker run \
-  -e USER="$(id -u)" \
-  -u="$(id -u)" \
-  -v /src/workspace:/src/workspace \
-  -v /tmp/build_output:/tmp/build_output \
-  -w /src/workspace \
-  gcr.io/bazel-public/bazel:latest \
-  --output_user_root=/tmp/build_output \
-  build --config={asan | tsan | msan} -- //absl/... -//absl/types:variant_test
-```
-
-## Build Abseil project from inside the container
-
-The instructions in this section allow you to build using the Bazel container
-with the sources inside the container. By starting a container at the beginning
-of your development workflow and doing changes in the workspace within the
-container, build results will be cached.
-
-Start a shell in the Bazel container:
-
-```posix-terminal
-docker run --interactive --entrypoint=/bin/bash gcr.io/bazel-public/bazel:latest
-```
-
-Each container id is unique. In the instructions below, the container id is
-`5a99103747c6`.
-
-Clone the project.
-
-```posix-terminal
-root@5a99103747c6:~# git clone https://github.com/abseil/abseil-cpp.git && cd abseil-cpp/
-```
-
-Do a regular build.
-
-```posix-terminal
-root@5a99103747c6:~/abseil-cpp# bazel build //absl/...
-```
-
-Build the project with sanitizers by adding the `--config={asan | tsan | msan}`
-build flag to select AddressSanitizer (asan), ThreadSanitizer (tsan) or
-MemorySanitizer (msan) accordingly.
-
-```posix-terminal
-root@5a99103747c6:~/abseil-cpp# bazel build --config={asan | tsan | msan} -- //absl/... -//absl/types:variant_test
-```
-
-## Explore the Bazel container
-
-If you haven't already, start an interactive shell inside the Bazel container.
-
-```posix-terminal
-docker run -it --entrypoint=/bin/bash gcr.io/bazel-public/bazel:latest
-root@5a99103747c6:/#
-```
-
-Explore the container contents.
-
-```posix-terminal
-root@5a99103747c6:/# clang --version
-clang version 8.0.0 (trunk 340178)
-Target: x86_64-unknown-linux-gnu
-Thread model: posix
-InstalledDir: /usr/local/bin
-
-root@5a99103747c6:/# java -version
-openjdk version "1.8.0_181"
-OpenJDK Runtime Environment (build 1.8.0_181-8u181-b13-0ubuntu0.16.04.1-b13)
-OpenJDK 64-Bit Server VM (build 25.181-b13, mixed mode)
-
-root@5a99103747c6:/# python -V
-Python 2.7.12
-
-root@5a99103747c6:/# python3 -V
-Python 3.6.6
-
-root@5a99103747c6:/# bazel version
-Extracting Bazel installation...
-Build label: 6.0.0
-Build target: bazel-out/k8-opt/bin/src/main/java/com/google/devtools/build/lib/bazel/BazelServer_deploy.jar
-Build time: Mon Dec 19 15:52:35 2022 (1671465155)
-Build timestamp: 1671465155
-Build timestamp as int: 1671465155
-```
diff --git a/7.6.1/install/ide.mdx b/7.6.1/install/ide.mdx
deleted file mode 100644
index f70919b..0000000
--- a/7.6.1/install/ide.mdx
+++ /dev/null
@@ -1,122 +0,0 @@
----
-title: 'Integrating Bazel with IDEs'
----
-
-
-
-This page covers how to integrate Bazel with IDEs, such as IntelliJ, Android
-Studio, and CLion (or build your own IDE plugin). It also includes links to
-installation and plugin details.
-
-IDEs integrate with Bazel in a variety of ways, from features that allow Bazel
-executions from within the IDE, to awareness of Bazel structures such as syntax
-highlighting of the `BUILD` files.
-
-If you are interested in developing an editor or IDE plugin for Bazel, please
-join the `#ide` channel on the [Bazel Slack](https://slack.bazel.build) or start
-a discussion on [GitHub](https://github.com/bazelbuild/bazel/discussions).
- -## IDEs and editors - -### IntelliJ, Android Studio, and CLion - -[Official plugin](http://ij.bazel.build) for IntelliJ, Android Studio, and -CLion. The plugin is [open source](https://github.com/bazelbuild/intellij). - -This is the open source version of the plugin used internally at Google. - -Features: - -* Interop with language-specific plugins. Supported languages include Java, - Scala, and Python. -* Import `BUILD` files into the IDE with semantic awareness of Bazel targets. -* Make your IDE aware of Starlark, the language used for Bazel's `BUILD` and - `.bzl`files -* Build, test, and execute binaries directly from the IDE -* Create configurations for debugging and running binaries. - -To install, go to the IDE's plugin browser and search for `Bazel`. - -To manually install older versions, download the zip files from JetBrains' -Plugin Repository and install the zip file from the IDE's plugin browser: - -* [Android Studio - plugin](https://plugins.jetbrains.com/plugin/9185-android-studio-with-bazel) -* [IntelliJ - plugin](https://plugins.jetbrains.com/plugin/8609-intellij-with-bazel) -* [CLion plugin](https://plugins.jetbrains.com/plugin/9554-clion-with-bazel) - -### Xcode - -[rules_xcodeproj](https://github.com/buildbuddy-io/rules_xcodeproj), -[Tulsi](https://tulsi.bazel.build), and -[XCHammer](https://github.com/pinterest/xchammer) generate Xcode -projects from Bazel `BUILD` files. - -### Visual Studio Code - -Official plugin for VS Code. - -Features: - -* Bazel Build Targets tree -* Starlark debugger for `.bzl` files during a build (set breakpoints, step - through code, inspect variables, and so on) - -Find [the plugin on the Visual Studio -marketplace](https://marketplace.visualstudio.com/items?itemName=BazelBuild.vscode-bazel). -The plugin is [open source](https://github.com/bazelbuild/vscode-bazel). - -See also: [Autocomplete for Source Code](#autocomplete-for-source-code) - -### Atom - -Find the [`language-bazel` package](https://atom.io/packages/language-bazel) -on the Atom package manager. - -See also: [Autocomplete for Source Code](#autocomplete-for-source-code) - -### Vim - -See [`bazelbuild/vim-bazel` on GitHub](https://github.com/bazelbuild/vim-bazel) - -See also: [Autocomplete for Source Code](#autocomplete-for-source-code) - -### Emacs - -See [`bazelbuild/bazel-emacs-mode` on -GitHub](https://github.com/bazelbuild/emacs-bazel-mode) - -See also: [Autocomplete for Source Code](#autocomplete-for-source-code) - -### Visual Studio - -[Lavender](https://github.com/tmandry/lavender) is an experimental project for -generating Visual Studio projects that use Bazel for building. - -### Eclipse - -[Bazel Eclipse Feature](https://github.com/salesforce/bazel-eclipse) -is a set of plugins for importing Bazel packages into an Eclipse workspace as -Eclipse projects. - -## Autocomplete for Source Code - -### C Language Family (C++, C, Objective-C, and Objective-C++) - -[`hedronvision/bazel-compile-commands-extractor`](https://github.com/hedronvision/bazel-compile-commands-extractor) enables autocomplete, smart navigation, quick fixes, and more in a wide variety of extensible editors, including VSCode, Vim, Emacs, Atom, and Sublime. It lets language servers, like clangd and ccls, and other types of tooling, draw upon Bazel's understanding of how `cc` and `objc` code will be compiled, including how it configures cross-compilation for other platforms. 
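For a sense of how this is wired up, that project's README documents a `refresh_compile_commands` rule along these lines (a sketch based on its documented usage; the target name and flags here are placeholders):

```python
# In a BUILD file, after setting up hedron_compile_commands in the workspace:
load("@hedron_compile_commands//:refresh_compile_commands.bzl", "refresh_compile_commands")

refresh_compile_commands(
    name = "refresh_compile_commands",
    # Map the targets to index to any extra flags they need (placeholders).
    targets = {
        "//my/app:binary": "--compilation_mode=dbg",
    },
)
```

Running `bazel run :refresh_compile_commands` then generates a `compile_commands.json` that clangd and similar tools can consume.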
- -### Java - -[`georgewfraser/java-language-server`](https://github.com/georgewfraser/java-language-server) - Java Language Server (LSP) with support for Bazel-built projects - -## Automatically run build and test on file change - -[Bazel watcher](https://github.com/bazelbuild/bazel-watcher) is a -tool for building Bazel targets when source files change. - -## Building your own IDE plugin - -Read the [**IDE support** blog -post](https://blog.bazel.build/2016/06/10/ide-support.html) to learn more about -the Bazel APIs to use when building an IDE plugin. diff --git a/7.6.1/install/index.mdx b/7.6.1/install/index.mdx deleted file mode 100644 index 396812c..0000000 --- a/7.6.1/install/index.mdx +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: 'Installing Bazel' ---- - - - -This page describes the various platforms supported by Bazel and links -to the packages for more details. - -[Bazelisk](/install/bazelisk) is the recommended way to install Bazel on [Ubuntu Linux](/install/ubuntu), [macOS](/install/os-x), and [Windows](/install/windows). - -## Community-supported packages - -Bazel community members maintain these packages. The Bazel team doesn't -officially support them. Contact the package maintainers for support. - -* [Arch Linux][arch] -* [CentOS 6](https://github.com/sub-mod/bazel-builds) -* [Debian](https://qa.debian.org/developer.php?email=team%2Bbazel%40tracker.debian.org) -* [FreeBSD](https://www.freshports.org/devel/bazel) -* [Gentoo](https://packages.gentoo.org/packages/dev-util/bazel) -* [Linuxbrew](https://github.com/Linuxbrew/homebrew-core/blob/master/Formula/bazel.rb) -* [Nixpkgs](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/tools/build-managers/bazel) -* [openSUSE](/install/suse) -* [Parabola](https://www.parabola.nu/packages/?q=bazel) -* [Scoop](https://github.com/scoopinstaller/scoop-main/blob/master/bucket/bazel.json) -* [Raspberry Pi](https://github.com/koenvervloesem/bazel-on-arm/blob/master/README.md) - -## Community-supported architectures - -* [ppc64el](https://ftp2.osuosl.org/pub/ppc64el/bazel/) - -For other platforms, you can try to [compile from source](/install/compile-source). - -[arch]: https://archlinux.org/packages/extra/x86_64/bazel/ diff --git a/7.6.1/install/os-x.mdx b/7.6.1/install/os-x.mdx deleted file mode 100644 index 7d4a0ac..0000000 --- a/7.6.1/install/os-x.mdx +++ /dev/null @@ -1,140 +0,0 @@ ---- -title: 'Installing Bazel on macOS' ---- - - - -This page describes how to install Bazel on macOS and set up your environment. - -You can install Bazel on macOS using one of the following methods: - -* *Recommended*: [Use Bazelisk](/install/bazelisk) -* [Use Homebrew](#install-on-mac-os-x-homebrew) -* [Use the binary installer](#install-with-installer-mac-os-x) -* [Compile Bazel from source](/install/compile-source) - -Bazel comes with two completion scripts. After installing Bazel, you can: - -* Access the [bash completion script](/install/completion#bash) -* Install the [zsh completion script](/install/completion#zsh) - -

-<h2 id="install-on-mac-os-x-homebrew">Installing using Homebrew</h2>
- -### Step 1: Install Homebrew on macOS - -Install [Homebrew](https://brew.sh/) (a one-time step): - -```posix-terminal -/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" -``` - -### Step 2: Install Bazel via Homebrew - -Install the Bazel package via Homebrew as follows: - -```posix-terminal -brew install bazel -``` - -All set! You can confirm Bazel is installed successfully by running the -following command: - -```posix-terminal -bazel --version -``` - -Once installed, you can upgrade to a newer version of Bazel using the -following command: - -```posix-terminal -brew upgrade bazel -``` - -

-<h2 id="install-with-installer-mac-os-x">Installing using the binary installer</h2>
-
-The binary installers are on Bazel's
-[GitHub releases page](https://github.com/bazelbuild/bazel/releases).
-
-The installer contains the Bazel binary. Some additional libraries
-must also be installed for Bazel to work.
-
-### Step 1: Install Xcode command line tools
-
-If you don't intend to use `ios_*` rules, it is sufficient to install the Xcode
-command line tools package by using `xcode-select`:
-
-```posix-terminal
-xcode-select --install
-```
-
-Otherwise, for `ios_*` rule support, you must have Xcode 6.1 or later with iOS
-SDK 8.1 installed on your system.
-
-Download Xcode from the
-[App Store](https://apps.apple.com/us/app/xcode/id497799835) or the
-[Apple Developer site](https://developer.apple.com/download/more/?=xcode).
-
-Once Xcode is installed, accept the license agreement for all users with the
-following command:
-
-```posix-terminal
-sudo xcodebuild -license accept
-```
-
-### Step 2: Download the Bazel installer
-
-Next, download the Bazel binary installer named
-`bazel-<version>-installer-darwin-x86_64.sh` from the
-[Bazel releases page on GitHub](https://github.com/bazelbuild/bazel/releases).
-
-**On macOS Catalina or newer (macOS >= 11)**, due to Apple's new app signing requirements,
-you need to download the installer from the terminal using `curl`, replacing
-the version variable with the Bazel version you want to download:
-
-```posix-terminal
-export BAZEL_VERSION=5.2.0
-
-curl -fLO "https://github.com/bazelbuild/bazel/releases/download/$BAZEL_VERSION/bazel-$BAZEL_VERSION-installer-darwin-x86_64.sh"
-```
-
-This is a temporary workaround until the macOS release flow supports
-signing ([#9304](https://github.com/bazelbuild/bazel/issues/9304)).
-
-### Step 3: Run the installer
-
-Run the Bazel installer as follows:
-
-```posix-terminal
-chmod +x "bazel-$BAZEL_VERSION-installer-darwin-x86_64.sh"
-./bazel-$BAZEL_VERSION-installer-darwin-x86_64.sh --user
-```
-
-The `--user` flag installs Bazel to the `$HOME/bin` directory on your system and
-sets the `.bazelrc` path to `$HOME/.bazelrc`. Use the `--help` command to see
-additional installation options.
-
-If you are **on macOS Catalina or newer (macOS >= 11)** and get an error that _**“bazel-real” cannot be
-opened because the developer cannot be verified**_, you need to re-download
-the installer from the terminal using `curl` as a workaround; see Step 2 above.
-
-### Step 4: Set up your environment
-
-If you ran the Bazel installer with the `--user` flag as above, the Bazel
-executable is installed in your `$HOME/bin` directory.
-It's a good idea to add this directory to your default paths, as follows:
-
-```posix-terminal
-export PATH="$PATH:$HOME/bin"
-```
-
-You can also add this command to your `~/.bashrc`, `~/.zshrc`, or `~/.profile`
-file.
-
-All set! You can confirm Bazel is installed successfully by running the
-following command:
-
-```posix-terminal
-bazel --version
-```
-To update to a newer release of Bazel, download and install the desired version.
-
diff --git a/7.6.1/install/suse.mdx b/7.6.1/install/suse.mdx
deleted file mode 100644
index a4d2e9e..0000000
--- a/7.6.1/install/suse.mdx
+++ /dev/null
@@ -1,35 +0,0 @@
----
-title: 'Installing Bazel on openSUSE Tumbleweed & Leap'
----
-
-
-
-This page describes how to install Bazel on openSUSE Tumbleweed and Leap.
-
-`NOTE:` The Bazel team does not officially maintain openSUSE support.
For issues -using Bazel on openSUSE please file a ticket at [bugzilla.opensuse.org](https://bugzilla.opensuse.org/). - -Packages are provided for openSUSE Tumbleweed and Leap. You can find all -available Bazel versions via openSUSE's [software search](https://software.opensuse.org/search?utf8=%E2%9C%93&baseproject=ALL&q=bazel). - -The commands below must be run either via `sudo` or while logged in as `root`. - -## Installing Bazel on openSUSE - -Run the following commands to install the package. If you need a specific -version, you can install it via the specific `bazelXXX` package, otherwise, -just `bazel` is enough: - -To install the latest version of Bazel, run: - -```posix-terminal -zypper install bazel -``` - -You can also install a specific version of Bazel by specifying the package -version with `bazel{{ '' }}version{{ '' }}`. For example, to install -Bazel 4.2, run: - -```posix-terminal -zypper install bazel4.2 -``` diff --git a/7.6.1/install/ubuntu.mdx b/7.6.1/install/ubuntu.mdx deleted file mode 100644 index 79a615c..0000000 --- a/7.6.1/install/ubuntu.mdx +++ /dev/null @@ -1,162 +0,0 @@ ---- -title: 'Installing Bazel on Ubuntu' ---- - - - -This page describes the options for installing Bazel on Ubuntu. -It also provides links to the Bazel completion scripts and the binary installer, -if needed as a backup option (for example, if you don't have admin access). - -Supported Ubuntu Linux platforms: - -* 22.04 (LTS) -* 20.04 (LTS) -* 18.04 (LTS) - -Bazel should be compatible with other Ubuntu releases and Debian -"stretch" and above, but is untested and not guaranteed to work. - -Install Bazel on Ubuntu using one of the following methods: - -* *Recommended*: [Use Bazelisk](/install/bazelisk) -* [Use our custom APT repository](#install-on-ubuntu) -* [Use the binary installer](#binary-installer) -* [Use the Bazel Docker container](#docker-container) -* [Compile Bazel from source](/install/compile-source) - -**Note:** For Arm-based systems, the APT repository does not contain an `arm64` -release, and there is no binary installer available. Either use Bazelisk or -compile from source. - -Bazel comes with two completion scripts. After installing Bazel, you can: - -* Access the [bash completion script](/install/completion#bash) -* Install the [zsh completion script](/install/completion#zsh) - -## Using Bazel's apt repository - -### Step 1: Add Bazel distribution URI as a package source - -**Note:** This is a one-time setup step. - -```posix-terminal -sudo apt install apt-transport-https curl gnupg -y -curl -fsSL https://bazel.build/bazel-release.pub.gpg | gpg --dearmor >bazel-archive-keyring.gpg -sudo mv bazel-archive-keyring.gpg /usr/share/keyrings -echo "deb [arch=amd64 signed-by=/usr/share/keyrings/bazel-archive-keyring.gpg] https://storage.googleapis.com/bazel-apt stable jdk1.8" | sudo tee /etc/apt/sources.list.d/bazel.list -``` - -The component name "jdk1.8" is kept only for legacy reasons and doesn't relate -to supported or included JDK versions. Bazel releases are Java-version agnostic. -Changing the "jdk1.8" component name would break existing users of the repo. - -### Step 2: Install and update Bazel - -```posix-terminal -sudo apt update && sudo apt install bazel -``` - -Once installed, you can upgrade to a newer version of Bazel as part of your normal system updates: - -```posix-terminal -sudo apt update && sudo apt full-upgrade -``` - -The `bazel` package always installs the latest stable version of Bazel. 
-
-The `bazel` package always installs the latest stable version of Bazel. You
-can install specific, older versions of Bazel in addition to the latest one,
-like this:
-
-```posix-terminal
-sudo apt install bazel-1.0.0
-```
-
-This installs Bazel 1.0.0 as `/usr/bin/bazel-1.0.0` on your system. This
-can be useful if you need a specific version of Bazel to build a project, for
-example because it uses a `.bazelversion` file to explicitly state with which
-Bazel version it should be built.
-
-Optionally, you can set `bazel` to a specific version by creating a symlink:
-
-```posix-terminal
-sudo ln -s /usr/bin/bazel-1.0.0 /usr/bin/bazel
-bazel --version  # 1.0.0
-```
-
-### Step 3: Install a JDK (optional)
-
-Bazel includes a private, bundled JRE as its runtime and doesn't require you to
-install any specific version of Java.
-
-However, if you want to build Java code using Bazel, you have to install a JDK:
-
-```posix-terminal
-sudo apt install default-jdk
-```
-
-## Using the binary installer
-
-Generally, you should use the apt repository, but the binary installer
-can be useful if you don't have admin permissions on your machine or
-can't add custom repositories.
-
-The binary installers can be downloaded from Bazel's [GitHub releases page](https://github.com/bazelbuild/bazel/releases).
-
-The installer contains the Bazel binary and extracts it into your `$HOME/bin`
-folder. Some additional libraries must be installed manually for Bazel to work.
-
-### Step 1: Install required packages
-
-Bazel needs a C++ compiler and unzip / zip in order to work:
-
-```posix-terminal
-sudo apt install g++ unzip zip
-```
-
-If you want to build Java code using Bazel, install a JDK:
-
-```posix-terminal
-sudo apt install default-jdk
-```
-
-### Step 2: Run the installer
-
-Next, download the Bazel binary installer named `bazel-<version>-installer-linux-x86_64.sh`
-from the [Bazel releases page on GitHub](https://github.com/bazelbuild/bazel/releases).
-
-Run it as follows:
-
-```posix-terminal
-chmod +x bazel-<version>-installer-linux-x86_64.sh
-
-./bazel-<version>-installer-linux-x86_64.sh --user
-```
-
-The `--user` flag installs Bazel to the `$HOME/bin` directory on your system and
-sets the `.bazelrc` path to `$HOME/.bazelrc`. Use the `--help` flag to see
-additional installation options.
-
-### Step 3: Set up your environment
-
-If you ran the Bazel installer with the `--user` flag as above, the Bazel
-executable is installed in your `$HOME/bin` directory.
-It's a good idea to add this directory to your default paths, as follows:
-
-```posix-terminal
-export PATH="$PATH:$HOME/bin"
-```
-
-You can also add this command to your `~/.bashrc` or `~/.zshrc` file to make it
-permanent.
-
-## Using the Bazel Docker container
-
-We publish a Docker container with Bazel installed for each Bazel version at `gcr.io/bazel-public/bazel`.
-You can use the Docker container as follows:
-
-```
-$ docker pull gcr.io/bazel-public/bazel:<version>
-```
-
-The Docker container is built by [these steps](https://github.com/bazelbuild/continuous-integration/tree/master/bazel/oci).
-
diff --git a/7.6.1/migrate/index.mdx b/7.6.1/migrate/index.mdx
deleted file mode 100644
index 5d96c4a..0000000
--- a/7.6.1/migrate/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: 'Migrating to Bazel'
----
-
-
-
-This page links to migration guides for Bazel.
- -* [Maven](/migrate/maven) -* [Xcode](/migrate/xcode) -* [CocoaPods](/migrate/cocoapods) diff --git a/7.6.1/migrate/maven.mdx b/7.6.1/migrate/maven.mdx deleted file mode 100644 index a47e135..0000000 --- a/7.6.1/migrate/maven.mdx +++ /dev/null @@ -1,257 +0,0 @@ ---- -title: 'Migrating from Maven to Bazel' ---- - - - -This page describes how to migrate from Maven to Bazel, including the -prerequisites and installation steps. It describes the differences -between Maven and Bazel, and provides a migration example using the -Guava project. - -When migrating from any build tool to Bazel, it's best to have both build -tools running in parallel until you have fully migrated your development team, -CI system, and any other relevant systems. You can run Maven and Bazel in the -same repository. - -Note: While Bazel supports downloading and publishing Maven artifacts with -[rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external), -it does not directly support Maven-based plugins. Maven plugins can't be -directly run by Bazel since there's no Maven compatibility layer. - -## Before you begin - -* [Install Bazel](/install) if it's not yet installed. -* If you're new to Bazel, go through the tutorial - [Introduction to Bazel: Build Java](/start/java) before you start - migrating. The tutorial explains Bazel's concepts, structure, and label - syntax. - -## Differences between Maven and Bazel - -* Maven uses top-level `pom.xml` file(s). Bazel supports multiple build - files and multiple targets per `BUILD` file, allowing for builds that - are more incremental than Maven's. -* Maven takes charge of steps for the deployment process. Bazel does - not automate deployment. -* Bazel enables you to express dependencies between languages. -* As you add new sections to the project, with Bazel you may need to add new - `BUILD` files. Best practice is to add a `BUILD` file to each new Java package. - -## Migrate from Maven to Bazel - -The steps below describe how to migrate your project to Bazel: - -1. [Create the WORKSPACE file](#1-build) -2. [Create one BUILD file](#2-build) -3. [Create more BUILD files](#3-build) -4. [Build using Bazel](#4-build) - -Examples below come from a migration of the -[Guava project](https://github.com/google/guava) from Maven to Bazel. -The Guava project used is release `v31.1`. The examples using Guava do not walk through -each step in the migration, but they do show the files and contents that are -generated or added manually for the migration. - -``` -$ git clone https://github.com/google/guava.git && cd guava -$ git checkout v31.1 -``` - -### 1. Create the WORKSPACE file - -Create a file named `WORKSPACE` at the root of your project. If your project -has no external dependencies, the workspace file can be empty. - -If your project depends on files or packages that are not in one of the -project's directories, specify these external dependencies in the workspace -file. To automate the listing of external dependencies for the workspace file, -use `rules_jvm_external`. For instructions about using this ruleset, see -[the README](https://github.com/bazelbuild/rules_jvm_external/#rules_jvm_external). - -Note: The previously recommended tool, `generate_workspace`, is no longer -maintained by the Bazel team. - -#### Guava project example: external dependencies - -You can list the external dependencies of the -[Guava project](https://github.com/google/guava) with the -[`rules_jvm_external`](https://github.com/bazelbuild/rules_jvm_external) -ruleset. 
- -Add the following snippet to the `WORKSPACE` file: - -```python -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") - -RULES_JVM_EXTERNAL_TAG = "4.3" -RULES_JVM_EXTERNAL_SHA = "6274687f6fc5783b589f56a2f1ed60de3ce1f99bc4e8f9edef3de43bdf7c6e74" - -http_archive( - name = "rules_jvm_external", - sha256 = RULES_JVM_EXTERNAL_SHA, - strip_prefix = "rules_jvm_external-%s" % RULES_JVM_EXTERNAL_TAG, - url = "https://github.com/bazelbuild/rules_jvm_external/archive/%s.zip" % RULES_JVM_EXTERNAL_TAG, -) - -load("@rules_jvm_external//:defs.bzl", "maven_install") - -maven_install( - artifacts = [ - "com.google.code.findbugs:jsr305:3.0.2", - "com.google.errorprone:error_prone_annotations:2.11.0", - "com.google.j2objc:j2objc-annotations:1.3", - "org.codehaus.mojo:animal-sniffer-annotations:1.20", - "org.checkerframework:checker-qual:3.12.0", - ], - repositories = [ - "https://repo1.maven.org/maven2", - ], -) -``` - -### 2. Create one BUILD file - -Now that you have your workspace defined and external dependencies (if -applicable) listed, you need to create `BUILD` files to describe how your project -should be built. Unlike Maven with its one `pom.xml` file, Bazel can use many -`BUILD` files to build a project. These files specify multiple build targets, -which allow Bazel to produce incremental builds. - -Add `BUILD` files in stages. Start with adding one `BUILD` file -at the root of your project and using it to do an initial build using Bazel. -Then, you refine your build by adding more `BUILD` files with more granular -targets. - -1. In the same directory as your `WORKSPACE` file, create a text file and - name it `BUILD`. - -2. In this `BUILD` file, use the appropriate rule to create one target to - build your project. Here are some tips: - - * Use the appropriate rule: - * To build projects with a single Maven module, use the - `java_library` rule as follows: - - ```python - java_library( - name = "everything", - srcs = glob(["src/main/java/**/*.java"]), - resources = glob(["src/main/resources/**"]), - deps = ["//:all-external-targets"], - ) - ``` - * To build projects with multiple Maven modules, use the - `java_library` rule as follows: - - ```python - java_library( - name = "everything", - srcs = glob([ - "Module1/src/main/java/**/*.java", - "Module2/src/main/java/**/*.java", - ... - ]), - resources = glob([ - "Module1/src/main/resources/**", - "Module2/src/main/resources/**", - ... - ]), - deps = ["//:all-external-targets"], - ) - ``` - * To build binaries, use the `java_binary` rule: - - ```python - java_binary( - name = "everything", - srcs = glob(["src/main/java/**/*.java"]), - resources = glob(["src/main/resources/**"]), - deps = ["//:all-external-targets"], - main_class = "com.example.Main" - ) - ``` - * Specify the attributes: - * `name`: Give the target a meaningful name. In the examples above, - the target is called "everything." - * `srcs`: Use globbing to list all .java files in your project. - * `resources`: Use globbing to list all resources in your project. - * `deps`: You need to determine which external dependencies your - project needs. For example, if you generated a list of external - dependencies using the tool `generate_workspace`, the dependencies - for `java_library` are the libraries listed in the - `generated_java_libraries` macro. - * Take a look at the - [example below of this top-level BUILD file](#guava-2) from - the migration of the Guava project. - -3. 
Now that you have a `BUILD` file at the root of your project, build
-   your project to ensure that it works. On the command line, from your
-   workspace directory, use `bazel build //:everything` to build your
-   project with Bazel.
-
-   The project has now been successfully built with Bazel. You will need
-   to add more `BUILD` files to allow incremental builds of the project.
-
-#### Guava project example: start with one BUILD file
-
-When migrating the Guava project to Bazel, initially one `BUILD` file is used
-to build the entire project. Here are the contents of this initial `BUILD`
-file in the workspace directory:
-
-```python
-java_library(
-    name = "everything",
-    srcs = glob([
-        "guava/src/**/*.java",
-        "futures/failureaccess/src/**/*.java",
-    ]),
-    deps = [
-        "@maven//:com_google_code_findbugs_jsr305",
-        "@maven//:com_google_errorprone_error_prone_annotations",
-        "@maven//:com_google_j2objc_j2objc_annotations",
-        "@maven//:org_checkerframework_checker_qual",
-        "@maven//:org_codehaus_mojo_animal_sniffer_annotations",
-    ],
-)
-```
-
-### 3. Create more BUILD files (optional)
-
-Bazel does work with just one `BUILD` file, as you saw after completing your first
-build. You should still consider breaking the build into smaller chunks by
-adding more `BUILD` files with granular targets.
-
-Multiple `BUILD` files with multiple targets will give the build increased
-granularity, allowing:
-
-* more incremental builds of the project,
-* more parallel execution of the build,
-* better maintainability of the build for future users, and
-* control over visibility of targets between packages, which can prevent
-  issues such as libraries containing implementation details leaking into
-  public APIs.
-
-Tips for adding more `BUILD` files:
-
-* You can start by adding a `BUILD` file to each Java package (a sketch of
-  such a file follows this list). Start with
-  Java packages that have the fewest dependencies and work your way up
-  to packages with the most dependencies.
-* As you add `BUILD` files and specify targets, add these new targets to the
-  `deps` sections of targets that depend on them. Note that the `glob()`
-  function does not cross package boundaries, so as the number
-  of packages grows the files matched by `glob()` will shrink.
-* Any time you add a `BUILD` file to a `main` directory, ensure that you add
-  a `BUILD` file to the corresponding `test` directory.
-* Take care to limit visibility properly between packages.
-* To simplify troubleshooting errors in your setup of `BUILD` files, ensure
-  that the project continues to build with Bazel as you add each build
-  file. Run `bazel build //...` to ensure all of your targets still build.
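-
-As a rough sketch of what one of these more granular `BUILD` files might look
-like — the package path, target names, and the internal dependency below are
-hypothetical, not taken from the Guava migration:
-
-```python
-# java/com/example/base/BUILD (hypothetical package)
-java_library(
-    name = "base",
-    srcs = glob(["*.java"]),
-    # Restrict visibility so implementation details don't leak into public APIs.
-    visibility = ["//java/com/example:__subpackages__"],
-    deps = [
-        # Targets in other packages that this one depends on.
-        "//java/com/example/annotations",
-        "@maven//:com_google_code_findbugs_jsr305",
-    ],
-)
-```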
-
-### 4. Build using Bazel
-
-You've been building using Bazel as you add `BUILD` files to validate the setup
-of the build.
-
-When you have `BUILD` files at the desired granularity, you can use Bazel
-to produce all of your builds.
diff --git a/7.6.1/migrate/xcode.mdx b/7.6.1/migrate/xcode.mdx
deleted file mode 100644
index 328b1ca..0000000
--- a/7.6.1/migrate/xcode.mdx
+++ /dev/null
@@ -1,266 +0,0 @@
----
-title: 'Migrating from Xcode to Bazel'
----
-
-
-
-This page describes how to build or test an Xcode project with Bazel. It
-describes the differences between Xcode and Bazel, and provides the steps
-for converting an Xcode project to a Bazel project. It also provides
-troubleshooting solutions to address common errors.
-
-## Differences between Xcode and Bazel
-
-* Bazel requires you to explicitly specify every build target and its
-  dependencies, plus the corresponding build settings via build rules.
-
-* Bazel requires all files on which the project depends to be present
-  within the workspace directory or specified as imports in the `WORKSPACE`
-  file.
-
-* When building Xcode projects with Bazel, the `BUILD` file(s) become the
-  source of truth. If you work on the project in Xcode, you must generate a
-  new version of the Xcode project that matches the `BUILD` files using
-  [rules_xcodeproj](https://github.com/buildbuddy-io/rules_xcodeproj/)
-  whenever you update the `BUILD` files. Certain changes to the `BUILD` files,
-  such as adding dependencies to a target, don't require regenerating the
-  project, which can speed up development. If you're not using Xcode, the
-  `bazel build` and `bazel test` commands provide build and test capabilities,
-  with certain limitations described later in this guide.
-
-## Before you begin
-
-Before you begin, do the following:
-
-1. [Install Bazel](/install) if you have not already done so.
-
-2. If you're not familiar with Bazel and its concepts, complete the
-   [iOS app tutorial](/start/ios-app). You should understand the Bazel
-   workspace, including the `WORKSPACE` and `BUILD` files, as well as the
-   concepts of targets, build rules, and Bazel packages.
-
-3. Analyze and understand the project's dependencies.
-
-### Analyze project dependencies
-
-Unlike Xcode, Bazel requires you to explicitly declare all dependencies for
-every target in the `BUILD` file.
-
-For more information on external dependencies, see
-[Working with external dependencies](/docs/external).
-
-## Build or test an Xcode project with Bazel
-
-To build or test an Xcode project with Bazel, do the following:
-
-1. [Create the `WORKSPACE` file](#create-workspace)
-
-2. [(Experimental) Integrate SwiftPM dependencies](#integrate-swiftpm)
-
-3. [Create a `BUILD` file:](#create-build-file)
-
-   a. [Add the application target](#add-app-target)
-
-   b. [(Optional) Add the test target(s)](#add-test-target)
-
-   c. [Add the library target(s)](#add-library-target)
-
-4. [(Optional) Granularize the build](#granularize-build)
-
-5. [Run the build](#run-build)
-
-6. [Generate the Xcode project with rules_xcodeproj](#generate-the-xcode-project-with-rules_xcodeproj)
-
-### Step 1: Create the `WORKSPACE` file
-
-Create a `WORKSPACE` file in a new directory. This directory becomes the Bazel
-workspace root. If the project uses no external dependencies, this file can be
-empty. If the project depends on files or packages that are not in one of the
-project's directories, specify these external dependencies in the `WORKSPACE`
-file.
-
-Note: Place the project source code within the directory tree containing the
-`WORKSPACE` file.
-
-### Step 2: (Experimental) Integrate SwiftPM dependencies
-
-To integrate SwiftPM dependencies into the Bazel workspace with [swift_bazel](https://github.com/cgrindel/swift_bazel),
-you must convert them into Bazel packages as described in the [following tutorial](https://chuckgrindel.com/swift-packages-in-bazel-using-swift_bazel/).
-
-Note: SwiftPM support is a manual process with many variables.
-SwiftPM integration with Bazel has not been fully verified and is not
-officially supported.
-
-### Step 3: Create a `BUILD` file
-
-Once you have defined the workspace and external dependencies, you need to
-create a `BUILD` file that tells Bazel how the project is structured. Create
-the `BUILD` file at the root of the Bazel workspace and configure it to do an
-initial build of the project as follows:
-
-* [Step 3a: Add the application target](#step-3a-add-the-application-target)
-* [Step 3b: (Optional) Add the test target(s)](#step-3b-optional-add-the-test-target-s)
-* [Step 3c: Add the library target(s)](#step-3c-add-the-library-target-s)
-
-**Tip:** To learn more about packages and other Bazel concepts, see
-[Workspaces, packages, and targets](/concepts/build-ref).
-
-#### Step 3a: Add the application target
-
-Add a [`macos_application`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-macos.md#macos_application)
-or an [`ios_application`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-ios.md#ios_application)
-rule target. This target builds a macOS or iOS application bundle, respectively.
-In the target, specify the following at the minimum:
-
-* `bundle_id` - the bundle ID (reverse-DNS path followed by app name) of the
-  binary.
-
-* `provisioning_profile` - provisioning profile from your Apple Developer
-  account (if building for an iOS device).
-
-* `families` (iOS only) - whether to build the application for iPhone, iPad,
-  or both.
-
-* `infoplists` - list of .plist files to merge into the final Info.plist file.
-
-* `minimum_os_version` - the minimum version of macOS or iOS that the
-  application supports. This ensures Bazel builds the application with the
-  correct API levels.
-
-#### Step 3b: (Optional) Add the test target(s)
-
-Bazel's [Apple build rules](https://github.com/bazelbuild/rules_apple) support
-running unit and UI tests on all Apple platforms. Add test targets as follows:
-
-* [`macos_unit_test`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-macos.md#macos_unit_test) to run library-based and application-based unit tests on macOS.
-
-* [`ios_unit_test`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-ios.md#ios_unit_test)
-  to build and run library-based unit tests on iOS.
-
-* [`ios_ui_test`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-ios.md#ios_ui_test)
-  to build and run user interface tests in the iOS simulator.
-
-* Similar test rules exist for [tvOS](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-tvos.md), [watchOS](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-watchos.md) and [visionOS](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-visionos.md).
-
-At the minimum, specify a value for the `minimum_os_version` attribute. While
-other packaging attributes, such as `bundle_identifier` and `infoplists`,
-default to the most commonly used values, ensure that those defaults are compatible
-with the project and adjust them as necessary. For tests that require the iOS
-simulator, also specify the `ios_application` target name as the value of the
-`test_host` attribute.
-
-#### Step 3c: Add the library target(s)
-
-Add an [`objc_library`](/reference/be/objective-c#objc_library)
-target for each Objective-C library and a [`swift_library`](https://github.com/bazelbuild/rules_swift/blob/master/doc/rules.md#swift_library)
-target for each Swift library on which the application and/or tests depend.
-
-Add the library targets as follows (a sketch follows this list):
-
-* Add the application library targets as dependencies to the application
-  targets.
-
-* Add the test library targets as dependencies to the test targets.
-
-* List the implementation sources in the `srcs` attribute.
-
-* List the headers in the `hdrs` attribute.
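-
-Put together, a minimal `BUILD` file for a Swift iOS app might look like the
-sketch below. All names, paths, and attribute values here are illustrative
-assumptions, not part of any particular project:
-
-```python
-load("@build_bazel_rules_apple//apple:ios.bzl", "ios_application")
-load("@build_bazel_rules_swift//swift:swift.bzl", "swift_library")
-
-swift_library(
-    name = "AppLib",
-    srcs = glob(["Sources/**/*.swift"]),  # beware of over-broad globs
-)
-
-ios_application(
-    name = "App",
-    bundle_id = "com.example.app",
-    families = ["iphone", "ipad"],
-    infoplists = ["Info.plist"],
-    minimum_os_version = "15.0",
-    deps = [":AppLib"],
-)
-```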
-
-Note: You can use the [`glob`](/reference/be/functions#glob)
-function to include all sources and/or headers of a certain type. Use it
-carefully as it might include files you do not want Bazel to build.
-
-You can browse existing examples for various types of applications directly in the
-[rules_apple examples directory](https://github.com/bazelbuild/rules_apple/tree/master/examples/). For example:
-
-* [macOS application targets](https://github.com/bazelbuild/rules_apple/tree/master/examples/macos)
-
-* [iOS application targets](https://github.com/bazelbuild/rules_apple/tree/master/examples/ios)
-
-* [Multi-platform applications (macOS, iOS, watchOS, tvOS)](https://github.com/bazelbuild/rules_apple/tree/master/examples/multi_platform)
-
-For more information on build rules, see [Apple Rules for Bazel](https://github.com/bazelbuild/rules_apple).
-
-At this point, it is a good idea to test the build:
-
-`bazel build //:<application-target>`
-
-### Step 4: (Optional) Granularize the build
-
-If the project is large, or as it grows, consider chunking it into multiple
-Bazel packages. This increased granularity provides:
-
-* Increased incrementality of builds,
-
-* Increased parallelization of build tasks,
-
-* Better maintainability for future users,
-
-* Better control over source code visibility across targets and packages. This
-  prevents issues such as libraries containing implementation details leaking
-  into public APIs.
-
-Tips for granularizing the project:
-
-* Put each library in its own Bazel package. Start with those requiring the
-  fewest dependencies and work your way up the dependency tree.
-
-* As you add `BUILD` files and specify targets, add these new targets to the
-  `deps` attributes of targets that depend on them.
-
-* The `glob()` function does not cross package boundaries, so as the number
-  of packages grows the files matched by `glob()` will shrink.
-
-* When adding a `BUILD` file to a `main` directory, also add a `BUILD` file to
-  the corresponding `test` directory.
-
-* Enforce healthy visibility limits across packages.
-
-* Build the project after each major change to the `BUILD` files and fix
-  build errors as you encounter them.
-
-### Step 5: Run the build
-
-Run the fully migrated build to ensure it completes with no errors or warnings.
-Run every application and test target individually to more easily find sources
-of any errors that occur.
-
-For example:
-
-```posix-terminal
-bazel build //:my-target
-```
-
-### Step 6: Generate the Xcode project with rules_xcodeproj
-
-When building with Bazel, the `WORKSPACE` and `BUILD` files become the source
-of truth about the build. To make Xcode aware of this, you must generate a
-Bazel-compatible Xcode project using [rules_xcodeproj](https://github.com/buildbuddy-io/rules_xcodeproj#features).
-
-### Troubleshooting
-
-Bazel errors can arise when it gets out of sync with the selected Xcode version,
-such as when you apply an update. Here are some things to try if you're
-experiencing errors with Xcode, for example "Xcode version must be specified to
-use an Apple CROSSTOOL":
-
-* Manually run Xcode and accept any terms and conditions.
-
-* Use `xcode-select` to indicate the correct version, accept the license, and
-  clear Bazel's state:
-
-```posix-terminal
-  sudo xcode-select -s /Applications/Xcode.app/Contents/Developer
-
-  sudo xcodebuild -license
-
-  bazel sync --configure
-```
- -Note: If you've saved your Xcode to a different path, you can use `xcode-select --s` to point to that path. diff --git a/7.6.1/query/aquery.mdx b/7.6.1/query/aquery.mdx deleted file mode 100644 index 18b4152..0000000 --- a/7.6.1/query/aquery.mdx +++ /dev/null @@ -1,385 +0,0 @@ ---- -title: 'Action Graph Query (aquery)' ---- - - - -The `aquery` command allows you to query for actions in your build graph. -It operates on the post-analysis Configured Target Graph and exposes -information about **Actions, Artifacts and their relationships.** - -`aquery` is useful when you are interested in the properties of the Actions/Artifacts -generated from the Configured Target Graph. For example, the actual commands run -and their inputs/outputs/mnemonics. - -The tool accepts several command-line [options](#command-options). -Notably, the aquery command runs on top of a regular Bazel build and inherits -the set of options available during a build. - -It supports the same set of functions that is also available to traditional -`query` but `siblings`, `buildfiles` and -`tests`. - -An example `aquery` output (without specific details): - -``` -$ bazel aquery 'deps(//some:label)' -action 'Writing file some_file_name' - Mnemonic: ... - Target: ... - Configuration: ... - ActionKey: ... - Inputs: [...] - Outputs: [...] -``` - -## Basic syntax - -A simple example of the syntax for `aquery` is as follows: - -`bazel aquery "aquery_function(function(//target))"` - -The query expression (in quotes) consists of the following: - -* `aquery_function(...)`: functions specific to `aquery`. - More details [below](#using-aquery-functions). -* `function(...)`: the standard [functions](/query/language#functions) - as traditional `query`. -* `//target` is the label to the interested target. - -``` -# aquery examples: -# Get the action graph generated while building //src/target_a -$ bazel aquery '//src/target_a' - -# Get the action graph generated while building all dependencies of //src/target_a -$ bazel aquery 'deps(//src/target_a)' - -# Get the action graph generated while building all dependencies of //src/target_a -# whose inputs filenames match the regex ".*cpp". -$ bazel aquery 'inputs(".*cpp", deps(//src/target_a))' -``` - -## Using aquery functions - -There are three `aquery` functions: - -* `inputs`: filter actions by inputs. -* `outputs`: filter actions by outputs -* `mnemonic`: filter actions by mnemonic - -`expr ::= inputs(word, expr)` - - The `inputs` operator returns the actions generated from building `expr`, - whose input filenames match the regex provided by `word`. - -`$ bazel aquery 'inputs(".*cpp", deps(//src/target_a))'` - -`outputs` and `mnemonic` functions share a similar syntax. - -You can also combine functions to achieve the AND operation. For example: - -``` - $ bazel aquery 'mnemonic("Cpp.*", (inputs(".*cpp", inputs("foo.*", //src/target_a))))' -``` - - The above command would find all actions involved in building `//src/target_a`, - whose mnemonics match `"Cpp.*"` and inputs match the patterns - `".*cpp"` and `"foo.*"`. - -Important: aquery functions can't be nested inside non-aquery functions. -Conceptually, this makes sense since the output of aquery functions is Actions, -not Configured Targets. 
-
-An example of the syntax error produced:
-
-```
-  $ bazel aquery 'deps(inputs(".*cpp", //src/target_a))'
-  ERROR: aquery filter functions (inputs, outputs, mnemonic) produce actions,
-  and therefore can't be the input of other function types: deps
-  deps(inputs(".*cpp", //src/target_a))
-```
-
-## Options
-
-### Build options
-
-`aquery` runs on top of a regular Bazel build and thus inherits the set of
-[options](/reference/command-line-reference#build-options)
-available during a build.
-
-### Aquery options
-
-#### `--output=(text|summary|proto|jsonproto|textproto), default=text`
-
-The default output format (`text`) is human-readable;
-use `proto`, `textproto`, or `jsonproto` for a machine-readable format.
-The proto message is `analysis.ActionGraphContainer`.
-
-#### `--include_commandline, default=true`
-
-Includes the content of the action command lines in the output (potentially large).
-
-#### `--include_artifacts, default=true`
-
-Includes the names of the action inputs and outputs in the output (potentially large).
-
-#### `--include_aspects, default=true`
-
-Whether to include Aspect-generated actions in the output.
-
-#### `--include_param_files, default=false`
-
-Include the content of the param files used in the command (potentially large).
-
-Warning: Enabling this flag will automatically enable the `--include_commandline` flag.
-
-#### `--include_file_write_contents, default=false`
-
-Include the file contents for the `actions.write()` action and the contents of the
-manifest file for the `SourceSymlinkManifest` action. The file contents are
-returned in the `file_contents` field with the proto-based output formats.
-With `--output=text`, the output includes a
-```
-FileWriteContents: [<base64-encoded file contents>]
-```
-line.
-
-#### `--skyframe_state, default=false`
-
-Without performing extra analysis, dump the Action Graph from Skyframe.
-
-Note: Specifying a target with `--skyframe_state` is currently not supported.
-This flag is only available with `--output=proto` or `--output=textproto`.
-
-## Other tools and features
-
-### Querying against the state of Skyframe
-
-[Skyframe](/reference/skyframe) is the evaluation and
-incrementality model of Bazel. On each instance of the Bazel server, Skyframe stores the dependency graph
-constructed from the previous runs of the [analysis phase](/run/build#analysis).
-
-In some cases, it is useful to query the Action Graph on Skyframe.
-An example use case would be:
-
-1. Run `bazel build //target_a`
-2. Run `bazel build //target_b`
-3. File `foo.out` was generated.
-
-_As a Bazel user, I want to determine if `foo.out` was generated from building
-`//target_a` or `//target_b`_.
-
-One could run `bazel aquery 'outputs("foo.out", //target_a)'` and
-`bazel aquery 'outputs("foo.out", //target_b)'` to figure out the action responsible
-for creating `foo.out`, and in turn the target. However, the number of different
-targets previously built can be larger than 2, which makes running multiple `aquery`
-commands a hassle.
-
-As an alternative, the `--skyframe_state` flag can be used:
-
-```
-  # List all actions on Skyframe's action graph
-  $ bazel aquery --output=proto --skyframe_state
-
-  # or
-
-  # List all actions on Skyframe's action graph, whose output matches "foo.out"
-  $ bazel aquery --output=proto --skyframe_state 'outputs("foo.out")'
-```
-
-With `--skyframe_state` mode, `aquery` takes the content of the Action Graph
-that Skyframe keeps on the instance of Bazel, (optionally) performs filtering on it and
-outputs the content, without re-running the analysis phase.
-
-#### Special considerations
-
-##### Output format
-
-`--skyframe_state` is currently only available for `--output=proto`
-and `--output=textproto`.
-
-##### Non-inclusion of target labels in the query expression
-
-Currently, `--skyframe_state` queries the whole action graph that exists on Skyframe,
-regardless of the targets. Having the target label specified in the query together with
-`--skyframe_state` is considered a syntax error:
-
-```
-  # WRONG: Target Included
-  $ bazel aquery --output=proto --skyframe_state //target_a
-  ERROR: Error while parsing '//target_a)': Specifying build target(s) [//target_a] with --skyframe_state is currently not supported.
-
-  # WRONG: Target Included
-  $ bazel aquery --output=proto --skyframe_state 'inputs(".*.java", //target_a)'
-  ERROR: Error while parsing '//target_a)': Specifying build target(s) [//target_a] with --skyframe_state is currently not supported.
-
-  # CORRECT: Without Target
-  $ bazel aquery --output=proto --skyframe_state
-  $ bazel aquery --output=proto --skyframe_state 'inputs(".*.java")'
-```
-
-### Comparing aquery outputs
-
-You can compare the outputs of two different aquery invocations using the `aquery_differ` tool.
-For instance, when you make some changes to your rule definition and want to verify that the
-command lines being run did not change, `aquery_differ` is the tool for that.
-
-The tool is available in the [bazelbuild/bazel](https://github.com/bazelbuild/bazel/tree/master/tools/aquery_differ) repository.
-To use it, clone the repository to your local machine. An example usage:
-
-```
-  $ bazel run //tools/aquery_differ -- \
-  --before=/path/to/before.proto \
-  --after=/path/to/after.proto \
-  --input_type=proto \
-  --attrs=cmdline \
-  --attrs=inputs
-```
-
-The above command returns the difference between the `before` and `after` aquery outputs:
-which actions were present in one but not the other, which actions have different
-command lines or inputs in each aquery output, and so on. The result of running the above command would be:
-
-```
-  Aquery output 'after' change contains an action that generates the following outputs that aquery output 'before' change doesn't:
-  ...
-  /list of output files/
-  ...
-
-  [cmdline]
-  Difference in the action that generates the following output(s):
-    /path/to/abc.out
- --- /path/to/before.proto
- +++ /path/to/after.proto
- @@ -1,3 +1,3 @@
-  ...
-  /cmdline diff, in unified diff format/
-  ...
-```
-
-#### Command options
-
-`--before, --after`: The aquery output files to be compared.
-
-`--input_type=(proto|text_proto), default=proto`: the format of the input
-files. Support is provided for `proto` and `textproto` aquery output.
-
-`--attrs=(cmdline|inputs), default=cmdline`: the attributes of actions
-to be compared.
-
-### Aspect-on-aspect
-
-It is possible for [Aspects](/extending/aspects)
-to be applied on top of each other. The aquery output of the action generated by
-these Aspects would then include the _Aspect path_, which is the sequence of
-Aspects applied to the target which generated the action.
-
-An example of Aspect-on-Aspect:
-
-```
-  t0
-  ^
-  | <- a1
-  t1
-  ^
-  | <- a2
-  t2
-```
-
-Let `t_i` be a target of rule `r_i`, which applies an aspect `a_i`
-to its dependencies.
-
-Assume that `a2` generates an action `X` when applied to target `t0`. The text output of
-`bazel aquery --include_aspects 'deps(//t2)'` for action `X` would be:
-
-```
-  action ...
-  Mnemonic: ...
-  Target: //my_pkg:t0
-  Configuration: ...
-  AspectDescriptors: [//my_pkg:rule.bzl%a2(foo=...)
-    -> //my_pkg:rule.bzl%a1(bar=...)]
-  ...
-```
-
-This means that action `X` was generated by Aspect `a2` applied onto
-`a1(t0)`, where `a1(t0)` is the result of Aspect `a1` applied
-onto target `t0`.
-
-Each `AspectDescriptor` has the following format:
-
-```
-  AspectClass([param=value,...])
-```
-
-`AspectClass` could be the name of the Aspect class (for native Aspects) or
-`bzl_file%aspect_name` (for Starlark Aspects). `AspectDescriptor`s are
-sorted in topological order of the
-[dependency graph](/extending/aspects#aspect_basics).
-
-### Linking with the JSON profile
-
-While aquery provides information about the actions being run in a build (why they're being run,
-their inputs/outputs), the [JSON profile](/rules/performance#performance-profiling)
-tells us the timing and duration of their execution.
-It is possible to combine these two sets of information via a common denominator: an action's primary output.
-
-To include actions' outputs in the JSON profile, generate the profile with
-`--experimental_include_primary_output --noexperimental_slim_json_profile`.
-Slim profiles are incompatible with the inclusion of primary outputs. An action's primary output
-is included by default by aquery.
-
-We don't currently provide a canonical tool to combine these two data sources, but you should be
-able to build your own script with the above information.
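-
-As a rough sketch of such a script, the Python below joins an
-`--output=jsonproto` aquery dump with a decompressed JSON profile by primary
-output path. The file names are placeholders, and the profile-event key that
-holds the primary output is an assumption that may vary across Bazel versions,
-so treat this as a starting point rather than a supported tool:
-
-```python
-import json
-
-with open("aquery.json") as f:   # bazel aquery 'deps(//foo)' --output=jsonproto
-    graph = json.load(f)
-with open("profile.json") as f:  # a gunzipped JSON profile
-    profile = json.load(f)
-
-# analysis_v2 represents paths as a fragment tree; resolve each chain of
-# fragments back into an exec path.
-fragments = {frag["id"]: frag for frag in graph.get("pathFragments", [])}
-
-def exec_path(fragment_id):
-    parts = []
-    while fragment_id:
-        fragment = fragments[fragment_id]
-        parts.append(fragment["label"])
-        fragment_id = fragment.get("parentId")
-    return "/".join(reversed(parts))
-
-artifact_path = {a["id"]: exec_path(a["pathFragmentId"])
-                 for a in graph.get("artifacts", [])}
-mnemonic_by_output = {artifact_path[action["primaryOutputId"]]: action.get("mnemonic", "")
-                      for action in graph.get("actions", [])}
-
-# Assumption: the args key naming the primary output in profile events has
-# varied across Bazel versions; inspect your own profile to confirm it.
-CANDIDATE_KEYS = ("out", "primaryOutput", "primary_output")
-for event in profile.get("traceEvents", []):
-    args = event.get("args") or {}
-    out = next((args[key] for key in CANDIDATE_KEYS if key in args), None)
-    if out in mnemonic_by_output:
-        print(f"{event.get('dur', 0) / 1e6:10.3f}s  "
-              f"{mnemonic_by_output[out]:<20}  {out}")
-```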
-
-## Known issues
-
-### Handling shared actions
-
-Sometimes actions are
-[shared](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/actions/Actions.java;l=59;drc=146d51aa1ec9dcb721a7483479ef0b1ac21d39f1)
-between configured targets.
-
-In the execution phase, those shared actions are
-[simply considered as one](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/actions/Actions.java;l=241;drc=003b8734036a07b496012730964ac220f486b61f) and only executed once.
-However, aquery operates on the pre-execution, post-analysis action graph, and hence treats these
-like separate actions whose output Artifacts have the exact same `execPath`. As a result,
-equivalent Artifacts appear duplicated.
-
-The list of aquery issues/planned features can be found on
-[GitHub](https://github.com/bazelbuild/bazel/labels/team-Performance).
-
-## FAQs
-
-### The ActionKey remains the same even though the content of an input file changed.
-
-In the context of aquery, the `ActionKey` refers to the string returned by
-[ActionAnalysisMetadata#getKey](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/actions/ActionAnalysisMetadata.java;l=89;drc=8b856f5484f0117b2aebc302f849c2a15f273310):
-
-```
-  Returns a string encoding all of the significant behaviour of this Action that might affect the
-  output. The general contract of `getKey` is this: if the work to be performed by the
-  execution of this action changes, the key must change.
-
-  ...
-
-  Examples of changes that should affect the key are:
-
-  - Changes to the BUILD file that materially affect the rule which gave rise to this Action.
-  - Changes to the command-line options, environment, or other global configuration resources
-    which affect the behaviour of this kind of Action (other than changes to the names of the
-    input/output files, which are handled externally).
-  - An upgrade to the build tools which changes the program logic of this kind of Action
-    (typically this is achieved by incorporating a UUID into the key, which is changed each
-    time the program logic of this action changes).
-  Note the following exception: for actions that discover inputs, the key must change if any
-  input names change or else action validation may falsely validate.
-```
-
-This excludes the changes to the content of the input files, and is not to be confused with
-[RemoteCacheClient#ActionKey](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/remote/common/RemoteCacheClient.java;l=38;drc=21577f202eb90ce94a337ebd2ede824d609537b6).
-
-## Updates
-
-For any issues/feature requests, please file an issue [here](https://github.com/bazelbuild/bazel/issues/new).
diff --git a/7.6.1/query/cquery.mdx b/7.6.1/query/cquery.mdx
deleted file mode 100644
index 725b09f..0000000
--- a/7.6.1/query/cquery.mdx
+++ /dev/null
@@ -1,643 +0,0 @@
----
-title: 'Configurable Query (cquery)'
----
-
-
-
-`cquery` is a variant of [`query`](/query/language) that correctly handles
-[`select()`](/docs/configurable-attributes) and build options' effects on the build
-graph.
-
-It achieves this by running over the results of Bazel's [analysis
-phase](/extending/concepts#evaluation-model),
-which integrates these effects. `query`, by contrast, runs over the results of
-Bazel's loading phase, before options are evaluated.
-
-For example:
-
-```
-$ cat > tree/BUILD <<EOF
-sh_library(
-    name = "ash",
-    deps = select({
-        ":excelsior": [":manna-ash"],
-        ":americana": [":white-ash"],
-        "//conditions:default": [":common-ash"],
-    }),
-)
-sh_library(name = "manna-ash")
-sh_library(name = "white-ash")
-sh_library(name = "common-ash")
-config_setting(
-    name = "excelsior",
-    values = {"define": "species=excelsior"},
-)
-config_setting(
-    name = "americana",
-    values = {"define": "species=americana"},
-)
-EOF
-```
-
-```
-# Traditional query: query doesn't know which select() branch you will choose,
-# so it conservatively lists all possible choices, including all used config_settings.
-$ bazel query "deps(//tree:ash)" --noimplicit_deps
-//tree:americana
-//tree:ash
-//tree:common-ash
-//tree:excelsior
-//tree:manna-ash
-//tree:white-ash
-
-# cquery: cquery lets you set build options at the command line and chooses
-# the exact dependencies that implies (and also the config_setting targets).
-$ bazel cquery "deps(//tree:ash)" --define species=excelsior --noimplicit_deps
-//tree:ash (9f87702)
-//tree:manna-ash (9f87702)
-//tree:americana (9f87702)
-//tree:excelsior (9f87702)
-```
-
-Each result includes a [unique identifier](#configurations) `(9f87702)` of
-the [configuration](/reference/glossary#configuration) the
-target is built with.
-
-Since `cquery` runs over the configured target graph, it doesn't have insight
-into artifacts like build actions nor access to [`test_suite`](/reference/be/general#test_suite)
-rules as they are not configured targets. For the former, see [`aquery`](/query/aquery).
-
-## Basic syntax
-
-A simple `cquery` call looks like:
-
-`bazel cquery "function(//target)"`
-
-The query expression `"function(//target)"` consists of the following:
-
-* **`function(...)`** is the function to run on the target. `cquery`
-  supports most
-  of `query`'s [functions](/query/language#functions), plus a
-  few new ones.
-* **`//target`** is the expression fed to the function. In this example, the
-  expression is a simple target. But the query language also allows nesting of functions.
-  See the [Query guide](/query/guide) for examples.
-
-`cquery` requires a target to run through the [loading and analysis](/extending/concepts#evaluation-model)
-phases. Unless otherwise specified, `cquery` parses the target(s) listed in the
-query expression. See [`--universe_scope`](#universe-scope)
-for querying dependencies of top-level build targets.
-
-## Configurations
-
-The line:
-
-```
-//tree:ash (9f87702)
-```
-
-means `//tree:ash` was built in a configuration with ID `9f87702`. For most
-targets, this is an opaque hash of the build option values defining the
-configuration.
-
-To see the configuration's complete contents, run:
-
-```
-$ bazel config 9f87702
-```
-
-`9f87702` is a prefix of the complete ID. This is because complete IDs are
-SHA-256 hashes, which are long and hard to follow. `cquery` understands any valid
-prefix of a complete ID, similar to
-[Git short hashes](https://git-scm.com/book/en/v2/Git-Tools-Revision-Selection#_revision_selection).
-To see complete IDs, run `$ bazel config`.
-
-## Target pattern evaluation
-
-`//foo` has a different meaning for `cquery` than for `query`. This is because
-`cquery` evaluates _configured_ targets and the build graph may have multiple
-configured versions of `//foo`.
-
-For `cquery`, a target pattern in the query expression evaluates
-to every configured target with a label that matches that pattern. Output is
-deterministic, but `cquery` makes no ordering guarantee beyond the
-[core query ordering contract](/query/language#graph-order).
-
-This produces subtler results for query expressions than with `query`.
-For example, the following can produce multiple results:
-
-```
-# Analyzes //foo in the target configuration, but also analyzes
-# //genrule_with_foo_as_tool which depends on an exec-configured
-# //foo. So there are two configured target instances of //foo in
-# the build graph.
-$ bazel cquery //foo --universe_scope=//foo,//genrule_with_foo_as_tool
-//foo (9f87702)
-//foo (exec)
-```
-
-If you want to precisely declare which instance to query over, use
-the [`config`](#config) function.
-
-See `query`'s [target pattern
-documentation](/query/language#target-patterns) for more information on target patterns.
-
-## Functions
-
-Of the [set of functions](/query/language#functions "list of query functions")
-supported by `query`, `cquery` supports all but [`visible`](/query/language#visible),
-[`siblings`](/query/language#siblings), [`buildfiles`](/query/language#buildfiles),
-and [`tests`](/query/language#tests).
-
-`cquery` also introduces the following new functions:
-
-### config
-
-`expr ::= config(expr, word)`
-
-The `config` operator attempts to find the configured target for
-the label denoted by the first argument and configuration specified by the
-second argument.
-
-Valid values for the second argument are `null` or a
-[custom configuration hash](#configurations). Hashes can be retrieved from `$
-bazel config` or a previous `cquery`'s output.
-
-Examples:
-
-```
-$ bazel cquery "config(//bar, 3732cc8)" --universe_scope=//foo
-```
-
-```
-$ bazel cquery "deps(//foo)"
-//bar (exec)
-//baz (exec)
-
-$ bazel cquery "config(//baz, 3732cc8)"
-```
-
-If not all results of the first argument can be found in the specified
-configuration, only those that can be found are returned. If no results
-can be found in the specified configuration, the query fails.
-
-## Options
-
-### Build options
-
-`cquery` runs over a regular Bazel build and thus inherits the set of
-[options](/reference/command-line-reference#build-options) available during a build.
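-
-For example, rerunning the earlier `//tree:ash` query with a different
-`--define` value selects the other `select()` branch; the configuration hash
-shown here is illustrative:
-
-```
-$ bazel cquery "deps(//tree:ash)" --define species=americana --noimplicit_deps
-//tree:ash (68fe92a)
-//tree:white-ash (68fe92a)
-//tree:americana (68fe92a)
-//tree:excelsior (68fe92a)
-```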
-
-### Using cquery options
-
-#### `--universe_scope` (comma-separated list)
-
-Often, the dependencies of configured targets go through
-[transitions](/extending/rules#configurations),
-which causes their configuration to differ from their dependent. This flag
-allows you to query a target as if it were built as a dependency or a transitive
-dependency of another target. For example:
-
-```
-# x/BUILD
-genrule(
-    name = "my_gen",
-    srcs = ["x.in"],
-    outs = ["x.cc"],
-    cmd = "$(locations :tool) $< >$@",
-    tools = [":tool"],
-)
-cc_binary(
-    name = "tool",
-    srcs = ["tool.cpp"],
-)
-```
-
-Genrules configure their tools in the
-[exec configuration](/extending/rules#configurations)
-so the following queries would produce the following outputs:
-
-| Query | Target Built | Output |
-| ----- | ------------ | ------ |
-| `bazel cquery "//x:tool"` | `//x:tool` | `//x:tool (target config)` |
-| `bazel cquery "//x:tool" --universe_scope="//x:my_gen"` | `//x:my_gen` | `//x:tool (exec config)` |
-
-If this flag is set, its contents are built. _If it's not set, all targets
-mentioned in the query expression are built_ instead. The transitive closure of the
-built targets is used as the universe of the query. Either way, the targets to
-be built must be buildable at the top level (that is, compatible with top-level
-options). `cquery` returns results in the transitive closure of these
-top-level targets.
-
-Even if it's possible to build all targets in a query expression at the top
-level, it may be beneficial to not do so. For example, explicitly setting
-`--universe_scope` could prevent building targets multiple times in
-configurations you don't care about. It could also help specify which configuration version of a
-target you're looking for (since it's not currently possible
-to fully specify this any other way). You should set this flag
-if your query expression is more complex than `deps(//foo)`.
-
-#### `--implicit_deps` (boolean, default=True)
-
-Setting this flag to false filters out all results that aren't explicitly set in
-the BUILD file and are instead set elsewhere by Bazel. This includes filtering resolved
-toolchains.
-
-#### `--tool_deps` (boolean, default=True)
-
-Setting this flag to false filters out all configured targets for which the
-path from the queried target to them crosses a transition between the target
-configuration and the
-[non-target configurations](/extending/rules#configurations).
-If the queried target is in the target configuration, setting `--notool_deps` will
-only return targets that also are in the target configuration. If the queried
-target is in a non-target configuration, setting `--notool_deps` will only return
-targets also in non-target configurations. This setting generally does not affect filtering
-of resolved toolchains.
-
-#### `--include_aspects` (boolean, default=True)
-
-Include dependencies added by [aspects](/extending/aspects).
-
-If this flag is disabled, `cquery somepath(X, Y)` and
-`cquery deps(X) | grep 'Y'` omit Y if X only depends on it through an aspect.
-
-## Output formats
-
-By default, cquery outputs results in a dependency-ordered list of label and configuration pairs.
-There are other options for exposing the results as well.
-
-### Transitions
-
-```
---transitions=lite
---transitions=full
-```
-
-Configuration [transitions](/extending/rules#configurations)
-are used to build targets underneath the top-level targets in different
-configurations than the top-level targets.
-
-For example, a target might impose a transition to the exec configuration on all
-dependencies in its `tools` attribute. These are known as attribute
-transitions. Rules can also impose transitions on their own configurations,
-known as rule class transitions. This output format outputs information about
-these transitions, such as what type they are and the effect they have on build
-options.
-
-This output format is triggered by the `--transitions` flag, which by default is
-set to `NONE`. It can be set to `FULL` or `LITE` mode. `FULL` mode outputs
-information about rule class transitions and attribute transitions, including a
-detailed diff of the options before and after the transition. `LITE` mode
-outputs the same information without the options diff.
-
-### Protocol message output
-
-```
---output=proto
-```
-
-This option causes the resulting targets to be printed in a binary protocol
-buffer form. The definition of the protocol buffer can be found at
-[src/main/protobuf/analysis_v2.proto](https://github.com/bazelbuild/bazel/blob/master/src/main/protobuf/analysis_v2.proto).
-
-`CqueryResult` is the top-level message containing the results of the cquery. It
-has a list of `ConfiguredTarget` messages and a list of `Configuration`
-messages. Each `ConfiguredTarget` has a `configuration_id` whose value is equal
-to that of the `id` field from the corresponding `Configuration` message.
-
-#### --[no]proto:include_configurations
-
-By default, cquery results return configuration information as part of each
-configured target. If you'd like to omit this information and get proto output
-that is formatted exactly like query's proto output, set this flag to false.
-
-See [query's proto output documentation](/query/language#output-formats)
-for more proto output-related options.
-
-Note: While selects are resolved both at the top level of returned
-targets and within attributes, all possible inputs for selects are still
-included as `rule_input` fields.
-
-### Graph output
-
-```
---output=graph
-```
-
-This option generates output as a Graphviz-compatible .dot file. See `query`'s
-[graph output documentation](/query/language#display-result-graph) for details. `cquery`
-also supports [`--graph:node_limit`](/query/language#graph-nodelimit) and
-[`--graph:factored`](/query/language#graph-factored).
-
-### Files output
-
-```
---output=files
-```
-
-This option prints a list of the output files produced by each target matched
-by the query, similar to the list printed at the end of a `bazel build`
-invocation. The output contains only the files advertised in the requested
-output groups as determined by the
-[`--output_groups`](/reference/command-line-reference#flag--output_groups) flag.
-It does include source files.
-
-All paths emitted by this output format are relative to the
-[execroot](https://bazel.build/remote/output-directories), which can be obtained
-via `bazel info execution_root`. If the `bazel-out` convenience symlink exists,
-paths to files in the main repository also resolve relative to the workspace
-directory.
-
-Note: The output of `bazel cquery --output=files //pkg:foo` contains the output
-files of `//pkg:foo` in *all* configurations that occur in the build (also see
-the [section on target pattern evaluation](#target-pattern-evaluation)). If that
-is not desired, wrap your query in [`config(..., target)`](#config).
-
-### Defining the output format using Starlark
-
-```
---output=starlark
-```
-
-This output format calls a [Starlark](/rules/language)
-function for each configured target in the query result, and prints the value
-returned by the call. The `--starlark:file` flag specifies the location of a
-Starlark file that defines a function named `format` with a single parameter,
-`target`. This function is called for each [Target](/rules/lib/builtins/Target)
-in the query result. Alternatively, for convenience, you may specify just the
-body of a function declared as `def format(target): return expr` by using the
-`--starlark:expr` flag.
-
-#### 'cquery' Starlark dialect
-
-The cquery Starlark environment differs from a BUILD or .bzl file. It includes
-all core Starlark
-[built-in constants and functions](https://github.com/bazelbuild/starlark/blob/master/spec.md#built-in-constants-and-functions),
-plus a few cquery-specific ones described below, but not (for example) `glob`,
-`native`, or `rule`, and it does not support load statements.
-
-##### build_options(target)
-
-`build_options(target)` returns a map whose keys are build option identifiers (see
-[Configurations](/extending/config))
-and whose values are their Starlark values. Build options whose values are not legal Starlark
-values are omitted from this map.
-
-If the target is an input file, `build_options(target)` returns None, as input file
-targets have a null configuration.
-
-##### providers(target)
-
-`providers(target)` returns a map whose keys are names of
-[providers](/extending/rules#providers)
-(for example, `"DefaultInfo"`) and whose values are their Starlark values. Providers
-whose values are not legal Starlark values are omitted from this map.
-
-#### Examples
-
-Print a space-separated list of the base names of all files produced by `//foo`:
-
-```
-  bazel cquery //foo --output=starlark \
-    --starlark:expr="' '.join([f.basename for f in target.files.to_list()])"
-```
-
-Print a space-separated list of the paths of all files produced by **rule** targets in
-`//bar` and its subpackages:
-
-```
-  bazel cquery 'kind(rule, //bar/...)' --output=starlark \
-    --starlark:expr="' '.join([f.path for f in target.files.to_list()])"
-```
-
-Print a list of the mnemonics of all actions registered by `//foo`:
-
-```
-  bazel cquery //foo --output=starlark \
-    --starlark:expr="[a.mnemonic for a in target.actions]"
-```
-
-Print a list of compilation outputs registered by a `cc_library` `//baz`:
-
-```
-  bazel cquery //baz --output=starlark \
-    --starlark:expr="[f.path for f in target.output_groups.compilation_outputs.to_list()]"
-```
-
-Print the value of the command line option `--javacopt` when building `//foo`:
-
-```
-  bazel cquery //foo --output=starlark \
-    --starlark:expr="build_options(target)['//command_line_option:javacopt']"
-```
-
-Print the label of each target with exactly one output. This example uses
-Starlark functions defined in a file.
-
-```
-  $ cat example.cquery
-
-  def has_one_output(target):
-    return len(target.files.to_list()) == 1
-
-  def format(target):
-    if has_one_output(target):
-      return target.label
-    else:
-      return ""
-
-  $ bazel cquery //baz --output=starlark --starlark:file=example.cquery
-```
-
-Print the label of each target which is strictly Python 3. This example uses
-Starlark functions defined in a file.
-
-```
-  $ cat example.cquery
-
-  def format(target):
-    p = providers(target)
-    py_info = p.get("PyInfo")
-    if py_info and py_info.has_py3_only_sources:
-      return target.label
-    else:
-      return ""
-
-  $ bazel cquery //baz --output=starlark --starlark:file=example.cquery
-```
-
-Extract a value from a user-defined provider:
-
-```
-  $ cat some_package/my_rule.bzl
-
-  MyRuleInfo = provider(fields={"color": "the name of a color"})
-
-  def _my_rule_impl(ctx):
-    ...
-    return [MyRuleInfo(color="red")]
-
-  my_rule = rule(
-    implementation = _my_rule_impl,
-    attrs = {...},
-  )
-
-  $ cat example.cquery
-
-  def format(target):
-    p = providers(target)
-    my_rule_info = p.get("//some_package:my_rule.bzl%MyRuleInfo")
-    if my_rule_info:
-      return my_rule_info.color
-    return ""
-
-  $ bazel cquery //baz --output=starlark --starlark:file=example.cquery
-```
-
-## cquery vs. query
-
-`cquery` and `query` complement each other and excel in
-different niches. Consider the following to decide which is right for you:
-
-* `cquery` follows specific `select()` branches to
-  model the exact graph you build. `query` doesn't know which
-  branch the build chooses, so it overapproximates by including all branches.
-* `cquery`'s precision requires building more of the graph than
-  `query` does. Specifically, `cquery`
-  evaluates _configured targets_ while `query` only
-  evaluates _targets_. This takes more time and uses more memory.
-* `cquery`'s interpretation of
-  the [query language](/query/language) introduces ambiguity
-  that `query` avoids. For example,
-  if `"//foo"` exists in two configurations, which one
-  should `cquery "deps(//foo)"` use?
-  The [`config`](#config) function can help with this.
-* As a newer tool, `cquery` lacks support for certain use
-  cases. See [Known issues](#known-issues) for details.
-
-## Known issues
-
-**All targets that `cquery` "builds" must have the same configuration.**
-
-Before evaluating queries, `cquery` triggers a build up to just
-before the point where build actions would execute. The targets it
-"builds" are by default selected from all labels that appear in the query
-expression (this can be overridden
-with [`--universe_scope`](#universe-scope)). These
-must have the same configuration.
-
-While these generally share the top-level "target" configuration,
-rules can change their own configuration with
-[incoming edge transitions](/extending/config#incoming-edge-transitions).
-This is where `cquery` falls short.
-
-Workaround: If possible, set `--universe_scope` to a stricter
-scope. For example:
-
-```
-# This command attempts to build the transitive closures of both //foo and
-# //bar. //bar uses an incoming edge transition to change its --cpu flag.
-$ bazel cquery 'somepath(//foo, //bar)'
-ERROR: Error doing post analysis query: Top-level targets //foo and //bar
-have different configurations (top-level targets with different
-configurations is not supported)
-
-# This command only builds the transitive closure of //foo, under which
-# //bar should exist in the correct configuration.
-$ bazel cquery 'somepath(//foo, //bar)' --universe_scope=//foo
-```
-
-**No support for [`--output=xml`](/query/language#xml).**
-
-**Non-deterministic output.**
-
-`cquery` does not automatically wipe the build graph from
-previous commands and is therefore prone to picking up results from past
-queries. For example, `genquery` exerts an exec transition on
-its `tools` attribute - that is, it configures its tools in the
-[exec configuration](/extending/rules#configurations).
-
-You can see the lingering effects of that transition below.
-
-```
-$ cat > foo/BUILD <<EOF
-genrule(
-    name = "my_gen",
-    srcs = ["x.in"],
-    outs = ["x.cc"],
-    cmd = "$(locations :tool) $< >$@",
-    tools = [":tool"],
-)
-cc_library(
-    name = "tool",
-)
-EOF
-
- $ bazel cquery "//foo:tool"
-tool(target_config)
-
- $ bazel cquery "deps(//foo:my_gen)"
-my_gen (target_config)
-tool (exec_config)
-...
-
- $ bazel cquery "//foo:tool"
-tool(exec_config)
-```
-
-Workaround: change any startup option to force re-analysis of configured targets.
-For example, add `--test_arg=<whatever>` to your build command.
-
-## Troubleshooting
-
-### Recursive target patterns (`/...`)
-
-If you encounter:
-
-```
-$ bazel cquery --universe_scope=//foo:app "somepath(//foo:app, //foo/...)"
-ERROR: Error doing post analysis query: Evaluation failed: Unable to load package '[foo]'
-because package is not in scope. Check that all target patterns in query expression are within the
---universe_scope of this query.
-```
-
-this incorrectly suggests package `//foo` isn't in scope even though
-`--universe_scope=//foo:app` includes it. This is due to design limitations in
-`cquery`.
As a workaround, explicitly include `//foo/...` in the universe -scope: - -``` -$ bazel cquery --universe_scope=//foo:app,//foo/... "somepath(//foo:app, //foo/...)" -``` - -If that doesn't work (for example, because some target in `//foo/...` can't -build with the chosen build flags), manually unwrap the pattern into its -constituent packages with a pre-processing query: - -``` -# Replace "//foo/..." with a subshell query call (not cquery!) outputting each package, piped into -# a sed call converting "<pkg>" to "//<pkg>:*", piped into a "+"-delimited line merge. -# Output looks like "//foo:*+//foo/bar:*+//foo/baz". -# -$ bazel cquery --universe_scope=//foo:app "somepath(//foo:app, $(bazel query //foo/... ---output=package | sed -e 's/^/\/\//' -e 's/$/:*/' | paste -sd "+" -))" -``` diff --git a/7.6.1/reference/glossary.mdx b/7.6.1/reference/glossary.mdx deleted file mode 100644 index bf4e6cf..0000000 --- a/7.6.1/reference/glossary.mdx +++ /dev/null @@ -1,689 +0,0 @@ ---- -title: 'Bazel Glossary' ---- - - - -### Action - -A command to run during the build, for example, a call to a compiler that takes -[artifacts](#artifact) as inputs and produces other artifacts as outputs. -Includes metadata like the command line arguments, action key, environment -variables, and declared input/output artifacts. - -**See also:** [Rules documentation](/extending/rules#actions) - -### Action cache - -An on-disk cache that stores a mapping of executed [actions](#action) to the -outputs they created. The cache key is known as the [action key](#action-key). A -core component for Bazel's incrementality model. The cache is stored in the -output base directory and thus survives Bazel server restarts. - -### Action graph - -An in-memory graph of [actions](#action) and the [artifacts](#artifact) that -these actions read and generate. The graph might include artifacts that exist as -source files (for example, in the file system) as well as generated -intermediate/final artifacts that are not mentioned in `BUILD` files. Produced -during the [analysis phase](#analysis-phase) and used during the [execution -phase](#execution-phase). - -### Action graph query (aquery) - -A [query](#query-concept) tool that can query over build [actions](#action). -This provides the ability to analyze how [build rules](#rule) translate into the -actual work builds do. - -### Action key - -The cache key of an [action](#action). Computed based on action metadata, which -might include the command to be executed in the action, compiler flags, library -locations, or system headers, depending on the action. Enables Bazel to cache or -invalidate individual actions deterministically. - -### Analysis phase - -The second phase of a build. Processes the [target graph](#target-graph) -specified in [`BUILD` files](#build-file) to produce an in-memory [action -graph](#action-graph) that determines the order of actions to run during the -[execution phase](#execution-phase). This is the phase in which rule -implementations are evaluated. - -### Artifact - -A source file or a generated file. Can also be a directory of files, known as -[tree artifacts](#tree-artifact). - -An artifact may be an input to multiple actions, but must only be generated by -at most one action. - -An artifact that corresponds to a [file target](#target) can be addressed by a -label. - -### Aspect - -A mechanism for rules to create additional [actions](#action) in their -dependencies. 
For example, if target A depends on B, one can apply an aspect on -A that traverses *up* a dependency edge to B, and runs additional actions in B -to generate and collect additional output files. These additional actions are -cached and reused between targets requiring the same aspect. Created with the -`aspect()` Starlark Build API function. Can be used, for example, to generate -metadata for IDEs, and create actions for linting. - -**See also:** [Aspects documentation](/extending/aspects) - -### Aspect-on-aspect - -A composition mechanism whereby aspects can be applied to the results -of other aspects. For example, an aspect that generates information for use by -IDEs can be applied on top of an aspect that generates `.java` files from a -proto. - -For an aspect `A` to apply on top of aspect `B`, the [providers](#provider) that -`B` advertises in its [`provides`](/rules/lib/globals#aspect.provides) attribute -must match what `A` declares it wants in its [`required_aspect_providers`](/rules/lib/globals#aspect.required_aspect_providers) -attribute. - -### Attribute - -A parameter to a [rule](#rule), used to express per-target build information. -Examples include `srcs`, `deps`, and `copts`, which respectively declare a -target's source files, dependencies, and custom compiler options. The particular -attributes available for a given target depend on its rule type. - -### .bazelrc - -Bazel’s configuration file used to change the default values for [startup -flags](#startup-flags) and [command flags](#command-flags), and to define common -groups of options that can then be set together on the Bazel command line using -a `--config` flag. Bazel can combine settings from multiple bazelrc files -(systemwide, per-workspace, per-user, or from a custom location), and a -`bazelrc` file may also import settings from other `bazelrc` files. - -### Blaze - -The Google-internal version of Bazel. Google’s main build system for its -mono-repository. - -### BUILD File - -A `BUILD` file is the main configuration file that tells Bazel what software -outputs to build, what their dependencies are, and how to build them. Bazel -takes a `BUILD` file as input and uses the file to create a graph of dependencies -and to derive the actions that must be completed to build intermediate and final -software outputs. A `BUILD` file marks a directory and any sub-directories not -containing a `BUILD` file as a [package](#package), and can contain -[targets](#target) created by [rules](#rule). The file can also be named -`BUILD.bazel`. - -### BUILD.bazel File - -See [`BUILD` File](#build-file). Takes precedence over a `BUILD` file in the same -directory. - -### .bzl File - -A file that defines rules, [macros](#macro), and constants written in -[Starlark](#starlark). These can then be imported into [`BUILD` -files](#build-file) using the `load()` function. - -// TODO: ### Build event protocol - -// TODO: ### Build flag - -### Build graph - -The dependency graph that Bazel constructs and traverses to perform a build. -Includes nodes like [targets](#target), [configured -targets](#configured-target), [actions](#action), and [artifacts](#artifact). A -build is considered complete when all [artifacts](#artifact) on which a set of -requested targets depend are verified as up-to-date. - -### Build setting - -A Starlark-defined piece of [configuration](#configuration). -[Transitions](#transition) can set build settings to change a subgraph's -configuration. 
If exposed to the user as a [command-line flag](#command-flags), -also known as a build flag. - -### Clean build - -A build that doesn't use the results of earlier builds. This is generally slower -than an [incremental build](#incremental-build) but commonly considered to be -more [correct](#correctness). Bazel guarantees both clean and incremental builds -are always correct. - -### Client-server model - -The `bazel` command-line client automatically starts a background server on the -local machine to execute Bazel [commands](#command). The server persists across -commands but automatically stops after a period of inactivity (or explicitly via -`bazel shutdown`). Splitting Bazel into a server and client helps amortize JVM -startup time and supports faster [incremental builds](#incremental-build) -because the [action graph](#action-graph) remains in memory across commands. - -### Command - -Used on the command line to invoke different Bazel functions, like `bazel -build`, `bazel test`, `bazel run`, and `bazel query`. - -### Command flags - -A set of flags specific to a [command](#command). Command flags are specified -*after* the command (`bazel build <command flags>`). Flags can be applicable to -one or more commands. For example, `--configure` is a flag exclusively for the -`bazel sync` command, but `--keep_going` is applicable to `sync`, `build`, -`test` and more. Flags are often used for [configuration](#configuration) -purposes, so changes in flag values can cause Bazel to invalidate in-memory -graphs and restart the [analysis phase](#analysis-phase). - -### Configuration - -Information outside of [rule](#rule) definitions that impacts how rules generate -[actions](#action). Every build has at least one configuration specifying the -target platform, action environment variables, and command-line [build -flags](#command-flags). [Transitions](#transition) may create additional -configurations, such as for host tools or cross-compilation. - -**See also:** [Configurations](/extending/rules#configurations) - -// TODO: ### Configuration fragment - -### Configuration trimming - -The process of only including the pieces of [configuration](#configuration) a -target actually needs. For example, if you build Java binary `//:j` with C++ -dependency `//:c`, it's wasteful to include the value of `--javacopt` in the -configuration of `//:c` because changing `--javacopt` unnecessarily breaks C++ -build cacheability. - -### Configured query (cquery) - -A [query](#query-concept) tool that queries over [configured -targets](#configured-target) (after the [analysis phase](#analysis-phase) -completes). This means `select()` and [build flags](#command-flags) (such as -`--platforms`) are accurately reflected in the results. - -**See also:** [cquery documentation](/query/cquery) - -### Configured target - -The result of evaluating a [target](#target) with a -[configuration](#configuration). The [analysis phase](#analysis-phase) produces -this by combining the build's options with the targets that need to be built. -For example, if `//:foo` builds for two different architectures in the same -build, it has two configured targets: `<//:foo, x86>` and `<//:foo, arm>`. - -### Correctness - -A build is correct when its output faithfully reflects the state of its -transitive inputs. To achieve correct builds, Bazel strives to be -[hermetic](#hermeticity) and reproducible, and to make [build -analysis](#analysis-phase) and [action execution](#execution-phase) -deterministic. - -### Dependency - -A directed edge between two [targets](#target).
A target `//:foo` has a *target -dependency* on target `//:bar` if `//:foo`'s attribute values contain a -reference to `//:bar`. `//:foo` has an *action dependency* on `//:bar` if an -action in `//:foo` depends on an input [artifact](#artifact) created by an -action in `//:bar`. - -In certain contexts, it could also refer to an _external dependency_; see -[modules](#module). - -### Depset - -A data structure for collecting data on transitive dependencies. Optimized so -that merging depsets is time and space efficient, because it’s common to have -very large depsets (hundreds of thousands of files). Implemented to -recursively refer to other depsets for space efficiency reasons. [Rule](#rule) -implementations should not "flatten" depsets by converting them to lists unless -the rule is at the top level of the build graph. Flattening large depsets incurs -huge memory consumption. Also known as *nested sets* in Bazel's internal -implementation. - -**See also:** [Depset documentation](/extending/depsets) - -### Disk cache - -A local on-disk blob store for the remote caching feature. Can be used in -conjunction with an actual remote blob store. - -### Distdir - -A read-only directory containing files that Bazel would otherwise fetch from the -internet using repository rules. Enables builds to run fully offline. - -### Dynamic execution - -An execution strategy that selects between local and remote execution based on -various heuristics, and uses the execution results of the faster successful -method. Certain [actions](#action) are executed faster locally (for example, -linking) and others are faster remotely (for example, highly parallelizable -compilation). A dynamic execution strategy can provide the best possible -incremental and clean build times. - -### Execution phase - -The third phase of a build. Executes the [actions](#action) in the [action -graph](#action-graph) created during the [analysis phase](#analysis-phase). -These actions invoke executables (compilers, scripts) to read and write -[artifacts](#artifact). *Spawn strategies* control how these actions are -executed: locally, remotely, dynamically, sandboxed, docker, and so on. - -### Execution root - -A directory in the [workspace](#workspace)’s [output base](#output-base) -directory where local [actions](#action) are executed in -non-[sandboxed](#sandboxing) builds. The directory contents are mostly symlinks -of input [artifacts](#artifact) from the workspace. The execution root also -contains symlinks to external repositories as other inputs and the `bazel-out` -directory to store outputs. Prepared during the [loading phase](#loading-phase) -by creating a *symlink forest* of the directories that represent the transitive -closure of packages on which a build depends. Accessible with `bazel info -execution_root` on the command line. - -### File - -See [Artifact](#artifact). - -### Hermeticity - -A build is hermetic if there are no external influences on its build and test -operations, which helps to make sure that results are deterministic and -[correct](#correctness). For example, hermetic builds typically disallow network -access to actions, restrict access to declared inputs, use fixed timestamps and -timezones, restrict access to environment variables, and use fixed seeds for -random number generators - -### Incremental build - -An incremental build reuses the results of earlier builds to reduce build time -and resource usage. Dependency checking and caching aim to produce correct -results for this type of build. 
An incremental build is the opposite of a clean -build. - -// TODO: ### Install base - -### Label - -An identifier for a [target](#target). Generally has the form -`@repo//path/to/package:target`, where `repo` is the (apparent) name of the -[repository](#repository) containing the target, `path/to/package` is the path -to the directory that contains the [`BUILD` file](#build-file) declaring the -target (this directory is also known as the [package](#package)), and `target` -is the name of the target itself. Depending on the situation, parts of this -syntax may be omitted. - -**See also**: [Labels](/concepts/labels) - -### Loading phase - -The first phase of a build where Bazel executes [`BUILD` files](#build-file) to -create [packages](#package). [Macros](#macro) and certain functions like -`glob()` are evaluated in this phase. Interleaved with the second phase of the -build, the [analysis phase](#analysis-phase), to build up a [target -graph](#target-graph). - -### Macro - -A mechanism to compose multiple [rule](#rule) target declarations together under -a single [Starlark](#starlark) function. Enables reusing common rule declaration -patterns across `BUILD` files. Expanded to the underlying rule target -declarations during the [loading phase](#loading-phase). - -**See also:** [Macro documentation](/extending/macros) - -### Mnemonic - -A short, human-readable string selected by a rule author to quickly understand -what an [action](#action) in the rule is doing. Mnemonics can be used as -identifiers for *spawn strategy* selections. Some examples of action mnemonics -are `Javac` from Java rules, `CppCompile` from C++ rules, and -`AndroidManifestMerger` from Android rules. - -### Module - -A Bazel project that can have multiple versions, each of which can have -dependencies on other modules. This is analogous to familiar concepts in other -dependency management systems, such as a Maven _artifact_, an npm _package_, a -Go _module_, or a Cargo _crate_. Modules form the backbone of Bazel's external -dependency management system. - -Each module is backed by a [repo](#repository) with a `MODULE.bazel` file at its -root. This file contains metadata about the module itself (such as its name and -version), its direct dependencies, and various other data including toolchain -registrations and [module extension](#module-extension) input. - -Module metadata is hosted in Bazel registries. - -**See also:** [Bazel modules](/external/module) - -### Module Extension - -A piece of logic that can be run to generate [repos](#repository) by reading -inputs from across the [module](#module) dependency graph and invoking [repo -rules](#repository-rule). Module extensions have capabilities similar to repo -rules, allowing them to access the internet, perform file I/O, and so on. - -**See also:** [Module extensions](/external/extension) - -### Native rules - -[Rules](#rule) that are built into Bazel and implemented in Java. Such rules -appear in [`.bzl` files](#bzl-file) as functions in the native module (for -example, `native.cc_library` or `native.java_library`). User-defined rules -(non-native) are created using [Starlark](#starlark). - -### Output base - -A [workspace](#workspace)-specific directory to store Bazel output files. Used -to separate outputs from the *workspace*'s source tree (the [main -repo](#repository)). Located in the [output user root](#output-user-root). - -### Output groups - -A group of files that is expected to be built when Bazel finishes building a -target. 
[Rules](#rule) put their usual outputs in the "default output group" -(e.g the `.jar` file of a `java_library`, `.a` and `.so` for `cc_library` -targets). The default output group is the output group whose -[artifacts](#artifact) are built when a target is requested on the command line. -Rules can define more named output groups that can be explicitly specified in -[`BUILD` files](#build-file) (`filegroup` rule) or the command line -(`--output_groups` flag). - -### Output user root - -A user-specific directory to store Bazel's outputs. The directory name is -derived from the user's system username. Prevents output file collisions if -multiple users are building the same project on the system at the same time. -Contains subdirectories corresponding to build outputs of individual workspaces, -also known as [output bases](#output-base). - -### Package - -The set of [targets](#target) defined by a [`BUILD` file](#build-file). A -package's name is the `BUILD` file's path relative to the [repo](#repository) -root. A package can contain subpackages, or subdirectories containing `BUILD` -files, thus forming a package hierarchy. - -### Package group - -A [target](#target) representing a set of packages. Often used in `visibility` -attribute values. - -### Platform - -A "machine type" involved in a build. This includes the machine Bazel runs on -(the "host" platform), the machines build tools execute on ("exec" platforms), -and the machines targets are built for ("target platforms"). - -### Provider - -A schema describing a unit of information to pass between -[rule targets](#rule-target) along dependency relationships. Typically this -contains information like compiler options, transitive source or output files, -and build metadata. Frequently used in conjunction with [depsets](#depset) to -efficiently store accumulated transitive data. An example of a built-in provider -is `DefaultInfo`. - -Note: The object holding specific data for a given rule target is -referred to as a "provider instance", although sometimes this is conflated with -"provider". - -**See also:** [Provider documentation](/extending/rules#providers) - -### Query (concept) - -The process of analyzing a [build graph](#build-graph) to understand -[target](#target) properties and dependency structures. Bazel supports three -query variants: [query](#query-command), [cquery](#configured-query), and -[aquery](#action-graph-query). - -### query (command) - -A [query](#query-concept) tool that operates over the build's post-[loading -phase](#loading-phase) [target graph](#target-graph). This is relatively fast, -but can't analyze the effects of `select()`, [build flags](#command-flags), -[artifacts](#artifact), or build [actions](#action). - -**See also:** [Query how-to](/query/guide), [Query reference](/query/language) - -### Repository - -A directory tree with a boundary marker file at its root, containing source -files that can be used in a Bazel build. Often shortened to just **repo**. - -A repo boundary marker file can be `MODULE.bazel` (signaling that this repo -represents a Bazel module), `REPO.bazel`, or in legacy contexts, `WORKSPACE` or -`WORKSPACE.bazel`. Any repo boundary marker file will signify the boundary of a -repo; multiple such files can coexist in a directory. - -The *main repo* is the repo in which the current Bazel command is being run. - -*External repos* are defined by specifying [modules](#module) in `MODULE.bazel` -files, or invoking [repo rules](#repository-rule) in [module -extensions](#module-extension). 
They can be fetched on demand to a predetermined -"magical" location on disk. - -Each repo has a unique, constant *canonical* name, and potentially different -*apparent* names when viewed from other repos. - -**See also**: [External dependencies overview](/external/overview) - -### Repository cache - -A shared content-addressable cache of files downloaded by Bazel for builds, -shareable across [workspaces](#workspace). Enables offline builds after the -initial download. Commonly used to cache files downloaded through [repository -rules](#repository-rule) like `http_archive` and repository rule APIs like -`repository_ctx.download`. Files are cached only if their SHA-256 checksums are -specified for the download. - -### Repository rule - -A schema for repository definitions that tells Bazel how to materialize (or -"fetch") a [repository](#repository). Often shortened to just **repo rule**. -Repo rules are invoked by Bazel internally to define repos backed by -[modules](#module), or can be invoked by [module extensions](#module-extension). -Repo rules can access the internet or perform file I/O; the most common repo -rule is `http_archive` to download an archive containing source files from the -internet. - -**See also:** [Repo rule documentation](/extending/repo) - -### Reproducibility - -The property of a build or test that a set of inputs to the build or test will -always produce the same set of outputs every time, regardless of time, method, -or environment. Note that this does not necessarily imply that the outputs are -[correct](#correctness) or the desired outputs. - -### Rule - -A schema for defining [rule targets](#rule-target) in a `BUILD` file, such as -`cc_library`. From the perspective of a `BUILD` file author, a rule consists of -a set of [attributes](#attributes) and black box logic. The logic tells the -rule target how to produce output [artifacts](#artifact) and pass information to -other rule targets. From the perspective of `.bzl` authors, rules are the -primary way to extend Bazel to support new programming languages and -environments. - -Rules are instantiated to produce rule targets in the -[loading phase](#loading-phase). In the [analysis phase](#analysis-phase) rule -targets communicate information to their downstream dependencies in the form of -[providers](#provider), and register [actions](#action) describing how to -generate their output artifacts. These actions are run in the [execution -phase](#execution-phase). - -Note: Historically the term "rule" has been used to refer to a rule target. -This usage was inherited from tools like Make, but causes confusion and should -be avoided for Bazel. - -**See also:** [Rules documentation](/extending/rules) - -### Rule target - -A [target](#target) that is an instance of a rule. Contrasts with file targets -and package groups. Not to be confused with [rule](#rule). - -### Runfiles - -The runtime dependencies of an executable [target](#target). Most commonly, the -executable is the executable output of a test rule, and the runfiles are runtime -data dependencies of the test. Before the invocation of the executable (during -bazel test), Bazel prepares the tree of runfiles alongside the test executable -according to their source directory structure. 
- -**See also:** [Runfiles documentation](/extending/rules#runfiles) - -### Sandboxing - -A technique to isolate a running [action](#action) inside a restricted and -temporary [execution root](#execution-root), helping to ensure that it doesn’t -read undeclared inputs or write undeclared outputs. Sandboxing greatly improves -[hermeticity](#hermeticity), but usually has a performance cost, and requires -support from the operating system. The performance cost depends on the platform. -On Linux, it's not significant, but on macOS it can make sandboxing unusable. - -### Skyframe - -[Skyframe](/reference/skyframe) is the core parallel, functional, and incremental evaluation framework of Bazel. - -// TODO: ### Spawn strategy - -### Stamping - -A feature to embed additional information into Bazel-built -[artifacts](#artifact). For example, this can be used for source control, build -time and other workspace or environment-related information for release builds. -Enable through the `--workspace_status_command` flag and [rules](/extending/rules) that -support the stamp attribute. - -### Starlark - -The extension language for writing [rules](/extending/rules) and [macros](#macro). A -restricted subset of Python (syntactically and grammatically) aimed for the -purpose of configuration, and for better performance. Uses the [`.bzl` -file](#bzl-file) extension. [`BUILD` files](#build-file) use an even more -restricted version of Starlark (such as no `def` function definitions), formerly -known as Skylark. - -**See also:** [Starlark language documentation](/rules/language) - -// TODO: ### Starlark rules - -// TODO: ### Starlark rule sandwich - -### Startup flags - -The set of flags specified between `bazel` and the [command](#query-command), -for example, bazel `--host_jvm_debug` build. These flags modify the -[configuration](#configuration) of the Bazel server, so any modification to -startup flags causes a server restart. Startup flags are not specific to any -command. - -### Target - -An object that is defined in a [`BUILD` file](#build-file) and identified by a -[label](#label). Targets represent the buildable units of a workspace from -the perspective of the end user. - -A target that is declared by instantiating a [rule](#rule) is called a [rule -target](#rule-target). Depending on the rule, these may be runnable (like -`cc_binary`) or testable (like `cc_test`). Rule targets typically depend on -other targets via their [attributes](#attribute) (such as `deps`); these -dependencies form the basis of the [target graph](#target-graph). - -Aside from rule targets, there are also file targets and [package group](#package-group) -targets. File targets correspond to [artifacts](#artifact) that are referenced -within a `BUILD` file. As a special case, the `BUILD` file of any package is -always considered a source file target in that package. - -Targets are discovered during the [loading phase](#loading-phase). During the -[analysis phase](#analysis-phase), targets are associated with [build -configurations](#configuration) to form [configured -targets](#configured-target). - -### Target graph - -An in-memory graph of [targets](#target) and their dependencies. Produced during -the [loading phase](#loading-phase) and used as an input to the [analysis -phase](#analysis-phase). - -### Target pattern - -A way to specify a group of [targets](#target) on the command line. 
Commonly -used patterns are `:all` (all rule targets), `:*` (all rule + file targets), -`...` (current [package](#package) and all subpackages recursively). Can be used -in combination, for example, `//...:*` means all rule and file targets in all -packages recursively from the root of the [workspace](#workspace). - -### Tests - -Rule [targets](#target) instantiated from test rules, and therefore containing a -test executable. A return code of zero from the completion of the executable -indicates test success. The exact contract between Bazel and tests (such as test -environment variables, test result collection methods) is specified in the [Test -Encyclopedia](/reference/test-encyclopedia). - -### Toolchain - -A set of tools to build outputs for a language. Typically, a toolchain includes -compilers, linkers, interpreters, and/or linters. A toolchain can also vary by -platform, that is, a Unix compiler toolchain's components may differ for the -Windows variant, even though the toolchain is for the same language. Selecting -the right toolchain for the platform is known as toolchain resolution. - -### Top-level target - -A build [target](#target) is top-level if it’s requested on the Bazel command -line. For example, if `//:foo` depends on `//:bar`, and `bazel build //:foo` is -called, then for this build, `//:foo` is top-level, and `//:bar` isn’t -top-level, although both targets will need to be built. An important difference -between top-level and non-top-level targets is that [command -flags](#command-flags) set on the Bazel command line (or via -[.bazelrc](#bazelrc)) will set the [configuration](#configuration) for top-level -targets, but might be modified by a [transition](#transition) for non-top-level -targets. - -### Transition - -A mapping of [configuration](#configuration) state from one value to another. -Enables [targets](#target) in the [build graph](#build-graph) to have different -configurations, even if they were instantiated from the same [rule](#rule). A -common usage of transitions is with *split* transitions, where certain parts of -the [target graph](#target-graph) are forked with distinct configurations for -each fork. For example, one can build an Android APK with native binaries -compiled for ARM and x86 using split transitions in a single build. - -**See also:** [User-defined transitions](/extending/config#user-defined-transitions) - -### Tree artifact - -An [artifact](#artifact) that represents a collection of files. Since these -files are not themselves artifacts, an [action](#action) operating on them must -instead register the tree artifact as its input or output. - -### Visibility - -One of two mechanisms for preventing unwanted dependencies in the build system: -*target visibility* for controlling whether a [target](#target) can be depended -upon by other targets; and *load visibility* for controlling whether a `BUILD` -or `.bzl` file may load a given `.bzl` file. Without context, usually -"visibility" refers to target visibility. - -**See also:** [Visibility documentation](/concepts/visibility) - -### Workspace - -The environment shared by all Bazel commands run from the same [main -repository](#repository). - -Note that historically the concepts of "repository" and "workspace" have been -conflated; the term "workspace" has often been used to refer to the main -repository, and sometimes even used as a synonym of "repository". Such usage -should be avoided for clarity.
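To illustrate the nested structure described in the [Depset](#depset) entry above, here is a minimal Python sketch (not Bazel's actual implementation) of why merging depsets is cheap while flattening them is expensive:

```python
# Minimal sketch of the "nested set" idea behind depsets: a merged set only
# holds references to its children, so merging is O(1), while to_list()
# must walk the entire structure.
class NestedSet:
    def __init__(self, direct=(), transitive=()):
        self.direct = list(direct)          # items added at this node
        self.transitive = list(transitive)  # references to child sets

    def to_list(self):
        """Flatten into a duplicate-free list; expensive for large graphs."""
        seen, out, stack = set(), [], [self]
        while stack:
            s = stack.pop()
            for item in s.direct:
                if item not in seen:
                    seen.add(item)
                    out.append(item)
            stack.extend(s.transitive)
        return out

lib_a = NestedSet(direct=["a.h"])
lib_b = NestedSet(direct=["b.h"], transitive=[lib_a])
binary = NestedSet(direct=["main.cc"], transitive=[lib_b])  # O(1) "merge"
print(binary.to_list())  # ['main.cc', 'b.h', 'a.h']
```

Merging only allocates a small node pointing at existing children; it is `to_list()` that walks the whole graph, which is why rule implementations are discouraged from flattening depsets.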
diff --git a/7.6.1/reference/skyframe.mdx b/7.6.1/reference/skyframe.mdx deleted file mode 100644 index 0a2cf0a..0000000 --- a/7.6.1/reference/skyframe.mdx +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: 'Skyframe' ---- - - - -The parallel evaluation and incrementality model of Bazel. - -## Data model - -The data model consists of the following items: - -* `SkyValue`. Also called nodes. `SkyValues` are immutable objects that - contain all the data built over the course of the build and the inputs of - the build. Examples are: input files, output files, targets and configured - targets. -* `SkyKey`. A short immutable name to reference a `SkyValue`, for example, - `FILECONTENTS:/tmp/foo` or `PACKAGE://foo`. -* `SkyFunction`. Builds nodes based on their keys and dependent nodes. -* Node graph. A data structure containing the dependency relationship between - nodes. -* `Skyframe`. Code name for the incremental evaluation framework Bazel is - based on. - -## Evaluation - -A build consists of evaluating the node that represents the build request (this is the state we are striving for, but there is a lot of legacy code in the way). First its `SkyFunction` is found and called with the key of the top-level `SkyKey`. The function then requests the evaluation of the nodes it needs to evaluate the top-level node, which in turn result in other function invocations, and so on, until the leaf nodes are reached (which are usually nodes representing input files in the file system). Finally, we end up with the value of the top-level `SkyValue`, some side effects (such as output files in the file system) and a directed acyclic graph of the dependencies between the nodes that were involved in the build. - -A `SkyFunction` can request `SkyKeys` in multiple passes if it cannot tell in advance all of the nodes it needs to do its job. A simple example is evaluating an input file node that turns out to be a symlink: the function tries to read the file, realizes that it’s a symlink, and thus fetches the file system node representing the target of the symlink. But that itself can be a symlink, in which case the original function will need to fetch its target, too. - -The functions are represented in the code by the interface `SkyFunction` and the services provided to it by an interface called `SkyFunction.Environment`. These are the things functions can do: - -* Request the evaluation of another node by way of calling `env.getValue`. If the node is available, its value is returned, otherwise, `null` is returned and the function itself is expected to return `null`. In the latter case, the dependent node is evaluated, and then the original node builder is invoked again, but this time the same `env.getValue` call will return a non-`null` value. -* Request the evaluation of multiple other nodes by calling `env.getValues()`. This does essentially the same, except that the dependent nodes are evaluated in parallel. -* Do computation during their invocation -* Have side effects, for example, writing files to the file system. Care needs to be taken that two different functions do not step on each other’s toes. In general, write side effects (where data flows outwards from Bazel) are okay, read side effects (where data flows inwards into Bazel without a registered dependency) are not, because they are an unregistered dependency and as such, can cause incorrect incremental builds. 
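The restart contract described in the list above can be sketched in a few lines of Python. This is a toy model with invented names (`Env`, `get_value`, `evaluate`), not Bazel's Java `SkyFunction` API:

```python
# Toy model of Skyframe evaluation: a function returns None when a requested
# dependency is missing; the evaluator builds the dependency and re-runs the
# function, which then sees a non-None value from the same get_value call.
class Env:
    def __init__(self, graph):
        self.graph = graph    # key -> already-computed value
        self.missing = []     # dependencies requested but not yet built

    def get_value(self, key):
        if key in self.graph:
            return self.graph[key]
        self.missing.append(key)
        return None

def evaluate(key, functions, graph):
    env = Env(graph)
    value = functions[key](env)
    if value is None:         # missing deps: build them, then restart
        for dep in env.missing:
            if dep not in graph:
                evaluate(dep, functions, graph)
        return evaluate(key, functions, graph)
    graph[key] = value
    return value

functions = {
    "FILECONTENTS:/tmp/foo": lambda env: "contents of foo",
    "PACKAGE://foo": lambda env: (
        None if env.get_value("FILECONTENTS:/tmp/foo") is None
        else "package parsed from " + env.get_value("FILECONTENTS:/tmp/foo")
    ),
}
print(evaluate("PACKAGE://foo", functions, {}))
```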
- -`SkyFunction` implementations should not access data in any way other than by requesting dependencies (such as by directly reading the file system), because that results in Bazel not registering the data dependency on the file that was read, thus resulting in incorrect incremental builds. - -Once a function has enough data to do its job, it should return a non-`null` value indicating completion. - -This evaluation strategy has a number of benefits: - -* Hermeticity. If functions only request input data by way of depending on other nodes, Bazel can guarantee that if the input state is the same, the same data is returned. If all sky functions are deterministic, this means that the whole build will also be deterministic. -* Correct and perfect incrementality. If all the input data of all functions is recorded, Bazel can invalidate only the exact set of nodes that need to be invalidated when the input data changes. -* Parallelism. Since functions can only interact with each other by way of requesting dependencies, functions that do not depend on each other can be run in parallel and Bazel can guarantee that the result is the same as if they were run sequentially. - -## Incrementality - -Since functions can only access input data by depending on other nodes, Bazel can build up a complete data flow graph from the input files to the output files, and use this information to only rebuild those nodes that actually need to be rebuilt: the reverse transitive closure of the set of changed input files. - -In particular, two possible incrementality strategies exist: the bottom-up one and the top-down one. Which one is optimal depends on what the dependency graph looks like. - -* During bottom-up invalidation, after a graph is built and the set of changed inputs is known, all the nodes that transitively depend on changed files are invalidated. This is optimal if we know that the same top-level node will be built again. Note that bottom-up invalidation requires running `stat()` on all input files of the previous build to determine if they were changed. This can be improved by using `inotify` or a similar mechanism to learn about changed files. - -* During top-down invalidation, the transitive closure of the top-level node is checked and only those nodes are kept whose transitive closure is clean. This is better if we know that the current node graph is large, but we only need a small subset of it in the next build: bottom-up invalidation would invalidate the larger graph of the first build, unlike top-down invalidation, which just walks the small graph of the second build. - -We currently only do bottom-up invalidation. - -To get further incrementality, we use _change pruning_: if a node is invalidated, but upon rebuild, it is discovered that its new value is the same as its old value, the nodes that were invalidated due to a change in this node are “resurrected”. - -This is useful, for example, if one changes a comment in a C++ file: then the `.o` file generated from it will be the same, thus, we don’t need to call the linker again. - -## Incremental Linking / Compilation - -The main limitation of this model is that the invalidation of a node is an all-or-nothing affair: when a dependency changes, the dependent node is always rebuilt from scratch, even if a better algorithm existed that could mutate the old value of the node based on the changes.
A few examples where this would be useful: - -* Incremental linking -* When a single `.class` file changes in a `.jar`, we could theoretically modify the `.jar` file instead of building it from scratch again. - -The reason why Bazel currently does not support these things in a principled way (we have some measure of support for incremental linking, but it’s not implemented within Skyframe) is twofold: we only had limited performance gains and it was hard to guarantee that the result of the mutation is the same as that of a clean rebuild, and Google values builds that are bit-for-bit repeatable. - -Until now, we could always achieve good enough performance by simply decomposing an expensive build step and achieving partial re-evaluation that way: splitting all the classes in an app into multiple groups and dexing each group separately. This way, if classes in a group do not change, the dexing does not have to be redone. - -## Mapping to Bazel concepts - -This is a rough overview of some of the `SkyFunction` implementations Bazel uses to perform a build: - -* **FileStateValue**. The result of an `lstat()`. For existent files, we also compute additional information in order to detect changes to the file. This is the lowest level node in the Skyframe graph and has no dependencies. -* **FileValue**. Used by anything that cares about the actual contents and/or resolved path of a file. Depends on the corresponding `FileStateValue` and any symlinks that need to be resolved (for example, the `FileValue` for `a/b` needs the resolved path of `a` and the resolved path of `a/b`). The distinction from `FileStateValue` is important because in some cases (for example, when evaluating file system globs such as `srcs=glob(["*/*.java"])`), the contents of the file are not actually needed. -* **DirectoryListingValue**. Essentially the result of `readdir()`. Depends on the `FileValue` associated with the directory. -* **PackageValue**. Represents the parsed version of a `BUILD` file (internally, the data structure representing the contents of a `BUILD` file). Depends on the `FileValue` of the associated `BUILD` file, and also transitively on any `DirectoryListingValue` that is used to resolve the globs in the package. -* **ConfiguredTargetValue**. Represents a configured target, which is a tuple of the set of actions generated during the analysis of a target and information provided to configured targets that depend on this one. Depends on the `PackageValue` the corresponding target is in, the `ConfiguredTargetValues` of direct dependencies, and a special node representing the build configuration. -* **ArtifactValue**. Represents a file in the build, be it a source or an output artifact (artifacts are almost equivalent to files, and are used to refer to files during the actual execution of build steps). For source files, it depends on the `FileValue` of the associated node; for output artifacts, it depends on the `ActionExecutionValue` of whatever action generates the artifact. -* **ActionExecutionValue**. Represents the execution of an action. Depends on the `ArtifactValues` of its input files. The action it executes is currently contained within its sky key, which is contrary to the concept that sky keys should be small. We are working on solving this discrepancy (note that `ActionExecutionValue` and `ArtifactValue` are unused if we do not run the execution phase on Skyframe).
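The change-pruning behavior from the Incrementality section above can also be modeled in a few lines. The following toy Python sketch (not Bazel code) re-evaluates a node's reverse dependencies only when its value actually changed:

```python
# Toy model of bottom-up invalidation with change pruning: when a recomputed
# node has the same value as before, the nodes above it are "resurrected"
# and never rebuilt.
def invalidate_bottom_up(changed, compute, rdeps, values):
    frontier = set(changed)
    while frontier:
        node = frontier.pop()
        new_value = compute[node](values)
        if values.get(node) == new_value:
            continue                          # change pruning
        values[node] = new_value
        frontier |= set(rdeps.get(node, ()))

source = {"text": "int f() { return 1; }"}
compute = {
    "src.cc": lambda v: source["text"],
    "src.o": lambda v: v["src.cc"].split("//")[0].strip(),  # comments stripped
    "binary": lambda v: "link(" + v["src.o"] + ")",
}
rdeps = {"src.cc": ["src.o"], "src.o": ["binary"]}

values = {}
invalidate_bottom_up(["src.cc"], compute, rdeps, values)  # initial build
source["text"] += "  // a comment"
invalidate_bottom_up(["src.cc"], compute, rdeps, values)  # .o unchanged: no relink
```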
diff --git a/7.6.1/release/backward-compatibility.mdx b/7.6.1/release/backward-compatibility.mdx deleted file mode 100644 index af653cc..0000000 --- a/7.6.1/release/backward-compatibility.mdx +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: 'Backward Compatibility' --- - - - -This page provides information about how to handle backward compatibility, -including migrating from one release to another and how to communicate -incompatible changes. - -Bazel is evolving. Minor versions released as part of an [LTS major -version](/release#bazel-versioning) are fully backward-compatible. New major LTS -releases may contain incompatible changes that require some migration effort. -For more information about Bazel's release model, please check out the [Release -Model](/release) page. - -## Summary - -1. It is recommended to use `--incompatible_*` flags for breaking changes. -1. For every `--incompatible_*` flag, a GitHub issue explains the change in - behavior and aims to provide a migration recipe. -1. Incompatible flags are recommended to be back-ported to the latest LTS - release without enabling the flag by default. -1. APIs and behavior guarded by an `--experimental_*` flag can change at any - time. -1. Never run production builds with `--experimental_*` or `--incompatible_*` - flags. - -## How to follow this policy - -* [For Bazel users - how to update Bazel](/install/bazelisk) -* [For contributors - best practices for incompatible changes](/contribute/breaking-changes) -* [For release managers - how to update issue labels and release](https://github.com/bazelbuild/continuous-integration/tree/master/docs/release-playbook.md) - -## What is stable functionality? - -In general, APIs or behaviors without `--experimental_...` flags are considered -stable, supported features in Bazel. - -This includes: - -* Starlark language and APIs -* Rules bundled with Bazel -* Bazel APIs such as Remote Execution APIs or Build Event Protocol -* Flags and their semantics - -## Incompatible changes and migration recipes - -For every incompatible change in a new release, the Bazel team aims to provide a -_migration recipe_ that helps you update your code (`BUILD` and `.bzl` files, as -well as any Bazel usage in scripts, usage of Bazel API, and so on). - -Incompatible changes should have an associated `--incompatible_*` flag and a -corresponding GitHub issue. - -The incompatible flag and relevant changes are recommended to be back-ported to -the latest LTS release without enabling the flag by default. This allows users -to migrate for the incompatible changes before the next LTS release is -available. - -## Communicating incompatible changes - -The primary source of information about incompatible changes are GitHub issues -marked with an ["incompatible-change" -label](https://github.com/bazelbuild/bazel/issues?q=label%3Aincompatible-change). - -For every incompatible change, the issue specifies the following: - -* Name of the flag controlling the incompatible change -* Description of the changed functionality -* Migration recipe - -When an incompatible change is ready for migration with Bazel at HEAD -(therefore, also with the next Bazel rolling release), it should be marked with -the `migration-ready` label. The incompatible change issue is closed when the -incompatible flag is flipped at HEAD.
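In the spirit of the policy above, migration readiness can be checked mechanically: try the build with each candidate `--incompatible_*` flag and see which ones break it. The flag names below are hypothetical placeholders, and Bazelisk's [`--migrate`](https://github.com/bazelbuild/bazelisk) mode automates a similar check:

```python
# Sketch of a migration-readiness check: run the build once per candidate
# incompatible flag and report which flags still need migration work.
# The flag names here are hypothetical examples, not real Bazel flags.
import subprocess

FLAGS = [
    "--incompatible_example_change_one",  # hypothetical
    "--incompatible_example_change_two",  # hypothetical
]

for flag in FLAGS:
    result = subprocess.run(["bazel", "build", flag, "//..."],
                            capture_output=True)
    status = "OK" if result.returncode == 0 else "NEEDS MIGRATION"
    print(f"{flag}: {status}")
```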
diff --git a/7.6.1/release/index.mdx b/7.6.1/release/index.mdx deleted file mode 100644 index 8add150..0000000 --- a/7.6.1/release/index.mdx +++ /dev/null @@ -1,176 +0,0 @@ ---- -title: 'Release Model' --- - - - -As announced in [the original blog -post](https://blog.bazel.build/2020/11/10/long-term-support-release.html), Bazel -4.0 and higher versions provide support for two release tracks: rolling -releases and long term support (LTS) releases. This page covers the latest -information about Bazel's release model. - -## Release versioning - -Bazel uses a _major.minor.patch_ [Semantic -Versioning](https://semver.org/) scheme. - -* A _major release_ contains features that are not backward compatible with - the previous release. Each major Bazel version is an LTS release. -* A _minor release_ contains backward-compatible bug fixes and features - back-ported from the main branch. -* A _patch release_ contains critical bug fixes. - -Additionally, pre-release versions are indicated by appending a hyphen and a -date suffix to the next major version number. - -For example, a new release of each type would result in these version numbers: - -* Major: 6.0.0 -* Minor: 6.1.0 -* Patch: 6.1.2 -* Pre-release: 7.0.0-pre.20230502.1 - -## Support stages - -For each major Bazel version, there are four support stages: - -* **Rolling**: This major version is still in pre-release; the Bazel team - publishes rolling releases from HEAD. -* **Active**: This major version is the current active LTS release. The Bazel - team backports important features and bug fixes into its minor releases. -* **Maintenance**: This major version is an old LTS release in maintenance - mode. The Bazel team only promises to backport critical bug fixes for - security issues and OS-compatibility issues into this LTS release. -* **Deprecated**: The Bazel team no longer provides support for this major - version; all users should migrate to newer Bazel LTS releases. - -## Release cadence - -Bazel regularly publishes releases for two release tracks. - -### Rolling releases - -* Rolling releases are coordinated with the Google-internal Blaze release and are - released from HEAD around every two weeks. Each one is a preview of the next - Bazel LTS release. -* Rolling releases can ship incompatible changes. Incompatible flags are - recommended for major breaking changes, and rolling out incompatible changes - should follow our [backward compatibility - policy](/release/backward-compatibility). - -### LTS releases - -* _Major release_: A new LTS release is expected to be cut from HEAD roughly - every - 12 months. Once a new LTS release is out, it immediately enters the Active - stage, and the previous LTS release enters the Maintenance stage. -* _Minor release_: New minor versions on the Active LTS track are expected to - be released once every 2 months. -* _Patch release_: New patch versions for LTS releases in Active and - Maintenance stages are expected to be released on demand for critical bug - fixes. -* A Bazel LTS release enters the Deprecated stage after being in the - Maintenance stage for 2 years. - -For planned releases, please check our [release -issues](https://github.com/bazelbuild/bazel/issues?q=is%3Aopen+is%3Aissue+label%3Arelease) -on GitHub.
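As a concrete illustration of the versioning scheme described above, the following Python sketch parses the _major.minor.patch_ form, including the hyphenated pre-release suffix:

```python
# Parse Bazel's major.minor.patch version scheme, including pre-release
# versions such as "7.0.0-pre.20230502.1".
import re

VERSION_RE = re.compile(
    r"^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)"
    r"(?:-(?P<prerelease>[0-9A-Za-z.]+))?$"
)

def parse_version(version):
    m = VERSION_RE.match(version)
    if not m:
        raise ValueError(f"not a Bazel version: {version!r}")
    return m.groupdict()

print(parse_version("6.1.2"))
# {'major': '6', 'minor': '1', 'patch': '2', 'prerelease': None}
print(parse_version("7.0.0-pre.20230502.1"))
# {'major': '7', 'minor': '0', 'patch': '0', 'prerelease': 'pre.20230502.1'}
```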
- -## Support matrix - -| LTS release | Support stage | Latest version | End of support | -| ----------- | ------------- | -------------- | -------------- | -| Bazel 7 | Rolling | [Check GitHub release page](https://github.com/bazelbuild/bazel/releases) | N/A | -| Bazel 6 | Active | [6.4.0](https://github.com/bazelbuild/bazel/releases/tag/6.4.0) | Dec 2025 | -| Bazel 5 | Maintenance | [5.4.1](https://github.com/bazelbuild/bazel/releases/tag/5.4.1) | Jan 2025 | -| Bazel 4 | Maintenance | [4.2.4](https://github.com/bazelbuild/bazel/releases/tag/4.2.4) | Jan 2024 | - -All Bazel releases can be found on the [release -page](https://github.com/bazelbuild/bazel/releases) on GitHub. - -Note: Bazel versions older than Bazel 4 are no longer supported; users are -recommended to upgrade to the latest LTS release, or to use rolling releases to -keep up with the latest changes at HEAD. - -## Release procedure & policies - -For rolling releases, the process is straightforward: about every two weeks, a -new release is created, aligning with the same baseline as the Google internal -Blaze release. Due to the rapid release schedule, we don't backport any changes -to rolling releases. - -For LTS releases, the procedure and policies below are followed: - -1. Determine a baseline commit for the release. - * For a new major LTS release, the baseline commit is the HEAD of the main - branch. - * For a minor or patch release, the baseline commit is the HEAD of the - current latest version of the same LTS release. -1. Create a release branch in the name of `release-<version>` from the baseline - commit. -1. Backport changes via PRs to the release branch. - * The community can suggest certain commits to be back-ported by replying - "`@bazel-io flag`" on relevant GitHub issues or PRs to mark them as potential - release blockers; the Bazel team triages them and decides whether to - back-port the commits. - * Only backward-compatible commits on the main branch can be back-ported; - additional minor changes to resolve merge conflicts are acceptable. -1. Identify release blockers and fix issues found on the release branch. - * The release branch is tested with the same test suite in - [postsubmit](https://buildkite.com/bazel/bazel-bazel) and - the [downstream test pipeline](https://buildkite.com/bazel/bazel-at-head-plus-downstream) - on Bazel CI. The Bazel team monitors testing results of the release - branch and fixes any regressions found. -1. Create a new release candidate from the release branch when all known - release blockers are resolved. - * The release candidate is announced on - [bazel-discuss](https://groups.google.com/g/bazel-discuss); - the Bazel team monitors community bug reports for the candidate. - * If new release blockers are identified, go back to the last step and - create a new release candidate after resolving all the issues. - * New features are not allowed to be added to the release branch after the - first release candidate is created. -1. Push the release candidate as the official release if no further release - blockers are found. - * For patch releases, push the release at least two business days after - the last release candidate is out. - * For major and minor releases, push the release two business days after - the last release candidate is out, but not earlier than one week after - the first release candidate is out. - * The release is only pushed on a day when the next day is a business - day.
- * The release is announced on - [bazel-discuss](https://groups.google.com/g/bazel-discuss); - the Bazel team monitors and addresses community bug reports for the new - release. - -## Report regressions - -If a user finds a regression in a new Bazel release, release candidate, or even -Bazel at HEAD, please file a bug on -[GitHub](https://github.com/bazelbuild/bazel/issues). You can use -Bazelisk to bisect the culprit commit and include this information in the bug -report. - -For example, if your build succeeds with Bazel 6.1.0 but fails with the second -release candidate of 6.2.0, you can bisect via - -```bash -bazelisk --bisect=6.1.0..release-6.2.0rc2 build //foo:bar -``` - -You can set the `BAZELISK_SHUTDOWN` or `BAZELISK_CLEAN` environment variables to run -the corresponding bazel commands to reset the build state if that's needed to -reproduce the issue. For more details, check out the documentation about Bazelisk's -[bisect feature](https://github.com/bazelbuild/bazelisk#--bisect). - -Remember to upgrade Bazelisk to the latest version to use the bisect -feature. - -## Rule compatibility - -If you are a rule author and want to maintain compatibility with different -Bazel versions, please check out the [Rule -Compatibility](/release/rule-compatibility) page. diff --git a/7.6.1/release/rule-compatibility.mdx b/7.6.1/release/rule-compatibility.mdx deleted file mode 100644 index 05a8a95..0000000 --- a/7.6.1/release/rule-compatibility.mdx +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: 'Rule Compatibility' --- - - - -Bazel Starlark rules can break compatibility with Bazel LTS releases in the -following two scenarios: - -1. The rule breaks compatibility with future LTS releases because a feature it - depends on is removed from Bazel at HEAD. -1. The rule breaks compatibility with the current or older LTS releases because - a feature it depends on is only available in newer Bazel LTS releases. - -Meanwhile, the rule itself can ship incompatible changes for its users as -well. When combined with breaking changes in Bazel, upgrading the rule version -and Bazel version can often be a source of frustration for Bazel users. This -page covers how rule authors should maintain rule compatibility with Bazel to -make it easier for users to upgrade Bazel and rules. - -## Manageable migration process - -While it's obviously not feasible to guarantee compatibility between every -version of Bazel and every version of the rule, our aim is to ensure that the -migration process remains manageable for Bazel users. A manageable migration -process is defined as a process where **users are not forced to upgrade the -rule's major version and Bazel's major version simultaneously**, thereby -allowing users to handle incompatible changes from one source at a time. - -For example, with the following compatibility matrix: - -* Migrating from rules_foo 1.x + Bazel 4.x to rules_foo 2.x + Bazel 5.x is not - considered manageable, as the users need to upgrade the major version of - rules_foo and Bazel at the same time. -* Migrating from rules_foo 2.x + Bazel 5.x to rules_foo 3.x + Bazel 6.x is - considered manageable, as the users can first upgrade rules_foo from 2.x to - 3.x without changing the major Bazel version, then upgrade Bazel from 5.x to - 6.x.
- -| | rules_foo 1.x | rules_foo 2.x | rules_foo 3.x | HEAD | -| --- | --- | --- | --- | --- | -| Bazel 4.x | ✅ | ❌ | ❌ | ❌ | -| Bazel 5.x | ❌ | ✅ | ✅ | ❌ | -| Bazel 6.x | ❌ | ❌ | ✅ | ✅ | -| HEAD | ❌ | ❌ | ❌ | ✅ | - -❌: No version of the major rule version is compatible with the Bazel LTS -release. - -✅: At least one version of the rule is compatible with the latest version of the -Bazel LTS release. - -## Best practices - -As Bazel rules authors, you can ensure a manageable migration process for users -by following these best practices: - -1. The rule should follow [Semantic - Versioning](https://semver.org/): minor versions of the same - major version are backward compatible. -1. The rule at HEAD should be compatible with the latest Bazel LTS release. -1. The rule at HEAD should be compatible with Bazel at HEAD. To achieve this, - you can - * Set up your own CI testing with Bazel at HEAD - * Add your project to [Bazel downstream - testing](https://github.com/bazelbuild/continuous-integration/blob/master/docs/downstream-testing.md); - the Bazel team files issues to your project if breaking changes in Bazel - affect your project, and you must follow our [downstream project - policies](https://github.com/bazelbuild/continuous-integration/blob/master/docs/downstream-testing.md#downstream-project-policies) - to address issues timely. -1. The latest major version of the rule must be compatible with the latest - Bazel LTS release. -1. A new major version of the rule should be compatible with the last Bazel LTS - release supported by the previous major version of the rule. - -Achieving 2. and 3. is the most important task since it allows achieving 4. and -5. naturally. - -To make it easier to keep compatibility with both Bazel at HEAD and the latest -Bazel LTS release, rules authors can: - -* Request backward-compatible features to be back-ported to the latest LTS - release, check out [release process](/release#release-procedure-policies) - for more details. -* Use [bazel_features](https://github.com/bazel-contrib/bazel_features) - to do Bazel feature detection. - -In general, with the recommended approaches, rules should be able to migrate for -Bazel incompatible changes and make use of new Bazel features at HEAD without -dropping compatibility with the latest Bazel LTS release. diff --git a/7.6.1/remote/bep-examples.mdx b/7.6.1/remote/bep-examples.mdx deleted file mode 100644 index faf11bf..0000000 --- a/7.6.1/remote/bep-examples.mdx +++ /dev/null @@ -1,200 +0,0 @@ ---- -title: 'Build Event Protocol Examples' ---- - - - -The full specification of the Build Event Protocol can be found in its protocol -buffer definition. However, it might be helpful to build up some intuition -before looking at the specification. - -Consider a simple Bazel workspace that consists of two empty shell scripts -`foo.sh` and `foo_test.sh` and the following `BUILD` file: - -```bash -sh_library( - name = "foo_lib", - srcs = ["foo.sh"], -) - -sh_test( - name = "foo_test", - srcs = ["foo_test.sh"], - deps = [":foo_lib"], -) -``` - -When running `bazel test ...` on this project the build graph of the generated -build events will resemble the graph below. The arrows indicate the -aforementioned parent and child relationship. Note that some build events and -most fields have been omitted for brevity. - -![bep-graph](/docs/images/bep-graph.png "BEP graph") - -**Figure 1.** BEP graph. - -Initially, a `BuildStarted` event is published. 
The event informs us that the -build was invoked through the `bazel test` command and announces child events: - -* `OptionsParsed` -* `WorkspaceStatus` -* `CommandLine` -* `UnstructuredCommandLine` -* `BuildMetadata` -* `BuildFinished` -* `PatternExpanded` -* `Progress` - -The first three events provide information about how Bazel was invoked. - -The `PatternExpanded` build event provides insight -into which specific targets the `...` pattern expanded to: -`//foo:foo_lib` and `//foo:foo_test`. It does so by declaring two -`TargetConfigured` events as children. Note that the `TargetConfigured` event -declares the `Configuration` event as a child event, even though `Configuration` -has been posted before the `TargetConfigured` event. - -Besides the parent and child relationship, events may also refer to each other -using their build event identifiers. For example, in the above graph the -`TargetComplete` event refers to the `NamedSetOfFiles` event in its `fileSets` -field. - -Build events that refer to files don’t usually embed the file -names and paths in the event. Instead, they contain the build event identifier -of a `NamedSetOfFiles` event, which will then contain the actual file names and -paths. The `NamedSetOfFiles` event allows a set of files to be reported once and -referred to by many targets. This structure is necessary because otherwise in -some cases the Build Event Protocol output size would grow quadratically with -the number of files. A `NamedSetOfFiles` event may also not have all its files -embedded, but instead refer to other `NamedSetOfFiles` events through their -build event identifiers. - -Below is an instance of the `TargetComplete` event for the `//foo:foo_lib` -target from the above graph, printed in protocol buffer’s JSON representation. -The build event identifier contains the target as an opaque string and refers to -the `Configuration` event using its build event identifier. The event does not -announce any child events. The payload contains information about whether the -target was built successfully, the set of output files, and the kind of target -built. - -```json -{ - "id": { - "targetCompleted": { - "label": "//foo:foo_lib", - "configuration": { - "id": "544e39a7f0abdb3efdd29d675a48bc6a" - } - } - }, - "completed": { - "success": true, - "outputGroup": [{ - "name": "default", - "fileSets": [{ - "id": "0" - }] - }], - "targetKind": "sh_library rule" - } -} -``` - -## Aspect Results in BEP - -Ordinary builds evaluate actions associated with `(target, configuration)` -pairs. When building with [aspects](/extending/aspects) enabled, Bazel -additionally evaluates targets associated with `(target, configuration, -aspect)` triples, for each target affected by a given enabled aspect. - -Evaluation results for aspects are available in BEP despite the absence of -aspect-specific event types. For each `(target, configuration)` pair with an -applicable aspect, Bazel publishes an additional `TargetConfigured` and -`TargetComplete` event bearing the result from applying the aspect to the -target. 
For example, if `//foo:foo_lib` is built with
-`--aspects=aspects/myaspect.bzl%custom_aspect`, this event would also appear in
-the BEP:
-
-```json
-{
-  "id": {
-    "targetCompleted": {
-      "label": "//foo:foo_lib",
-      "configuration": {
-        "id": "544e39a7f0abdb3efdd29d675a48bc6a"
-      },
-      "aspect": "aspects/myaspect.bzl%custom_aspect"
-    }
-  },
-  "completed": {
-    "success": true,
-    "outputGroup": [{
-      "name": "default",
-      "fileSets": [{
-        "id": "1"
-      }]
-    }]
-  }
-}
-```
-
-Note: The only difference between the IDs is the presence of the `aspect`
-field. A tool that does not check the `aspect` ID field and accumulates output
-files by target may conflate target outputs with aspect outputs.
-
-## Consuming `NamedSetOfFiles`
-
-Determining the artifacts produced by a given target (or aspect) is a common
-BEP use-case that can be done efficiently with some preparation. This section
-discusses the recursive, shared structure offered by the `NamedSetOfFiles`
-event, which matches the structure of a Starlark [depset](/extending/depsets).
-
-Consumers must take care to avoid quadratic algorithms when processing
-`NamedSetOfFiles` events because large builds can contain tens of thousands of
-such events, requiring hundreds of millions of operations in a traversal with
-quadratic complexity.
-
-![namedsetoffiles-bep-graph](/docs/images/namedsetoffiles-bep-graph.png "NamedSetOfFiles BEP graph")
-
-**Figure 2.** `NamedSetOfFiles` BEP graph.
-
-A `NamedSetOfFiles` event always appears in the BEP stream *before* a
-`TargetComplete` or `NamedSetOfFiles` event that references it. This is the
-inverse of the "parent-child" event relationship, where all but the first event
-appear after at least one event announcing them. A `NamedSetOfFiles` event is
-announced by a `Progress` event that carries no other semantics.
-
-Given these ordering and sharing constraints, a typical consumer must buffer
-all `NamedSetOfFiles` events until the BEP stream is exhausted. The following
-Python code demonstrates how to populate a map from target/aspect to built
-artifacts in the "default" output group, and how to process the outputs for a
-subset of built targets/aspects:
-
-```python
-named_sets = {}  # type: dict[str, NamedSetOfFiles]
-outputs = {}  # type: dict[str, dict[str, set[str]]]
-
-for event in stream:
-    kind = event.id.WhichOneof("id")
-    if kind == "named_set":
-        named_sets[event.id.named_set.id] = event.named_set_of_files
-    elif kind == "target_completed":
-        tc = event.id.target_completed
-        target_id = (tc.label, tc.configuration.id, tc.aspect)
-        outputs[target_id] = {}
-        for group in event.completed.output_group:
-            outputs[target_id][group.name] = {fs.id for fs in group.file_sets}
-
-for result_id in relevant_subset(outputs.keys()):
-    # Copy the depset roots so the traversal does not mutate `outputs`.
-    visit = set(outputs[result_id].get("default", ()))
-    seen_sets = set(visit)
-    while visit:
-        set_name = visit.pop()
-        s = named_sets[set_name]
-        for f in s.files:
-            process_file(result_id, f)
-        for fs in s.file_sets:
-            if fs.id not in seen_sets:
-                visit.add(fs.id)
-                seen_sets.add(fs.id)
-```
diff --git a/7.6.1/remote/bep-glossary.mdx b/7.6.1/remote/bep-glossary.mdx
deleted file mode 100644
index 3aab682..0000000
--- a/7.6.1/remote/bep-glossary.mdx
+++ /dev/null
@@ -1,417 +0,0 @@
----
-title: 'Build Event Protocol Glossary'
----
-
-
-
-Each BEP event type has its own semantics, minimally documented in
-[build\_event\_stream.proto](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto).
-The following glossary describes each event type.
-
-## Aborted
-
-Unlike other events, `Aborted` does not have a corresponding ID type, because
-the `Aborted` event *replaces* events of other types. This event indicates that
-the build terminated early and the event ID it appears under was not produced
-normally. `Aborted` contains an enum and a human-friendly description to
-explain why the build did not complete.
-
-For example, if a build is evaluating a target when the user interrupts Bazel,
-BEP contains an event like the following:
-
-```json
-{
-  "id": {
-    "targetCompleted": {
-      "label": "//:foo",
-      "configuration": {
-        "id": "544e39a7f0abdb3efdd29d675a48bc6a"
-      }
-    }
-  },
-  "aborted": {
-    "reason": "USER_INTERRUPTED"
-  }
-}
-```
-
-## ActionExecuted
-
-Provides details about the execution of a specific
-[Action](/rules/lib/actions) in a build. By default, this event is
-included in the BEP only for failed actions, to support identifying the root
-cause of build failures. Users may set the
-`--build_event_publish_all_actions` flag to include all `ActionExecuted`
-events.
-
-## BuildFinished
-
-A single `BuildFinished` event is sent after the command is complete and
-includes the exit code for the command. This event provides authoritative
-success/failure information.
-
-## BuildMetadata
-
-Contains the parsed contents of the `--build_metadata` flag. This event exists
-to support Bazel integration with other tooling by plumbing external data
-(such as identifiers).
-
-## BuildMetrics
-
-A single `BuildMetrics` event is sent at the end of every command and includes
-counters/gauges useful for quantifying the build tool's behavior during the
-command. These metrics indicate work actually done and do not count cached
-work that is reused.
-
-Note that `memory_metrics` may not be populated if there was no Java garbage
-collection during the command's execution. Users may set the
-`--memory_profile=/dev/null` option, which forces the garbage
-collector to run at the end of the command to populate `memory_metrics`.
-
-```json
-{
-  "id": {
-    "buildMetrics": {}
-  },
-  "buildMetrics": {
-    "actionSummary": {
-      "actionsExecuted": "1"
-    },
-    "memoryMetrics": {},
-    "targetMetrics": {
-      "targetsLoaded": "9",
-      "targetsConfigured": "19"
-    },
-    "packageMetrics": {
-      "packagesLoaded": "5"
-    },
-    "timingMetrics": {
-      "cpuTimeInMs": "1590",
-      "wallTimeInMs": "359"
-    }
-  }
-}
-```
-
-## BuildStarted
-
-The first event in a BEP stream, `BuildStarted` includes metadata describing
-the command before any meaningful work begins.
-
-## BuildToolLogs
-
-A single `BuildToolLogs` event is sent at the end of a command, including URIs
-of files generated by the build tool that may aid in understanding or debugging
-build tool behavior. Some information may be included inline.
-
-```json
-{
-  "id": {
-    "buildToolLogs": {}
-  },
-  "lastMessage": true,
-  "buildToolLogs": {
-    "log": [
-      {
-        "name": "elapsed time",
-        "contents": "MC4xMjEwMDA="
-      },
-      {
-        "name": "process stats",
-        "contents": "MSBwcm9jZXNzOiAxIGludGVybmFsLg=="
-      },
-      {
-        "name": "command.profile.gz",
-        "uri": "file:///tmp/.cache/bazel/_bazel_foo/cde87985ad0bfef34eacae575224b8d1/command.profile.gz"
-      }
-    ]
-  }
-}
-```
-
-## CommandLine
-
-The BEP contains multiple `CommandLine` events containing representations of
-all command-line arguments (including options and uninterpreted arguments).
-Each `CommandLine` event has a label in its `StructuredCommandLineId` that
-indicates which representation it conveys; three such events appear in the BEP:
-
-* `"original"`: Reconstructed command line as Bazel received it from the Bazel
-  client, without startup options sourced from .rc files.
-* `"canonical"`: The effective command line with .rc files expanded and
-  invocation policy applied.
-* `"tool"`: Populated from the `--experimental_tool_command_line` option. This
-  is useful to convey the command line of a tool wrapping Bazel through the
-  BEP. This could be a base64-encoded `CommandLine` binary protocol buffer
-  message which is used directly, or a string which is parsed but not
-  interpreted (as the tool's options may differ from Bazel's).
-
-## Configuration
-
-A `Configuration` event is sent for every [`configuration`](/extending/config)
-used in the top-level targets in a build. At least one configuration event is
-always present. The `id` is reused by the `TargetConfigured` and
-`TargetComplete` event IDs and is necessary to disambiguate those events in
-multi-configuration builds.
-
-```json
-{
-  "id": {
-    "configuration": {
-      "id": "a5d130b0966b4a9ca2d32725aa5baf40e215bcfc4d5cdcdc60f5cc5b4918903b"
-    }
-  },
-  "configuration": {
-    "mnemonic": "k8-fastbuild",
-    "platformName": "k8",
-    "cpu": "k8",
-    "makeVariable": {
-      "COMPILATION_MODE": "fastbuild",
-      "TARGET_CPU": "k8",
-      "GENDIR": "bazel-out/k8-fastbuild/bin",
-      "BINDIR": "bazel-out/k8-fastbuild/bin"
-    }
-  }
-}
-```
-
-## ConvenienceSymlinksIdentified
-
-**Experimental.** If the `--experimental_convenience_symlinks_bep_event`
-option is set, a single `ConvenienceSymlinksIdentified` event is produced by
-`build` commands to indicate how symlinks in the workspace should be managed.
-This enables building tools that invoke Bazel remotely and then arrange the
-local workspace as if Bazel had been run locally.
-
-```json
-{
-  "id": {
-    "convenienceSymlinksIdentified": {}
-  },
-  "convenienceSymlinksIdentified": {
-    "convenienceSymlinks": [
-      {
-        "path": "bazel-bin",
-        "action": "CREATE",
-        "target": "execroot/google3/bazel-out/k8-fastbuild/bin"
-      },
-      {
-        "path": "bazel-genfiles",
-        "action": "CREATE",
-        "target": "execroot/google3/bazel-out/k8-fastbuild/genfiles"
-      },
-      {
-        "path": "bazel-out",
-        "action": "CREATE",
-        "target": "execroot/google3/bazel-out"
-      }
-    ]
-  }
-}
-```
-
-## Fetch
-
-Indicates that a Fetch operation occurred as a part of the command execution.
-Unlike other events, if a cached fetch result is re-used, this event does not
-appear in the BEP stream.
-
-## NamedSetOfFiles
-
-`NamedSetOfFiles` events report a structure matching a
-[`depset`](/extending/depsets) of files produced during command evaluation.
-Transitively included depsets are identified by `NamedSetOfFilesId`.
-
-For more information on interpreting a stream's `NamedSetOfFiles` events, see
-the [BEP examples page](/remote/bep-examples#consuming-namedsetoffiles).
-
-## OptionsParsed
-
-A single `OptionsParsed` event lists all options applied to the command,
-separating startup options from command options. It also includes the
-[InvocationPolicy](/reference/command-line-reference#flag--invocation_policy),
-if any.
-
-```json
-{
-  "id": {
-    "optionsParsed": {}
-  },
-  "optionsParsed": {
-    "startupOptions": [
-      "--max_idle_secs=10800",
-      "--noshutdown_on_low_sys_mem",
-      "--connect_timeout_secs=30",
-      "--output_user_root=/tmp/.cache/bazel/_bazel_foo",
-      "--output_base=/tmp/.cache/bazel/_bazel_foo/a61fd0fbee3f9d6c1e30d54b68655d35",
-      "--deep_execroot",
-      "--expand_configs_in_place",
-      "--idle_server_tasks",
-      "--write_command_log",
-      "--nowatchfs",
-      "--nofatal_event_bus_exceptions",
-      "--nowindows_enable_symlinks",
-      "--noclient_debug"
-    ],
-    "cmdLine": [
-      "--enable_platform_specific_config",
-      "--build_event_json_file=/tmp/bep.json"
-    ],
-    "explicitCmdLine": [
-      "--build_event_json_file=/tmp/bep.json"
-    ],
-    "invocationPolicy": {}
-  }
-}
-```
-
-## PatternExpanded
-
-`PatternExpanded` events indicate the set of all targets that match the
-patterns supplied on the command line. For successful commands, a single event
-is present with all patterns in the `PatternExpandedId` and all targets in the
-`PatternExpanded` event's *children*. If a pattern expands to any
-`test_suite`s, the event also includes the set of test targets contained in
-each `test_suite`. For each pattern that fails to resolve, BEP contains an
-additional [`Aborted`](#aborted) event with a `PatternExpandedId` identifying
-the pattern.
-
-```json
-{
-  "id": {
-    "pattern": {
-      "pattern": ["//base:all"]
-    }
-  },
-  "children": [
-    {"targetConfigured": {"label": "//base:foo"}},
-    {"targetConfigured": {"label": "//base:foobar"}}
-  ],
-  "expanded": {
-    "testSuiteExpansions": {
-      "suiteLabel": "//base:suite",
-      "testLabels": "//base:foo_test"
-    }
-  }
-}
-```
-
-## Progress
-
-Progress events contain the standard output and standard error produced by
-Bazel during command execution. These events are also auto-generated as needed
-to announce events that have not been announced by a logical "parent" event
-(in particular, [NamedSetOfFiles](#namedsetoffiles)).
-
-## TargetComplete
-
-For each `(target, configuration, aspect)` combination that completes the
-execution phase, a `TargetComplete` event is included in BEP. The event
-contains the target's success/failure and the target's requested output
-groups.
-
-```json
-{
-  "id": {
-    "targetCompleted": {
-      "label": "//examples/py:bep",
-      "configuration": {
-        "id": "a5d130b0966b4a9ca2d32725aa5baf40e215bcfc4d5cdcdc60f5cc5b4918903b"
-      }
-    }
-  },
-  "completed": {
-    "success": true,
-    "outputGroup": [
-      {
-        "name": "default",
-        "fileSets": [
-          {
-            "id": "0"
-          }
-        ]
-      }
-    ]
-  }
-}
-```
-
-## TargetConfigured
-
-For each target that completes the analysis phase, a `TargetConfigured` event
-is included in BEP. This is the authoritative source for a target's "rule
-kind" attribute. The configuration(s) applied to the target appear in the
-announced *children* of the event.
-
-For example, building with the `--experimental_multi_cpu` option may produce
-the following `TargetConfigured` event for a single target with two
-configurations:
-
-```json
-{
-  "id": {
-    "targetConfigured": {
-      "label": "//starlark_configurations/multi_arch_binary:foo"
-    }
-  },
-  "children": [
-    {
-      "targetCompleted": {
-        "label": "//starlark_configurations/multi_arch_binary:foo",
-        "configuration": {
-          "id": "c62b30c8ab7b9fc51a05848af9276529842a11a7655c71327ade26d7c894c818"
-        }
-      }
-    },
-    {
-      "targetCompleted": {
-        "label": "//starlark_configurations/multi_arch_binary:foo",
-        "configuration": {
-          "id": "eae0379b65abce68d54e0924c0ebcbf3d3df26c6e84ef7b2be51e8dc5b513c99"
-        }
-      }
-    }
-  ],
-  "configured": {
-    "targetKind": "foo_binary rule"
-  }
-}
-```
-
-## TargetSummary
-
-For each `(target, configuration)` pair that is executed, a `TargetSummary`
-event is included with an aggregate success result encompassing the configured
-target's execution and all aspects applied to that configured target.
-
-## TestResult
-
-If testing is requested, a `TestResult` event is sent for each test attempt,
-shard, and run per test. This allows BEP consumers to identify precisely which
-test actions failed their tests and identify the test outputs (such as logs,
-test.xml files) for each test action.
-
-## TestSummary
-
-If testing is requested, a `TestSummary` event is sent for each test `(target,
-configuration)`, containing information necessary to interpret the test's
-results. The number of attempts, shards and runs per test are included to
-enable BEP consumers to differentiate artifacts across these dimensions. The
-attempts and runs per test are considered while producing the aggregate
-`TestStatus` to differentiate `FLAKY` tests from `FAILED` tests.
-
-## UnstructuredCommandLine
-
-Unlike [CommandLine](#commandline), this event carries the unparsed command
-line flags in string form as encountered by the build tool after expanding all
-[`.bazelrc`](/run/bazelrc) files and
-considering the `--config` flag.
-
-The `UnstructuredCommandLine` event may be relied upon to precisely reproduce a
-given command execution.
-
-## WorkspaceConfig
-
-A single `WorkspaceConfig` event contains configuration information regarding
-the workspace, such as the execution root.
-
-## WorkspaceStatus
-
-A single `WorkspaceStatus` event contains the result of the [workspace status
-command](/docs/user-manual#workspace-status).
diff --git a/7.6.1/remote/bep.mdx b/7.6.1/remote/bep.mdx
deleted file mode 100644
index 936f174..0000000
--- a/7.6.1/remote/bep.mdx
+++ /dev/null
@@ -1,149 +0,0 @@
----
-title: 'Build Event Protocol'
----
-
-
-
-The [Build Event
-Protocol](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto)
-(BEP) allows third-party programs to gain insight into a Bazel invocation. For
-example, you could use the BEP to gather information for an IDE
-plugin or a dashboard that displays build results.
-
-The protocol is a set of [protocol
-buffer](https://developers.google.com/protocol-buffers/) messages with some
-semantics defined on top of it. It includes information about build and test
-results, build progress, the build configuration and much more. The BEP is
-intended to be consumed programmatically and makes parsing Bazel’s
-command line output a thing of the past.
-
-The Build Event Protocol represents information about a build as events.
A -build event is a protocol buffer message consisting of a build event identifier, -a set of child event identifiers, and a payload. - -* __Build Event Identifier:__ Depending on the kind of build event, it might be -an [opaque -string](https://github.com/bazelbuild/bazel/blob/16a107d/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto#L91) -or [structured -information](https://github.com/bazelbuild/bazel/blob/16a107d/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto#L123) -revealing more about the build event. A build event identifier is unique within -a build. - -* __Children:__ A build event may announce other build events, by including -their build event identifiers in its [children -field](https://github.com/bazelbuild/bazel/blob/16a107d/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto#L469). -For example, the `PatternExpanded` build event announces the targets it expands -to as children. The protocol guarantees that all events, except for the first -event, are announced by a previous event. - -* __Payload:__ The payload contains structured information about a build event, -encoded as a protocol buffer message specific to that event. Note that the -payload might not be the expected type, but could be an `Aborted` message -if the build aborted prematurely. - -### Build event graph - -All build events form a directed acyclic graph through their parent and child -relationship. Every build event except for the initial build event has one or -more parent events. Please note that not all parent events of a child event must -necessarily be posted before it. When a build is complete (succeeded or failed) -all announced events will have been posted. In case of a Bazel crash or a failed -network transport, some announced build events may never be posted. - -The event graph's structure reflects the lifecycle of a command. Every BEP -graph has the following characteristic shape: - -1. The root event is always a [`BuildStarted`](/remote/bep-glossary#buildstarted) - event. All other events are its descendants. -1. Immediate children of the BuildStarted event contain metadata about the - command. -1. Events containing data produced by the command, such as files built and test - results, appear before the [`BuildFinished`](/remote/bep-glossary#buildfinished) - event. -1. The [`BuildFinished`](/remote/bep-glossary#buildfinished) event *may* be followed - by events containing summary information about the build (for example, metric - or profiling data). - -## Consuming Build Event Protocol - -### Consume in binary format - -To consume the BEP in a binary format: - -1. Have Bazel serialize the protocol buffer messages to a file by specifying the - option `--build_event_binary_file=/path/to/file`. The file will contain - serialized protocol buffer messages with each message being length delimited. - Each message is prefixed with its length encoded as a variable length integer. - This format can be read using the protocol buffer library’s - [`parseDelimitedFrom(InputStream)`](https://developers.google.com/protocol-buffers/docs/reference/java/com/google/protobuf/AbstractParser#parseDelimitedFrom-java.io.InputStream-) - method. - -2. Then, write a program that extracts the relevant information from the - serialized protocol buffer message. 
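-
-The two steps above can be sketched in a few lines of Python. This is a
-minimal sketch, not official tooling: it assumes you have generated Python
-bindings for `build_event_stream.proto` yourself (Bazel does not ship them),
-so the module name `build_event_stream_pb2` and the `read_varint` helper are
-illustrative.
-
-```python
-# Minimal sketch: iterate over varint-length-delimited BuildEvent messages
-# written by --build_event_binary_file. Assumes self-generated proto bindings
-# named build_event_stream_pb2.
-from build_event_stream_pb2 import BuildEvent
-
-def read_varint(f):
-    """Reads one varint length prefix; returns None at end of stream."""
-    shift, result = 0, 0
-    while True:
-        b = f.read(1)
-        if not b:
-            return None
-        result |= (b[0] & 0x7F) << shift
-        if not b[0] & 0x80:
-            return result
-        shift += 7
-
-with open("bep.bin", "rb") as f:
-    while (size := read_varint(f)) is not None:
-        event = BuildEvent()
-        event.ParseFromString(f.read(size))
-        print(event.id.WhichOneof("id"))  # extract what you need here
-```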
-
-### Consume in text or JSON formats
-
-The following Bazel command line flags will output the BEP in
-human-readable formats, such as text and JSON:
-
-```
---build_event_text_file
---build_event_json_file
-```
-
-## Build Event Service
-
-The [Build Event
-Service](https://github.com/googleapis/googleapis/blob/master/google/devtools/build/v1/publish_build_event.proto)
-Protocol is a generic [gRPC](https://www.grpc.io) service for publishing build
-events. The Build Event Service protocol is independent of the BEP and treats
-BEP events as opaque bytes. Bazel ships with a gRPC client implementation of
-the Build Event Service protocol that publishes Build Event Protocol events.
-You can specify the endpoint to send the events to using the
-`--bes_backend=HOST:PORT` flag. If your backend uses gRPC, you must prefix the
-address with the appropriate scheme: `grpc://` for plaintext gRPC and
-`grpcs://` for gRPC with TLS enabled.
-
-### Build Event Service flags
-
-Bazel has several flags related to the Build Event Service protocol, including:
-
-* `--bes_backend`
-* `--[no]bes_best_effort`
-* `--[no]bes_lifecycle_events`
-* `--bes_results_url`
-* `--bes_timeout`
-* `--project_id`
-
-For a description of each of these flags, see the
-[Command-Line Reference](/reference/command-line-reference).
-
-### Authentication and security
-
-Bazel’s Build Event Service implementation also supports authentication and
-TLS. These settings can be controlled using the flags below. Please note that
-these flags are also used for Bazel’s Remote Execution. This implies that the
-Build Event Service and Remote Execution endpoints need to share the same
-authentication and TLS infrastructure.
-
-* `--[no]google_default_credentials`
-* `--google_credentials`
-* `--google_auth_scopes`
-* `--tls_certificate`
-* `--[no]tls_enabled`
-
-For a description of each of these flags, see the
-[Command-Line Reference](/reference/command-line-reference).
-
-### Build Event Service and remote caching
-
-The BEP typically contains many references to log files (test.log, test.xml,
-etc.) stored on the machine where Bazel is running. A remote BES server
-typically can't access these files, as they are on different machines. A way
-to work around this issue is to use Bazel with [remote
-caching](/remote/caching).
-Bazel will upload all output files to the remote cache (including files
-referenced in the BEP) and the BES server can then fetch the referenced files
-from the cache.
-
-See [GitHub issue 3689](https://github.com/bazelbuild/bazel/issues/3689) for
-more details.
diff --git a/7.6.1/remote/cache-local.mdx b/7.6.1/remote/cache-local.mdx
deleted file mode 100644
index e6dc0c0..0000000
--- a/7.6.1/remote/cache-local.mdx
+++ /dev/null
@@ -1,91 +0,0 @@
----
-title: 'Debugging Remote Cache Hits for Local Execution'
----
-
-
-
-This page describes how to investigate cache misses in the context of local
-execution.
-
-This page assumes that you have a build and/or test that successfully builds
-locally and is set up to utilize remote caching, and that you want to ensure
-that the remote cache is being effectively utilized.
-
-For tips on how to check your cache hit rate and how to compare the execution
-logs between two Bazel invocations, see
-[Debugging Remote Cache Hits for Remote Execution](/remote/cache-remote).
-Everything presented in that guide also applies to remote caching with local
-execution. However, local execution presents some additional challenges.
-
-## Checking your cache hit rate
-
-Successful remote cache hits will show up in the status line, similar to
-[Cache Hits rate with Remote
-Execution](/remote/cache-remote#check-cache-hits).
-
-In the standard output of your Bazel run, you will see something like the
-following:
-
-```none {:.devsite-disable-click-to-copy}
- INFO: 7 processes: 3 remote cache hit, 4 linux-sandbox.
-```
-
-This means that out of 7 attempted actions, 3 got a remote cache hit and 4
-actions did not have cache hits and were executed locally using the
-`linux-sandbox` strategy. Local cache hits are not included in this summary.
-If you are getting 0 processes (or a number lower than expected), run `bazel
-clean` followed by your build/test command.
-
-## Troubleshooting cache hits
-
-If you are not getting the cache hit rate you are expecting, do the following:
-
-### Ensure successful communication with the remote endpoint
-
-To ensure your build is successfully communicating with the remote cache,
-follow the steps in this section.
-
-1. Check your output for warnings
-
-   With remote execution, a failure to talk to the remote endpoint would fail
-   your build. On the other hand, a cacheable local build would not fail if it
-   cannot cache. Check the output of your Bazel invocation for warnings, such
-   as:
-
-   ```none {:.devsite-disable-click-to-copy}
-   WARNING: Error reading from the remote cache:
-   ```
-
-   or
-
-   ```none {:.devsite-disable-click-to-copy}
-   WARNING: Error writing to the remote cache:
-   ```
-
-   Such warnings will be followed by an error message detailing the connection
-   problem, which should help you debug: for example, a mistyped endpoint name
-   or incorrectly set credentials. Find and address any such errors. If the
-   error message you see does not give you enough information, try adding
-   `--verbose_failures`.
-
-2. Follow the steps from [Troubleshooting cache hits for remote
-   execution](/remote/cache-remote#troubleshooting_cache_hits) to
-   ensure that your cache-writing Bazel invocations are able to get cache hits
-   on the same machine and across machines.
-
-3. Ensure your cache-reading Bazel invocations can get cache hits.
-
-   a. Since cache-reading Bazel invocations will have a different command-line
-      setup, take additional care to ensure that they are properly set up to
-      communicate with the remote cache. Ensure the `--remote_cache` flag is
-      set and there are no warnings in the output.
-
-   b. Ensure your cache-reading Bazel invocations build the same targets as
-      the cache-writing Bazel invocations.
-
-   c. Follow the same steps as to [ensure caching across
-      machines](/remote/cache-remote#caching-across-machines),
-      to ensure caching from your cache-writing Bazel invocation to your
-      cache-reading Bazel invocation.
diff --git a/7.6.1/remote/cache-remote.mdx b/7.6.1/remote/cache-remote.mdx
deleted file mode 100644
index b6bb957..0000000
--- a/7.6.1/remote/cache-remote.mdx
+++ /dev/null
@@ -1,179 +0,0 @@
----
-title: 'Debugging Remote Cache Hits for Remote Execution'
----
-
-
-
-This page describes how to check your cache hit rate and how to investigate
-cache misses in the context of remote execution.
-
-This page assumes that you have a build and/or test that successfully
-utilizes remote execution, and you want to ensure that you are effectively
-utilizing the remote cache.
-
-## Checking your cache hit rate
-
-In the standard output of your Bazel run, look at the `INFO` line that lists
-processes, which roughly correspond to Bazel actions. That line details
-where the action was run. Look for the `remote` label, which indicates an
-action executed remotely, `linux-sandbox` for actions executed in a local
-sandbox, and other values for other execution strategies. An action whose
-result came from a remote cache is displayed as `remote cache hit`.
-
-For example:
-
-```none {:.devsite-disable-click-to-copy}
-INFO: 11 processes: 6 remote cache hit, 3 internal, 2 remote.
-```
-
-In this example there were 6 remote cache hits, and 2 actions did not have
-cache hits and were executed remotely. The 3 internal actions can be ignored;
-these are typically tiny internal actions, such as creating symbolic links.
-Local cache hits are not included in this summary. If you are getting 0
-processes (or a number lower than expected), run `bazel clean` followed by
-your build/test command.
-
-## Troubleshooting cache hits
-
-If you are not getting the cache hit rate you are expecting, do the following:
-
-### Ensure re-running the same build/test command produces cache hits
-
-1. Run the build(s) and/or test(s) that you expect to populate the cache. The
-   first time a new build is run on a particular stack, you can expect no
-   remote cache hits. As part of remote execution, action results are stored
-   in the cache and a subsequent run should pick them up.
-
-2. Run `bazel clean`. This command cleans your local cache, which allows
-   you to investigate remote cache hits without the results being masked by
-   local cache hits.
-
-3. Run the build(s) and test(s) that you are investigating again (on the same
-   machine).
-
-4. Check the `INFO` line for the cache hit rate. If you see no processes
-   except `remote cache hit` and `internal`, then your cache is being
-   correctly populated and accessed. In that case, skip to the next section.
-
-5. A likely source of discrepancy is something non-hermetic in the build
-   causing the actions to receive different action keys across the two runs.
-   To find those actions, do the following:
-
-   a. Re-run the build(s) or test(s) in question to obtain execution logs:
-
-      ```posix-terminal
-      bazel clean
-
-      bazel --optional-flags build //your:target --execution_log_compact_file=/tmp/exec1.log
-      ```
-
-   b. [Compare the execution logs](#comparing-the-execution-logs) between the
-      two runs. Ensure that the actions are identical across the two log
-      files. Discrepancies provide a clue about the changes that occurred
-      between the runs. Update your build to eliminate those discrepancies.
-
-   If you are able to resolve the caching problems and now the repeated run
-   produces all cache hits, skip to the next section.
-
-   If your action IDs are identical but there are no cache hits, then
-   something in your configuration is preventing caching. Continue with this
-   section to check for common problems.
-
-6. Check that all actions in the execution log have `cacheable` set to true.
-   If `cacheable` does not appear in the execution log for a given action,
-   that means that the corresponding rule may have a `no-cache` tag in its
-   definition in the `BUILD` file. Look at the `mnemonic` and `target_label`
-   fields in the execution log to help determine where the action is coming
-   from.
-
-7. If the actions are identical and `cacheable` but there are no cache hits,
-   it is possible that your command line includes `--noremote_accept_cached`,
-   which would disable cache lookups for a build.
-
-   If figuring out the actual command line is difficult, use the canonical
-   command line from the
-   [Build Event Protocol](/remote/bep)
-   as follows:
-
-   a. Add `--build_event_text_file=/tmp/bep.txt` to your Bazel command to get
-      the text version of the log.
-
-   b. Open the text version of the log and search for the
-      `structured_command_line` message with `command_line_label: "canonical"`.
-      It will list all the options after expansion.
-
-   c. Search for `remote_accept_cached` and check whether it's set to `false`.
-
-   d. If `remote_accept_cached` is `false`, determine where it is being
-      set to `false`: either at the command line or in a
-      [bazelrc](/run/bazelrc#bazelrc-file-locations) file.
-
-### Ensure caching across machines
-
-After cache hits are happening as expected on the same machine, run the
-same build(s)/test(s) on a different machine. If you suspect that caching is
-not happening across machines, do the following:
-
-1. Make a small modification to your build to avoid hitting existing caches.
-
-2. Run the build on the first machine:
-
-   ```posix-terminal
-   bazel clean
-
-   bazel ... build ... --execution_log_compact_file=/tmp/exec1.log
-   ```
-
-3. Run the build on the second machine, ensuring the modification from step 1
-   is included:
-
-   ```posix-terminal
-   bazel clean
-
-   bazel ... build ... --execution_log_compact_file=/tmp/exec2.log
-   ```
-
-4. [Compare the execution logs](#comparing-the-execution-logs) for the two
-   runs. If the logs are not identical, investigate your build configurations
-   for discrepancies as well as properties from the host environment leaking
-   into either of the builds.
-
-## Comparing the execution logs
-
-The execution log contains records of actions executed during the build.
-Each record describes both the inputs (not only files, but also command line
-arguments, environment variables, etc.) and the outputs of the action. Thus,
-examination of the log can reveal why an action was reexecuted.
-
-The execution log can be produced in one of three formats:
-compact (`--execution_log_compact_file`),
-binary (`--execution_log_binary_file`) or JSON (`--execution_log_json_file`).
-The compact format is recommended, as it produces much smaller files with very
-little runtime overhead. The following instructions work for any format. You
-can also convert between them using the `//src/tools/execlog:converter` tool.
-
-To compare logs for two builds that are not sharing cache hits as expected,
-do the following:
-
-1. Get the execution logs from each build and store them as `/tmp/exec1.log`
-   and `/tmp/exec2.log`.
-
-2. Download the Bazel source code and build the `//src/tools/execlog:parser`
-   tool:
-
-       git clone https://github.com/bazelbuild/bazel.git
-       cd bazel
-       bazel build //src/tools/execlog:parser
-
-3. Use the `//src/tools/execlog:parser` tool to convert the logs into a
-   human-readable text format. In this format, the actions in the second log
-   are sorted to match the order in the first log, making a comparison easier.
-
-       bazel-bin/src/tools/execlog/parser \
-         --log_path=/tmp/exec1.log \
-         --log_path=/tmp/exec2.log \
-         --output_path=/tmp/exec1.log.txt \
-         --output_path=/tmp/exec2.log.txt
-
-4. Use your favourite text differ to diff `/tmp/exec1.log.txt` and
-   `/tmp/exec2.log.txt`.
diff --git a/7.6.1/remote/caching.mdx b/7.6.1/remote/caching.mdx
deleted file mode 100644
index c824c3c..0000000
--- a/7.6.1/remote/caching.mdx
+++ /dev/null
@@ -1,380 +0,0 @@
----
-title: 'Remote Caching'
----
-
-
-
-This page covers remote caching, setting up a server to host the cache, and
-running builds using the remote cache.
-
-A remote cache is used by a team of developers and/or a continuous integration
-(CI) system to share build outputs. If your build is reproducible, the
-outputs from one machine can be safely reused on another machine, which can
-make builds significantly faster.
-
-## Overview
-
-Bazel breaks a build into discrete steps, which are called actions. Each action
-has inputs, output names, a command line, and environment variables. Required
-inputs and expected outputs are declared explicitly for each action.
-
-You can set up a server to be a remote cache for build outputs, which are these
-action outputs. These outputs consist of a list of output file names and the
-hashes of their contents. With a remote cache, you can reuse build outputs
-from another user's build rather than building each new output locally.
-
-To use remote caching:
-
-* Set up a server as the cache's backend
-* Configure the Bazel build to use the remote cache
-* Use Bazel version 0.10.0 or later
-
-The remote cache stores two types of data:
-
-* The action cache, which is a map of action hashes to action result metadata.
-* A content-addressable store (CAS) of output files.
-
-Note that the remote cache additionally stores the stdout and stderr for every
-action. Inspecting the stdout/stderr of Bazel thus is not a good signal for
-[estimating cache hits](/remote/cache-local).
-
-### How a build uses remote caching
-
-Once a server is set up as the remote cache, you use the cache in multiple
-ways:
-
-* Read and write to the remote cache
-* Read and/or write to the remote cache except for specific targets
-* Only read from the remote cache
-* Not use the remote cache at all
-
-When you run a Bazel build that can read and write to the remote cache,
-the build follows these steps:
-
-1. Bazel creates the graph of targets that need to be built, and then creates
-a list of required actions. Each of these actions has declared inputs
-and output filenames.
-2. Bazel checks your local machine for existing build outputs and reuses any
-that it finds.
-3. Bazel checks the cache for existing build outputs. If the output is found,
-Bazel retrieves the output. This is a cache hit.
-4. For required actions where the outputs were not found, Bazel executes the
-actions locally and creates the required build outputs.
-5. New build outputs are uploaded to the remote cache.
-
-## Setting up a server as the cache's backend
-
-You need to set up a server to act as the cache's backend. An HTTP/1.1
-server can treat Bazel's data as opaque bytes and so many existing servers
-can be used as a remote caching backend. Bazel's
-[HTTP caching protocol](#http-caching) is what supports remote
-caching.
-
-You are responsible for choosing, setting up, and maintaining the backend
-server that will store the cached outputs. When choosing a server, consider:
-
-* Networking speed. For example, if your team is in the same office, you may
-want to run your own local server.
-* Security. The remote cache will have your binaries and so needs to be secure.
-* Ease of management. For example, Google Cloud Storage is a fully managed
-service.
-
-There are many backends that can be used for a remote cache. Some options
-include:
-
-* [nginx](#nginx)
-* [bazel-remote](#bazel-remote)
-* [Google Cloud Storage](#cloud-storage)
-
-### nginx
-
-nginx is an open source web server. With its [WebDAV
-module](https://nginx.org/en/docs/http/ngx_http_dav_module.html), it can be
-used as a remote cache for Bazel. On Debian and Ubuntu you can install the
-`nginx-extras` package. On macOS nginx is available via Homebrew:
-
-```posix-terminal
-brew tap denji/nginx
-
-brew install nginx-full --with-webdav
-```
-
-Below is an example configuration for nginx. Note that you will need to
-change `/path/to/cache/dir` to a valid directory where nginx has permission
-to write and read. You may need to change the `client_max_body_size` option
-to a larger value if you have larger output files. The server will require
-other configuration such as authentication.
-
-Example configuration for the `server` section in `nginx.conf`:
-
-```nginx
-location /cache/ {
-  # The path to the directory where nginx should store the cache contents.
-  root /path/to/cache/dir;
-  # Allow PUT
-  dav_methods PUT;
-  # Allow nginx to create the /ac and /cas subdirectories.
-  create_full_put_path on;
-  # The maximum size of a single file.
-  client_max_body_size 1G;
-  allow all;
-}
-```
-
-### bazel-remote
-
-bazel-remote is an open source remote build cache that you can use on
-your infrastructure. It has been successfully used in production at
-several companies since early 2018. Note that the Bazel project does
-not provide technical support for bazel-remote.
-
-This cache stores contents on disk and also provides garbage collection
-to enforce an upper storage limit and clean unused artifacts. The cache is
-available as a [Docker
-image](https://hub.docker.com/r/buchgr/bazel-remote-cache/) and its code is
-available on [GitHub](https://github.com/buchgr/bazel-remote/).
-Both the REST and gRPC remote cache APIs are supported.
-
-Refer to the [GitHub](https://github.com/buchgr/bazel-remote/)
-page for instructions on how to use it.
-
-### Google Cloud Storage
-
-[Google Cloud Storage](https://cloud.google.com/storage) is a fully managed
-object store which provides an HTTP API that is compatible with Bazel's
-remote caching protocol. It requires that you have a Google Cloud account
-with billing enabled.
-
-To use Cloud Storage as the cache:
-
-1. [Create a storage bucket](https://cloud.google.com/storage/docs/creating-buckets).
-Ensure that you select a bucket location that's closest to you, as network
-bandwidth is important for the remote cache.
-
-2. Create a service account for Bazel to authenticate to Cloud Storage. See
-[Creating a service account](https://cloud.google.com/iam/docs/creating-managing-service-accounts#creating_a_service_account).
-
-3. Generate a secret JSON key and then pass it to Bazel for authentication.
-Store the key securely, as anyone with the key can read and write arbitrary
-data to/from your GCS bucket.
-
-4. Connect to Cloud Storage by adding the following flags to your Bazel
-command:
-   * Pass the following URL to Bazel by using the flag:
-     `--remote_cache=https://storage.googleapis.com/bucket-name` where
-     `bucket-name` is the name of your storage bucket.
-   * Pass the authentication key using the flag:
-     `--google_credentials=/path/to/your/secret-key.json`, or
-     `--google_default_credentials` to use [Application
-     Authentication](https://cloud.google.com/docs/authentication/production).
-
-5. You can configure Cloud Storage to automatically delete old files. To do
-so, see [Managing Object
-Lifecycles](https://cloud.google.com/storage/docs/managing-lifecycles).
-
-### Other servers
-
-You can set up any HTTP/1.1 server that supports PUT and GET as the cache's
-backend. Users have reported success with caching backends such as
-[Hazelcast](https://hazelcast.com), [Apache httpd](http://httpd.apache.org),
-and [AWS S3](https://aws.amazon.com/s3).
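-
-Before pointing Bazel at a candidate backend, it can be useful to smoke-test
-the endpoint by hand. The following minimal Python sketch assumes a server
-listening at `localhost:8080` behind a `/cache/` prefix, matching the example
-in the [HTTP caching protocol](#http-caching) section below; adjust the URL
-layout for your server.
-
-```python
-# Smoke test: round-trip a blob through the CAS of an HTTP cache backend.
-# The host, port and /cache/ prefix are assumptions; adjust for your setup.
-import hashlib
-import urllib.request
-
-blob = b"123456789"
-url = "http://localhost:8080/cache/cas/" + hashlib.sha256(blob).hexdigest()
-
-# Upload the blob, then read it back and verify the contents.
-urllib.request.urlopen(urllib.request.Request(url, data=blob, method="PUT"))
-assert urllib.request.urlopen(url).read() == blob
-```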
-
-## Authentication
-
-As of version 0.11.0, Bazel supports HTTP Basic Authentication. You can pass
-a username and password to Bazel via the remote cache URL. The syntax is
-`https://username:password@hostname.com:port/path`. Note that HTTP Basic
-Authentication transmits the username and password in plaintext over the
-network, so it's critical to always use it with HTTPS.
-
-## HTTP caching protocol
-
-Bazel supports remote caching via HTTP/1.1. The protocol is conceptually
-simple: binary data (BLOB) is uploaded via PUT requests and downloaded via
-GET requests. Action result metadata is stored under the path `/ac/` and
-output files are stored under the path `/cas/`.
-
-For example, consider a remote cache running under
-`http://localhost:8080/cache`. A Bazel request to download action result
-metadata for an action with the SHA256 hash `01ba4719...` will look as
-follows:
-
-```http
-GET /cache/ac/01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b HTTP/1.1
-Host: localhost:8080
-Accept: */*
-Connection: Keep-Alive
-```
-
-A Bazel request to upload an output file with the SHA256 hash `15e2b0d3...`
-to the CAS will look as follows:
-
-```http
-PUT /cache/cas/15e2b0d3c33891ebb0f1ef609ec419420c20e320ce94c65fbc8c3312448eb225 HTTP/1.1
-Host: localhost:8080
-Accept: */*
-Content-Length: 9
-Connection: Keep-Alive
-
-0x310x320x330x340x350x360x370x380x39
-```
-
-## Run Bazel using the remote cache
-
-Once a server is set up as the remote cache, to use the remote cache you
-need to add flags to your Bazel command. See the list of configurations and
-their flags below.
-
-You may also need to configure authentication, which is specific to your
-chosen server.
-
-You may want to add these flags in a `.bazelrc` file so that you don't
-need to specify them every time you run Bazel. Depending on your project and
-team dynamics, you can add flags to a `.bazelrc` file that is:
-
-* On your local machine
-* In your project's workspace, shared with the team
-* On the CI system
-
-### Read from and write to the remote cache
-
-Take care in deciding who has the ability to write to the remote cache. You
-may want only your CI system to be able to write to the remote cache.
-
-Use the following flag to read from and write to the remote cache:
-
-```posix-terminal
-build --remote_cache=http://your.host:port
-```
-
-Besides `HTTP`, the following protocols are also supported: `HTTPS`, `grpc`,
-`grpcs`.
-
-Use the following flag in addition to the one above to only read from the
-remote cache:
-
-```posix-terminal
-build --remote_upload_local_results=false
-```
-
-### Exclude specific targets from using the remote cache
-
-To exclude specific targets from using the remote cache, tag the target with
-`no-remote-cache`. For example:
-
-```starlark
-java_library(
-    name = "target",
-    tags = ["no-remote-cache"],
-)
-```
-
-### Delete content from the remote cache
-
-Deleting content from the remote cache is part of managing your server.
-How you delete content from the remote cache depends on the server you have
-set up as the cache. When deleting outputs, either delete the entire cache,
-or delete old outputs.
-
-The cached outputs are stored as a set of names and hashes. When deleting
-content, there's no way to distinguish which output belongs to a specific
-build.
-
-You may want to delete content from the cache to:
-
-* Create a clean cache after a cache was poisoned
-* Reduce the amount of storage used by deleting old outputs
-
-### Unix sockets
-
-The remote HTTP cache supports connecting over unix domain sockets. The
-behavior is similar to curl's `--unix-socket` flag. Use the following to
-configure a unix domain socket:
-
-```posix-terminal
-build --remote_cache=http://your.host:port
-build --remote_cache_proxy=unix:/path/to/socket
-```
-
-This feature is unsupported on Windows.
-
-## Disk cache
-
-Bazel can use a directory on the file system as a remote cache. This is
-useful for sharing build artifacts when switching branches and/or working
-on multiple workspaces of the same project, such as multiple checkouts.
-Enable the disk cache as follows:
-
-```posix-terminal
-build --disk_cache=path/to/build/cache
-```
-
-You can pass a user-specific path to the `--disk_cache` flag using the `~`
-alias (Bazel will substitute the current user's home directory). This comes
-in handy when enabling the disk cache for all developers of a project via the
-project's checked-in `.bazelrc` file.
-
-### Garbage collection
-
-Starting with Bazel 7.4, you can use `--experimental_disk_cache_gc_max_size`
-and `--experimental_disk_cache_gc_max_age` to set a maximum size for the disk
-cache or for the age of individual cache entries. Bazel will automatically
-garbage collect the disk cache while idling between builds; the idle timer
-can be set with `--experimental_disk_cache_gc_idle_delay` (defaulting to 5
-minutes).
-
-As an alternative to automatic garbage collection, we also provide a
-[tool](https://github.com/bazelbuild/bazel/tree/master/src/tools/diskcache)
-to run garbage collection on demand.
-
-## Known issues
-
-**Input file modification during a build**
-
-When an input file is modified during a build, Bazel might upload invalid
-results to the remote cache. You can enable change detection with
-the `--experimental_guard_against_concurrent_changes` flag. There
-are no known issues and it will be enabled by default in a future release.
-See [issue #3360](https://github.com/bazelbuild/bazel/issues/3360) for
-updates. Generally, avoid modifying source files during a build.
-
-**Environment variables leaking into an action**
-
-An action definition contains environment variables. This can be a problem
-for sharing remote cache hits across machines. For example, environments with
-different `$PATH` variables won't share cache hits. Only environment
-variables explicitly whitelisted via `--action_env` are included in an action
-definition. Bazel's Debian/Ubuntu package used to install `/etc/bazel.bazelrc`
-with a whitelist of environment variables including `$PATH`. If you are
-getting fewer cache hits than expected, check that your environment doesn't
-have an old `/etc/bazel.bazelrc` file.
-
-**Bazel does not track tools outside a workspace**
-
-Bazel currently does not track tools outside a workspace. This can be a
-problem if, for example, an action uses a compiler from `/usr/bin/`. Then,
-two users with different compilers installed will wrongly share cache hits
-because the outputs are different but they have the same action hash. See
-[issue #4558](https://github.com/bazelbuild/bazel/issues/4558) for updates.
-
-**Incremental in-memory state is lost when running builds inside docker
-containers**
-
-Bazel uses a server/client architecture even when running in a single docker
-container.
-On the server side, Bazel maintains an in-memory state which speeds up builds. -When running builds inside docker containers such as in CI, the in-memory state is lost -and Bazel must rebuild it before using the remote cache. - -## External links - -* **Your Build in a Datacenter:** The Bazel team gave a [talk](https://fosdem.org/2018/schedule/event/datacenter_build/) about remote caching and execution at FOSDEM 2018. - -* **Faster Bazel builds with remote caching: a benchmark:** Nicolò Valigi wrote a [blog post](https://nicolovaligi.com/faster-bazel-remote-caching-benchmark.html) -in which he benchmarks remote caching in Bazel. - -* [Adapting Rules for Remote Execution](/remote/rules) -* [Troubleshooting Remote Execution](/remote/sandbox) -* [WebDAV module](https://nginx.org/en/docs/http/ngx_http_dav_module.html) -* [Docker image](https://hub.docker.com/r/buchgr/bazel-remote-cache/) -* [bazel-remote](https://github.com/buchgr/bazel-remote/) -* [Google Cloud Storage](https://cloud.google.com/storage) -* [Google Cloud Console](https://cloud.google.com/console) -* [Bucket locations](https://cloud.google.com/storage/docs/bucket-locations) -* [Hazelcast](https://hazelcast.com) -* [Apache httpd](http://httpd.apache.org) -* [AWS S3](https://aws.amazon.com/s3) -* [issue #3360](https://github.com/bazelbuild/bazel/issues/3360) -* [gRPC](https://grpc.io/) -* [gRPC protocol](https://github.com/bazelbuild/remote-apis/blob/main/build/bazel/remote/execution/v2/remote_execution.proto) -* [Buildbarn](https://github.com/buildbarn) -* [Buildfarm](https://github.com/bazelbuild/bazel-buildfarm) -* [BuildGrid](https://gitlab.com/BuildGrid/buildgrid) -* [issue #4558](https://github.com/bazelbuild/bazel/issues/4558) -* [Application Authentication](https://cloud.google.com/docs/authentication/production) -* [TurboCache](https://github.com/allada/turbo-cache) diff --git a/7.6.1/remote/creating.mdx b/7.6.1/remote/creating.mdx deleted file mode 100644 index f9b761f..0000000 --- a/7.6.1/remote/creating.mdx +++ /dev/null @@ -1,260 +0,0 @@ ---- -title: 'Creating Persistent Workers' ---- - - - -[Persistent workers](/remote/persistent) can make your build faster. If -you have repeated actions in your build that have a high startup cost or would -benefit from cross-action caching, you may want to implement your own persistent -worker to perform these actions. - -The Bazel server communicates with the worker using `stdin`/`stdout`. It -supports the use of protocol buffers or JSON strings. - -The worker implementation has two parts: - -* The [worker](#making-worker). -* The [rule that uses the worker](#rule-uses-worker). - -## Making the worker - -A persistent worker upholds a few requirements: - -* It reads - [WorkRequests](https://github.com/bazelbuild/bazel/blob/54a547f30fd582933889b961df1d6e37a3e33d85/src/main/protobuf/worker_protocol.proto#L36) - from its `stdin`. -* It writes - [WorkResponses](https://github.com/bazelbuild/bazel/blob/54a547f30fd582933889b961df1d6e37a3e33d85/src/main/protobuf/worker_protocol.proto#L77) - (and only `WorkResponse`s) to its `stdout`. -* It accepts the `--persistent_worker` flag. The wrapper must recognize the - `--persistent_worker` command-line flag and only make itself persistent if - that flag is passed, otherwise it must do a one-shot compilation and exit. - -If your program upholds these requirements, it can be used as a persistent -worker! 
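-
-To make these requirements concrete, here is a minimal sketch of a JSON
-persistent worker loop in Python. It assumes newline-delimited JSON framing
-on `stdin`/`stdout`, and `do_work()` is a hypothetical stand-in for the
-actual tool logic.
-
-```python
-# Minimal sketch of a JSON persistent worker; do_work() is a hypothetical
-# stand-in for the real tool. Assumes newline-delimited JSON framing.
-import json
-import sys
-
-def do_work(arguments):
-    return 0, ""  # (exit code, output) -- replace with real tool logic
-
-def main():
-    if "--persistent_worker" not in sys.argv:
-        code, output = do_work(sys.argv[1:])  # one-shot compilation mode
-        sys.stderr.write(output)
-        return code
-    for line in sys.stdin:  # one JSON WorkRequest per line
-        request = json.loads(line)
-        code, output = do_work(request.get("arguments", []))
-        response = {
-            "exitCode": code,
-            "output": output,
-            "requestId": request.get("requestId", 0),
-        }
-        sys.stdout.write(json.dumps(response) + "\n")
-        sys.stdout.flush()  # Bazel waits on stdout; flush every response
-    return 0
-
-if __name__ == "__main__":
-    sys.exit(main())
-```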
- -### Work requests - -A `WorkRequest` contains a list of arguments to the worker, a list of -path-digest pairs representing the inputs the worker can access (this isn’t -enforced, but you can use this info for caching), and a request id, which is 0 -for singleplex workers. - -NOTE: While the protocol buffer specification uses "snake case" (`request_id`), -the JSON protocol uses "camel case" (`requestId`). This document uses camel case -in the JSON examples, but snake case when talking about the field regardless of -protocol. - -```json -{ - "arguments" : ["--some_argument"], - "inputs" : [ - { "path": "/path/to/my/file/1", "digest": "fdk3e2ml23d"}, - { "path": "/path/to/my/file/2", "digest": "1fwqd4qdd" } - ], - "requestId" : 12 -} -``` - -The optional `verbosity` field can be used to request extra debugging output -from the worker. It is entirely up to the worker what and how to output. Higher -values indicate more verbose output. Passing the `--worker_verbose` flag to -Bazel sets the `verbosity` field to 10, but smaller or larger values can be used -manually for different amounts of output. - -The optional `sandbox_dir` field is used only by workers that support -[multiplex sandboxing](/remote/multiplex). - -### Work responses - -A `WorkResponse` contains a request id, a zero or nonzero exit code, and an -output string that describes any errors encountered in processing or executing -the request. The `output` field contains a short description; complete logs may -be written to the worker's `stderr`. Because workers may only write -`WorkResponses` to `stdout`, it's common for the worker to redirect the `stdout` -of any tools it uses to `stderr`. - -```json -{ - "exitCode" : 1, - "output" : "Action failed with the following message:\nCould not find input - file \"/path/to/my/file/1\"", - "requestId" : 12 -} -``` - -As per the norm for protobufs, all fields are optional. However, Bazel requires -the `WorkRequest` and the corresponding `WorkResponse`, to have the same request -id, so the request id must be specified if it is nonzero. This is a valid -`WorkResponse`. - -```json -{ - "requestId" : 12, -} -``` - -A `request_id` of 0 indicates a "singleplex" request, used when this request -cannot be processed in parallel with other requests. The server guarantees that -a given worker receives requests with either only `request_id` 0 or only -`request_id` greater than zero. Singleplex requests are sent in serial, for -example if the server doesn't send another request until it has received a -response (except for cancel requests, see below). - -**Notes** - -* Each protocol buffer is preceded by its length in `varint` format (see - [`MessageLite.writeDelimitedTo()`](https://developers.google.com/protocol-buffers/docs/reference/java/com/google/protobuf/MessageLite.html#writeDelimitedTo-java.io.OutputStream-). -* JSON requests and responses are not preceded by a size indicator. -* JSON requests uphold the same structure as the protobuf, but use standard - JSON and use camel case for all field names. -* In order to maintain the same backward and forward compatibility properties - as protobuf, JSON workers must tolerate unknown fields in these messages, - and use the protobuf defaults for missing values. 
-* Bazel stores requests as protobufs and converts them to JSON using
-  [protobuf's JSON format](https://cs.opensource.google/protobuf/protobuf/+/master:java/util/src/main/java/com/google/protobuf/util/JsonFormat.java).
-
-### Cancellation
-
-Workers can optionally allow work requests to be cancelled before they finish.
-This is particularly useful in connection with dynamic execution, where local
-execution can regularly be interrupted by a faster remote execution. To allow
-cancellation, add `supports-worker-cancellation: 1` to the
-`execution-requirements` field (see below) and set the
-`--experimental_worker_cancellation` flag.
-
-A **cancel request** is a `WorkRequest` with the `cancel` field set (and
-similarly a **cancel response** is a `WorkResponse` with the `was_cancelled`
-field set). The only other field that must be in a cancel request or cancel
-response is `request_id`, indicating which request to cancel. The `request_id`
-field will be 0 for singleplex workers or the non-0 `request_id` of a previously
-sent `WorkRequest` for multiplex workers. The server may send cancel requests
-for requests that the worker has already responded to, in which case the cancel
-request must be ignored.
-
-Each non-cancel `WorkRequest` message must be answered exactly once, whether or
-not it was cancelled. Once the server has sent a cancel request, the worker may
-respond with a `WorkResponse` with the `request_id` set and the `was_cancelled`
-field set to true. Sending a regular `WorkResponse` is also accepted, but the
-`output` and `exit_code` fields will be ignored.
-
-Once a response has been sent for a `WorkRequest`, the worker must not touch the
-files in its working directory. The server is free to clean up the files,
-including temporary files.
-
-## Making the rule that uses the worker
-
-You'll also need to create a rule that generates actions to be performed by the
-worker. Making a Starlark rule that uses a worker is just like
-[creating any other rule](https://github.com/bazelbuild/examples/tree/master/rules).
-
-In addition, the rule needs to contain a reference to the worker itself, and
-there are some requirements for the actions it produces.
-
-### Referring to the worker
-
-The rule that uses the worker needs to contain a field that refers to the worker
-itself, so you'll need to create an instance of a `*_binary` rule to define
-your worker. If your worker is called `MyWorker.Java`, this might be the
-associated rule:
-
-```python
-java_binary(
-    name = "worker",
-    srcs = ["MyWorker.Java"],
-)
-```
-
-This creates the "worker" label, which refers to the worker binary. You'll then
-define a rule that *uses* the worker. This rule should define an attribute that
-refers to the worker binary.
-
-If the worker binary you built is in a package named "work", which is at the top
-level of the build, this might be the attribute definition:
-
-```python
-"worker": attr.label(
-    default = Label("//work:worker"),
-    executable = True,
-    cfg = "exec",
-)
-```
-
-`cfg = "exec"` indicates that the worker should be built to run on your
-execution platform rather than on the target platform (i.e., the worker is used
-as a tool during the build).
-
-### Work action requirements
-
-The rule that uses the worker creates actions for the worker to perform. These
-actions have a couple of requirements.
-
-* The *"arguments"* field. This takes a list of strings, all but the last of
-  which are arguments passed to the worker upon startup.
The last element in - the "arguments" list is a `flag-file` (@-preceded) argument. Workers read - the arguments from the specified flagfile on a per-WorkRequest basis. Your - rule can write non-startup arguments for the worker to this flagfile. - -* The *"execution-requirements"* field, which takes a dictionary containing - `"supports-workers" : "1"`, `"supports-multiplex-workers" : "1"`, or both. - - The "arguments" and "execution-requirements" fields are required for all - actions sent to workers. Additionally, actions that should be executed by - JSON workers need to include `"requires-worker-protocol" : "json"` in the - execution requirements field. `"requires-worker-protocol" : "proto"` is also - a valid execution requirement, though it’s not required for proto workers, - since they are the default. - - You can also set a `worker-key-mnemonic` in the execution requirements. This - may be useful if you're reusing the executable for multiple action types and - want to distinguish actions by this worker. - -* Temporary files generated in the course of the action should be saved to the - worker's directory. This enables sandboxing. - -Note: To pass an argument starting with a literal `@`, start the argument with -`@@` instead. If an argument is also an external repository label, it will not -be considered a flagfile argument. - -Assuming a rule definition with "worker" attribute described above, in addition -to a "srcs" attribute representing the inputs, an "output" attribute -representing the outputs, and an "args" attribute representing the worker -startup args, the call to `ctx.actions.run` might be: - -```python -ctx.actions.run( - inputs=ctx.files.srcs, - outputs=[ctx.outputs.output], - executable=ctx.executable.worker, - mnemonic="someMnemonic", - execution_requirements={ - "supports-workers" : "1", - "requires-worker-protocol" : "json"}, - arguments=ctx.attr.args + ["@flagfile"] - ) -``` - -For another example, see -[Implementing persistent workers](/remote/persistent#implementation). - -## Examples - -The Bazel code base uses -[Java compiler workers](https://github.com/bazelbuild/bazel/blob/a4251eab6988d6cf4f5e35681fbe2c1b0abe48ef/src/java_tools/buildjar/java/com/google/devtools/build/buildjar/BazelJavaBuilder.java), -in addition to an -[example JSON worker](https://github.com/bazelbuild/bazel/blob/c65f768fec9889bbf1ee934c61d0dc061ea54ca2/src/test/java/com/google/devtools/build/lib/worker/ExampleWorker.java) -that is used in our integration tests. - -You can use their -[scaffolding](https://github.com/bazelbuild/bazel/blob/a4251eab6988d6cf4f5e35681fbe2c1b0abe48ef/src/main/java/com/google/devtools/build/lib/worker/WorkRequestHandler.java) -to make any Java-based tool into a worker by passing in the correct callback. - -For an example of a rule that uses a worker, take a look at Bazel's -[worker integration test](https://github.com/bazelbuild/bazel/blob/22b4dbcaf05756d506de346728db3846da56b775/src/test/shell/integration/bazel_worker_test.sh#L106). - -External contributors have implemented workers in a variety of languages; take a -look at -[Polyglot implementations of Bazel persistent workers](https://github.com/Ubehebe/bazel-worker-examples). -You can -[find many more examples on GitHub](https://github.com/search?q=bazel+workrequest&type=Code)! 
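-
-To tie the earlier fragments together, here is a minimal end-to-end sketch of a
-worker-based rule. All names (`my_compile`, `worker_args`, `--out`, and the
-`//work:worker` target) are illustrative assumptions, not a fixed API:
-
-```python
-def _my_compile_impl(ctx):
-    # Per-request arguments go into a flagfile, referenced by the final
-    # "@"-prefixed argument as described under "Work action requirements".
-    flagfile = ctx.actions.declare_file(ctx.label.name + ".flagfile")
-    ctx.actions.write(
-        output = flagfile,
-        content = "\n".join(
-            [src.path for src in ctx.files.srcs] + ["--out", ctx.outputs.out.path],
-        ),
-    )
-    ctx.actions.run(
-        inputs = ctx.files.srcs + [flagfile],
-        outputs = [ctx.outputs.out],
-        executable = ctx.executable.worker,
-        mnemonic = "MyCompile",
-        execution_requirements = {
-            "supports-workers": "1",
-            "requires-worker-protocol": "json",
-        },
-        # Startup arguments first, then the flagfile reference.
-        arguments = ctx.attr.worker_args + ["@" + flagfile.path],
-    )
-
-my_compile = rule(
-    implementation = _my_compile_impl,
-    attrs = {
-        "srcs": attr.label_list(allow_files = True),
-        "out": attr.output(),
-        "worker_args": attr.string_list(),
-        "worker": attr.label(
-            default = Label("//work:worker"),
-            executable = True,
-            cfg = "exec",
-        ),
-    },
-)
-```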
diff --git a/7.6.1/remote/multiplex.mdx b/7.6.1/remote/multiplex.mdx
deleted file mode 100644
index b4b0a0d..0000000
--- a/7.6.1/remote/multiplex.mdx
+++ /dev/null
@@ -1,113 +0,0 @@
----
-title: 'Multiplex Workers (Experimental Feature)'
----
-
-
-This page describes multiplex workers, how to write multiplex-compatible
-rules, and workarounds for certain limitations.
-
-Caution: Experimental features are subject to change at any time.
-
-_Multiplex workers_ allow Bazel to handle multiple requests with a single worker
-process. For multi-threaded workers, Bazel can use fewer resources to
-achieve the same or better performance. For example, instead of having one
-worker process per worker, Bazel can have four multiplexed workers talking to
-the same worker process, which can then handle requests in parallel. For
-languages like Java and Scala, this saves JVM warm-up time and JIT compilation
-time, and in general it allows using one shared cache between all workers of
-the same type.
-
-## Overview
-
-There are two layers between the Bazel server and the worker process. For certain
-mnemonics that can run processes in parallel, Bazel gets a `WorkerProxy` from
-the worker pool. The `WorkerProxy` forwards requests to the worker process
-sequentially along with a `request_id`; the worker process processes the request
-and sends responses to the `WorkerMultiplexer`. When the `WorkerMultiplexer`
-receives a response, it parses the `request_id` and then forwards the responses
-back to the correct `WorkerProxy`. Just as with non-multiplexed workers, all
-communication is done over standard in/out, but the tool cannot just use
-`stderr` for user-visible output ([see below](#output)).
-
-Each worker has a key. Bazel uses the key's hash code (composed of environment
-variables, the execution root, and the mnemonic) to determine which
-`WorkerMultiplexer` to use. `WorkerProxy`s communicate with the same
-`WorkerMultiplexer` if they have the same hash code. Therefore, assuming
-environment variables and the execution root are the same in a single Bazel
-invocation, each unique mnemonic can only have one `WorkerMultiplexer` and one
-worker process. The total number of workers, including regular workers and
-`WorkerProxy`s, is still limited by `--worker_max_instances`.
-
-## Writing multiplex-compatible rules
-
-The rule's worker process should be multi-threaded to take advantage of
-multiplex workers. Protobuf allows a ruleset to parse a single request even
-though there might be multiple requests piling up in the stream. Whenever the
-worker process parses a request from the stream, it should handle the request in
-a new thread. Because different threads could complete and write to the stream at
-the same time, the worker process needs to make sure the responses are written
-atomically (messages don't overlap). Responses must contain the
-`request_id` of the request they're handling.
-
-### Handling multiplex output
-
-Multiplex workers need to be more careful about handling their output than
-singleplex workers. Anything sent to `stderr` will go into a single log file
-shared among all `WorkerProxy`s of the same type,
-randomly interleaved between concurrent requests. While redirecting `stdout`
-into `stderr` is a good idea, do not collect that output into the `output`
-field of `WorkResponse`, as that could show the user mangled pieces of output.
-If your tool only sends user-oriented output to `stdout` or `stderr`, you will
-need to change that behaviour before you can enable multiplex workers.
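-
-As an illustration, this is a minimal sketch of the threading pattern described
-above, assuming Python and the JSON protocol; `run_tool` is again a
-hypothetical stand-in for the actual work:
-
-```python
-import json
-import sys
-import threading
-
-_stdout_lock = threading.Lock()  # Responses must be written atomically.
-
-def run_tool(arguments):
-    return 0, ""  # Hypothetical per-request work.
-
-def _handle(request):
-    exit_code, message = run_tool(request.get("arguments", []))
-    response = {
-        "exitCode": exit_code,
-        "output": message,
-        "requestId": request.get("requestId", 0),  # Echo the id back.
-    }
-    with _stdout_lock:  # Keep concurrent responses from interleaving.
-        sys.stdout.write(json.dumps(response) + "\n")
-        sys.stdout.flush()
-
-def main():
-    for line in sys.stdin:
-        # Handle each request on its own thread so a slow request
-        # doesn't block the ones piling up behind it in the stream.
-        threading.Thread(target=_handle, args=(json.loads(line),)).start()
-
-if __name__ == "__main__":
-    main()
-```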
-
-## Enabling multiplex workers
-
-Multiplex workers are not enabled by default. A ruleset can turn on multiplex
-workers by using the `supports-multiplex-workers` tag in the
-`execution_requirements` of an action (just like the `supports-workers` tag
-enables regular workers). As is the case when using regular workers, a worker
-strategy needs to be specified, either at the ruleset level (for example,
-`--strategy=[some_mnemonic]=worker`) or generally at the strategy level (for
-example, `--dynamic_local_strategy=worker,standalone`). No additional flags are
-necessary, and `supports-multiplex-workers` takes precedence over
-`supports-workers`, if both are set. You can turn off multiplex workers
-globally by passing `--noworker_multiplex`.
-
-A ruleset is encouraged to use multiplex workers if possible, to reduce memory
-pressure and improve performance. However, multiplex workers are not currently
-compatible with [dynamic execution](/remote/dynamic) unless they
-implement multiplex sandboxing. Attempting to run non-sandboxed multiplex
-workers with dynamic execution will silently use sandboxed
-singleplex workers instead.
-
-## Multiplex sandboxing
-
-Multiplex workers can be sandboxed by adding explicit support for it in the
-worker implementations. While singleplex worker sandboxing can be done by
-running each worker process in its own sandbox, multiplex workers share the
-process working directory between multiple parallel requests. To allow
-sandboxing of multiplex workers, the worker must support reading from and
-writing to a subdirectory specified in each request, instead of directly in
-its working directory.
-
-To support multiplex sandboxing, the worker must use the `sandbox_dir` field
-from the `WorkRequest` and use that as a prefix for all file reads and writes.
-While the `arguments` and `inputs` fields remain unchanged from an unsandboxed
-request, the actual inputs are relative to the `sandbox_dir`. The worker must
-translate file paths found in `arguments` and `inputs` to read from this
-modified path, and must also write all outputs relative to the `sandbox_dir`.
-This includes paths such as '.', as well as paths found in files specified
-in the arguments (such as ["argfile"](https://docs.oracle.com/javase/7/docs/technotes/tools/windows/javac.html#commandlineargfile) arguments).
-
-Once a worker supports multiplex sandboxing, the ruleset can declare this
-support by adding `supports-multiplex-sandboxing` to the
-`execution_requirements` of an action. Bazel will then use multiplex sandboxing
-if the `--experimental_worker_multiplex_sandboxing` flag is passed, or if
-the worker is used with dynamic execution.
-
-The worker files of a sandboxed multiplex worker are still relative to the
-working directory of the worker process. Thus, if a file is
-used both for running the worker and as an input, it must be specified both as
-an input in the flagfile argument as well as in `tools`, `executable`, or
-`runfiles`.
diff --git a/7.6.1/remote/output-directories.mdx b/7.6.1/remote/output-directories.mdx
deleted file mode 100644
index b98d70a..0000000
--- a/7.6.1/remote/output-directories.mdx
+++ /dev/null
@@ -1,144 +0,0 @@
----
-title: 'Output Directory Layout'
----
-
-
-This page covers requirements and layout for output directories.
-
-## Requirements
-
-Requirements for an output directory layout:
-
-* Doesn't collide if multiple users are building on the same box.
-* Supports building in multiple workspaces at the same time.
-* Supports building for multiple target configurations in the same workspace. -* Doesn't collide with any other tools. -* Is easy to access. -* Is easy to clean, even selectively. -* Is unambiguous, even if the user relies on symbolic links when changing into - their client directory. -* All the build state per user should be underneath one directory ("I'd like to - clean all the .o files from all my clients.") - -## Current layout - -The solution that's currently implemented: - -* Bazel must be invoked from a directory containing a repo boundary file, or a - subdirectory thereof. In other words, Bazel must be invoked from inside a - [repository](../external/overview#repository). Otherwise, an error is - reported. -* The _outputRoot_ directory defaults to `${XDG_CACHE_HOME}/bazel` (or - `~/.cache/bazel`, if the `XDG_CACHE_HOME` environment variable is not set) on - Linux, `/private/var/tmp` on macOS, and on Windows it defaults to `%HOME%` if - set, else `%USERPROFILE%` if set, else the result of calling - `SHGetKnownFolderPath()` with the `FOLDERID_Profile` flag set. If the - environment variable `$TEST_TMPDIR` is set, as in a test of Bazel itself, - then that value overrides the default. -* The Bazel user's build state is located beneath `outputRoot/_bazel_$USER`. - This is called the _outputUserRoot_ directory. -* Beneath the `outputUserRoot` directory there is an `install` directory, and in - it is an `installBase` directory whose name is the MD5 hash of the Bazel - installation manifest. -* Beneath the `outputUserRoot` directory, an `outputBase` directory - is also created whose name is the MD5 hash of the path name of the workspace - root. So, for example, if Bazel is running in the workspace root - `/home/user/src/my-project` (or in a directory symlinked to that one), then - an output base directory is created called: - `/home/user/.cache/bazel/_bazel_user/7ffd56a6e4cb724ea575aba15733d113`. You - can also run `echo -n $(pwd) | md5sum` in the workspace root to get the MD5. -* You can use Bazel's `--output_base` startup option to override the default - output base directory. For example, - `bazel --output_base=/tmp/bazel/output build x/y:z`. -* You can also use Bazel's `--output_user_root` startup option to override the - default install base and output base directories. For example: - `bazel --output_user_root=/tmp/bazel build x/y:z`. - -The symlinks for "bazel-<workspace-name>", "bazel-out", "bazel-testlogs", -and "bazel-bin" are put in the workspace directory; these symlinks point to some -directories inside a target-specific directory inside the output directory. -These symlinks are only for the user's convenience, as Bazel itself does not -use them. Also, this is done only if the workspace root is writable. 
-
-## Layout diagram
-
-The directories are laid out as follows:
-
-```
-<workspace-name>/                     <== The workspace root
-  bazel-my-project => <...my-project> <== Symlink to execRoot
-  bazel-out => <...bin>               <== Convenience symlink to outputPath
-  bazel-bin => <...bin>               <== Convenience symlink to most recent written bin dir $(BINDIR)
-  bazel-testlogs => <...testlogs>     <== Convenience symlink to the test logs directory
-
-/home/user/.cache/bazel/              <== Root for all Bazel output on a machine: outputRoot
-  _bazel_$USER/                       <== Top level directory for a given user depends on the user name:
-                                          outputUserRoot
-    install/
-      fba9a2c87ee9589d72889caf082f1029/ <== Hash of the Bazel install manifest: installBase
-        _embedded_binaries/           <== Contains binaries and scripts unpacked from the data section of
-                                          the bazel executable on first run (such as helper scripts and the
-                                          main Java file BazelServer_deploy.jar)
-    7ffd56a6e4cb724ea575aba15733d113/ <== Hash of the client's workspace root (such as
-                                          /home/user/src/my-project): outputBase
-      action_cache/                   <== Action cache directory hierarchy
-                                          This contains the persistent record of the file
-                                          metadata (timestamps, and perhaps eventually also MD5
-                                          sums) used by the FilesystemValueChecker.
-      action_outs/                    <== Action output directory. This contains a file with the
-                                          stdout/stderr for every action from the most recent
-                                          bazel run that produced output.
-      command.log                     <== A copy of the stdout/stderr output from the most
-                                          recent bazel command.
-      external/                       <== The directory that remote repositories are
-                                          downloaded/symlinked into.
-      server/                         <== The Bazel server puts all server-related files (such
-                                          as socket file, logs, etc) here.
-        jvm.out                       <== The debugging output for the server.
-      execroot/                       <== The working directory for all actions. For special
-                                          cases such as sandboxing and remote execution, the
-                                          actions run in a directory that mimics execroot.
-                                          Implementation details, such as where the directories
-                                          are created, are intentionally hidden from the action.
-                                          Every action can access its inputs and outputs relative
-                                          to the execroot directory.
-        _main/                        <== Working tree for the Bazel build & root of symlink forest: execRoot
-          _bin/                       <== Helper tools are linked from or copied to here.
-
-          bazel-out/                  <== All actual output of the build is under here: outputPath
-            local_linux-fastbuild/    <== one subdirectory per unique target BuildConfiguration instance;
-                                          this is currently encoded
-              bin/                    <== Bazel outputs binaries for target configuration here: $(BINDIR)
-                foo/bar/_objs/baz/    <== Object files for a cc_* rule named //foo/bar:baz
-                  foo/bar/baz1.o      <== Object files from source //foo/bar:baz1.cc
-                  other_package/other.o <== Object files from source //other_package:other.cc
-                foo/bar/baz           <== foo/bar/baz might be the artifact generated by a cc_binary named
-                                          //foo/bar:baz
-                foo/bar/baz.runfiles/ <== The runfiles symlink farm for the //foo/bar:baz executable.
-                  MANIFEST
-                  _main/
-                    ...
-              genfiles/               <== Bazel puts generated source for the target configuration here:
-                                          $(GENDIR)
-                foo/bar.h             <== for example, foo/bar.h might be a header file generated by //foo:bargen
-              testlogs/               <== Bazel internal test runner puts test log files here
-                foo/bartest.log       <== for example, foo/bartest.log might be an output of the
-                foo/bartest.status        //foo:bartest test, with foo/bartest.status containing the
-                                          exit status of the test (such as PASSED or FAILED (Exit 1), etc)
-              include/                <== a tree with include symlinks, generated as needed. The
-                                          bazel-include symlinks point to here. This is used for
-                                          linkstamp stuff, etc.
-              host/                   <== BuildConfiguration for build host (user's workstation), for
-                                          building prerequisite tools that will be used in later stages
-                                          of the build (ex: Protocol Compiler)
-  <packages>/                         <== Packages referenced in the build appear as if under a regular workspace
-```
-
-The layout of the `*.runfiles` directories is documented in more detail in the places pointed to by RunfilesSupport.
-
-## `bazel clean`
-
-`bazel clean` does an `rm -rf` on the `outputPath` and the `action_cache`
-directory. It also removes the workspace symlinks. The `--expunge` option
-will clean the entire outputBase.
diff --git a/7.6.1/remote/persistent.mdx b/7.6.1/remote/persistent.mdx
deleted file mode 100644
index 77f65e4..0000000
--- a/7.6.1/remote/persistent.mdx
+++ /dev/null
@@ -1,272 +0,0 @@
----
-title: 'Persistent Workers'
----
-
-
-This page covers how to use persistent workers, the benefits, requirements, and
-how workers affect sandboxing.
-
-A persistent worker is a long-running process started by the Bazel server, which
-functions as a *wrapper* around the actual *tool* (typically a compiler), or is
-the *tool* itself. In order to benefit from persistent workers, the tool must
-support doing a sequence of compilations, and the wrapper needs to translate
-between the tool's API and the request/response format described below. The same
-worker might be called with and without the `--persistent_worker` flag in the
-same build, and is responsible for appropriately starting and talking to the
-tool, as well as shutting down workers on exit. Each worker instance is assigned
-(but not chrooted to) a separate working directory under
-`<outputBase>/bazel-workers`.
-
-Using persistent workers is an
-[execution strategy](/docs/user-manual#execution-strategy) that decreases
-start-up overhead, allows more JIT compilation, and enables caching of, for
-example, the abstract syntax trees in the action execution. This strategy
-achieves these improvements by sending multiple requests to a long-running
-process.
-
-Persistent workers are implemented for multiple languages, including Java,
-[Scala](https://github.com/bazelbuild/rules_scala),
-[Kotlin](https://github.com/bazelbuild/rules_kotlin), and more.
-
-Programs using a NodeJS runtime can use the
-[@bazel/worker](https://www.npmjs.com/package/@bazel/worker) helper library to
-implement the worker protocol.
-
-## Using persistent workers
-
-[Bazel 0.27 and higher](https://blog.bazel.build/2019/06/19/list-strategy.html)
-uses persistent workers by default when executing builds, though remote
-execution takes precedence. For actions that do not support persistent workers,
-Bazel falls back to starting a tool instance for each action. You can explicitly
-set your build to use persistent workers by setting the `worker`
-[strategy](/docs/user-manual#execution-strategy) for the applicable tool
-mnemonics. As a best practice, this example includes specifying `local` as a
-fallback to the `worker` strategy:
-
-```posix-terminal
-bazel build //my:target --strategy=Javac=worker,local
-```
-
-Using the workers strategy instead of the local strategy can boost compilation
-speed significantly, depending on implementation. For Java, builds can be 2–4
-times faster, sometimes more for incremental compilation. Compiling Bazel is
-about 2.5 times as fast with workers. For more details, see the
-"[Choosing number of workers](#number-of-workers)" section.
-
-If you also have a remote build environment that matches your local build
-environment, you can use the experimental
-[*dynamic* strategy](https://blog.bazel.build/2019/02/01/dynamic-spawn-scheduler.html),
-which races a remote execution and a worker execution. To enable the dynamic
-strategy, pass the
-[--experimental_spawn_scheduler](/reference/command-line-reference#flag--experimental_spawn_scheduler)
-flag. This strategy automatically enables workers, so there is no need to
-specify the `worker` strategy, but you can still use `local` or `sandboxed` as
-fallbacks.
-
-## Choosing number of workers
-
-The default number of worker instances per mnemonic is 4, but can be adjusted
-with the
-[`worker_max_instances`](/reference/command-line-reference#flag--worker_max_instances)
-flag. There is a trade-off between making good use of the available CPUs and the
-amount of JIT compilation and cache hits you get. With more workers, more
-targets will pay start-up costs of running non-JITted code and hitting cold
-caches. If you have a small number of targets to build, a single worker may give
-the best trade-off between compilation speed and resource usage (for example,
-see [issue #8586](https://github.com/bazelbuild/bazel/issues/8586)).
-The `worker_max_instances` flag sets the maximum number of worker instances per
-mnemonic and flag set (see below), so in a mixed system you could end up using
-quite a lot of memory if you keep the default value. For incremental builds the
-benefit of multiple worker instances is even smaller.
-
-This graph shows the from-scratch compilation times for Bazel (target
-`//src:bazel`) on a 6-core hyper-threaded Intel Xeon 3.5 GHz Linux workstation
-with 64 GB of RAM. For each worker configuration, five clean builds are run and
-the average of the last four is taken.
-
-![Graph of performance improvements of clean builds](/docs/images/workers-clean-chart.png "Performance improvements of clean builds")
-
-**Figure 1.** Graph of performance improvements of clean builds.
-
-For this configuration, two workers give the fastest compile, though at only 14%
-improvement compared to one worker. One worker is a good option if you want to
-use less memory.
-
-Incremental compilation typically benefits even more. Clean builds are
-relatively rare, but changing a single file between compiles is common, in
-particular in test-driven development. The above example also includes some
-non-Java packaging actions that can overshadow the incremental compile time.
-
-Recompiling the Java sources only
-(`//src/main/java/com/google/devtools/build/lib/bazel:BazelServer_deploy.jar`)
-after changing an internal string constant in
-[AbstractContainerizingSandboxedSpawn.java](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/sandbox/AbstractContainerizingSandboxedSpawn.java)
-gives a 3x speed-up (average of 20 incremental builds with one warmup build
-discarded):
-
-![Graph of performance improvements of incremental builds](/docs/images/workers-incremental-chart.png "Performance improvements of incremental builds")
-
-**Figure 2.** Graph of performance improvements of incremental builds.
-
-The speed-up depends on the change being made. A speed-up of a factor of 6 is
-measured in the above situation when a commonly used constant is changed.
-
-## Modifying persistent workers
-
-You can pass the
-[`--worker_extra_flag`](/reference/command-line-reference#flag--worker_extra_flag)
-flag to specify start-up flags to workers, keyed by mnemonic. For instance,
-passing `--worker_extra_flag=javac=--debug` turns on debugging for Javac only.
-Only one worker flag can be set per use of this flag, and only for one mnemonic.
-Workers are not just created separately for each mnemonic, but also for
-variations in their start-up flags. Each combination of mnemonic and start-up
-flags is combined into a `WorkerKey`, and for each `WorkerKey` up to
-`worker_max_instances` workers may be created. See the next section for how the
-action configuration can also specify set-up flags.
-
-You can use the
-[`--high_priority_workers`](/reference/command-line-reference#flag--high_priority_workers)
-flag to specify a mnemonic that should be run in preference to normal-priority
-mnemonics. This can help prioritize actions that are always in the critical
-path. If there are two or more high priority workers executing requests, all
-other workers are prevented from running. This flag can be used multiple times.
-
-Passing the
-[`--worker_sandboxing`](/reference/command-line-reference#flag--worker_sandboxing)
-flag makes each worker request use a separate sandbox directory for all its
-inputs. Setting up the [sandbox](/docs/sandboxing) takes some extra time,
-especially on macOS, but gives a better correctness guarantee.
-
-The
-[`--worker_quit_after_build`](/reference/command-line-reference#flag--worker_quit_after_build)
-flag is mainly useful for debugging and profiling. This flag forces all workers
-to quit once a build is done. You can also pass
-[`--worker_verbose`](/reference/command-line-reference#flag--worker_verbose) to
-get more output about what the workers are doing. This flag is reflected in the
-`verbosity` field in `WorkRequest`, allowing worker implementations to also be
-more verbose.
-
-Workers store their logs in the `<outputBase>/bazel-workers` directory, for
-example
-`/tmp/_bazel_larsrc/191013354bebe14fdddae77f2679c3ef/bazel-workers/worker-1-Javac.log`.
-The file name includes the worker id and the mnemonic. Since there can be more
-than one `WorkerKey` per mnemonic, you may see more than `worker_max_instances`
-log files for a given mnemonic.
-
-For Android builds, see details at the
-[Android Build Performance page](/docs/android-build-performance).
-
-## Implementing persistent workers
-
-See the [creating persistent workers](/remote/creating) page for more
-information on how to make a worker.
-
-This example shows a Starlark configuration for a worker that uses JSON:
-
-```python
-args_file = ctx.actions.declare_file(ctx.label.name + "_args_file")
-ctx.actions.write(
-    output = args_file,
-    # One argument per line; the worker receives these per-request
-    # arguments through each WorkRequest.
-    content = "\n".join(["-g", "-source", "1.5"] + [src.path for src in ctx.files.srcs]),
-)
-ctx.actions.run(
-    mnemonic = "SomeCompiler",
-    executable = "bin/some_compiler_wrapper",
-    inputs = inputs,
-    outputs = outputs,
-    arguments = [ "-max_mem=4G",  "@%s" % args_file.path],
-    execution_requirements = {
-        "supports-workers" : "1", "requires-worker-protocol" : "json" }
-)
-```
-
-With this definition, the first use of this action would start with executing
-the command line `bin/some_compiler_wrapper -max_mem=4G --persistent_worker`.
-A request to compile `Foo.java` would then look like:
-
-NOTE: While the protocol buffer specification uses "snake case" (`request_id`),
-the JSON protocol uses "camel case" (`requestId`). In this document, we will use
-camel case in the JSON examples, but snake case when talking about the field
-regardless of protocol.
-
-```json
-{
-  "arguments": [ "-g", "-source", "1.5", "Foo.java" ],
-  "inputs": [
-    { "path": "symlinkfarm/input1", "digest": "d49a..." },
-    { "path": "symlinkfarm/input2", "digest": "093d..." }
-  ]
-}
-```
-
-The worker receives this on `stdin` in newline-delimited JSON format (because
-`requires-worker-protocol` is set to JSON). The worker then performs the action,
-and sends a JSON-formatted `WorkResponse` to Bazel on its stdout. Bazel then
-parses this response and manually converts it to a `WorkResponse` proto. To
-communicate with the associated worker using binary-encoded protobuf instead of
-JSON, `requires-worker-protocol` would be set to `proto`, like this:
-
-```
-  execution_requirements = {
-    "supports-workers" : "1",
-    "requires-worker-protocol" : "proto"
-  }
-```
-
-If you do not include `requires-worker-protocol` in the execution requirements,
-Bazel will default the worker communication to use protobuf.
-
-Bazel derives the `WorkerKey` from the mnemonic and the shared flags, so if this
-configuration allowed changing the `max_mem` parameter, a separate worker would
-be spawned for each value used. This can lead to excessive memory consumption if
-too many variations are used.
-
-Each worker can currently only process one request at a time. The experimental
-[multiplex workers](/remote/multiplex) feature allows using multiple
-threads, if the underlying tool is multithreaded and the wrapper is set up to
-understand this.
-
-In
-[this GitHub repo](https://github.com/Ubehebe/bazel-worker-examples),
-you can see example worker wrappers written in Java as well as in Python. If you
-are working in JavaScript or TypeScript, the
-[@bazel/worker package](https://www.npmjs.com/package/@bazel/worker)
-and
-[nodejs worker example](https://github.com/bazelbuild/rules_nodejs/tree/stable/examples/worker)
-might be helpful.
-
-## How do workers affect sandboxing?
-
-Using the `worker` strategy by default does not run the action in a
-[sandbox](/docs/sandboxing), similar to the `local` strategy. You can set the
-`--worker_sandboxing` flag to run all workers inside sandboxes, making sure each
-execution of the tool only sees the input files it's supposed to have. The tool
-may still leak information between requests internally, for instance through a
-cache. Using `dynamic` strategy
-[requires workers to be sandboxed](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/exec/SpawnStrategyRegistry.java).
-
-To allow correct use of compiler caches with workers, a digest is passed along
-with each input file. Thus the compiler or the wrapper can check if the input is
-still valid without having to read the file.
-
-Even when using the input digests to guard against unwanted caching, sandboxed
-workers offer less strict sandboxing than a pure sandbox, because the tool may
-keep other internal state that has been affected by previous requests.
-
-Multiplex workers can only be sandboxed if the worker implementation supports it,
-and this sandboxing must be separately enabled with the
-`--experimental_worker_multiplex_sandboxing` flag. See more details in
-[the design doc](https://docs.google.com/document/d/1ncLW0hz6uDhNvci1dpzfEoifwTiNTqiBEm1vi-bIIRM/edit).
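-
-As a sketch of the digest-based validation described above, a worker wrapper
-might keep a small in-memory cache keyed by path and digest (`recompute` is a
-hypothetical stand-in for the expensive work):
-
-```python
-_cache = {}  # path -> (digest, result)
-
-def lookup(path, digest, recompute):
-    entry = _cache.get(path)
-    if entry and entry[0] == digest:
-        # Digest unchanged since the last request: the input is still
-        # valid, so reuse the result without re-reading the file.
-        return entry[1]
-    result = recompute(path)
-    _cache[path] = (digest, result)
-    return result
-```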
-
-## Further reading
-
-For more information on persistent workers, see:
-
-* [Original persistent workers blog post](https://blog.bazel.build/2015/12/10/java-workers.html)
-* [Haskell implementation description](https://www.tweag.io/blog/2019-09-25-bazel-ghc-persistent-worker-internship/)
-* [Blog post by Mike Morearty](https://medium.com/@mmorearty/how-to-create-a-persistent-worker-for-bazel-7738bba2cabb)
-* [Front End Development with Bazel: Angular/TypeScript and Persistent Workers
-  w/ Asana](https://www.youtube.com/watch?v=0pgERydGyqo)
-* [Bazel strategies explained](https://jmmv.dev/2019/12/bazel-strategies.html)
-* [Informative worker strategy discussion on the bazel-discuss mailing list](https://groups.google.com/forum/#!msg/bazel-discuss/oAEnuhYOPm8/ol7hf4KWJgAJ)
diff --git a/7.6.1/remote/rbe.mdx b/7.6.1/remote/rbe.mdx
deleted file mode 100644
index 75d4a15..0000000
--- a/7.6.1/remote/rbe.mdx
+++ /dev/null
@@ -1,33 +0,0 @@
----
-title: 'Remote Execution Overview'
----
-
-
-This page covers the benefits, requirements, and options for running Bazel
-with remote execution.
-
-By default, Bazel executes builds and tests on your local machine. Remote
-execution of a Bazel build allows you to distribute build and test actions
-across multiple machines, such as a datacenter.
-
-Remote execution provides the following benefits:
-
-* Faster build and test execution through scaling of nodes available
-  for parallel actions
-* A consistent execution environment for a development team
-* Reuse of build outputs across a development team
-
-Bazel uses an open-source
-[gRPC protocol](https://github.com/bazelbuild/remote-apis)
-to allow for remote execution and remote caching.
-
-For a list of commercially supported remote execution services as well as
-self-service tools, see
-[Remote Execution Services](https://www.bazel.build/remote-execution-services.html).
-
-## Requirements
-
-Remote execution of Bazel builds imposes a set of mandatory configuration
-constraints on the build. For more information, see
-[Adapting Bazel Rules for Remote Execution](/remote/rules).
diff --git a/7.6.1/remote/rules.mdx b/7.6.1/remote/rules.mdx
deleted file mode 100644
index 340ab02..0000000
--- a/7.6.1/remote/rules.mdx
+++ /dev/null
@@ -1,180 +0,0 @@
----
-title: 'Adapting Bazel Rules for Remote Execution'
----
-
-
-This page is intended for Bazel users writing custom build and test rules
-who want to understand the requirements for Bazel rules in the context of
-remote execution.
-
-Remote execution allows Bazel to execute actions on a separate platform, such as
-a datacenter. Bazel uses a
-[gRPC protocol](https://github.com/bazelbuild/remote-apis/blob/main/build/bazel/remote/execution/v2/remote_execution.proto)
-for its remote execution. You can try remote execution with
-[bazel-buildfarm](https://github.com/bazelbuild/bazel-buildfarm),
-an open-source project that aims to provide a distributed remote execution
-platform.
-
-This page uses the following terminology when referring to different
-environment types or *platforms*:
-
-* **Host platform** - where Bazel runs.
-* **Execution platform** - where Bazel actions run.
-* **Target platform** - where the build outputs (and some actions) run.
-
-## Overview
-
-When configuring a Bazel build for remote execution, you must follow the
-guidelines described in this page to ensure the build executes remotely
-error-free.
This is due to the nature of remote execution, namely:
-
-* **Isolated build actions.** Build tools do not retain state and dependencies
-  cannot leak between them.
-
-* **Diverse execution environments.** Local build configuration is not always
-  suitable for remote execution environments.
-
-This page describes the issues that can arise when implementing custom Bazel
-build and test rules for remote execution and how to avoid them. It covers the
-following topics:
-
-* [Invoking build tools through toolchain rules](#toolchain-rules)
-* [Managing implicit dependencies](#manage-dependencies)
-* [Managing platform-dependent binaries](#manage-binaries)
-* [Managing configure-style WORKSPACE rules](#manage-workspace-rules)
-
-## Invoking build tools through toolchain rules
-
-A Bazel toolchain rule is a configuration provider that tells a build rule what
-build tools, such as compilers and linkers, to use and how to configure them
-using parameters defined by the rule's creator. A toolchain rule allows build
-and test rules to invoke build tools in a predictable, preconfigured manner
-that's compatible with remote execution. For example, use a toolchain rule
-instead of invoking build tools via the `PATH`, `JAVA_HOME`, or other local
-variables that may not be set to equivalent values (or at all) in the remote
-execution environment.
-
-Toolchain rules currently exist for Bazel build and test rules for
-[Scala](https://github.com/bazelbuild/rules_scala/blob/master/scala/scala_toolchain.bzl),
-[Rust](https://github.com/bazelbuild/rules_rust/blob/main/rust/toolchain.bzl),
-and [Go](https://github.com/bazelbuild/rules_go/blob/master/go/toolchains.rst),
-and new toolchain rules are under way for other languages and tools such as
-[bash](https://docs.google.com/document/d/e/2PACX-1vRCSB_n3vctL6bKiPkIa_RN_ybzoAccSe0ic8mxdFNZGNBJ3QGhcKjsL7YKf-ngVyjRZwCmhi_5KhcX/pub).
-If a toolchain rule does not exist for the tool your rule uses, consider
-[creating a toolchain rule](/extending/toolchains#creating-a-toolchain-rule).
-
-## Managing implicit dependencies
-
-If a build tool can access dependencies across build actions, those actions will
-fail when remotely executed because each remote build action is executed
-separately from others. Some build tools retain state across build actions and
-access dependencies that have not been explicitly included in the tool
-invocation, which will cause remotely executed build actions to fail.
-
-For example, when Bazel instructs a stateful compiler to locally build _foo_,
-the compiler retains references to foo's build outputs. When Bazel then
-instructs the compiler to build _bar_, which depends on _foo_, without
-explicitly stating that dependency in the BUILD file for inclusion in the
-compiler invocation, the action executes successfully as long as the same
-compiler instance executes for both actions (as is typical for local execution).
-However, since in a remote execution scenario each build action executes a
-separate compiler instance, compiler state and _bar_'s implicit dependency on
-_foo_ will be lost and the build will fail.
-
-To help detect and eliminate these dependency problems, Bazel 0.14.1 offers the
-local Docker sandbox, which has the same restrictions for dependencies as remote
-execution. Use the sandbox to prepare your build for remote execution by
-identifying and resolving dependency-related build errors. See [Troubleshooting Bazel Remote Execution with Docker Sandbox](/remote/sandbox)
-for more information.
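-
-For example, a minimal sketch of making such a dependency explicit in a
-`BUILD` file (the `foo` and `bar` targets are illustrative):
-
-```python
-cc_library(
-    name = "foo",
-    srcs = ["foo.cc"],
-    hdrs = ["foo.h"],
-)
-
-cc_library(
-    name = "bar",
-    srcs = ["bar.cc"],
-    # Declaring the dependency explicitly ensures each remotely executed
-    # compile action receives foo's outputs among its inputs.
-    deps = [":foo"],
-)
-```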
- -## Managing platform-dependent binaries - -Typically, a binary built on the host platform cannot safely execute on an -arbitrary remote execution platform due to potentially mismatched dependencies. -For example, the SingleJar binary supplied with Bazel targets the host platform. -However, for remote execution, SingleJar must be compiled as part of the process -of building your code so that it targets the remote execution platform. (See the -[target selection logic](https://github.com/bazelbuild/bazel/blob/130aeadfd660336572c3da397f1f107f0c89aa8d/tools/jdk/BUILD#L115).) - -Do not ship binaries of build tools required by your build with your source code -unless you are sure they will safely run in your execution platform. Instead, do -one of the following: - -* Ship or externally reference the source code for the tool so that it can be - built for the remote execution platform. - -* Pre-install the tool into the remote execution environment (for example, a - toolchain container) if it's stable enough and use toolchain rules to run it - in your build. - -## Managing configure-style WORKSPACE rules - -Bazel's `WORKSPACE` rules can be used for probing the host platform for tools -and libraries required by the build, which, for local builds, is also Bazel's -execution platform. If the build explicitly depends on local build tools and -artifacts, it will fail during remote execution if the remote execution platform -is not identical to the host platform. - -The following actions performed by `WORKSPACE` rules are not compatible with -remote execution: - -* **Building binaries.** Executing compilation actions in `WORKSPACE` rules - results in binaries that are incompatible with the remote execution platform - if different from the host platform. - -* **Installing `pip` packages.** `pip` packages installed via `WORKSPACE` - rules require that their dependencies be pre-installed on the host platform. - Such packages, built specifically for the host platform, will be - incompatible with the remote execution platform if different from the host - platform. - -* **Symlinking to local tools or artifacts.** Symlinks to tools or libraries - installed on the host platform created via `WORKSPACE` rules will cause the - build to fail on the remote execution platform as Bazel will not be able to - locate them. Instead, create symlinks using standard build actions so that - the symlinked tools and libraries are accessible from Bazel's `runfiles` - tree. Do not use [`repository_ctx.symlink`](/rules/lib/builtins/repository_ctx#symlink) - to symlink target files outside of the external repo directory. - -* **Mutating the host platform.** Avoid creating files outside of the Bazel - `runfiles` tree, creating environment variables, and similar actions, as - they may behave unexpectedly on the remote execution platform. - -To help find potential non-hermetic behavior you can use [Workspace rules log](/remote/workspace). - -If an external dependency executes specific operations dependent on the host -platform, you should split those operations between `WORKSPACE` and build -rules as follows: - -* **Platform inspection and dependency enumeration.** These operations are - safe to execute locally via `WORKSPACE` rules, which can check which - libraries are installed, download packages that must be built, and prepare - required artifacts for compilation. 
For remote execution, these rules must - also support using pre-checked artifacts to provide the information that - would normally be obtained during host platform inspection. Pre-checked - artifacts allow Bazel to describe dependencies as if they were local. Use - conditional statements or the `--override_repository` flag for this. - -* **Generating or compiling target-specific artifacts and platform mutation**. - These operations must be executed via regular build rules. Actions that - produce target-specific artifacts for external dependencies must execute - during the build. - -To more easily generate pre-checked artifacts for remote execution, you can use -`WORKSPACE` rules to emit generated files. You can run those rules on each new -execution environment, such as inside each toolchain container, and check the -outputs of your remote execution build in to your source repo to reference. - -For example, for Tensorflow's rules for [`cuda`](https://github.com/tensorflow/tensorflow/blob/master/third_party/gpus/cuda_configure.bzl) -and [`python`](https://github.com/tensorflow/tensorflow/blob/master/third_party/py/python_configure.bzl), -the `WORKSPACE` rules produce the following [`BUILD files`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/third_party/toolchains/cpus/py). -For local execution, files produced by checking the host environment are used. -For remote execution, a [conditional statement](https://github.com/tensorflow/tensorflow/blob/master/third_party/py/python_configure.bzl#L304) -on an environment variable allows the rule to use files that are checked into -the repo. - -The `BUILD` files declare [`genrules`](https://github.com/tensorflow/tensorflow/blob/master/third_party/py/python_configure.bzl#L84) -that can run both locally and remotely, and perform the necessary processing -that was previously done via `repository_ctx.symlink` as shown [here](https://github.com/tensorflow/tensorflow/blob/d1ba01f81d8fa1d0171ba9ce871599063d5c7eb9/third_party/gpus/cuda_configure.bzl#L730). diff --git a/7.6.1/remote/sandbox.mdx b/7.6.1/remote/sandbox.mdx deleted file mode 100644 index cfb9be4..0000000 --- a/7.6.1/remote/sandbox.mdx +++ /dev/null @@ -1,260 +0,0 @@ ---- -title: 'Troubleshooting Bazel Remote Execution with Docker Sandbox' ---- - - - -Bazel builds that succeed locally may fail when executed remotely due to -restrictions and requirements that do not affect local builds. The most common -causes of such failures are described in [Adapting Bazel Rules for Remote Execution](/remote/rules). - -This page describes how to identify and resolve the most common issues that -arise with remote execution using the Docker sandbox feature, which imposes -restrictions upon the build equal to those of remote execution. This allows you -to troubleshoot your build without the need for a remote execution service. - -The Docker sandbox feature mimics the restrictions of remote execution as -follows: - -* **Build actions execute in toolchain containers.** You can use the same - toolchain containers to run your build locally and remotely via a service - supporting containerized remote execution. - -* **No extraneous data crosses the container boundary.** Only explicitly - declared inputs and outputs enter and leave the container, and only after - the associated build action successfully completes. - -* **Each action executes in a fresh container.** A new, unique container is - created for each spawned build action. 
-
-Note: Builds take noticeably more time to complete when the Docker sandbox
-feature is enabled. This is normal.
-
-You can troubleshoot these issues using one of the following methods:
-
-* **[Troubleshooting natively.](#troubleshooting-natively)** With this method,
-  Bazel and its build actions run natively on your local machine. The Docker
-  sandbox feature imposes restrictions upon the build equal to those of remote
-  execution. However, this method will not detect local tools, states, and
-  data leaking into your build, which will cause problems with remote execution.
-
-* **[Troubleshooting in a Docker container.](#troubleshooting-docker-container)**
-  With this method, Bazel and its build actions run inside a Docker container,
-  which allows you to detect tools, states, and data leaking from the local
-  machine into the build in addition to imposing restrictions
-  equal to those of remote execution. This method provides insight into your
-  build even if portions of the build are failing. This method is experimental
-  and not officially supported.
-
-## Prerequisites
-
-Before you begin troubleshooting, do the following if you have not already done so:
-
-* Install Docker and configure the permissions required to run it.
-* Install Bazel 0.14.1 or later. Earlier versions do not support the Docker
-  sandbox feature.
-* Add the [bazel-toolchains](https://releases.bazel.build/bazel-toolchains.html)
-  repo, pinned to the latest release version, to your build's `WORKSPACE` file
-  as described [here](https://releases.bazel.build/bazel-toolchains.html).
-* Add flags to your `.bazelrc` file to enable the feature. Create the file in
-  the root directory of your Bazel project if it does not exist. Flags below
-  are a reference sample. Please see the latest
-  [`.bazelrc`](https://github.com/bazelbuild/bazel-toolchains/tree/master/bazelrc)
-  file in the bazel-toolchains repo and copy the values of the flags defined
-  there for config `docker-sandbox`.
-
-```
-# Docker Sandbox Mode
-build:docker-sandbox --host_javabase=<...>
-build:docker-sandbox --javabase=<...>
-build:docker-sandbox --crosstool_top=<...>
-build:docker-sandbox --experimental_docker_image=<...>
-build:docker-sandbox --spawn_strategy=docker --strategy=Javac=docker --genrule_strategy=docker
-build:docker-sandbox --define=EXECUTOR=remote
-build:docker-sandbox --experimental_docker_verbose
-build:docker-sandbox --experimental_enable_docker_sandbox
-```
-
-Note: The flags referenced in the `.bazelrc` file shown above are configured
-to run within the [`rbe-ubuntu16-04`](https://console.cloud.google.com/launcher/details/google/rbe-ubuntu16-04)
-container.
-
-If your rules require additional tools, do the following:
-
-1. Create a custom Docker container by installing tools using a [Dockerfile](https://docs.docker.com/engine/reference/builder/)
-   and [building](https://docs.docker.com/engine/reference/commandline/build/)
-   the image locally.
-
-2. Replace the value of the `--experimental_docker_image` flag above with the
-   name of your custom container image.
-
-
-## Troubleshooting natively
-
-This method executes Bazel and all of its build actions directly on the local
-machine and is a reliable way to confirm whether your build will succeed when
-executed remotely.
-
-However, with this method, locally installed tools, binaries, and data may leak
-into your build, especially if it uses [configure-style WORKSPACE rules](/remote/rules#manage-workspace-rules).
-Such leaks will cause problems with remote execution; to detect them, [troubleshoot in a Docker container](#troubleshooting-docker-container)
-in addition to troubleshooting natively.
-
-### Step 1: Run the build
-
-1. Add the `--config=docker-sandbox` flag to the Bazel command that executes
-   your build. For example:
-
-   ```posix-terminal
-   bazel --bazelrc=.bazelrc build --config=docker-sandbox <target>
-   ```
-
-2. Run the build and wait for it to complete. The build will run up to four
-   times slower than normal due to the Docker sandbox feature.
-
-You may encounter the following error:
-
-```none {:.devsite-disable-click-to-copy}
-ERROR: 'docker' is an invalid value for docker spawn strategy.
-```
-
-If you do, run the build again with the `--experimental_docker_verbose` flag.
-This flag enables verbose error messages. This error is typically caused by a
-faulty Docker installation or lack of permissions to execute it under the
-current user account. See the [Docker documentation](https://docs.docker.com/install/linux/linux-postinstall/)
-for more information. If problems persist, skip ahead to [Troubleshooting in a Docker container](#troubleshooting-docker-container).
-
-### Step 2: Resolve detected issues
-
-The following are the most commonly encountered issues and their workarounds.
-
-* **A file, tool, binary, or resource referenced by the Bazel runfiles tree is
-  missing.** Confirm that all dependencies of the affected targets have been
-  [explicitly declared](/concepts/dependencies). See
-  [Managing implicit dependencies](/remote/rules#manage-dependencies)
-  for more information.
-
-* **A file, tool, binary, or resource referenced by an absolute path or the `PATH`
-  variable is missing.** Confirm that all required tools are installed within
-  the toolchain container and use [toolchain rules](/extending/toolchains) to properly
-  declare dependencies pointing to the missing resource. See
-  [Invoking build tools through toolchain rules](/remote/rules#invoking-build-tools-through-toolchain-rules)
-  for more information.
-
-* **A binary execution fails.** One of the build rules is referencing a binary
-  incompatible with the execution environment (the Docker container). See
-  [Managing platform-dependent binaries](/remote/rules#manage-binaries)
-  for more information. If you cannot resolve the issue, contact [bazel-discuss@google.com](mailto:bazel-discuss@google.com)
-  for help.
-
-* **A file from `@local_jdk` is missing or causing errors.** The Java binaries
-  on your local machine are leaking into the build while being incompatible with
-  it. Use [`java_toolchain`](/reference/be/java#java_toolchain)
-  in your rules and targets instead of `@local_jdk`. Contact [bazel-discuss@google.com](mailto:bazel-discuss@google.com) if you need further help.
-
-* **Other errors.** Contact [bazel-discuss@google.com](mailto:bazel-discuss@google.com) for help.
-
-## Troubleshooting in a Docker container
-
-With this method, Bazel runs inside a host Docker container, and Bazel's build
-actions execute inside individual toolchain containers spawned by the Docker
-sandbox feature. The sandbox spawns a brand new toolchain container for each
-build action and only one action executes in each toolchain container.
-
-This method provides more granular control of tools installed in the host
-environment.
By separating the execution of the build from the execution of its
-build actions and keeping the installed tooling to a minimum, you can verify
-whether your build has any dependencies on the local execution environment.
-
-### Step 1: Build the container
-
-Note: The commands below are tailored specifically for a `debian:stretch` base.
-For other bases, modify them as necessary.
-
-1. Create a `Dockerfile` that creates the Docker container and installs Bazel
-   with a minimal set of build tools:
-
-   ```
-   FROM debian:stretch
-
-   RUN apt-get update && apt-get install -y apt-transport-https curl software-properties-common git gcc gnupg2 g++ openjdk-8-jdk-headless python-dev zip wget vim
-
-   RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add -
-
-   RUN add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian $(lsb_release -cs) stable"
-
-   RUN apt-get update && apt-get install -y docker-ce
-
-   RUN wget https://releases.bazel.build/<latest Bazel version number>/release/bazel-<latest Bazel version number>-installer-linux-x86_64.sh -O ./bazel-installer.sh && chmod 755 ./bazel-installer.sh
-
-   RUN ./bazel-installer.sh
-   ```
-
-2. Build the container as `bazel_container`:
-
-   ```posix-terminal
-   docker build -t bazel_container - < Dockerfile
-   ```
-
-### Step 2: Start the container
-
-Start the Docker container using the command shown below. In the command,
-substitute the path to the source code on your host that you want to build.
-
-```posix-terminal
-docker run -it \
-  -v /var/run/docker.sock:/var/run/docker.sock \
-  -v /tmp:/tmp \
-  -v <your source code directory>:/src \
-  -w /src \
-  bazel_container \
-  /bin/bash
-```
-
-This command runs the container as root, mapping the docker socket, and mounting
-the `/tmp` directory. This allows Bazel to spawn other Docker containers and to
-use directories under `/tmp` to share files with those containers. Your source
-code is available at `/src` inside the container.
-
-The command intentionally starts from a `debian:stretch` base container that
-includes binaries incompatible with the `rbe-ubuntu16-04` container used as a
-toolchain container. If binaries from the local environment are leaking into the
-toolchain container, they will cause build errors.
-
-### Step 3: Test the container
-
-Run the following commands from inside the Docker container to test it:
-
-```posix-terminal
-docker ps
-
-bazel version
-```
-
-### Step 4: Run the build
-
-Run the build as shown below. The output user is root so that it corresponds to
-a directory that is accessible with the same absolute path from inside the host
-container in which Bazel runs, from the toolchain containers spawned by the Docker
-sandbox feature in which Bazel's build actions are running, and from the local
-machine on which the host and action containers run.
-
-```posix-terminal
-bazel --output_user_root=/tmp/bazel_docker_root --bazelrc=.bazelrc \
-    build --config=docker-sandbox <target>
-```
-
-### Step 5: Resolve detected issues
-
-You can resolve build failures as follows:
-
-* If the build fails with an "out of disk space" error, you can increase this
-  limit by starting the host container with the flag `--memory=XX` where `XX`
-  is the allocated disk space in gigabytes. This is experimental and may
-  result in unpredictable behavior.
-
-* If the build fails during the analysis or loading phases, one or more of
-  your build rules declared in the WORKSPACE file are not compatible with
-  remote execution.
-  See [Adapting Bazel Rules for Remote Execution](/remote/rules)
-  for possible causes and workarounds.
-
-* If the build fails for any other reason, see the troubleshooting steps in [Step 2: Resolve detected issues](#start-container).
diff --git a/7.6.1/remote/workspace.mdx b/7.6.1/remote/workspace.mdx
deleted file mode 100644
index ae0aea5..0000000
--- a/7.6.1/remote/workspace.mdx
+++ /dev/null
@@ -1,127 +0,0 @@
----
-title: 'Finding Non-Hermetic Behavior in WORKSPACE Rules'
----
-
-
-
-In the following, a host machine is the machine where Bazel runs.
-
-When using remote execution, the actual build and/or test steps are not
-happening on the host machine, but are instead sent off to the remote execution
-system. However, the steps involved in resolving workspace rules happen
-on the host machine. If your workspace rules access information about the
-host machine for use during execution, your build is likely to break due to
-incompatibilities between the environments.
-
-As part of [adapting Bazel rules for remote
-execution](/remote/rules), you need to find such workspace rules
-and fix them. This page describes how to find potentially problematic workspace
-rules using the workspace log.
-
-
-## Finding non-hermetic rules
-
-[Workspace rules](/reference/be/workspace) allow the developer to add dependencies to
-external workspaces, but they are rich enough to allow arbitrary processing to
-happen in the process. All related commands run locally and can be a
-potential source of non-hermeticity. Usually non-hermetic behavior is
-introduced through
-[`repository_ctx`](/rules/lib/builtins/repository_ctx), which allows interacting
-with the host machine.
-
-Starting with Bazel 0.18, you can get a log of some potentially non-hermetic
-actions by adding the flag `--experimental_workspace_rules_log_file=[PATH]` to
-your Bazel command. Here `[PATH]` is a filename under which the log will be
-created.
-
-Things to note:
-
-* The log captures the events as they are executed. If some steps are
-  cached, they will not show up in the log, so to get a full result, don't
-  forget to run `bazel clean --expunge` beforehand.
-
-* Sometimes functions might be re-executed, in which case the related
-  events will show up in the log multiple times.
-
-* Workspace rules currently only log Starlark events.
-
-  Note: These particular rules do not cause hermeticity concerns as long
-  as a hash is specified.
-
-To find what was executed during workspace initialization:
-
-1. Run `bazel clean --expunge`. This command will clean your local cache and
-   any cached repositories, ensuring that all initialization will be re-run.
-
-2. Add `--experimental_workspace_rules_log_file=/tmp/workspacelog` to your
-   Bazel command and run the build.
-
-   This produces a binary proto file listing messages of type
-   [WorkspaceEvent](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/bazel/debug/workspace_log.proto?q=WorkspaceEvent).
-
-3. Download the Bazel source code and navigate to the Bazel folder by using
-   the command below. You need the source code to be able to parse the
-   workspace log with the
-   [workspacelog parser](https://source.bazel.build/bazel/+/master:src/tools/workspacelog/).
-
-   ```posix-terminal
-   git clone https://github.com/bazelbuild/bazel.git
-
-   cd bazel
-   ```
-
-4. In the Bazel source code repo, convert the whole workspace log to text.
-
-   ```posix-terminal
-   bazel build src/tools/workspacelog:parser
-
-   bazel-bin/src/tools/workspacelog/parser --log_path=/tmp/workspacelog > /tmp/workspacelog.txt
-   ```
-
-5. The output may be quite verbose and include output from built-in Bazel
-   rules.
-
-   To exclude specific rules from the output, use the `--exclude_rule` option.
-   For example:
-
-   ```posix-terminal
-   bazel build src/tools/workspacelog:parser
-
-   bazel-bin/src/tools/workspacelog/parser --log_path=/tmp/workspacelog \
-       --exclude_rule "//external:local_config_cc" \
-       --exclude_rule "//external:dep" > /tmp/workspacelog.txt
-   ```
-
-6. Open `/tmp/workspacelog.txt` and check for unsafe operations.
-
-The log consists of
-[WorkspaceEvent](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/bazel/debug/workspace_log.proto?q=WorkspaceEvent)
-messages outlining certain potentially non-hermetic actions performed on a
-[`repository_ctx`](/rules/lib/builtins/repository_ctx).
-
-The actions that have been highlighted as potentially non-hermetic are as follows:
-
-* `execute`: executes an arbitrary command on the host environment. Check if
-  these may introduce any dependencies on the host environment.
-
-* `download`, `download_and_extract`: to ensure hermetic builds, make sure
-  that `sha256` is specified.
-
-* `file`, `template`: this is not non-hermetic in itself, but may be a mechanism
-  for introducing dependencies on the host environment into the repository.
-  Ensure that you understand where the input comes from, and that it does not
-  depend on the host environment.
-
-* `os`: this is not non-hermetic in itself, but an easy way to get dependencies
-  on the host environment. A hermetic build would generally not call this.
-  In evaluating whether your usage is hermetic, keep in mind that this is
-  running on the host and not on the workers. Getting environment specifics
-  from the host is generally not a good idea for remote builds.
-
-* `symlink`: this is normally safe, but look for red flags. Any symlinks to
-  outside the repository or to an absolute path would cause problems on the
-  remote worker. If the symlink is created based on host machine properties
-  it would probably be problematic as well.
-
-* `which`: checking for programs installed on the host is usually problematic
-  since the workers may have different configurations.
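-
-For illustration, here is a small sketch of a repository rule this log would
-flag (the rule and file names are hypothetical, not from an actual ruleset):
-it calls `which` and reads `os` on the host, and then bakes the results into
-the repository via `file`:
-
-```python
-def _nonhermetic_repo_impl(repository_ctx):
-    # Both of these read host state, so they appear in the workspace log.
-    python = repository_ctx.which("python3")
-    os_name = repository_ctx.os.name
-
-    # `file` is not non-hermetic by itself, but here it records host
-    # details that will differ on the remote workers.
-    repository_ctx.file("BUILD", "exports_files(['host_info.txt'])")
-    repository_ctx.file("host_info.txt", "os=%s python=%s\n" % (os_name, python))
-
-nonhermetic_repo = repository_rule(implementation = _nonhermetic_repo_impl)
-```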
diff --git a/7.6.1/rules/bzl-style.mdx b/7.6.1/rules/bzl-style.mdx
deleted file mode 100644
index 36f14b7..0000000
--- a/7.6.1/rules/bzl-style.mdx
+++ /dev/null
@@ -1,211 +0,0 @@
----
-title: '.bzl style guide'
----
-
-
-
-This page covers basic style guidelines for Starlark and also includes
-information on macros and rules.
-
-[Starlark](/rules/language) is a
-language that defines how software is built, and as such it is both a
-programming and a configuration language.
-
-You will use Starlark to write `BUILD` files, macros, and build rules. Macros and
-rules are essentially meta-languages - they define how `BUILD` files are written.
-`BUILD` files are intended to be simple and repetitive.
-
-All software is read more often than it is written. This is especially true for
-Starlark, as engineers read `BUILD` files to understand dependencies of their
-targets and details of their builds. This reading will often happen in passing,
-in a hurry, or in parallel to accomplishing some other task. Consequently,
-simplicity and readability are very important so that users can parse and
-comprehend `BUILD` files quickly.
-
-When a user opens a `BUILD` file, they quickly want to know the list of targets in
-the file; or review the list of sources of that C++ library; or remove a
-dependency from that Java binary. Each time you add a layer of abstraction, you
-make it harder for a user to do these tasks.
-
-`BUILD` files are also analyzed and updated by many different tools. Tools may not
-be able to edit your `BUILD` file if it uses abstractions. Keeping your `BUILD`
-files simple will allow you to get better tooling. As a code base grows, it
-becomes increasingly common to make changes across many `BUILD` files in order to
-update a library or do a cleanup.
-
-Important: Do not create a variable or macro just to avoid some amount of
-repetition in `BUILD` files. Your `BUILD` file should be easily readable both by
-developers and tools. The
-[DRY](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself) principle doesn't
-really apply here.
-
-## General advice
-
-* Use [Buildifier](https://github.com/bazelbuild/buildtools/tree/master/buildifier#linter)
-  as a formatter and linter.
-* Follow [testing guidelines](/rules/testing).
-
-## Style
-
-### Python style
-
-When in doubt, follow the
-[PEP 8 style guide](https://www.python.org/dev/peps/pep-0008/) where possible.
-In particular, use four rather than two spaces for indentation to follow the
-Python convention.
-
-Since
-[Starlark is not Python](/rules/language#differences-with-python),
-some aspects of Python style do not apply. For example, PEP 8 advises that
-comparisons to singletons be done with `is`, which is not an operator in
-Starlark.
-
-
-### Docstring
-
-Document files and functions using [docstrings](https://github.com/bazelbuild/buildtools/blob/master/WARNINGS.md#function-docstring).
-Use a docstring at the top of each `.bzl` file, and a docstring for each public
-function.
-
-### Document rules and aspects
-
-Rules and aspects, along with their attributes, as well as providers and their
-fields, should be documented using the `doc` argument.
-
-### Naming convention
-
-* Variables and function names use lowercase with words separated by
-  underscores (`[a-z][a-z0-9_]*`), such as `cc_library`.
-* Top-level private values start with one underscore. Bazel enforces that
-  private values cannot be used from other files. Local variables should not
-  use the underscore prefix.
-
-### Line length
-
-As in `BUILD` files, there is no strict line length limit as labels can be long.
-When possible, try to use at most 79 characters per line (following Python's
-style guide, [PEP 8](https://www.python.org/dev/peps/pep-0008/)). This guideline
-should not be enforced strictly: editors should display more than 80 columns,
-automated changes will frequently introduce longer lines, and humans shouldn't
-spend time splitting lines that are already readable.
-
-### Keyword arguments
-
-In keyword arguments, spaces around the equal sign are preferred:
-
-```python
-def fct(name, srcs):
-    filtered_srcs = my_filter(source = srcs)
-    native.cc_library(
-        name = name,
-        srcs = filtered_srcs,
-        testonly = True,
-    )
-```
-
-### Boolean values
-
-Prefer values `True` and `False` (rather than `1` and `0`) for boolean values
-(such as when using a boolean attribute in a rule).
-
-### Use print only for debugging
-
-Do not use the `print()` function in production code; it is only intended for
-debugging, and will spam all direct and indirect users of your `.bzl` file.
-The only exception is that you may submit code that uses `print()` if it is disabled
-by default and can only be enabled by editing the source -- for example, if all
-uses of `print()` are guarded by `if DEBUG:` where `DEBUG` is hardcoded to
-`False`. Be mindful of whether these statements are useful enough to justify
-their impact on readability.
-
-## Macros
-
-A macro is a function which instantiates one or more rules during the loading
-phase. In general, use rules whenever possible instead of macros. The build
-graph seen by the user is not the same as the one used by Bazel during the
-build - macros are expanded *before Bazel does any build graph analysis.*
-
-Because of this, when something goes wrong, the user will need to understand
-your macro's implementation to troubleshoot build problems. Additionally, `bazel
-query` results can be hard to interpret because targets shown in the results
-come from macro expansion. Finally, aspects are not aware of macros, so tooling
-depending on aspects (IDEs and others) might fail.
-
-A safe use for macros is for defining additional targets intended to be
-referenced directly at the Bazel CLI or in BUILD files: In that case, only the
-*end users* of those targets need to know about them, and any build problems
-introduced by macros are never far from their usage.
-
-For macros that define generated targets (implementation details of the macro
-which are not supposed to be referred to at the CLI or depended on by targets
-not instantiated by that macro), follow these best practices (a sketch follows
-the list):
-
-* A macro should take a `name` argument and define a target with that name.
-  That target becomes that macro's *main target*.
-* Generated targets, that is all other targets defined by a macro, should:
-    * Have their names prefixed by `<name>` or `_<name>`. For example, using
-      `name = '%s_bar' % (name)`.
-    * Have restricted visibility (`//visibility:private`), and
-    * Have a `manual` tag to avoid expansion in wildcard targets (`:all`,
-      `...`, `:*`, etc.).
-* The `name` should only be used to derive names of targets defined by the
-  macro, and not for anything else. For example, don't use the name to derive
-  a dependency or input file that is not generated by the macro itself.
-* All the targets created in the macro should be coupled in some way to the
-  main target.
-* Keep the parameter names in the macro consistent. If a parameter is passed
-  as an attribute value to the main target, keep its name the same. If a macro
-  parameter serves the same purpose as a common rule attribute, such as
-  `deps`, name it as you would the attribute (see below).
-* When calling a macro, use only keyword arguments. This is consistent with
-  rules, and greatly improves readability.
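-
-Here is a minimal sketch of these conventions; the macro name and the choice of
-`genrule` and `filegroup` are illustrative, not prescribed by this guide:
-
-```python
-def my_macro(name, src, visibility = None):
-    # Generated target: prefixed with `name`, private, and tagged "manual"
-    # so that wildcard patterns such as :all or ... do not expand it.
-    native.genrule(
-        name = "%s_gen" % name,
-        srcs = [src],
-        outs = ["%s_gen.txt" % name],
-        cmd = "cp $< $@",
-        visibility = ["//visibility:private"],
-        tags = ["manual"],
-    )
-
-    # Main target: named after the macro's `name` argument and coupled
-    # to the generated target above.
-    native.filegroup(
-        name = name,
-        srcs = [src, ":%s_gen" % name],
-        visibility = visibility,
-    )
-```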
-
-Engineers often write macros when the Starlark API of relevant rules is
-insufficient for their specific use case, regardless of whether the rule is
-defined within Bazel in native code, or in Starlark. If you're facing this
-problem, ask the rule author if they can extend the API to accomplish your
-goals.
-
-As a rule of thumb, the more macros resemble the rules, the better.
-
-See also [macros](/extending/macros#conventions).
-
-## Rules
-
-* Rules, aspects, and their attributes should use lower_case names ("snake
-  case").
-* Rule names are nouns that describe the main kind of artifact produced by the
-  rule, from the point of view of its dependencies (or for leaf rules, the
-  user). This is not necessarily a file suffix. For instance, a rule that
-  produces C++ artifacts meant to be used as Python extensions might be called
-  `py_extension`. For most languages, typical rules include:
-    * `*_library` - a compilation unit or "module".
-    * `*_binary` - a target producing an executable or a deployment unit.
-    * `*_test` - a test target. This can include multiple tests. Expect all
-      tests in a `*_test` target to be variations on the same theme, for
-      example, testing a single library.
-    * `*_import` - a target encapsulating a pre-compiled artifact, such as a
-      `.jar` or a `.dll`, that is used during compilation.
-* Use consistent names and types for attributes. Some generally applicable
-  attributes include:
-    * `srcs`: `label_list`, allowing files: source files, typically
-      human-authored.
-    * `deps`: `label_list`, typically *not* allowing files: compilation
-      dependencies.
-    * `data`: `label_list`, allowing files: data files, such as test data.
-    * `runtime_deps`: `label_list`: runtime dependencies that are not needed
-      for compilation.
-* For any attributes with non-obvious behavior (for example, string templates
-  with special substitutions, or tools that are invoked with specific
-  requirements), provide documentation using the `doc` keyword argument to the
-  attribute's declaration (`attr.label_list()` or similar).
-* Rule implementation functions should almost always be private functions
-  (named with a leading underscore). A common style is to give the
-  implementation function for `myrule` the name `_myrule_impl`.
-* Pass information between your rules using a well-defined
-  [provider](/extending/rules#providers) interface. Declare and document provider
-  fields (see the sketch after this list).
-* Design your rule with extensibility in mind. Consider that other rules might
-  want to interact with your rule, access your providers, and reuse the
-  actions you create.
-* Follow [performance guidelines](/rules/performance) in your rules.
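-
-For example, a minimal sketch of such a provider interface; `FooInfo` and the
-rule are illustrative names, not an established API:
-
-```python
-FooInfo = provider(
-    doc = "Information exported by foo_library rules.",
-    fields = {
-        "transitive_sources": "depset of File: sources of this target and its deps.",
-    },
-)
-
-def _foo_library_impl(ctx):
-    # Expose a documented, well-defined interface to dependent rules.
-    return [FooInfo(
-        transitive_sources = depset(
-            direct = ctx.files.srcs,
-            transitive = [dep[FooInfo].transitive_sources for dep in ctx.attr.deps],
-        ),
-    )]
-```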
diff --git a/7.6.1/rules/challenges.mdx b/7.6.1/rules/challenges.mdx
deleted file mode 100644
index 10ff737..0000000
--- a/7.6.1/rules/challenges.mdx
+++ /dev/null
@@ -1,223 +0,0 @@
----
-title: 'Challenges of Writing Rules'
----
-
-
-
-This page gives a high-level overview of the specific issues and challenges
-of writing efficient Bazel rules.
-
-## Summary Requirements
-
-* Assumption: Aim for Correctness, Throughput, Ease of Use & Latency
-* Assumption: Large Scale Repositories
-* Assumption: BUILD-like Description Language
-* Historic: Hard Separation between Loading, Analysis, and Execution is
-  Outdated, but still affects the API
-* Intrinsic: Remote Execution and Caching are Hard
-* Intrinsic: Using Change Information for Correct and Fast Incremental Builds
-  requires Unusual Coding Patterns
-* Intrinsic: Avoiding Quadratic Time and Memory Consumption is Hard
-
-## Assumptions
-
-Here are some assumptions made about the build system, such as the need for
-correctness, ease of use, throughput, and large scale repositories. The
-following sections address these assumptions and offer guidelines to ensure
-rules are written in an effective manner.
-
-### Aim for correctness, throughput, ease of use & latency
-
-We assume that the build system needs to be first and foremost correct with
-respect to incremental builds. For a given source tree, the output of the
-same build should always be the same, regardless of what the output tree looks
-like. In the first approximation, this means Bazel needs to know every single
-input that goes into a given build step, such that it can rerun that step if any
-of the inputs change. There are limits to how correct Bazel can get, as it leaks
-some information such as the date/time of the build, and ignores certain types of
-changes such as changes to file attributes. [Sandboxing](/docs/sandboxing)
-helps ensure correctness by preventing reads of undeclared input files. Besides
-the intrinsic limits of the system, there are a few known correctness issues,
-most of which are related to Fileset or the C++ rules, which are both hard
-problems. We have long-term efforts to fix these.
-
-The second goal of the build system is to have high throughput; we are
-permanently pushing the boundaries of what can be done within the current
-machine allocation for a remote execution service. If the remote execution
-service gets overloaded, nobody can get work done.
-
-Ease of use comes next. Of multiple correct approaches with the same (or
-similar) footprint of the remote execution service, we choose the one that is
-easier to use.
-
-Latency denotes the time it takes from starting a build to getting the intended
-result, whether that is a test log from a passing or failing test, or an error
-message that a `BUILD` file has a typo.
-
-Note that these goals often overlap; latency is as much a function of throughput
-of the remote execution service as correctness is relevant for ease of use.
-
-### Large scale repositories
-
-The build system needs to operate at the scale of large repositories where large
-scale means that the repository does not fit on a single hard drive, so it is
-impossible to do a full checkout on virtually all developer machines. A
-medium-sized build will need to read and parse tens of thousands of `BUILD`
-files, and evaluate hundreds of thousands of globs. While it is theoretically
-possible to read all `BUILD` files on a single machine, we have not yet been
-able to do so within a reasonable amount of time and memory. As such, it is
-critical that `BUILD` files can be loaded and parsed independently.
-
-### BUILD-like description language
-
-In this context, we assume a configuration language that is
-roughly similar to `BUILD` files in declaration of library and binary rules
-and their interdependencies. `BUILD` files can be read and parsed independently,
-and we avoid even looking at source files whenever we can (except for
-existence).
-
-## Historic
-
-There are differences between Bazel versions that cause challenges, and some
-of these are outlined in the following sections.
-
-### Hard separation between loading, analysis, and execution is outdated but still affects the API
-
-Technically, it is sufficient for a rule to know the input and output files of
-an action just before the action is sent to remote execution. However, the
-original Bazel code base had a strict separation of loading packages, then
-analyzing rules using a configuration (command-line flags, essentially), and
-only then running any actions. This distinction is still part of the rules API
-today, even though the core of Bazel no longer requires it (more details below).
-
-That means that the rules API requires a declarative description of the rule
-interface (what attributes it has, types of attributes). There are some
-exceptions where the API allows custom code to run during the loading phase to
-compute implicit names of output files and implicit values of attributes.
-For example, a `java_library` rule named `foo` implicitly generates an output named
-`libfoo.jar`, which can be referenced from other rules in the build graph.
-
-Furthermore, the analysis of a rule cannot read any source files or inspect the
-output of an action; instead, it needs to generate a partial directed bipartite
-graph of build steps and output file names that is only determined from the rule
-itself and its dependencies.
-
-## Intrinsic
-
-There are some intrinsic properties that make writing rules challenging, and
-some of the most common ones are described in the following sections.
-
-### Remote execution and caching are hard
-
-Remote execution and caching improve build times in large repositories by
-roughly two orders of magnitude compared to running the build on a single
-machine. However, the scale at which it needs to perform is staggering: Google's
-remote execution service is designed to handle a huge number of requests per
-second, and the protocol carefully avoids unnecessary roundtrips as well as
-unnecessary work on the service side.
-
-At this time, the protocol requires that the build system knows all inputs to a
-given action ahead of time; the build system then computes a unique action
-fingerprint, and asks the scheduler for a cache hit. If a cache hit is found,
-the scheduler replies with the digests of the output files; the files themselves are
-addressed by digest later on. However, this imposes restrictions on the Bazel
-rules, which need to declare all input files ahead of time.
-
-### Using change information for correct and fast incremental builds requires unusual coding patterns
-
-Above, we argued that in order to be correct, Bazel needs to know all the input
-files that go into a build step in order to detect whether that build step is
-still up-to-date. The same is true for package loading and rule analysis, and we
-have designed [Skyframe](/reference/skyframe) to handle this
-in general. Skyframe is a graph library and evaluation framework that takes a
-goal node (such as 'build //foo with these options'), and breaks it down into
-its constituent parts, which are then evaluated and combined to yield this
-result. As part of this process, Skyframe reads packages, analyzes rules, and
-executes actions.
-
-At each node, Skyframe tracks exactly which nodes any given node used to compute
-its own output, all the way from the goal node down to the input files (which
-are also Skyframe nodes). Having this graph explicitly represented in memory
-allows the build system to identify exactly which nodes are affected by a given
-change to an input file (including creation or deletion of an input file), doing
-the minimal amount of work to restore the output tree to its intended state.
-
-As part of this, each node performs a dependency discovery process. Each
-node can declare dependencies, and then use the contents of those dependencies
-to declare even further dependencies. In principle, this maps well to a
-thread-per-node model. However, medium-sized builds contain hundreds of
-thousands of Skyframe nodes, and that many threads isn't easily possible with
-current Java technology (and for historical reasons, we're currently tied to
-using Java, so no lightweight threads and no continuations).
-
-Instead, Bazel uses a fixed-size thread pool. However, that means that if a node
-declares a dependency that isn't available yet, we may have to abort that
-evaluation and restart it (possibly in another thread), when the dependency is
-available.
-This, in turn, means that nodes should not do this excessively; a
-node that declares N dependencies serially can potentially be restarted N times,
-costing O(N^2) time. Instead, we aim for up-front bulk declaration of
-dependencies, which sometimes requires reorganizing the code, or even splitting
-a node into multiple nodes to limit the number of restarts.
-
-Note that this technology isn't currently available in the rules API; instead,
-the rules API is still defined using the legacy concepts of loading, analysis,
-and execution phases. However, a fundamental restriction is that all accesses to
-other nodes have to go through the framework so that it can track the
-corresponding dependencies. Regardless of the language in which the build system
-is implemented or in which the rules are written (they don't have to be the
-same), rule authors must not use standard libraries or patterns that bypass
-Skyframe. For Java, that means avoiding `java.io.File` as well as any form of
-reflection, and any library that does either. Libraries that support dependency
-injection of these low-level interfaces still need to be set up correctly for
-Skyframe.
-
-This strongly suggests not exposing rule authors to a full language runtime
-in the first place. The danger of accidental use of such APIs is just too big -
-several Bazel bugs in the past were caused by rules using unsafe APIs, even
-though the rules were written by the Bazel team or other domain experts.
-
-### Avoiding quadratic time and memory consumption is hard
-
-To make matters worse, apart from the requirements imposed by Skyframe, the
-historical constraints of using Java, and the outdatedness of the rules API,
-accidentally introducing quadratic time or memory consumption is a fundamental
-problem in any build system based on library and binary rules. There are two
-very common patterns that introduce quadratic memory consumption (and therefore
-quadratic time consumption).
-
-1. Chains of Library Rules
-
-Consider the case of a chain of library rules: A depends on B, which depends on
-C, and so on. Then, we want to compute some property over the transitive closure of
-these rules, such as the Java runtime classpath, or the C++ linker command for
-each library. Naively, we might take a standard list implementation; however,
-this already introduces quadratic memory consumption: the first library
-contains one entry on the classpath, the second two, the third three, and so
-on, for a total of 1+2+3+...+N = O(N^2) entries.
-
-2. Binary Rules Depending on the Same Library Rules
-
-Consider the case where a set of binaries depends on the same library
-rules — such as when you have a number of test rules that test the same
-library code. Let's say out of N rules, half the rules are binary rules, and
-the other half library rules. Now consider that each binary makes a copy of
-some property computed over the transitive closure of library rules, such as
-the Java runtime classpath, or the C++ linker command line. For example, it
-could expand the command line string representation of the C++ link action. N/2
-copies of N/2 elements is O(N^2) memory.
-
-#### Custom collections classes to avoid quadratic complexity
-
-Bazel is heavily affected by both of these scenarios, so we introduced a set of
-custom collection classes that effectively compress the information in memory by
-avoiding the copy at each step.
-Almost all of these data structures have set semantics, so we called the
-resulting type
-[depset](/rules/lib/depset)
-(also known as `NestedSet` in the internal implementation). The majority of
-changes to reduce Bazel's memory consumption over the past several years were
-changes to use depsets instead of whatever was previously used.
-
-Unfortunately, usage of depsets does not automatically solve all the issues;
-in particular, even just iterating over a depset in each rule re-introduces
-quadratic time consumption. Internally, `NestedSet` also has some helper methods
-to facilitate interoperability with normal collections classes; unfortunately,
-accidentally passing a `NestedSet` to one of these methods leads to copying
-behavior, and reintroduces quadratic memory consumption.
diff --git a/7.6.1/rules/deploying.mdx b/7.6.1/rules/deploying.mdx
deleted file mode 100644
index f658dd8..0000000
--- a/7.6.1/rules/deploying.mdx
+++ /dev/null
@@ -1,277 +0,0 @@
----
-title: 'Deploying Rules'
----
-
-
-
-This page is for rule writers who are planning to make their rules available
-to others.
-
-We recommend starting a new ruleset from the
-[template repository](https://github.com/bazel-contrib/rules-template).
-That template follows the recommendations below, includes API documentation generation,
-and sets up a CI/CD pipeline to make it trivial to distribute your ruleset.
-
-## Hosting and naming rules
-
-New rules should go into their own GitHub repository under your organization.
-Start a thread on [GitHub](https://github.com/bazelbuild/bazel/discussions)
-if you feel like your rules belong in the [bazelbuild](https://github.com/bazelbuild)
-organization.
-
-Repository names for Bazel rules are standardized on the following format:
-`$ORGANIZATION/rules_$NAME`.
-See [examples on GitHub](https://github.com/search?q=rules+bazel&type=Repositories).
-For consistency, you should follow this same format when publishing your Bazel rules.
-
-Make sure to use a descriptive GitHub repository description and `README.md`
-title, for example:
-
-* Repository name: `bazelbuild/rules_go`
-* Repository description: *Go rules for Bazel*
-* Repository tags: `golang`, `bazel`
-* `README.md` header: *Go rules for [Bazel](https://bazel.build)*
-(note the link to https://bazel.build which will guide users who are unfamiliar
-with Bazel to the right place)
-
-Rules can be grouped either by language (such as Scala), runtime platform
-(such as Android), or framework (such as Spring).
-
-## Repository content
-
-Every rule repository should have a certain layout so that users can quickly
-understand new rules.
-
-For example, when writing new rules for the (make-believe)
-`mockascript` language, the rule repository would have the following structure:
-
-```
-/
-    LICENSE
-    README
-    WORKSPACE
-    mockascript/
-        constraints/
-            BUILD
-        runfiles/
-            BUILD
-            runfiles.mocs
-        BUILD
-        defs.bzl
-    tests/
-        BUILD
-        some_test.sh
-        another_test.py
-    examples/
-        BUILD
-        bin.mocs
-        lib.mocs
-        test.mocs
-```
-
-### WORKSPACE
-
-In the project's `WORKSPACE`, you should define the name that users will use
-to reference your rules. If your rules belong to the
-[bazelbuild](https://github.com/bazelbuild) organization, you must use
-`rules_<lang>` (such as `rules_mockascript`). Otherwise, you should name your
-repository `<org>_rules_<lang>` (such as `build_stack_rules_proto`).
-Please
-start a thread on [GitHub](https://github.com/bazelbuild/bazel/discussions)
-if you feel like your rules should follow the convention for rules in the
-[bazelbuild](https://github.com/bazelbuild) organization.
-
-In the following sections, assume the repository belongs to the
-[bazelbuild](https://github.com/bazelbuild) organization.
-
-```
-workspace(name = "rules_mockascript")
-```
-
-### README
-
-At the top level, there should be a `README` that contains (at least) what
-users will need to copy-paste into their `WORKSPACE` file to use your rule.
-In general, this will be a `http_archive` pointing to your GitHub release and
-a macro call that downloads/configures any tools your rule needs. For example,
-for the [Go
-rules](https://github.com/bazelbuild/rules_go#setup), this
-looks like:
-
-```
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-http_archive(
    name = "rules_go",
    urls = ["https://github.com/bazelbuild/rules_go/releases/download/0.18.5/rules_go-0.18.5.tar.gz"],
    sha256 = "a82a352bffae6bee4e95f68a8d80a70e87f42c4741e6a448bec11998fcc82329",
-)
-load("@rules_go//go:deps.bzl", "go_rules_dependencies", "go_register_toolchains")
-go_rules_dependencies()
-go_register_toolchains()
-```
-
-If your rules depend on another repository's rules, specify that in the
-rules documentation (for example, see the
-[Skydoc rules](https://skydoc.bazel.build/docs/getting_started_stardoc.html),
-which depend on the Sass rules), and provide a `WORKSPACE`
-macro that will download all dependencies (see `rules_go` above).
-
-### Rules
-
-Oftentimes there will be multiple rules provided by your repository. Create a
-directory named by the language and provide an entry point: a `defs.bzl` file
-exporting all rules (also include a `BUILD` file so the directory is a package).
-For `rules_mockascript` that means there will be a directory named
-`mockascript`, and a `BUILD` file and a `defs.bzl` file inside:
-
-```
-/
-    mockascript/
-        BUILD
-        defs.bzl
-```
-
-### Constraints
-
-If your rule defines
-[toolchain](/extending/toolchains) rules,
-it's possible that you'll need to define custom `constraint_setting`s and/or
-`constraint_value`s. Put these into a `//<lang>/constraints` package. Your
-directory structure will look like this:
-
-```
-/
-    mockascript/
-        constraints/
-            BUILD
-        BUILD
-        defs.bzl
-```
-
-Please read
-[github.com/bazelbuild/platforms](https://github.com/bazelbuild/platforms)
-for best practices, and to see what constraints are already present, and
-consider contributing your constraints there if they are language independent.
-Be mindful when introducing custom constraints: all users of your rules will
-use them to perform platform-specific logic in their `BUILD` files (for example,
-using [selects](/reference/be/functions#select)).
-With custom constraints, you define a language that the whole Bazel ecosystem
-will speak.
-
-### Runfiles library
-
-If your rule provides a standard library for accessing runfiles, it should be
-in the form of a library target located at `//<lang>/runfiles` (an abbreviation
-of `//<lang>/runfiles:runfiles`). User targets that need to access their data
-dependencies will typically add this target to their `deps` attribute.
-
-### Repository rules
-
-#### Dependencies
-
-Your rules might have external dependencies. To make depending on your rules
-simpler, please provide a `WORKSPACE` macro that will declare dependencies on
-those external dependencies. Do not declare dependencies of tests there, only
-dependencies that rules require to work. Put development dependencies into the
-`WORKSPACE` file.
-
-Create a file named `<lang>/repositories.bzl` and provide a single entry point
-macro named `rules_<lang>_dependencies`. Our directory will look as follows:
-
-```
-/
-    mockascript/
-        constraints/
-            BUILD
-        BUILD
-        defs.bzl
-        repositories.bzl
-```
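-
-A short sketch of what this macro might contain (the dependency name, URL, and
-checksum below are placeholders, not real artifacts):
-
-```python
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-
-def rules_mockascript_dependencies():
-    # Declare only what the rules need at build time, and guard each
-    # declaration so that users can safely call this macro more than once.
-    if not native.existing_rule("mocs_compiler"):
-        http_archive(
-            name = "mocs_compiler",  # placeholder dependency
-            urls = ["https://example.com/mocs-compiler-1.0.tar.gz"],
-            sha256 = "<checksum of the release archive>",
-        )
-```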
-
-
-#### Registering toolchains
-
-Your rules might also register toolchains. Please provide a separate `WORKSPACE`
-macro that registers these toolchains. This way users can decide to omit the
-previous macro and control dependencies manually, while still being allowed to
-register toolchains.
-
-Therefore add a `WORKSPACE` macro named `rules_<lang>_toolchains` into
-the `<lang>/repositories.bzl` file.
-
-Note that in order to resolve toolchains in the analysis phase Bazel needs to
-analyze all `toolchain` targets that are registered. Bazel will not need to
-analyze all targets referenced by the `toolchain.toolchain` attribute. If in order
-to register toolchains you need to perform complex computation in the
-repository, consider splitting the repository with `toolchain` targets from the
-repository with `<lang>_toolchain` targets. The former will always be fetched, and
-the latter will only be fetched when the user actually needs to build `<lang>` code.
-
-
-#### Release snippet
-
-In your release announcement provide a snippet that your users can copy-paste
-into their `WORKSPACE` file. This snippet in general will look as follows:
-
-```
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-http_archive(
-    name = "rules_<lang>",
-    urls = ["<url of the release archive>"],
-    sha256 = "<checksum of the release archive>",
-)
-load("@rules_<lang>//<lang>:repositories.bzl", "rules_<lang>_dependencies", "rules_<lang>_toolchains")
-rules_<lang>_dependencies()
-rules_<lang>_toolchains()
-```
-
-
-### Tests
-
-There should be tests that verify that the rules are working as expected. This
-can either be in the standard location for the language the rules are for or a
-`tests/` directory at the top level.
-
-### Examples (optional)
-
-It is useful to have an `examples/` directory that shows users a couple
-of basic ways that the rules can be used.
-
-## CI/CD
-
-Many rulesets use GitHub Actions. See the configuration used in the [rules-template](https://github.com/bazel-contrib/rules-template/tree/main/.github/workflows) repo, which is simplified using a "reusable workflow" hosted in the bazel-contrib
-org. `ci.yaml` runs tests on each PR and `main` commit, and `release.yaml` runs anytime you push a tag to the repository.
-See comments in the rules-template repo for more information.
-
-If your repository is under the [bazelbuild organization](https://github.com/bazelbuild),
-you can [ask to add](https://github.com/bazelbuild/continuous-integration/issues/new?template=adding-your-project-to-bazel-ci.md&title=Request+to+add+new+project+%5BPROJECT_NAME%5D&labels=new-project)
-it to [ci.bazel.build](http://ci.bazel.build).
-
-## Documentation
-
-See the [Stardoc documentation](https://github.com/bazelbuild/stardoc) for
-instructions on how to comment your rules so that documentation can be generated
-automatically.
-
-The [rules-template docs/ folder](https://github.com/bazel-contrib/rules-template/tree/main/docs)
-shows a simple way to ensure the Markdown content in the `docs/` folder is always up-to-date
-as Starlark files are updated.
-
-## FAQs
-
-### Why can't we add our rule to the main Bazel GitHub repository?
-
-We want to decouple rules from Bazel releases as much as possible. It's clearer
-who owns individual rules, reducing the load on Bazel developers. For our users,
-decoupling makes it easier to modify, upgrade, downgrade, and replace rules.
-
-Contributing to rules can be lighter weight than contributing to Bazel
-(depending on the rules), and can include full submit access to the corresponding
-GitHub repository. Getting submit access to Bazel itself is a much more involved
-process.
-
-The downside is a more complicated one-time installation process for our users:
-they have to copy-paste a rule into their `WORKSPACE` file, as shown in the
-`README.md` section above.
-
-We used to have all of the rules in the Bazel repository (under
-`//tools/build_rules` or `//tools/build_defs`). We still have a couple rules
-there, but we are working on moving the remaining rules out.
diff --git a/7.6.1/rules/errors/read-only-variable.mdx b/7.6.1/rules/errors/read-only-variable.mdx
deleted file mode 100644
index 2bfde65..0000000
--- a/7.6.1/rules/errors/read-only-variable.mdx
+++ /dev/null
@@ -1,30 +0,0 @@
----
-title: 'Error: Variable x is read only'
----
-
-
-
-A global variable cannot be reassigned. It will always point to the same object.
-However, its content might change if the value is mutable (for example, the
-content of a list). Local variables don't have this restriction.
-
-```python
-a = [1, 2]
-
-a[1] = 3
-
-b = 3
-
-b = 4  # forbidden
-```
-
-`ERROR: /path/ext.bzl:7:1: Variable b is read only`
-
-You will get a similar error if you try to redefine a function (function
-overloading is not supported), for example:
-
-```python
-def foo(x): return x + 1
-
-def foo(x, y): return x + y  # forbidden
-```
diff --git a/7.6.1/rules/faq.mdx b/7.6.1/rules/faq.mdx
deleted file mode 100644
index 5321f0b..0000000
--- a/7.6.1/rules/faq.mdx
+++ /dev/null
@@ -1,80 +0,0 @@
----
-title: 'Frequently Asked Questions'
----
-
-
-
-These are some common issues and questions with writing extensions.
-
-## Why is my file not produced / my action never executed?
-
-Bazel only executes the actions needed to produce the *requested* output files.
-
-* If the file you want has a label, you can request it directly:
-  `bazel build //pkg:myfile.txt`
-
-* If the file is in an output group of the target, you may need to specify that
-  output group on the command line:
-  `bazel build //pkg:mytarget --output_groups=foo`
-
-* If you want the file to be built automatically whenever your target is
-  mentioned on the command line, add it to your rule's default outputs by
-  returning a [`DefaultInfo`](lib/globals#DefaultInfo) provider.
-
-See the [Rules page](/extending/rules#requesting-output-files) for more information.
-
-## Why is my implementation function not executed?
-
-Bazel analyzes only the targets that are requested for the build. You should
-either name the target on the command line, or something that depends on the
-target.
-
-## A file is missing when my action or binary is executed
-
-Make sure that 1) the file has been registered as an input to the action or
-binary, and 2) the script or tool being executed is accessing the file using the
-correct path.
-
-For actions, you declare inputs by passing them to the `ctx.actions.*` function
-that creates the action. The proper path for the file can be obtained using
-[`File.path`](lib/File#path).
-
-For binaries (the executable outputs run by a `bazel run` or `bazel test`
-command), you declare inputs by including them in the
-[runfiles](/extending/rules#runfiles). Instead of using the `path` field, use
-[`File.short_path`](lib/File#short_path), which is the file's path relative to
-the runfiles directory in which the binary executes.
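-
-For actions, a minimal sketch of both points (the rule, its `src` attribute,
-and the private `_tool` attribute are hypothetical): an action only sees files
-registered in `inputs`, and the tool receives `File.path` on its command line:
-
-```python
-def _process_impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name + ".out")
-    ctx.actions.run(
-        executable = ctx.executable._tool,
-        # Registering the file here is what makes it visible to the action.
-        inputs = [ctx.file.src],
-        outputs = [out],
-        # The tool reads the input via this path, not an absolute host path.
-        arguments = [ctx.file.src.path, out.path],
-    )
-    return [DefaultInfo(files = depset([out]))]
-```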
-
-## How can I control which files are built by `bazel build //pkg:mytarget`?
-
-Use the [`DefaultInfo`](lib/globals#DefaultInfo) provider to
-[set the default outputs](/extending/rules#requesting-output-files).
-
-## How can I run a program or do file I/O as part of my build?
-
-A tool can be declared as a target, just like any other part of your build, and
-run during the execution phase to help build other targets. To create an action
-that runs a tool, use [`ctx.actions.run`](lib/actions#run) and pass in the
-tool as the `executable` parameter.
-
-During the loading and analysis phases, a tool *cannot* run, nor can you perform
-file I/O. This means that tools and file contents (except the contents of BUILD
-and .bzl files) cannot affect how the target and action graphs get created.
-
-## What if I need to access the same structured data both before and during the execution phase?
-
-You can format the structured data as a .bzl file. You can `load()` the file to
-access it during the loading and analysis phases. You can pass it as an input or
-runfile to actions and executables that need it during the execution phase.
-
-## How should I document Starlark code?
-
-For rules and rule attributes, you can pass a docstring literal (possibly
-triple-quoted) to the `doc` parameter of `rule` or `attr.*()`. For helper
-functions and macros, use a triple-quoted docstring literal following the format
-given [here](https://github.com/bazelbuild/buildtools/blob/master/WARNINGS.md#function-docstring).
-Rule implementation functions generally do not need their own docstring.
-
-Using string literals in the expected places makes it easier for automated
-tooling to extract documentation. Feel free to use standard non-string comments
-wherever it may help the reader of your code.
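-
-For example, a helper-function docstring in that format might look like this
-(the function itself is purely illustrative):
-
-```python
-def _join_paths(prefix, relative_path):
-    """Joins a prefix and a relative path with a single slash.
-
-    Args:
-        prefix: string; the leading path segment, without a trailing slash.
-        relative_path: string; the path to append.
-
-    Returns:
-        The joined path as a string.
-    """
-    return "%s/%s" % (prefix, relative_path)
-```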
diff --git a/7.6.1/rules/index.mdx b/7.6.1/rules/index.mdx
deleted file mode 100644
index 7d342af..0000000
--- a/7.6.1/rules/index.mdx
+++ /dev/null
@@ -1,85 +0,0 @@
----
-title: 'Rules'
----
-
-
-
-The Bazel ecosystem has a growing and evolving set of rules to support popular
-languages and packages. Much of Bazel's strength comes from the ability to
-[define new rules](/extending/concepts) that can be used by others.
-
-This page describes the recommended, native, and non-native Bazel rules.
-
-## Recommended rules
-
-Here is a selection of recommended rules:
-
-* [Android](/docs/bazel-and-android)
-* [C / C++](/docs/bazel-and-cpp)
-* [Docker/OCI](https://github.com/bazel-contrib/rules_oci)
-* [Go](https://github.com/bazelbuild/rules_go)
-* [Haskell](https://github.com/tweag/rules_haskell)
-* [Java](/docs/bazel-and-java)
-* [JavaScript / NodeJS](https://github.com/bazelbuild/rules_nodejs)
-* [Kubernetes](https://github.com/bazelbuild/rules_k8s)
-* [Maven dependency management](https://github.com/bazelbuild/rules_jvm_external)
-* [Objective-C](/docs/bazel-and-apple)
-* [Package building](https://github.com/bazelbuild/rules_pkg)
-* [Protocol Buffers](https://github.com/bazelbuild/rules_proto#protobuf-rules-for-bazel)
-* [Python](https://github.com/bazelbuild/rules_python)
-* [Scala](https://github.com/bazelbuild/rules_scala)
-* [Shell](/reference/be/shell)
-* [Webtesting](https://github.com/bazelbuild/rules_webtesting) (Webdriver)
-
-The repository [Skylib](https://github.com/bazelbuild/bazel-skylib) contains
-additional functions that can be useful when writing new rules and new
-macros.
-
-The rules above were reviewed and follow our
-[requirements for recommended rules](/community/recommended-rules).
-Contact the respective rule set's maintainers regarding issues and feature
-requests.
-
-To find more Bazel rules, use a search engine, take a look at
-[awesomebazel.com](https://awesomebazel.com/), or search on
-[GitHub](https://github.com/search?o=desc&q=bazel+rules&s=stars&type=Repositories).
-
-## Native rules that do not apply to a specific programming language
-
-Native rules are shipped with the Bazel binary and are always available in
-BUILD files without a `load` statement.
-
-* Extra actions
-    - [`extra_action`](/reference/be/extra-actions#extra_action)
-    - [`action_listener`](/reference/be/extra-actions#action_listener)
-* General
-    - [`filegroup`](/reference/be/general#filegroup)
-    - [`genquery`](/reference/be/general#genquery)
-    - [`test_suite`](/reference/be/general#test_suite)
-    - [`alias`](/reference/be/general#alias)
-    - [`config_setting`](/reference/be/general#config_setting)
-    - [`genrule`](/reference/be/general#genrule)
-* Platform
-    - [`constraint_setting`](/reference/be/platforms-and-toolchains#constraint_setting)
-    - [`constraint_value`](/reference/be/platforms-and-toolchains#constraint_value)
-    - [`platform`](/reference/be/platforms-and-toolchains#platform)
-    - [`toolchain`](/reference/be/platforms-and-toolchains#toolchain)
-    - [`toolchain_type`](/reference/be/platforms-and-toolchains#toolchain_type)
-* Workspace
-    - [`bind`](/reference/be/workspace#bind)
-    - [`local_repository`](/reference/be/workspace#local_repository)
-    - [`new_local_repository`](/reference/be/workspace#new_local_repository)
-    - [`xcode_config`](/reference/be/objective-c#xcode_config)
-    - [`xcode_version`](/reference/be/objective-c#xcode_version)
-
-## Embedded non-native rules
-
-Bazel also embeds additional rules written in [Starlark](/rules/language). These can be loaded from
-the `@bazel_tools` built-in external repository.
-
-* Repository rules
-    - [`git_repository`](/rules/lib/repo/git#git_repository)
-    - [`http_archive`](/rules/lib/repo/http#http_archive)
-    - [`http_file`](/rules/lib/repo/http#http_file)
-    - [`http_jar`](/rules/lib/repo/http#http_jar)
-    - [Utility functions on patching](/rules/lib/repo/utils)
diff --git a/7.6.1/rules/macro-tutorial.mdx b/7.6.1/rules/macro-tutorial.mdx
deleted file mode 100644
index 4e9be9c..0000000
--- a/7.6.1/rules/macro-tutorial.mdx
+++ /dev/null
@@ -1,79 +0,0 @@
----
-title: 'Creating a Macro'
----
-
-
-
-Imagine that you need to run a tool as part of your build. For example, you
-may want to generate or preprocess a source file, or compress a binary. In this
-tutorial, you are going to create a macro that resizes an image.
-
-Macros are suitable for simple tasks. If you want to do anything more
-complicated, for example add support for a new programming language, consider
-creating a [rule](/extending/rules). Rules give you more control and flexibility.
-
-The easiest way to create a macro that resizes an image is to use a `genrule`:
-
-``` python
-genrule(
-    name = "logo_miniature",
-    srcs = ["logo.png"],
-    outs = ["small_logo.png"],
-    cmd = "convert $< -resize 100x100 $@",
-)
-
-cc_binary(
-    name = "my_app",
-    srcs = ["my_app.cc"],
-    data = [":logo_miniature"],
-)
-```
-
-If you need to resize more images, you may want to reuse the code. To do that,
-define a function in a separate `.bzl` file, and call the file `miniature.bzl`:
-
-``` python
-def miniature(name, src, size="100x100", **kwargs):
-    """Create a miniature of the src image.
-
-    The generated file is prefixed with 'small_'.
-
-    """
-    native.genrule(
-        name = name,
-        srcs = [src],
-        outs = ["small_" + src],
-        cmd = "convert $< -resize " + size + " $@",
-        **kwargs
-    )
-```
-
-A few remarks:
-
-* By convention, macros have a `name` argument, just like rules.
-
-* To document the behavior of a macro, use a
  [docstring](https://www.python.org/dev/peps/pep-0257/) like in Python.
-
-* To call a `genrule`, or any other native rule, use `native.`.
-
-* Use `**kwargs` to forward the extra arguments to the underlying `genrule`
-  (it works just like in [Python](https://docs.python.org/3/tutorial/controlflow.html#keyword-arguments)).
-  This is useful so that a user can set standard attributes like `visibility`
-  or `tags`.
-
-Now, use the macro from the `BUILD` file:
-
-``` python
-load("//path/to:miniature.bzl", "miniature")
-
-miniature(
-    name = "logo_miniature",
-    src = "image.png",
-)
-
-cc_binary(
-    name = "my_app",
-    srcs = ["my_app.cc"],
-    data = [":logo_miniature"],
-)
-```
diff --git a/7.6.1/rules/performance.mdx b/7.6.1/rules/performance.mdx
deleted file mode 100644
index 8afa2f9..0000000
--- a/7.6.1/rules/performance.mdx
+++ /dev/null
@@ -1,295 +0,0 @@
----
-title: 'Optimizing Performance'
----
-
-
-
-When writing rules, the most common performance pitfall is to traverse or copy
-data that is accumulated from dependencies. When aggregated over the whole
-build, these operations can easily take O(N^2) time or space. To avoid this, it
-is crucial to understand how to use depsets effectively.
-
-This can be hard to get right, so Bazel also provides a memory profiler that
-assists you in finding spots where you might have made a mistake. Be warned:
-The cost of writing an inefficient rule may not be evident until it is in
-widespread use.
-
-## Use depsets
-
-Whenever you are rolling up information from rule dependencies you should use
-[depsets](lib/depset). Only use plain lists or dicts to publish information
-local to the current rule.
-
-A depset represents information as a nested graph, which enables sharing.
-
-Consider the following graph:
-
-```
-C -> B -> A
-D ---^
-```
-
-Each node publishes a single string. With depsets the data looks like this:
-
-```
-a = depset(direct=['a'])
-b = depset(direct=['b'], transitive=[a])
-c = depset(direct=['c'], transitive=[b])
-d = depset(direct=['d'], transitive=[b])
-```
-
-Note that each item is only mentioned once. With lists you would get this:
-
-```
-a = ['a']
-b = ['b', 'a']
-c = ['c', 'b', 'a']
-d = ['d', 'b', 'a']
-```
-
-Note that in this case `'a'` is mentioned four times! With larger graphs this
-problem will only get worse.
-
-Here is an example of a rule implementation that uses depsets correctly to
-publish transitive information. Note that it is OK to publish rule-local
-information using lists if you want since this is not O(N^2).
-
-```
-MyProvider = provider()
-
-def _impl(ctx):
-    my_things = ctx.attr.things
-    all_things = depset(
-        direct=my_things,
-        transitive=[dep[MyProvider].all_things for dep in ctx.attr.deps]
-    )
-    ...
-    return [MyProvider(
-        my_things=my_things,  # OK, a flat list of rule-local things only
-        all_things=all_things,  # OK, a depset containing dependencies
-    )]
-```
-
-See the [depset overview](/extending/depsets) page for more information.
-
-### Avoid calling `depset.to_list()`
-
-You can coerce a depset to a flat list using
-[`to_list()`](lib/depset#to_list), but doing so usually results in O(N^2)
-cost. If at all possible, avoid any flattening of depsets except for debugging
-purposes.
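-
-For example, here is a sketch of the pattern to avoid, next to the depset-based
-alternative; it reuses `MyProvider` from the example above, and the attribute
-names are illustrative:
-
-```python
-def _bad_impl(ctx):
-    # Bad: to_list() flattens the depset at every level of the graph,
-    # so the total work grows quadratically over the whole build.
-    all_things = []
-    for dep in ctx.attr.deps:
-        all_things.extend(dep[MyProvider].all_things.to_list())
-    return [MyProvider(all_things = all_things)]
-
-def _good_impl(ctx):
-    # Good: keep the nested structure and defer any flattening.
-    return [MyProvider(all_things = depset(
-        transitive = [dep[MyProvider].all_things for dep in ctx.attr.deps],
-    ))]
-```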
-
-A common misconception is that you can freely flatten depsets if you only do it
-at top-level targets, such as a `*_binary` rule, since then the cost is not
-accumulated over each level of the build graph. But this is *still* O(N^2) when
-you build a set of targets with overlapping dependencies. This happens when
-building your tests `//foo/tests/...`, or when importing an IDE project.
-
-### Reduce the number of calls to `depset`
-
-Calling `depset` inside a loop is often a mistake. It can lead to depsets with
-very deep nesting, which perform poorly. For example:
-
-```python
-x = depset()
-for i in inputs:
-    # Do not do that.
-    x = depset(transitive = [x, i.deps])
-```
-
-This code can be replaced easily. First, collect the transitive depsets and
-merge them all at once:
-
-```python
-transitive = []
-
-for i in inputs:
-    transitive.append(i.deps)
-
-x = depset(transitive = transitive)
-```
-
-This can sometimes be reduced using a list comprehension:
-
-```python
-x = depset(transitive = [i.deps for i in inputs])
-```
-
-## Use ctx.actions.args() for command lines
-
-When building command lines you should use [ctx.actions.args()](lib/Args).
-This defers expansion of any depsets to the execution phase.
-
-Apart from being strictly faster, this will reduce the memory consumption of
-your rules -- sometimes by 90% or more.
-
-Here are some tricks:
-
-* Pass depsets and lists directly as arguments, instead of flattening them
-yourself. They will get expanded by `ctx.actions.args()` for you.
-If you need any transformations on the depset contents, look at
-[ctx.actions.args#add](lib/Args#add) to see if anything fits the bill.
-
-* Are you passing `File#path` as arguments? No need. Any
-[File](lib/File) is automatically turned into its
-[path](lib/File#path), deferred to expansion time.
-
-* Avoid constructing strings by concatenating them together.
-The best string argument is a constant as its memory will be shared between
-all instances of your rule.
-
-* If the args are too long for the command line, a `ctx.actions.args()` object
-can be conditionally or unconditionally written to a param file using
-[`ctx.actions.args#use_param_file`](lib/Args#use_param_file). This is
-done behind the scenes when the action is executed. If you need to explicitly
-control the params file you can write it manually using
-[`ctx.actions.write`](lib/actions#write).
-
-Example:
-
-```
-def _impl(ctx):
-    ...
-    args = ctx.actions.args()
-    file = ctx.actions.declare_file(...)
-    files = depset(...)
-
-    # Bad, constructs a full string "--foo=<file path>" for each rule instance
-    args.add("--foo=" + file.path)
-
-    # Good, shares "--foo" among all rule instances, and defers file.path to later
-    # It will however pass ["--foo", <file path>] to the action command line,
-    # instead of ["--foo=<file path>"]
-    args.add("--foo", file)
-
-    # Use format if you prefer ["--foo=<file path>"] to ["--foo", <file path>]
-    args.add(format="--foo=%s", value=file)
-
-    # Bad, makes a giant string of a whole depset
-    args.add(" ".join(["-I%s" % file.short_path for file in files]))
-
-    # Good, only stores a reference to the depset
-    args.add_all(files, format_each="-I%s", map_each=_to_short_path)
-
-# Function passed to map_each above
-def _to_short_path(f):
-    return f.short_path
-```
-
-## Transitive action inputs should be depsets
-
-When building an action using [ctx.actions.run](lib/actions#run), do not
-forget that the `inputs` field accepts a depset. Use this whenever inputs are
-collected from dependencies transitively.
-
-```
-inputs = depset(...)
-
-ctx.actions.run(
-  inputs = inputs,  # Do *not* turn inputs into a list
-  ...
-)
-```
-
-## Hanging
-
-If Bazel appears to be hung, you can hit Ctrl-\ or send
-Bazel a `SIGQUIT` signal (`kill -3 $(bazel info server_pid)`) to get a thread
-dump in the file `$(bazel info output_base)/server/jvm.out`.
-
-Since you may not be able to run `bazel info` if bazel is hung, the
-`output_base` directory is usually the parent of the `bazel-<workspace>`
-symlink in your workspace directory.
-
-## Performance profiling
-
-The [JSON trace profile](/advanced/performance/json-trace-profile) can be very
-useful to quickly understand what Bazel spent time on during the invocation.
-
-## Memory profiling
-
-Bazel comes with a built-in memory profiler that can help you check your rule's
-memory use. If there is a problem, you can dump the heap to find the
-exact line of code that is causing the problem.
-
-### Enabling memory tracking
-
-You must pass these two startup flags to *every* Bazel invocation:
-
-  ```
-  STARTUP_FLAGS=\
-  --host_jvm_args=-javaagent:<path to java-allocation-instrumenter jar> \
-  --host_jvm_args=-DRULE_MEMORY_TRACKER=1
-  ```
-Note: You can download the allocation instrumenter jar file from [Maven Central
-Repository][allocation-instrumenter-link].
-
-[allocation-instrumenter-link]: https://repo1.maven.org/maven2/com/google/code/java-allocation-instrumenter/java-allocation-instrumenter/3.3.4
-
-These flags start the server in memory-tracking mode. If you forget them for even
-one Bazel invocation, the server will restart and you will have to start over.
-
-### Using the Memory Tracker
-
-As an example, look at the target `foo` and see what it does. To only
-run the analysis and not run the build execution phase, add the
-`--nobuild` flag.
-
-```
-$ bazel $(STARTUP_FLAGS) build --nobuild //foo:foo
-```
-
-Next, see how much memory the whole Bazel instance consumes:
-
-```
-$ bazel $(STARTUP_FLAGS) info used-heap-size-after-gc
-> 2594MB
-```
-
-Break it down by rule class by using `bazel dump --rules`:
-
-```
-$ bazel $(STARTUP_FLAGS) dump --rules
->
-
-RULE                           COUNT    ACTIONS          BYTES      EACH
-genrule                       33,762     33,801    291,538,824     8,635
-config_setting                25,374          0     24,897,336       981
-filegroup                     25,369     25,369     97,496,272     3,843
-cc_library                     5,372     73,235    182,214,456    33,919
-proto_library                  4,140    110,409    186,776,864    45,115
-android_library                2,621     36,921    218,504,848    83,366
-java_library                   2,371     12,459     38,841,000    16,381
-_gen_source                      719      2,157      9,195,312    12,789
-_check_proto_library_deps        719        668      1,835,288     2,552
-... (more output)
-```
-
-Look at where the memory is going by producing a `pprof` file
-using `bazel dump --skylark_memory`:
-
-```
-$ bazel $(STARTUP_FLAGS) dump --skylark_memory=$HOME/prof.gz
-> Dumping Starlark heap to: /usr/local/google/home/$USER/prof.gz
-```
-
-Use the `pprof` tool to investigate the heap. A good starting point is
-getting a flame graph by using `pprof -flame $HOME/prof.gz`.
-
-Get `pprof` from [https://github.com/google/pprof](https://github.com/google/pprof).
- -Get a text dump of the hottest call sites annotated with lines: - -``` -$ pprof -text -lines $HOME/prof.gz -> - flat flat% sum% cum cum% - 146.11MB 19.64% 19.64% 146.11MB 19.64% android_library :-1 - 113.02MB 15.19% 34.83% 113.02MB 15.19% genrule :-1 - 74.11MB 9.96% 44.80% 74.11MB 9.96% glob :-1 - 55.98MB 7.53% 52.32% 55.98MB 7.53% filegroup :-1 - 53.44MB 7.18% 59.51% 53.44MB 7.18% sh_test :-1 - 26.55MB 3.57% 63.07% 26.55MB 3.57% _generate_foo_files /foo/tc/tc.bzl:491 - 26.01MB 3.50% 66.57% 26.01MB 3.50% _build_foo_impl /foo/build_test.bzl:78 - 22.01MB 2.96% 69.53% 22.01MB 2.96% _build_foo_impl /foo/build_test.bzl:73 - ... (more output) -``` diff --git a/7.6.1/rules/rules-tutorial.mdx b/7.6.1/rules/rules-tutorial.mdx deleted file mode 100644 index 4c6698e..0000000 --- a/7.6.1/rules/rules-tutorial.mdx +++ /dev/null @@ -1,367 +0,0 @@ ---- -title: 'Rules Tutorial' ---- - - - - -[Starlark](https://github.com/bazelbuild/starlark) is a Python-like -configuration language originally developed for use in Bazel and since adopted -by other tools. Bazel's `BUILD` and `.bzl` files are written in a dialect of -Starlark properly known as the "Build Language", though it is often simply -referred to as "Starlark", especially when emphasizing that a feature is -expressed in the Build Language as opposed to being a built-in or "native" part -of Bazel. Bazel augments the core language with numerous build-related functions -such as `glob`, `genrule`, `java_binary`, and so on. - -See the -[Bazel](/start/) and [Starlark](/extending/concepts) documentation for -more details, and the -[Rules SIG template](https://github.com/bazel-contrib/rules-template) as a -starting point for new rulesets. - -## The empty rule - -To create your first rule, create the file `foo.bzl`: - -```python -def _foo_binary_impl(ctx): - pass - -foo_binary = rule( - implementation = _foo_binary_impl, -) -``` - -When you call the [`rule`](lib/globals#rule) function, you -must define a callback function. The logic will go there, but you -can leave the function empty for now. The [`ctx`](lib/ctx) argument -provides information about the target. - -You can load the rule and use it from a `BUILD` file. - -Create a `BUILD` file in the same directory: - -```python -load(":foo.bzl", "foo_binary") - -foo_binary(name = "bin") -``` - -Now, the target can be built: - -``` -$ bazel build bin -INFO: Analyzed target //:bin (2 packages loaded, 17 targets configured). -INFO: Found 1 target... -Target //:bin up-to-date (nothing to build) -``` - -Even though the rule does nothing, it already behaves like other rules: it has a -mandatory name, it supports common attributes like `visibility`, `testonly`, and -`tags`. - -## Evaluation model - -Before going further, it's important to understand how the code is evaluated. - -Update `foo.bzl` with some print statements: - -```python -def _foo_binary_impl(ctx): - print("analyzing", ctx.label) - -foo_binary = rule( - implementation = _foo_binary_impl, -) - -print("bzl file evaluation") -``` - -and BUILD: - -```python -load(":foo.bzl", "foo_binary") - -print("BUILD file") -foo_binary(name = "bin1") -foo_binary(name = "bin2") -``` - -[`ctx.label`](lib/ctx#label) -corresponds to the label of the target being analyzed. The `ctx` object has -many useful fields and methods; you can find an exhaustive list in the -[API reference](lib/ctx). 
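-
-For instance, `ctx.label` is a structured `Label` value rather than a plain
-string. As a quick illustration (the `//some/pkg:bin1` label and the printed
-values here are assumptions for the sketch, not output from the example above),
-an implementation function could inspect it like this:
-
-```python
-def _foo_binary_impl(ctx):
-    # For a target //some/pkg:bin1, ctx.label.name would be "bin1"...
-    print(ctx.label.name)
-    # ...and ctx.label.package would be "some/pkg".
-    print(ctx.label.package)
-    # str(ctx.label) yields the full label, possibly with a repository prefix.
-    print(str(ctx.label))
-```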
- -Query the code: - -``` -$ bazel query :all -DEBUG: /usr/home/bazel-codelab/foo.bzl:8:1: bzl file evaluation -DEBUG: /usr/home/bazel-codelab/BUILD:2:1: BUILD file -//:bin2 -//:bin1 -``` - -Make a few observations: - -* "bzl file evaluation" is printed first. Before evaluating the `BUILD` file, - Bazel evaluates all the files it loads. If multiple `BUILD` files are loading - foo.bzl, you would see only one occurrence of "bzl file evaluation" because - Bazel caches the result of the evaluation. -* The callback function `_foo_binary_impl` is not called. Bazel query loads - `BUILD` files, but doesn't analyze targets. - -To analyze the targets, use the [`cquery`](/query/cquery) ("configured -query") or the `build` command: - -``` -$ bazel build :all -DEBUG: /usr/home/bazel-codelab/foo.bzl:2:5: analyzing //:bin1 -DEBUG: /usr/home/bazel-codelab/foo.bzl:2:5: analyzing //:bin2 -INFO: Analyzed 2 targets (0 packages loaded, 0 targets configured). -INFO: Found 2 targets... -``` - -As you can see, `_foo_binary_impl` is now called twice - once for each target. - -Notice that neither "bzl file evaluation" nor "BUILD file" are printed again, -because the evaluation of `foo.bzl` is cached after the call to `bazel query`. -Bazel only emits `print` statements when they are actually executed. - -## Creating a file - -To make your rule more useful, update it to generate a file. First, declare the -file and give it a name. In this example, create a file with the same name as -the target: - -```python -ctx.actions.declare_file(ctx.label.name) -``` - -If you run `bazel build :all` now, you will get an error: - -``` -The following files have no generating action: -bin2 -``` - -Whenever you declare a file, you have to tell Bazel how to generate it by -creating an action. Use [`ctx.actions.write`](lib/actions#write), -to create a file with the given content. - -```python -def _foo_binary_impl(ctx): - out = ctx.actions.declare_file(ctx.label.name) - ctx.actions.write( - output = out, - content = "Hello\n", - ) -``` - -The code is valid, but it won't do anything: - -``` -$ bazel build bin1 -Target //:bin1 up-to-date (nothing to build) -``` - -The `ctx.actions.write` function registered an action, which taught Bazel -how to generate the file. But Bazel won't create the file until it is -actually requested. So the last thing to do is tell Bazel that the file -is an output of the rule, and not a temporary file used within the rule -implementation. - -```python -def _foo_binary_impl(ctx): - out = ctx.actions.declare_file(ctx.label.name) - ctx.actions.write( - output = out, - content = "Hello!\n", - ) - return [DefaultInfo(files = depset([out]))] -``` - -Look at the `DefaultInfo` and `depset` functions later. For now, -assume that the last line is the way to choose the outputs of a rule. - -Now, run Bazel: - -``` -$ bazel build bin1 -INFO: Found 1 target... -Target //:bin1 up-to-date: - bazel-bin/bin1 - -$ cat bazel-bin/bin1 -Hello! -``` - -You have successfully generated a file! - -## Attributes - -To make the rule more useful, add new attributes using -[the `attr` module](lib/attr) and update the rule definition. - -Add a string attribute called `username`: - -```python -foo_binary = rule( - implementation = _foo_binary_impl, - attrs = { - "username": attr.string(), - }, -) -``` - -Next, set it in the `BUILD` file: - -```python -foo_binary( - name = "bin", - username = "Alice", -) -``` - -To access the value in the callback function, use `ctx.attr.username`. 
For example:
-
-```python
-def _foo_binary_impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name)
-    ctx.actions.write(
-        output = out,
-        content = "Hello {}!\n".format(ctx.attr.username),
-    )
-    return [DefaultInfo(files = depset([out]))]
-```
-
-Note that you can make the attribute mandatory or set a default value. Look at
-the documentation of [`attr.string`](lib/attr#string).
-You may also use other types of attributes, such as [boolean](lib/attr#bool)
-or [list of integers](lib/attr#int_list).
-
-## Dependencies
-
-Dependency attributes, such as [`attr.label`](lib/attr#label)
-and [`attr.label_list`](lib/attr#label_list),
-declare a dependency from the target that owns the attribute to the target whose
-label appears in the attribute's value. This kind of attribute forms the basis
-of the target graph.
-
-In the `BUILD` file, the target label appears as a string object, such as
-`//pkg:name`. In the implementation function, the target will be accessible as a
-[`Target`](lib/Target) object. For example, view the files returned
-by the target using [`Target.files`](lib/Target#modules.Target.files).
-
-### Multiple files
-
-By default, only targets created by rules may appear as dependencies (such as a
-`foo_library()` target). If you want the attribute to accept targets that are
-input files (such as source files in the repository), you can do it with
-`allow_files` and specify the list of accepted file extensions (or `True` to
-allow any file extension):
-
-```python
-"srcs": attr.label_list(allow_files = [".java"]),
-```
-
-The list of files can be accessed with `ctx.files.<attribute name>`. For
-example, the list of files in the `srcs` attribute can be accessed through
-
-```python
-ctx.files.srcs
-```
-
-### Single file
-
-If you need only one file, use `allow_single_file`:
-
-```python
-"src": attr.label(allow_single_file = [".java"])
-```
-
-This file is then accessible under `ctx.file.<attribute name>`:
-
-```python
-ctx.file.src
-```
-
-## Create a file with a template
-
-You can create a rule that generates a `.cc` file based on a template. You
-could use `ctx.actions.write` to output a string constructed in the rule
-implementation function, but this has two problems. First, as the template gets
-bigger, it becomes more memory-efficient to put it in a separate file and avoid
-constructing large strings during the analysis phase. Second, using a separate
-file is more convenient for the user. Instead, use
-[`ctx.actions.expand_template`](lib/actions#expand_template),
-which performs substitutions on a template file.
- -Create a `template` attribute to declare a dependency on the template -file: - -```python -def _hello_world_impl(ctx): - out = ctx.actions.declare_file(ctx.label.name + ".cc") - ctx.actions.expand_template( - output = out, - template = ctx.file.template, - substitutions = {"{NAME}": ctx.attr.username}, - ) - return [DefaultInfo(files = depset([out]))] - -hello_world = rule( - implementation = _hello_world_impl, - attrs = { - "username": attr.string(default = "unknown person"), - "template": attr.label( - allow_single_file = [".cc.tpl"], - mandatory = True, - ), - }, -) -``` - -Users can use the rule like this: - -```python -hello_world( - name = "hello", - username = "Alice", - template = "file.cc.tpl", -) - -cc_binary( - name = "hello_bin", - srcs = [":hello"], -) -``` - -If you don't want to expose the template to the end-user and always use the -same one, you can set a default value and make the attribute private: - -```python - "_template": attr.label( - allow_single_file = True, - default = "file.cc.tpl", - ), -``` - -Attributes that start with an underscore are private and cannot be set in a -`BUILD` file. The template is now an _implicit dependency_: Every `hello_world` -target has a dependency on this file. Don't forget to make this file visible -to other packages by updating the `BUILD` file and using -[`exports_files`](/reference/be/functions#exports_files): - -```python -exports_files(["file.cc.tpl"]) -``` - -## Going further - -* Take a look at the [reference documentation for rules](/extending/rules#contents). -* Get familiar with [depsets](/extending/depsets). -* Check out the [examples repository](https://github.com/bazelbuild/examples/tree/master/rules) - which includes additional examples of rules. diff --git a/7.6.1/rules/testing.mdx b/7.6.1/rules/testing.mdx deleted file mode 100644 index 2996e08..0000000 --- a/7.6.1/rules/testing.mdx +++ /dev/null @@ -1,474 +0,0 @@ ---- -title: 'Testing' ---- - - - -There are several different approaches to testing Starlark code in Bazel. This -page gathers the current best practices and frameworks by use case. - -## Testing rules - -[Skylib](https://github.com/bazelbuild/bazel-skylib) has a test framework called -[`unittest.bzl`](https://github.com/bazelbuild/bazel-skylib/blob/main/lib/unittest.bzl) -for checking the analysis-time behavior of rules, such as their actions and -providers. Such tests are called "analysis tests" and are currently the best -option for testing the inner workings of rules. - -Some caveats: - -* Test assertions occur within the build, not a separate test runner process. - Targets that are created by the test must be named such that they do not - collide with targets from other tests or from the build. An error that - occurs during the test is seen by Bazel as a build breakage rather than a - test failure. - -* It requires a fair amount of boilerplate to set up the rules under test and - the rules containing test assertions. This boilerplate may seem daunting at - first. It helps to [keep in mind](/extending/concepts#evaluation-model) that macros - are evaluated and targets generated during the loading phase, while rule - implementation functions don't run until later, during the analysis phase. - -* Analysis tests are intended to be fairly small and lightweight. Certain - features of the analysis testing framework are restricted to verifying - targets with a maximum number of transitive dependencies (currently 500). - This is due to performance implications of using these features with larger - tests. 
- -The basic principle is to define a testing rule that depends on the -rule-under-test. This gives the testing rule access to the rule-under-test's -providers. - -The testing rule's implementation function carries out assertions. If there are -any failures, these are not raised immediately by calling `fail()` (which would -trigger an analysis-time build error), but rather by storing the errors in a -generated script that fails at test execution time. - -See below for a minimal toy example, followed by an example that checks actions. - -### Minimal example - -`//mypkg/myrules.bzl`: - -```python -MyInfo = provider(fields = { - "val": "string value", - "out": "output File", -}) - -def _myrule_impl(ctx): - """Rule that just generates a file and returns a provider.""" - out = ctx.actions.declare_file(ctx.label.name + ".out") - ctx.actions.write(out, "abc") - return [MyInfo(val="some value", out=out)] - -myrule = rule( - implementation = _myrule_impl, -) -``` - -`//mypkg/myrules_test.bzl`: - - -```python -load("@bazel_skylib//lib:unittest.bzl", "asserts", "analysistest") -load(":myrules.bzl", "myrule", "MyInfo") - -# ==== Check the provider contents ==== - -def _provider_contents_test_impl(ctx): - env = analysistest.begin(ctx) - - target_under_test = analysistest.target_under_test(env) - # If preferred, could pass these values as "expected" and "actual" keyword - # arguments. - asserts.equals(env, "some value", target_under_test[MyInfo].val) - - # If you forget to return end(), you will get an error about an analysis - # test needing to return an instance of AnalysisTestResultInfo. - return analysistest.end(env) - -# Create the testing rule to wrap the test logic. This must be bound to a global -# variable, not called in a macro's body, since macros get evaluated at loading -# time but the rule gets evaluated later, at analysis time. Since this is a test -# rule, its name must end with "_test". -provider_contents_test = analysistest.make(_provider_contents_test_impl) - -# Macro to setup the test. -def _test_provider_contents(): - # Rule under test. Be sure to tag 'manual', as this target should not be - # built using `:all` except as a dependency of the test. - myrule(name = "provider_contents_subject", tags = ["manual"]) - # Testing rule. - provider_contents_test(name = "provider_contents_test", - target_under_test = ":provider_contents_subject") - # Note the target_under_test attribute is how the test rule depends on - # the real rule target. - -# Entry point from the BUILD file; macro for running each test case's macro and -# declaring a test suite that wraps them together. -def myrules_test_suite(name): - # Call all test functions and wrap their targets in a suite. - _test_provider_contents() - # ... - - native.test_suite( - name = name, - tests = [ - ":provider_contents_test", - # ... - ], - ) -``` - -`//mypkg/BUILD`: - -```python -load(":myrules.bzl", "myrule") -load(":myrules_test.bzl", "myrules_test_suite") - -# Production use of the rule. -myrule( - name = "mytarget", -) - -# Call a macro that defines targets that perform the tests at analysis time, -# and that can be executed with "bazel test" to return the result. -myrules_test_suite(name = "myrules_test") -``` - -The test can be run with `bazel test //mypkg:myrules_test`. 
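-
-The same pattern extends to any field of the provider. As a hypothetical
-variation (not part of the example files above), a second test implementation
-function could check the `out` field of `MyInfo` as well:
-
-```python
-def _provider_out_test_impl(ctx):
-    env = analysistest.begin(ctx)
-
-    target_under_test = analysistest.target_under_test(env)
-    # The rule declares "<name>.out", so the File stored in MyInfo
-    # should have a matching basename.
-    asserts.equals(
-        env,
-        target_under_test.label.name + ".out",
-        target_under_test[MyInfo].out.basename,
-    )
-    return analysistest.end(env)
-```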
-
-Aside from the initial `load()` statements, there are two main parts to the
-file:
-
-* The tests themselves, each of which consists of 1) an analysis-time
-  implementation function for the testing rule, 2) a declaration of the
-  testing rule via `analysistest.make()`, and 3) a loading-time function
-  (macro) for declaring the rule-under-test (and its dependencies) and testing
-  rule. If the assertions do not change between test cases, 1) and 2) may be
-  shared by multiple test cases.
-
-* The test suite function, which calls the loading-time functions for each
-  test, and declares a `test_suite` target bundling all tests together.
-
-For consistency, follow the recommended naming convention: Let `foo` stand for
-the part of the test name that describes what the test is checking
-(`provider_contents` in the above example). For example, a JUnit test method
-would be named `testFoo`.
-
-Then:
-
-* the macro which generates the test and target under test should be
-  named `_test_foo` (`_test_provider_contents`)
-
-* its test rule type should be named `foo_test` (`provider_contents_test`)
-
-* the label of the target of this rule type should be `foo_test`
-  (`provider_contents_test`)
-
-* the implementation function for the testing rule should be named
-  `_foo_test_impl` (`_provider_contents_test_impl`)
-
-* the labels of the targets of the rules under test and their dependencies
-  should be prefixed with `foo_` (`provider_contents_`)
-
-Note that the labels of all targets can conflict with other labels in the same
-BUILD package, so it's helpful to use a unique name for the test.
-
-### Failure testing
-
-It may be useful to verify that a rule fails given certain inputs or in a
-certain state. This can be done using the analysis test framework:
-
-The test rule created with `analysistest.make` should specify `expect_failure`:
-
-```python
-failure_testing_test = analysistest.make(
-    _failure_testing_test_impl,
-    expect_failure = True,
-)
-```
-
-The test rule implementation should make assertions on the nature of the failure
-that took place (specifically, the failure message):
-
-```python
-def _failure_testing_test_impl(ctx):
-    env = analysistest.begin(ctx)
-    asserts.expect_failure(env, "This rule should never work")
-    return analysistest.end(env)
-```
-
-Also make sure that your target under test is specifically tagged 'manual'.
-Without this, building all targets in your package using `:all` will result in a
-build of the intentionally-failing target and will exhibit a build failure. With
-'manual', your target under test will build only if explicitly specified, or as
-a dependency of a non-manual target (such as your test rule):
-
-```python
-def _test_failure():
-    myrule(name = "this_should_fail", tags = ["manual"])
-
-    failure_testing_test(name = "failure_testing_test",
-                         target_under_test = ":this_should_fail")
-
-# Then call _test_failure() in the macro which generates the test suite and add
-# ":failure_testing_test" to the suite's test targets.
-```
-
-### Verifying registered actions
-
-You may want to write tests which make assertions about the actions that your
-rule registers, for example, using `ctx.actions.run()`. This can be done in your
-analysis test rule implementation function.
An example: - -```python -def _inspect_actions_test_impl(ctx): - env = analysistest.begin(ctx) - - target_under_test = analysistest.target_under_test(env) - actions = analysistest.target_actions(env) - asserts.equals(env, 1, len(actions)) - action_output = actions[0].outputs.to_list()[0] - asserts.equals( - env, target_under_test.label.name + ".out", action_output.basename) - return analysistest.end(env) -``` - -Note that `analysistest.target_actions(env)` returns a list of -[`Action`](lib/Action) objects which represent actions registered by the -target under test. - -### Verifying rule behavior under different flags - -You may want to verify your real rule behaves a certain way given certain build -flags. For example, your rule may behave differently if a user specifies: - -```shell -bazel build //mypkg:real_target -c opt -``` - -versus - -```shell -bazel build //mypkg:real_target -c dbg -``` - -At first glance, this could be done by testing the target under test using the -desired build flags: - -```shell -bazel test //mypkg:myrules_test -c opt -``` - -But then it becomes impossible for your test suite to simultaneously contain a -test which verifies the rule behavior under `-c opt` and another test which -verifies the rule behavior under `-c dbg`. Both tests would not be able to run -in the same build! - -This can be solved by specifying the desired build flags when defining the test -rule: - -```python -myrule_c_opt_test = analysistest.make( - _myrule_c_opt_test_impl, - config_settings = { - "//command_line_option:compilation_mode": "opt", - }, -) -``` - -Normally, a target under test is analyzed given the current build flags. -Specifying `config_settings` overrides the values of the specified command line -options. (Any unspecified options will retain their values from the actual -command line). - -In the specified `config_settings` dictionary, command line flags must be -prefixed with a special placeholder value `//command_line_option:`, as is shown -above. - - -## Validating artifacts - -The main ways to check that your generated files are correct are: - -* You can write a test script in shell, Python, or another language, and - create a target of the appropriate `*_test` rule type. - -* You can use a specialized rule for the kind of test you want to perform. - -### Using a test target - -The most straightforward way to validate an artifact is to write a script and -add a `*_test` target to your BUILD file. The specific artifacts you want to -check should be data dependencies of this target. If your validation logic is -reusable for multiple tests, it should be a script that takes command line -arguments that are controlled by the test target's `args` attribute. Here's an -example that validates that the output of `myrule` from above is `"abc"`. - -`//mypkg/myrule_validator.sh`: - -```shell -if [ "$(cat $1)" = "abc" ]; then - echo "Passed" - exit 0 -else - echo "Failed" - exit 1 -fi -``` - -`//mypkg/BUILD`: - -```python -... - -myrule( - name = "mytarget", -) - -... - -# Needed for each target whose artifacts are to be checked. -sh_test( - name = "validate_mytarget", - srcs = [":myrule_validator.sh"], - args = ["$(location :mytarget.out)"], - data = [":mytarget.out"], -) -``` - -### Using a custom rule - -A more complicated alternative is to write the shell script as a template that -gets instantiated by a new rule. This involves more indirection and Starlark -logic, but leads to cleaner BUILD files. 
As a side-benefit, any argument
-preprocessing can be done in Starlark instead of the script, and the script is
-slightly more self-documenting since it uses symbolic placeholders (for
-substitutions) instead of numeric ones (for arguments).
-
-`//mypkg/myrule_validator.sh.template`:
-
-```shell
-if [ "$(cat %TARGET%)" = "abc" ]; then
-  echo "Passed"
-  exit 0
-else
-  echo "Failed"
-  exit 1
-fi
-```
-
-`//mypkg/myrule_validation.bzl`:
-
-```python
-def _myrule_validation_test_impl(ctx):
-    """Rule for instantiating myrule_validator.sh.template for a given target."""
-    exe = ctx.outputs.executable
-    target = ctx.file.target
-    ctx.actions.expand_template(output = exe,
-                                template = ctx.file._script,
-                                is_executable = True,
-                                substitutions = {
-                                    "%TARGET%": target.short_path,
-                                })
-    # This is needed to make sure the output file of myrule is visible to the
-    # resulting instantiated script.
-    return [DefaultInfo(runfiles = ctx.runfiles(files = [target]))]
-
-myrule_validation_test = rule(
-    implementation = _myrule_validation_test_impl,
-    attrs = {"target": attr.label(allow_single_file = True),
-             # You need an implicit dependency in order to access the template.
-             # A target could potentially override this attribute to modify
-             # the test logic.
-             "_script": attr.label(allow_single_file = True,
-                                   default = Label("//mypkg:myrule_validator"))},
-    test = True,
-)
-```
-
-`//mypkg/BUILD`:
-
-```python
-...
-
-myrule(
-    name = "mytarget",
-)
-
-...
-
-# Needed just once, to expose the template. Could have also used exports_files(),
-# and made the _script attribute set allow_files = True.
-filegroup(
-    name = "myrule_validator",
-    srcs = [":myrule_validator.sh.template"],
-)
-
-# Needed for each target whose artifacts are to be checked. Notice that you no
-# longer have to specify the output file name in a data attribute, or its
-# $(location) expansion in an args attribute, or the label for the script
-# (unless you want to override it).
-myrule_validation_test(
-    name = "validate_mytarget",
-    target = ":mytarget",
-)
-```
-
-Alternatively, instead of using a template expansion action, you could have
-inlined the template into the .bzl file as a string and expanded it during the
-analysis phase using the `str.format` method or `%`-formatting.
-
-## Testing Starlark utilities
-
-[Skylib](https://github.com/bazelbuild/bazel-skylib)'s
-[`unittest.bzl`](https://github.com/bazelbuild/bazel-skylib/blob/main/lib/unittest.bzl)
-framework can be used to test utility functions (that is, functions that are
-neither macros nor rule implementations). Instead of using `unittest.bzl`'s
-`analysistest` library, `unittest` may be used. For such test suites, the
-convenience function `unittest.suite()` can be used to reduce boilerplate.
-
-`//mypkg/myhelpers.bzl`:
-
-```python
-def myhelper():
-    return "abc"
-```
-
-`//mypkg/myhelpers_test.bzl`:
-
-```python
-load("@bazel_skylib//lib:unittest.bzl", "asserts", "unittest")
-load(":myhelpers.bzl", "myhelper")
-
-def _myhelper_test_impl(ctx):
-  env = unittest.begin(ctx)
-  asserts.equals(env, "abc", myhelper())
-  return unittest.end(env)
-
-myhelper_test = unittest.make(_myhelper_test_impl)
-
-# No need for a test_myhelper() setup function.
-
-def myhelpers_test_suite(name):
-  # unittest.suite() takes care of instantiating the testing rules and creating
-  # a test_suite.
-  unittest.suite(
-    name,
-    myhelper_test,
-    # ...
- ) -``` - -`//mypkg/BUILD`: - -```python -load(":myhelpers_test.bzl", "myhelpers_test_suite") - -myhelpers_test_suite(name = "myhelpers_tests") -``` - -For more examples, see Skylib's own [tests](https://github.com/bazelbuild/bazel-skylib/blob/main/tests/BUILD). diff --git a/7.6.1/rules/verbs-tutorial.mdx b/7.6.1/rules/verbs-tutorial.mdx deleted file mode 100644 index e048237..0000000 --- a/7.6.1/rules/verbs-tutorial.mdx +++ /dev/null @@ -1,155 +0,0 @@ ---- -title: 'Using Macros to Create Custom Verbs' ---- - - - -Day-to-day interaction with Bazel happens primarily through a few commands: -`build`, `test`, and `run`. At times, though, these can feel limited: you may -want to push packages to a repository, publish documentation for end-users, or -deploy an application with Kubernetes. But Bazel doesn't have a `publish` or -`deploy` command – where do these actions fit in? - -## The bazel run command - -Bazel's focus on hermeticity, reproducibility, and incrementality means the -`build` and `test` commands aren't helpful for the above tasks. These actions -may run in a sandbox, with limited network access, and aren't guaranteed to be -re-run with every `bazel build`. - -Instead, rely on `bazel run`: the workhorse for tasks that you *want* to have -side effects. Bazel users are accustomed to rules that create executables, and -rule authors can follow a common set of patterns to extend this to -"custom verbs". - -### In the wild: rules_k8s -For example, consider [`rules_k8s`](https://github.com/bazelbuild/rules_k8s), -the Kubernetes rules for Bazel. Suppose you have the following target: - -```python -# BUILD file in //application/k8s -k8s_object( - name = "staging", - kind = "deployment", - cluster = "testing", - template = "deployment.yaml", -) -``` - -The [`k8s_object` rule](https://github.com/bazelbuild/rules_k8s#usage) builds a -standard Kubernetes YAML file when `bazel build` is used on the `staging` -target. However, the additional targets are also created by the `k8s_object` -macro with names like `staging.apply` and `:staging.delete`. These build -scripts to perform those actions, and when executed with `bazel run -staging.apply`, these behave like our own `bazel k8s-apply` or `bazel -k8s-delete` commands. - -### Another example: ts_api_guardian_test - -This pattern can also be seen in the Angular project. The -[`ts_api_guardian_test` macro](https://github.com/angular/angular/blob/16ac611a8410e6bcef8ffc779f488ca4fa102155/tools/ts-api-guardian/index.bzl#L22) -produces two targets. The first is a standard `nodejs_test` target which compares -some generated output against a "golden" file (that is, a file containing the -expected output). This can be built and run with a normal `bazel -test` invocation. In `angular-cli`, you can run [one such -target](https://github.com/angular/angular-cli/blob/e1269cb520871ee29b1a4eec6e6c0e4a94f0b5fc/etc/api/BUILD) -with `bazel test //etc/api:angular_devkit_core_api`. - -Over time, this golden file may need to be updated for legitimate reasons. -Updating this manually is tedious and error-prone, so this macro also provides -a `nodejs_binary` target that updates the golden file, instead of comparing -against it. Effectively, the same test script can be written to run in "verify" -or "accept" mode, based on how it's invoked. This follows the same pattern -you've learned already: there is no native `bazel test-accept` command, but the -same effect can be achieved with -`bazel run //etc/api:angular_devkit_core_api.accept`. 
- -This pattern can be quite powerful, and turns out to be quite common once you -learn to recognize it. - -## Adapting your own rules - -[Macros](/extending/macros) are the heart of this pattern. Macros are used like -rules, but they can create several targets. Typically, they will create a -target with the specified name which performs the primary build action: perhaps -it builds a normal binary, a Docker image, or an archive of source code. In -this pattern, additional targets are created to produce scripts performing side -effects based on the output of the primary target, like publishing the -resulting binary or updating the expected test output. - -To illustrate this, wrap an imaginary rule that generates a website with -[Sphinx](https://www.sphinx-doc.org) with a macro to create an additional -target that allows the user to publish it when ready. Consider the following -existing rule for generating a website with Sphinx: - -```python -_sphinx_site = rule( - implementation = _sphinx_impl, - attrs = {"srcs": attr.label_list(allow_files = [".rst"])}, -) -``` - -Next, consider a rule like the following, which builds a script that, when run, -publishes the generated pages: - -```python -_sphinx_publisher = rule( - implementation = _publish_impl, - attrs = { - "site": attr.label(), - "_publisher": attr.label( - default = "//internal/sphinx:publisher", - executable = True, - ), - }, - executable = True, -) -``` - -Finally, define the following macro to create targets for both of the above -rules together: - -```python -def sphinx_site(name, srcs = [], **kwargs): - # This creates the primary target, producing the Sphinx-generated HTML. - _sphinx_site(name = name, srcs = srcs, **kwargs) - # This creates the secondary target, which produces a script for publishing - # the site generated above. - _sphinx_publisher(name = "%s.publish" % name, site = name, **kwargs) -``` - -In the `BUILD` files, use the macro as though it just creates the primary -target: - -```python -sphinx_site( - name = "docs", - srcs = ["index.md", "providers.md"], -) -``` - -In this example, a "docs" target is created, just as though the macro were a -standard, single Bazel rule. When built, the rule generates some configuration -and runs Sphinx to produce an HTML site, ready for manual inspection. However, -an additional "docs.publish" target is also created, which builds a script for -publishing the site. Once you check the output of the primary target, you can -use `bazel run :docs.publish` to publish it for public consumption, just like -an imaginary `bazel publish` command. - -It's not immediately obvious what the implementation of the `_sphinx_publisher` -rule might look like. Often, actions like this write a _launcher_ shell script. -This method typically involves using -[`ctx.actions.expand_template`](lib/actions#expand_template) -to write a very simple shell script, in this case invoking the publisher binary -with a path to the output of the primary target. This way, the publisher -implementation can remain generic, the `_sphinx_site` rule can just produce -HTML, and this small script is all that's necessary to combine the two -together. 
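-
-To make this concrete, below is a minimal sketch of what `_publish_impl` might
-look like. The `_template` attribute, the `publish.sh.tpl` file, and the
-`%PUBLISHER%`/`%SITE%` placeholders are illustrative assumptions, not part of
-the rules shown above:
-
-```python
-def _publish_impl(ctx):
-    # Declare the launcher script that `bazel run` will execute.
-    launcher = ctx.actions.declare_file(ctx.label.name + ".sh")
-    site_files = ctx.attr.site[DefaultInfo].files.to_list()
-    ctx.actions.expand_template(
-        template = ctx.file._template,  # assumed: a tiny publish.sh.tpl
-        output = launcher,
-        substitutions = {
-            "%PUBLISHER%": ctx.executable._publisher.short_path,
-            "%SITE%": site_files[0].short_path,
-        },
-        is_executable = True,
-    )
-    # Both the publisher binary and the generated site must be available
-    # in the runfiles when the launcher script runs.
-    runfiles = ctx.runfiles(files = site_files).merge(
-        ctx.attr._publisher[DefaultInfo].default_runfiles,
-    )
-    return [DefaultInfo(executable = launcher, runfiles = runfiles)]
-```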
-
-In `rules_k8s`, this is indeed what `.apply` does:
-[`expand_template`](https://github.com/bazelbuild/rules_k8s/blob/f10e7025df7651f47a76abf1db5ade1ffeb0c6ac/k8s/object.bzl#L213-L241)
-writes a very simple Bash script, based on
-[`apply.sh.tpl`](https://github.com/bazelbuild/rules_k8s/blob/f10e7025df7651f47a76abf1db5ade1ffeb0c6ac/k8s/apply.sh.tpl),
-which runs `kubectl` with the output of the primary target. This script can
-then be built and run with `bazel run :staging.apply`, effectively providing a
-`k8s-apply` command for `k8s_object` targets.
diff --git a/7.6.1/run/bazelrc.mdx b/7.6.1/run/bazelrc.mdx
deleted file mode 100644
index 056ccd0..0000000
--- a/7.6.1/run/bazelrc.mdx
+++ /dev/null
@@ -1,249 +0,0 @@
----
-title: 'Write bazelrc configuration files'
----
-
-
-
-Bazel accepts many options. Some options are varied frequently (for example,
-`--subcommands`) while others stay the same across several builds (such as
-`--package_path`). To avoid specifying these unchanged options for every build
-(and other commands), you can specify options in a configuration file, called
-`.bazelrc`.
-
-### Where are the `.bazelrc` files?
-
-Bazel looks for optional configuration files in the following locations,
-in the order shown below. The options are interpreted in this order, so
-options in later files can override a value from an earlier file if a
-conflict arises. All options that control which of these files are loaded are
-startup options, which means they must occur after `bazel` and
-before the command (`build`, `test`, etc).
-
-1. **The system RC file**, unless `--nosystem_rc` is present.
-
-   Path:
-
-   - On Linux/macOS/Unixes: `/etc/bazel.bazelrc`
-   - On Windows: `%ProgramData%\bazel.bazelrc`
-
-   It is not an error if this file does not exist.
-
-   If another system-specified location is required, you must build a custom
-   Bazel binary, overriding the `BAZEL_SYSTEM_BAZELRC_PATH` value in
-   [`//src/main/cpp:option_processor`](https://github.com/bazelbuild/bazel/blob/0.28.0/src/main/cpp/BUILD#L141).
-   The system-specified location may contain environment variable references,
-   such as `${VAR_NAME}` on Unix or `%VAR_NAME%` on Windows.
-
-2. **The workspace RC file**, unless `--noworkspace_rc` is present.
-
-   Path: `.bazelrc` in your workspace directory (next to the main
-   `WORKSPACE` file).
-
-   It is not an error if this file does not exist.
-
-3. **The home RC file**, unless `--nohome_rc` is present.
-
-   Path:
-
-   - On Linux/macOS/Unixes: `$HOME/.bazelrc`
-   - On Windows: `%USERPROFILE%\.bazelrc` if it exists, or `%HOME%/.bazelrc`
-
-   It is not an error if this file does not exist.
-
-4. **The user-specified RC file**, if specified with `--bazelrc=file`
-
-   This flag is optional but can also be specified multiple times.
-
-   `/dev/null` indicates that all further `--bazelrc`s will be ignored, which
-   is useful to disable the search for a user rc file, such as in release
-   builds.
-
-   For example:
-
-   ```
-   --bazelrc=x.rc --bazelrc=y.rc --bazelrc=/dev/null --bazelrc=z.rc
-   ```
-
-   - `x.rc` and `y.rc` are read.
-   - `z.rc` is ignored due to the prior `/dev/null`.
-
-In addition to this optional configuration file, Bazel looks for a global rc
-file. For more details, see the [global bazelrc section](#global-bazelrc).
-
-
-### `.bazelrc` syntax and semantics
-
-Like all UNIX "rc" files, the `.bazelrc` file is a text file with a line-based
-grammar. Empty lines and lines starting with `#` (comments) are ignored.
Each -line contains a sequence of words, which are tokenized according to the same -rules as the Bourne shell. - -#### Imports - -Lines that start with `import` or `try-import` are special: use these to load -other "rc" files. To specify a path that is relative to the workspace root, -write `import %workspace%/path/to/bazelrc`. - -The difference between `import` and `try-import` is that Bazel fails if the -`import`'ed file is missing (or can't be read), but not so for a `try-import`'ed -file. - -Import precedence: - -- Options in the imported file take precedence over options specified before - the import statement. -- Options specified after the import statement take precedence over the - options in the imported file. -- Options in files imported later take precedence over files imported earlier. - -#### Option defaults - -Most lines of a bazelrc define default option values. The first word on each -line specifies when these defaults are applied: - -- `startup`: startup options, which go before the command, and are described - in `bazel help startup_options`. -- `common`: options that should be applied to all Bazel commands that support - them. If a command does not support an option specified in this way, the - option is ignored so long as it is valid for *some* other Bazel command. - Note that this only applies to option names: If the current command accepts - an option with the specified name, but doesn't support the specified value, - it will fail. -- `always`: options that apply to all Bazel commands. If a command does not - support an option specified in this way, it will fail. -- _`command`_: Bazel command, such as `build` or `query` to which the options - apply. These options also apply to all commands that inherit from the - specified command. (For example, `test` inherits from `build`.) - -Each of these lines may be used more than once and the arguments that follow the -first word are combined as if they had appeared on a single line. (Users of CVS, -another tool with a "Swiss army knife" command-line interface, will find the -syntax similar to that of `.cvsrc`.) For example, the lines: - -```posix-terminal -build --test_tmpdir=/tmp/foo --verbose_failures - -build --test_tmpdir=/tmp/bar -``` - -are combined as: - -```posix-terminal -build --test_tmpdir=/tmp/foo --verbose_failures --test_tmpdir=/tmp/bar -``` - -so the effective flags are `--verbose_failures` and `--test_tmpdir=/tmp/bar`. - -Option precedence: - -- Options on the command line always take precedence over those in rc files. - For example, if a rc file says `build -c opt` but the command line flag is - `-c dbg`, the command line flag takes precedence. -- Within the rc file, precedence is governed by specificity: lines for a more - specific command take precedence over lines for a less specific command. - - Specificity is defined by inheritance. Some commands inherit options from - other commands, making the inheriting command more specific than the base - command. For example `test` inherits from the `build` command, so all `bazel - build` flags are valid for `bazel test`, and all `build` lines apply also to - `bazel test` unless there's a `test` line for the same option. If the rc - file says: - - ```posix-terminal - test -c dbg --test_env=PATH - - build -c opt --verbose_failures - ``` - - then `bazel build //foo` will use `-c opt --verbose_failures`, and `bazel - test //foo` will use `--verbose_failures -c dbg --test_env=PATH`. 
-
-  The inheritance (specificity) graph is:
-
-  * Every command inherits from `common`
-  * The following commands inherit from (and are more specific than)
-    `build`: `test`, `run`, `clean`, `mobile-install`, `info`,
-    `print_action`, `config`, `cquery`, and `aquery`
-  * `coverage` inherits from `test`
-
-- Two lines specifying options for the same command at equal specificity are
-  parsed in the order in which they appear within the file.
-
-- Because this precedence rule does not match the file order, it helps
-  readability if you follow the precedence order within rc files: start with
-  `common` options at the top, and end with the most-specific commands at the
-  bottom of the file. This way, the order in which the options are read is the
-  same as the order in which they are applied, which is more intuitive.
-
-The arguments specified on a line of an rc file may include arguments that are
-not options, such as the names of build targets, and so on. These, like the
-options specified in the same files, have lower precedence than their siblings
-on the command line, and are always prepended to the explicit list of
-non-option arguments.
-
-#### `--config`
-
-In addition to setting option defaults, the rc file can be used to group options
-and provide a shorthand for common groupings. This is done by adding a `:name`
-suffix to the command. These options are ignored by default, but will be
-included when the option `--config=name` is present,
-either on the command line or in a `.bazelrc` file, recursively, even inside of
-another config definition. The options specified by `command:name` will only be
-expanded for applicable commands, in the precedence order described above.
-
-Note: Configs can be defined in any `.bazelrc` file, and all lines of
-the form `command:name` (for applicable commands) will be expanded, across the
-different rc files. In order to avoid name conflicts, we suggest that configs
-defined in personal rc files start with an underscore (`_`) to avoid
-unintentional name sharing.
-
-`--config=foo` expands to the options defined in
-[the rc files](#bazelrc-file-locations) "in-place" so that the options
-specified for the config have the same precedence that the `--config=foo` option
-had.
-
-This syntax does not extend to the use of `startup` to set
-[startup options](#option-defaults). Setting
-`startup:config-name --some_startup_option` in the .bazelrc will be ignored.
-
-#### Example
-
-Here's an example `~/.bazelrc` file:
-
-```
-# Bob's Bazel option defaults
-
-startup --host_jvm_args=-XX:-UseParallelGC
-import /home/bobs_project/bazelrc
-build --show_timestamps --keep_going --jobs 600
-build --color=yes
-query --keep_going
-
-# Definition of --config=memcheck
-build:memcheck --strip=never --test_timeout=3600
-```
-
-### Other files governing Bazel's behavior
-
-#### `.bazelignore`
-
-You can specify directories within the workspace
-that you want Bazel to ignore, such as related projects
-that use other build systems. Place a file called
-`.bazelignore` at the root of the workspace
-and add the directories you want Bazel to ignore, one per
-line. Entries are relative to the workspace root.
-
-### The global bazelrc file
-
-Bazel reads optional bazelrc files in this order:
-
-1. System rc-file located at `etc/bazel.bazelrc`.
-2. Workspace rc-file located at `$workspace/tools/bazel.rc`.
-3. Home rc-file located at `$HOME/.bazelrc`.
-
-Each bazelrc file listed here has a corresponding flag which can be used to
-disable them (e.g.
`--nosystem_rc`, `--noworkspace_rc`, `--nohome_rc`). You can -also make Bazel ignore all bazelrcs by passing the `--ignore_all_rc_files` -startup option. diff --git a/7.6.1/run/client-server.mdx b/7.6.1/run/client-server.mdx deleted file mode 100644 index 3819034..0000000 --- a/7.6.1/run/client-server.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: 'Client/server implementation' ---- - - - -The Bazel system is implemented as a long-lived server process. This allows it -to perform many optimizations not possible with a batch-oriented implementation, -such as caching of BUILD files, dependency graphs, and other metadata from one -build to the next. This improves the speed of incremental builds, and allows -different commands, such as `build` and `query` to share the same cache of -loaded packages, making queries very fast. - -When you run `bazel`, you're running the client. The client finds the server -based on the output base, which by default is determined by the path of the base -workspace directory and your userid, so if you build in multiple workspaces, -you'll have multiple output bases and thus multiple Bazel server processes. -Multiple users on the same workstation can build concurrently in the same -workspace because their output bases will differ (different userids). - -If the client cannot find a running server instance, it starts a new one. It -does this by checking if the output base already exists, implying the blaze -archive has already been unpacked. Otherwise if the output base doesn't exist, -the client unzips the archive's files and sets their `mtime`s to a date 9 years -in the future. Once installed, the client confirms that the `mtime`s of the -unzipped files are equal to the far off date to ensure no installation tampering -has occurred. - -The server process will stop after a period of inactivity (3 hours, by default, -which can be modified using the startup option `--max_idle_secs`). For the most -part, the fact that there is a server running is invisible to the user, but -sometimes it helps to bear this in mind. For example, if you're running scripts -that perform a lot of automated builds in different directories, it's important -to ensure that you don't accumulate a lot of idle servers; you can do this by -explicitly shutting them down when you're finished with them, or by specifying -a short timeout period. - -The name of a Bazel server process appears in the output of `ps x` or `ps -e f` -as bazel(dirname), where _dirname_ is the basename of the -directory enclosing the root of your workspace directory. For example: - -```posix-terminal -ps -e f -16143 ? Sl 3:00 bazel(src-johndoe2) -server -Djava.library.path=... -``` - -This makes it easier to find out which server process belongs to a given -workspace. (Beware that with certain other options to `ps`, Bazel server -processes may be named just `java`.) Bazel servers can be stopped using the -[shutdown](/docs/user-manual#shutdown) command. - -When running `bazel`, the client first checks that the server is the appropriate -version; if not, the server is stopped and a new one started. This ensures that -the use of a long-running server process doesn't interfere with proper -versioning. diff --git a/7.6.1/run/scripts.mdx b/7.6.1/run/scripts.mdx deleted file mode 100644 index 8f31490..0000000 --- a/7.6.1/run/scripts.mdx +++ /dev/null @@ -1,126 +0,0 @@ ---- -title: 'Calling Bazel from scripts' ---- - - - -You can call Bazel from scripts to perform a build, run tests, or query -the dependency graph. 
Bazel has been designed to enable effective scripting, but
-this section lists some details to bear in mind to make your scripts more
-robust.
-
-### Choosing the output base
-
-The `--output_base` option controls where the Bazel process should write the
-outputs of a build to, as well as various working files used internally by
-Bazel, one of which is a lock that guards against concurrent mutation of the
-output base by multiple Bazel processes.
-
-Choosing the correct output base directory for your script depends on several
-factors. If you need to put the build outputs in a specific location, this will
-dictate the output base you need to use. If you are making a "read only" call to
-Bazel (such as `bazel query`), the locking factors will be more important. In
-particular, if you need to run multiple instances of your script concurrently,
-you will need to give each one a different (or random) output base.
-
-If you use the default output base value, you will be contending for the same
-lock used by the user's interactive Bazel commands. If the user issues
-long-running commands such as builds, your script will have to wait for those
-commands to complete before it can continue.
-
-### Notes about server mode
-
-By default, Bazel uses a long-running [server process](/run/client-server) as an
-optimization. When running Bazel in a script, don't forget to call `shutdown`
-when you're finished with the server, or specify `--max_idle_secs=5` so that
-idle servers shut themselves down promptly.
-
-### What exit code will I get?
-
-Bazel attempts to differentiate failures due to the source code under
-consideration from external errors that prevent Bazel from executing properly.
-Bazel execution can result in the following exit codes:
-
-**Exit codes common to all commands:**
-
-- `0` - Success
-- `2` - Command Line Problem, Bad or Illegal flags or command combination, or
-  Bad Environment Variables. Your command line must be modified.
-- `8` - Build Interrupted but we terminated with an orderly shutdown.
-- `9` - The server lock is held and `--noblock_for_lock` was passed.
-- `32` - External Environment Failure not on this machine.
-- `33` - Bazel ran out of memory and crashed. You need to modify your command line.
-- `34` - Reserved for Google-internal use.
-- `35` - Reserved for Google-internal use.
-- `36` - Local Environmental Issue, suspected permanent.
-- `37` - Unhandled Exception / Internal Bazel Error.
-- `38` - Reserved for Google-internal use.
-- `39` - Blobs required by Bazel are evicted from Remote Cache.
-- `41-44` - Reserved for Google-internal use.
-- `45` - Error publishing results to the Build Event Service.
-- `47` - Reserved for Google-internal use.
-
-**Return codes for commands `bazel build`, `bazel test`:**
-
-- `1` - Build failed.
-- `3` - Build OK, but some tests failed or timed out.
-- `4` - Build successful but no tests were found even though testing was
-  requested.
-
-**For `bazel run`:**
-
-- `1` - Build failed.
-- If the build succeeds but the executed subprocess returns a non-zero exit
-  code it will be the exit code of the command as well.
-
-**For `bazel query`:**
-
-- `3` - Partial success, but the query encountered 1 or more errors in the
-  input BUILD file set and therefore the results of the operation are not 100%
-  reliable. This is likely due to a `--keep_going` option on the command line.
-- `7` - Command failure.
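-
-As an illustration, a wrapper script can branch on these codes to separate
-"the code is broken" from "the environment is broken". This is a minimal
-sketch in Python (the target label is a placeholder), not part of Bazel itself:
-
-```python
-import subprocess
-import sys
-
-result = subprocess.run(["bazel", "test", "//foo:all_tests"])
-
-if result.returncode == 0:
-    print("build and tests passed")
-elif result.returncode in (1, 3):
-    # Source-level problem: the build or its tests failed.
-    sys.exit(result.returncode)
-else:
-    # Environment, command-line, or Bazel-internal problem.
-    print("infrastructure failure, exit code:", result.returncode, file=sys.stderr)
-    sys.exit(result.returncode)
-```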
- -Future Bazel versions may add additional exit codes, replacing generic failure -exit code `1` with a different non-zero value with a particular meaning. -However, all non-zero exit values will always constitute an error. - - -### Reading the .bazelrc file - -By default, Bazel reads the [`.bazelrc` file](/run/bazelrc) from the base -workspace directory or the user's home directory. Whether or not this is -desirable is a choice for your script; if your script needs to be perfectly -hermetic (such as when doing release builds), you should disable reading the -.bazelrc file by using the option `--bazelrc=/dev/null`. If you want to perform -a build using the user's preferred settings, the default behavior is better. - -### Command log - -The Bazel output is also available in a command log file which you can find with -the following command: - -```posix-terminal -bazel info command_log -``` - -The command log file contains the interleaved stdout and stderr streams of the -most recent Bazel command. Note that running `bazel info` will overwrite the -contents of this file, since it then becomes the most recent Bazel command. -However, the location of the command log file will not change unless you change -the setting of the `--output_base` or `--output_user_root` options. - -### Parsing output - -The Bazel output is quite easy to parse for many purposes. Two options that may -be helpful for your script are `--noshow_progress` which suppresses progress -messages, and --show_result n, which controls whether or -not "build up-to-date" messages are printed; these messages may be parsed to -discover which targets were successfully built, and the location of the output -files they created. Be sure to specify a very large value of _n_ if you rely on -these messages. - -## Troubleshooting performance by profiling - -See the [Performance Profiling](/rules/performance#performance-profiling) section. diff --git a/7.6.1/start/android-app.mdx b/7.6.1/start/android-app.mdx deleted file mode 100644 index 1a2a94f..0000000 --- a/7.6.1/start/android-app.mdx +++ /dev/null @@ -1,425 +0,0 @@ ---- -title: 'Bazel Tutorial: Build an Android App' ---- - - -**Note:** There are known limitations on using Bazel for building Android apps. -Visit the Github [team-Android hotlist](https://github.com/bazelbuild/bazel/issues?q=is%3Aissue+is%3Aopen+label%3Ateam-Android) to see the list of known issues. While the Bazel team and Open Source Software (OSS) contributors work actively to address known issues, users should be aware that Android Studio does not officially support Bazel projects. - -This tutorial covers how to build a simple Android app using Bazel. - -Bazel supports building Android apps using the -[Android rules](/reference/be/android). - -This tutorial is intended for Windows, macOS and Linux users and does not -require experience with Bazel or Android app development. You do not need to -write any Android code in this tutorial. - -## What you'll learn - -In this tutorial you learn how to: - -* Set up your environment by installing Bazel and Android Studio, and - downloading the sample project. -* Set up a Bazel [workspace](/reference/be/workspace) that contains the source code - for the app and a `WORKSPACE` file that identifies the top level of the - workspace directory. -* Update the `WORKSPACE` file to contain references to the required - external dependencies, like the Android SDK. -* Create a `BUILD` file. -* Build the app with Bazel. -* Deploy and run the app on an Android emulator or physical device. 
- -## Before you begin - -### Install Bazel - -Before you begin the tutorial, install the following software: - -* **Bazel.** To install, follow the [installation instructions](/install). -* **Android Studio.** To install, follow the steps to [download Android - Studio](https://developer.android.com/sdk/index.html). - Execute the setup wizard to download the SDK and configure your environment. -* (Optional) **Git.** Use `git` to download the Android app project. - -### Get the sample project - -For the sample project, use a basic Android app project in -[Bazel's examples repository](https://github.com/bazelbuild/examples). - -This app has a single button that prints a greeting when clicked: - -![Button greeting](/docs/images/android_tutorial_app.png "Tutorial app button greeting") - -**Figure 1.** Android app button greeting. - -Clone the repository with `git` (or [download the ZIP file -directly](https://github.com/bazelbuild/examples/archive/master.zip)): - -```posix-terminal -git clone https://github.com/bazelbuild/examples -``` - -The sample project for this tutorial is in `examples/android/tutorial`. For -the rest of the tutorial, you will be executing commands in this directory. - -### Review the source files - -Take a look at the source files for the app. - -``` -. -├── README.md -└── src - └── main - ├── AndroidManifest.xml - └── java - └── com - └── example - └── bazel - ├── AndroidManifest.xml - ├── Greeter.java - ├── MainActivity.java - └── res - ├── layout - │ └── activity_main.xml - └── values - ├── colors.xml - └── strings.xml -``` - -The key files and directories are: - -| Name | Location | -| ----------------------- | ---------------------------------------------------------------------------------------- | -| Android manifest files | `src/main/AndroidManifest.xml` and `src/main/java/com/example/bazel/AndroidManifest.xml` | -| Android source files | `src/main/java/com/example/bazel/MainActivity.java` and `Greeter.java` | -| Resource file directory | `src/main/java/com/example/bazel/res/` | - - -## Build with Bazel - -### Set up the workspace - -A [workspace](/concepts/build-ref#workspace) is a directory that contains the -source files for one or more software projects, and has a `WORKSPACE` file at -its root. - -The `WORKSPACE` file may be empty or may contain references to [external -dependencies](/docs/external) required to build your project. - -First, run the following command to create an empty `WORKSPACE` file: - -| OS | Command | -| ------------------------ | ----------------------------------- | -| Linux, macOS | `touch WORKSPACE` | -| Windows (Command Prompt) | `type nul > WORKSPACE` | -| Windows (PowerShell) | `New-Item WORKSPACE -ItemType file` | - -### Running Bazel - -You can now check if Bazel is running correctly with the command: - -```posix-terminal -bazel info workspace -``` - -If Bazel prints the path of the current directory, you're good to go! If the -`WORKSPACE` file does not exist, you may see an error message like: - -``` -ERROR: The 'info' command is only supported from within a workspace. -``` - -### Integrate with the Android SDK - -Bazel needs to run the Android SDK -[build tools](https://developer.android.com/tools/revisions/build-tools.html) -to build the app. This means that you need to add some information to your -`WORKSPACE` file so that Bazel knows where to find them. 
-
-Add the following line to your `WORKSPACE` file:
-
-```python
-android_sdk_repository(name = "androidsdk")
-```
-
-This will use the Android SDK at the path referenced by the `ANDROID_HOME`
-environment variable, and automatically detect the highest API level and the
-latest version of build tools installed within that location.
-
-You can set the `ANDROID_HOME` variable to the location of the Android SDK. Find
-the path to the installed SDK using Android Studio's [SDK
-Manager](https://developer.android.com/studio/intro/update#sdk-manager).
-Assuming the SDK is installed in its default location, you can use the following
-commands to set the `ANDROID_HOME` variable:
-
-| OS                       | Command                                             |
-| ------------------------ | --------------------------------------------------- |
-| Linux                    | `export ANDROID_HOME=$HOME/Android/Sdk/`            |
-| macOS                    | `export ANDROID_HOME=$HOME/Library/Android/sdk`     |
-| Windows (Command Prompt) | `set ANDROID_HOME=%LOCALAPPDATA%\Android\Sdk`       |
-| Windows (PowerShell)     | `$env:ANDROID_HOME="$env:LOCALAPPDATA\Android\Sdk"` |
-
-The above commands set the variable only for the current shell session. To make
-them permanent, run the following commands:
-
-| OS                       | Command                                             |
-| ------------------------ | --------------------------------------------------- |
-| Linux                    | `echo "export ANDROID_HOME=$HOME/Android/Sdk/" >> ~/.bashrc` |
-| macOS                    | `echo "export ANDROID_HOME=$HOME/Library/Android/sdk" >> ~/.bashrc` |
-| Windows (Command Prompt) | `setx ANDROID_HOME "%LOCALAPPDATA%\Android\Sdk"`    |
-| Windows (PowerShell)     | `[System.Environment]::SetEnvironmentVariable('ANDROID_HOME', "$env:LOCALAPPDATA\Android\Sdk", [System.EnvironmentVariableTarget]::User)` |
-
-You can also explicitly specify the absolute path of the Android SDK,
-the API level, and the version of build tools to use by including the `path`,
-`api_level`, and `build_tools_version` attributes. If `api_level` and
-`build_tools_version` are not specified, the `android_sdk_repository` rule will
-use the latest version of each that is available in the SDK. You can specify any
-combination of these attributes, as long as they are present in the SDK, for
-example:
-
-```python
-android_sdk_repository(
-    name = "androidsdk",
-    path = "/path/to/Android/sdk",
-    api_level = 25,
-    build_tools_version = "30.0.3"
-)
-```
-
-On Windows, note that the `path` attribute must use the mixed-style path, that
-is, a Windows path with forward slashes:
-
-```python
-android_sdk_repository(
-    name = "androidsdk",
-    path = "c:/path/to/Android/sdk",
-)
-```
-
-**Optional:** If you want to compile native code into your Android app, you
-also need to download the [Android
-NDK](https://developer.android.com/ndk/downloads/index.html)
-and tell Bazel where to find it by adding the following line to your `WORKSPACE` file:
-
-```python
-android_ndk_repository(name = "androidndk")
-```
-
-Similar to `android_sdk_repository`, the path to the Android NDK is inferred
-from the `ANDROID_NDK_HOME` environment variable by default. The path can also
-be explicitly specified with a `path` attribute on `android_ndk_repository`.
-
-For more information, read [Using the Android Native Development Kit with
-Bazel](/docs/android-ndk).
-
-`api_level` is the version of the Android API that the SDK and NDK
-target - for example, 23 for Android 6.0 and 25 for Android 7.1. If not
-explicitly set, `api_level` defaults to the highest available API level for
-`android_sdk_repository` and `android_ndk_repository`.
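-
-If you want the repository setup to be insensitive to whichever NDK happens to
-be installed, you can pin the level on the NDK rule explicitly as well. A
-minimal sketch (the value `25` is only illustrative; use a level that is
-actually present in your installed NDK):
-
-```python
-# Pin the NDK to an explicit API level instead of relying on auto-detection.
-# The level 25 here is an illustrative value, not a recommendation.
-android_ndk_repository(
-    name = "androidndk",
-    api_level = 25,
-)
-```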
- -It's not necessary to set the API levels to the same value for the SDK and NDK. -[This page](https://developer.android.com/ndk/guides/stable_apis.html) -contains a map from Android releases to NDK-supported API levels. - -### Create a BUILD file - -A [`BUILD` file](/concepts/build-files) describes the relationship -between a set of build outputs, like compiled Android resources from `aapt` or -class files from `javac`, and their dependencies. These dependencies may be -source files (Java, C++) in your workspace or other build outputs. `BUILD` files -are written in a language called **Starlark**. - -`BUILD` files are part of a concept in Bazel known as the *package hierarchy*. -The package hierarchy is a logical structure that overlays the directory -structure in your workspace. Each [package](/concepts/build-ref#packages) is a -directory (and its subdirectories) that contains a related set of source files -and a `BUILD` file. The package also includes any subdirectories, excluding -those that contain their own `BUILD` file. The *package name* is the path to the -`BUILD` file relative to the `WORKSPACE`. - -Note that Bazel's package hierarchy is conceptually different from the Java -package hierarchy of your Android App directory where the `BUILD` file is -located, although the directories may be organized identically. - -For the simple Android app in this tutorial, the source files in `src/main/` -comprise a single Bazel package. A more complex project may have many nested -packages. - -#### Add an android_library rule - -A `BUILD` file contains several different types of declarations for Bazel. The -most important type is the -[build rule](/concepts/build-files#types-of-build-rules), which tells -Bazel how to build an intermediate or final software output from a set of source -files or other dependencies. Bazel provides two build rules, -[`android_library`](/reference/be/android#android_library) and -[`android_binary`](/reference/be/android#android_binary), that you can use to -build an Android app. - -For this tutorial, you'll first use the -`android_library` rule to tell Bazel to build an [Android library -module](http://developer.android.com/tools/projects/index.html#LibraryProjects) -from the app source code and resource files. You'll then use the -`android_binary` rule to tell Bazel how to build the Android application package. - -Create a new `BUILD` file in the `src/main/java/com/example/bazel` directory, -and declare a new `android_library` target: - -`src/main/java/com/example/bazel/BUILD`: - -```python -package( - default_visibility = ["//src:__subpackages__"], -) - -android_library( - name = "greeter_activity", - srcs = [ - "Greeter.java", - "MainActivity.java", - ], - manifest = "AndroidManifest.xml", - resource_files = glob(["res/**"]), -) -``` - -The `android_library` build rule contains a set of attributes that specify the -information that Bazel needs to build a library module from the source files. -Note also that the name of the rule is `greeter_activity`. You'll reference the -rule using this name as a dependency in the `android_binary` rule. - -#### Add an android_binary rule - -The [`android_binary`](/reference/be/android#android_binary) rule builds -the Android application package (`.apk` file) for your app. 
-
-Create a new `BUILD` file in the `src/main/` directory,
-and declare a new `android_binary` target:
-
-`src/main/BUILD`:
-
-```python
-android_binary(
-    name = "app",
-    manifest = "AndroidManifest.xml",
-    deps = ["//src/main/java/com/example/bazel:greeter_activity"],
-)
-```
-
-Here, the `deps` attribute references the output of the `greeter_activity` rule
-you added to the `BUILD` file above. This means that when Bazel builds the
-output of this rule, it first checks to see whether the output of the
-`greeter_activity` library rule has been built and is up-to-date. If not, Bazel
-builds it and then uses that output to build the application package file.
-
-Now, save and close the file.
-
-### Build the app
-
-Try building the app! Run the following command to build the
-`android_binary` target:
-
-```posix-terminal
-bazel build //src/main:app
-```
-
-The [`build`](/docs/user-manual#build) subcommand instructs Bazel to build the
-target that follows. The target is specified as the name of a build rule inside
-a `BUILD` file, along with the package path relative to your workspace
-directory. For this example, the target is `app` and the package path is
-`//src/main/`.
-
-Note that you can sometimes omit the package path or target name, depending on
-your current working directory at the command line and the name of the target.
-For more details about target labels and paths, see [Labels](/concepts/labels).
-
-Bazel will start to build the sample app. During the build process, its output
-will appear similar to the following:
-
-```bash
-INFO: Analysed target //src/main:app (0 packages loaded, 0 targets configured).
-INFO: Found 1 target...
-Target //src/main:app up-to-date:
-  bazel-bin/src/main/app_deploy.jar
-  bazel-bin/src/main/app_unsigned.apk
-  bazel-bin/src/main/app.apk
-```
-
-#### Locate the build outputs
-
-Bazel puts the outputs of both intermediate and final build operations in a set
-of per-user, per-workspace output directories. These directories are symlinked
-from the following locations at the top level of the project directory, where
-the `WORKSPACE` file is:
-
-* `bazel-bin` stores binary executables and other runnable build outputs
-* `bazel-genfiles` stores intermediary source files that are generated by
-  Bazel rules
-* `bazel-out` stores other types of build outputs
-
-Bazel stores the Android `.apk` file generated using the `android_binary` rule
-in the `bazel-bin/src/main` directory, where the subdirectory name `src/main` is
-derived from the name of the Bazel package.
-
-At a command prompt, list the contents of this directory and find the `app.apk`
-file:
-
-| OS                       | Command                  |
-| ------------------------ | ------------------------ |
-| Linux, macOS             | `ls bazel-bin/src/main`  |
-| Windows (Command Prompt) | `dir bazel-bin\src\main` |
-| Windows (PowerShell)     | `ls bazel-bin\src\main`  |
-
-
-### Run the app
-
-You can now deploy the app to a connected Android device or emulator from the
-command line using the [`bazel
-mobile-install`](/docs/user-manual#mobile-install) command. This command uses
-the Android Debug Bridge (`adb`) to communicate with the device. You must set up
-your device to use `adb` following the instructions in [Android Debug
-Bridge](http://developer.android.com/tools/help/adb.html) before deployment. You
-can also choose to install the app on the Android emulator included in Android
-Studio. Make sure the emulator is running before executing the command below.
- -Enter the following: - -```posix-terminal -bazel mobile-install //src/main:app -``` - -Next, find and launch the "Bazel Tutorial App": - -![Bazel tutorial app](/docs/images/android_tutorial_before.png "Bazel tutorial app") - -**Figure 2.** Bazel tutorial app. - -**Congratulations! You have just installed your first Bazel-built Android app.** - -Note that the `mobile-install` subcommand also supports the -[`--incremental`](/docs/user-manual#mobile-install) flag that can be used to -deploy only those parts of the app that have changed since the last deployment. - -It also supports the `--start_app` flag to start the app immediately upon -installing it. - -## Further reading - -For more details, see these pages: - -* Open issues on [GitHub](https://github.com/bazelbuild/bazel/issues) -* More information on [mobile-install](/docs/mobile-install) -* Integrate external dependencies like AppCompat, Guava and JUnit from Maven - repositories using [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external) -* Run Robolectric tests with the [robolectric-bazel](https://github.com/robolectric/robolectric-bazel) - integration. -* Testing your app with [Android instrumentation tests](/docs/android-instrumentation-test) -* Integrating C and C++ code into your Android app with the [NDK](/docs/android-ndk) -* See more Bazel example projects of: - * [a Kotlin app](https://github.com/bazelbuild/rules_jvm_external/tree/master/examples/android_kotlin_app) - * [Robolectric testing](https://github.com/bazelbuild/rules_jvm_external/tree/master/examples/android_local_test) - * [Espresso testing](https://github.com/bazelbuild/rules_jvm_external/tree/master/examples/android_instrumentation_test) - -Happy building! diff --git a/7.6.1/start/cpp.mdx b/7.6.1/start/cpp.mdx deleted file mode 100644 index 70b3ba4..0000000 --- a/7.6.1/start/cpp.mdx +++ /dev/null @@ -1,405 +0,0 @@ ---- -title: 'Bazel Tutorial: Build a C++ Project' ---- - - - -## Introduction - -New to Bazel? You’re in the right place. Follow this First Build tutorial for a -simplified introduction to using Bazel. This tutorial defines key terms as they -are used in Bazel’s context and walks you through the basics of the Bazel -workflow. Starting with the tools you need, you will build and run three -projects with increasing complexity and learn how and why they get more complex. - -While Bazel is a [build system](https://bazel.build/basics/build-systems) that -supports multi-language builds, this tutorial uses a C++ project as an example -and provides the general guidelines and flow that apply to most languages. - -Estimated completion time: 30 minutes. - -### Prerequisites - -Start by [installing Bazel](https://bazel.build/install), if you haven’t -already. This tutorial uses Git for source control, so for best results -[install Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) as -well. - -Next, retrieve the sample project from Bazel's GitHub repository by running the -following in your command-line tool of choice: - -```posix-terminal -git clone https://github.com/bazelbuild/examples -``` - -The sample project for this tutorial is in the `examples/cpp-tutorial` directory. 
-
-Take a look below at how it’s structured:
-
-```
-examples
-└── cpp-tutorial
-    ├──stage1
-    │  ├── main
-    │  │   ├── BUILD
-    │  │   └── hello-world.cc
-    │  └── WORKSPACE
-    ├──stage2
-    │  ├── main
-    │  │   ├── BUILD
-    │  │   ├── hello-world.cc
-    │  │   ├── hello-greet.cc
-    │  │   └── hello-greet.h
-    │  └── WORKSPACE
-    └──stage3
-       ├── main
-       │   ├── BUILD
-       │   ├── hello-world.cc
-       │   ├── hello-greet.cc
-       │   └── hello-greet.h
-       ├── lib
-       │   ├── BUILD
-       │   ├── hello-time.cc
-       │   └── hello-time.h
-       └── WORKSPACE
-```
-
-There are three sets of files, each set representing a stage in this tutorial.
-In the first stage, you will build a single
-[target](https://bazel.build/reference/glossary#target) residing in a single
-[package](https://bazel.build/reference/glossary#package). In the second stage, you will
-build both a binary and a library from a single package. In
-the third and final stage, you will build a project with multiple packages and
-build it with multiple targets.
-
-### Summary: Introduction
-
-By installing Bazel (and Git) and cloning the repository for this tutorial, you
-have laid the foundation for your first build with Bazel. Continue to the next
-section to define some terms and set up your [workspace](https://bazel.build/reference/glossary#workspace).
-
-## Getting started
-
-### Set up the workspace
-
-Before you can build a project, you need to set up its workspace. A workspace is
-a directory that holds your project's source files and Bazel's build outputs. It
-also contains these significant files:
-
-* The [`WORKSPACE` file](https://bazel.build/reference/glossary#workspace-file),
-  which identifies the directory and its contents as a Bazel workspace and
-  lives at the root of the project's directory structure.
-* One or more [`BUILD` files](https://bazel.build/reference/glossary#build-file),
-  which tell Bazel how to build different parts of the project. A
-  directory within the workspace that contains a `BUILD` file is a
-  [package](https://bazel.build/reference/glossary#package). (More on packages
-  later in this tutorial.)
-
-In future projects, to designate a directory as a Bazel workspace, create an
-empty file named `WORKSPACE` in that directory. For the purposes of this tutorial,
-a `WORKSPACE` file is already present in each stage.
-
-**NOTE**: When Bazel builds the project, all inputs must be in
-the same workspace. Files residing in different workspaces are independent of
-one another unless linked. More detailed information about workspace rules can
-be found in [this guide](https://bazel.build/reference/be/workspace).
-
-
-### Understand the BUILD file
-
-
-A `BUILD` file contains several different types of instructions for Bazel. Each
-`BUILD` file requires at least one [rule](https://bazel.build/reference/glossary#rule)
-as a set of instructions, which tells Bazel how to build the desired outputs,
-such as executable binaries or libraries. Each instance of a build rule in the
-`BUILD` file is called a [target](https://bazel.build/reference/glossary#target)
-and points to a specific set of source files and [dependencies](https://bazel.build/reference/glossary#dependency).
-A target can also point to other targets.
-
-Take a look at the `BUILD` file in the `cpp-tutorial/stage1/main` directory:
-
-```
-cc_binary(
-    name = "hello-world",
-    srcs = ["hello-world.cc"],
-)
-```
-
-In our example, the `hello-world` target instantiates Bazel's built-in
-[cc_binary rule](https://bazel.build/reference/be/c-cpp#cc_binary).
-The rule tells Bazel to build a self-contained executable binary from the
-`hello-world.cc` source file with no dependencies.
-
-### Summary: getting started
-
-Now you are familiar with some key terms, and what they mean in the context of
-this project and Bazel in general. In the next section, you will build and test
-Stage 1 of the project.
-
-
-## Stage 1: single target, single package
-
-It’s time to build the first part of the project. For a visual reference, the
-structure of the Stage 1 section of the project is:
-
-```
-examples
-└── cpp-tutorial
-    └──stage1
-       ├── main
-       │   ├── BUILD
-       │   └── hello-world.cc
-       └── WORKSPACE
-```
-
-Run the following to move to the `cpp-tutorial/stage1` directory:
-
-```posix-terminal
-cd cpp-tutorial/stage1
-```
-
-Next, run:
-
-```posix-terminal
-bazel build //main:hello-world
-```
-
-In the target label, the `//main:` part is the location of the `BUILD` file
-relative to the root of the workspace, and `hello-world` is the target name in
-the `BUILD` file.
-
-Bazel produces something that looks like this:
-
-```
-INFO: Found 1 target...
-Target //main:hello-world up-to-date:
-  bazel-bin/main/hello-world
-INFO: Elapsed time: 2.267s, Critical Path: 0.25s
-```
-
-You just built your first Bazel target. Bazel places build outputs in the
-`bazel-bin` directory at the root of the
-workspace.
-
-Now test your freshly built binary by running:
-
-```posix-terminal
-bazel-bin/main/hello-world
-```
-
-This results in a printed “`Hello world`” message.
-
-Here’s the dependency graph of Stage 1:
-
-![Dependency graph for hello-world displays a single target with a single source file.](/docs/images/cpp-tutorial-stage1.png "Dependency graph for hello-world displays a single target with a single source file.")
-
-
-### Summary: stage 1
-
-Now that you have completed your first build, you have a basic idea of how a build
-is structured. In the next stage, you will add complexity by adding another
-target.
-
-## Stage 2: multiple build targets
-
-While a single target is sufficient for small projects, you may want to split
-larger projects into multiple targets and packages. This allows for fast
-incremental builds – that is, Bazel only rebuilds what's changed – and speeds up your
-builds by building multiple parts of a project at once. This stage of the
-tutorial adds a target, and the next adds a package.
-
-This is the directory you are working with for Stage 2:
-
-```
- ├──stage2
- │ ├── main
- │ │ ├── BUILD
- │ │ ├── hello-world.cc
- │ │ ├── hello-greet.cc
- │ │ └── hello-greet.h
- │ └── WORKSPACE
-```
-
-Take a look below at the `BUILD` file in the `cpp-tutorial/stage2/main` directory:
-
-```
-cc_library(
-    name = "hello-greet",
-    srcs = ["hello-greet.cc"],
-    hdrs = ["hello-greet.h"],
-)
-
-cc_binary(
-    name = "hello-world",
-    srcs = ["hello-world.cc"],
-    deps = [
-        ":hello-greet",
-    ],
-)
-```
-
-With this `BUILD` file, Bazel first builds the `hello-greet` library
-(using Bazel's built-in [cc_library rule](https://bazel.build/reference/be/c-cpp#cc_library)),
-then the `hello-world` binary. The `deps` attribute in
-the `hello-world` target tells Bazel that the `hello-greet`
-library is required to build the `hello-world` binary.
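-
-To make the mapping between attributes and files concrete, here is a minimal
-sketch of what the `hello-greet` pair could look like. This is illustrative
-rather than the repository's exact source; the `get_greet` name matches the
-test shown in the [C++ build use cases](https://bazel.build/tutorials/cpp-use-cases) guide:
-
-```cpp
-// hello-greet.h -- the public interface exported through the hdrs attribute.
-#ifndef MAIN_HELLO_GREET_H_
-#define MAIN_HELLO_GREET_H_
-
-#include <string>
-
-// Returns a greeting such as "Hello Bazel" for the given name.
-std::string get_greet(const std::string& who);
-
-#endif  // MAIN_HELLO_GREET_H_
-```
-
-```cpp
-// hello-greet.cc -- the implementation listed in the srcs attribute.
-#include "main/hello-greet.h"
-
-std::string get_greet(const std::string& who) {
-  return "Hello " + who;
-}
-```
-
-Because `hello-world.cc` only needs the declaration, it can include
-`main/hello-greet.h` and link against the library through `deps`.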
-
-Before you can build this new version of the project, you need to change
-directories, switching to the `cpp-tutorial/stage2` directory by running:
-
-```posix-terminal
-cd ../stage2
-```
-
-Now you can build the new binary using the following familiar command:
-
-```posix-terminal
-bazel build //main:hello-world
-```
-
-Once again, Bazel produces something that looks like this:
-
-```
-INFO: Found 1 target...
-Target //main:hello-world up-to-date:
-  bazel-bin/main/hello-world
-INFO: Elapsed time: 2.399s, Critical Path: 0.30s
-```
-
-Now you can test your freshly built binary, which returns another “`Hello world`”:
-
-```posix-terminal
-bazel-bin/main/hello-world
-```
-
-If you now modify `hello-greet.cc` and rebuild the project, Bazel only recompiles
-that file.
-
-Looking at the dependency graph, you can see that `hello-world` depends on an extra input
-named `hello-greet`:
-
-![Dependency graph for `hello-world` displays dependency changes after modification to the file.](/docs/images/cpp-tutorial-stage2.png "Dependency graph for `hello-world` displays dependency changes after modification to the file.")
-
-### Summary: stage 2
-
-You've now built the project with two targets. The `hello-world` target builds
-one source file and depends on one other target (`//main:hello-greet`), which
-builds two additional source files. In the next section, take it a step further
-and add another package.
-
-## Stage 3: multiple packages
-
-This next stage adds another layer of complexity and builds a project with
-multiple packages. Take a look below at the structure and contents of the
-`cpp-tutorial/stage3` directory:
-
-```
-└──stage3
-  ├── main
-  │ ├── BUILD
-  │ ├── hello-world.cc
-  │ ├── hello-greet.cc
-  │ └── hello-greet.h
-  ├── lib
-  │ ├── BUILD
-  │ ├── hello-time.cc
-  │ └── hello-time.h
-  └── WORKSPACE
-```
-
-You can see that now there are two sub-directories, and each contains a `BUILD`
-file. Therefore, to Bazel, the workspace now contains two packages: `lib` and
-`main`.
-
-Take a look at the `lib/BUILD` file:
-
-```
-cc_library(
-    name = "hello-time",
-    srcs = ["hello-time.cc"],
-    hdrs = ["hello-time.h"],
-    visibility = ["//main:__pkg__"],
-)
-```
-
-And at the `main/BUILD` file:
-
-```
-cc_library(
-    name = "hello-greet",
-    srcs = ["hello-greet.cc"],
-    hdrs = ["hello-greet.h"],
-)
-
-cc_binary(
-    name = "hello-world",
-    srcs = ["hello-world.cc"],
-    deps = [
-        ":hello-greet",
-        "//lib:hello-time",
-    ],
-)
-```
-
-The `hello-world` target in the `main` package depends on the `hello-time` target
-in the `lib` package (hence the target label `//lib:hello-time`) - Bazel knows
-this through the `deps` attribute. You can see this reflected in the dependency
-graph:
-
-![Dependency graph for `hello-world` displays how the target in the main package depends on the target in the `lib` package.](/docs/images/cpp-tutorial-stage3.png "Dependency graph for `hello-world` displays how the target in the main package depends on the target in the `lib` package.")
-
-For the build to succeed, you must make the `//lib:hello-time` target in `lib/BUILD`
-explicitly visible to targets in `main/BUILD` using the `visibility` attribute.
-This is because by default targets are only visible to other targets in the same
-`BUILD` file. Bazel uses target visibility to prevent issues such as libraries
-containing implementation details leaking into public APIs.
-
-Now build this final version of the project.
Switch to the `cpp-tutorial/stage3`
-directory by running:
-
-```posix-terminal
-cd ../stage3
-```
-
-Once again, run the following command:
-
-```posix-terminal
-bazel build //main:hello-world
-```
-
-Bazel produces something that looks like this:
-
-```
-INFO: Found 1 target...
-Target //main:hello-world up-to-date:
-  bazel-bin/main/hello-world
-INFO: Elapsed time: 0.167s, Critical Path: 0.00s
-```
-
-Now test the last binary of this tutorial for a final `Hello world` message:
-
-```posix-terminal
-bazel-bin/main/hello-world
-```
-
-### Summary: stage 3
-
-You've now built the project as two packages with three targets and understand
-the dependencies between them, which equips you to go forth and build future
-projects with Bazel. In the next section, take a look at how to continue your
-Bazel journey.
-
-## Next steps
-
-You’ve now completed your first basic build with Bazel, but this is just the
-start. Here are some more resources to continue learning with Bazel:
-
-* To keep focusing on C++, read about common [C++ build use cases](https://bazel.build/tutorials/cpp-use-cases).
-* To get started with building other applications with Bazel, see the tutorials
-for [Java](https://bazel.build/start/java), [Android application](https://bazel.build/start/android-app),
-or [iOS application](https://bazel.build/start/ios-app).
-* To learn more about working with local and remote repositories, read about
-[external dependencies](https://bazel.build/docs/external).
-* To learn more about Bazel’s other rules, see this [reference guide](https://bazel.build/rules).
-
-Happy building!
diff --git a/7.6.1/start/ios-app.mdx b/7.6.1/start/ios-app.mdx
deleted file mode 100644
index 0b860ab..0000000
--- a/7.6.1/start/ios-app.mdx
+++ /dev/null
@@ -1,6 +0,0 @@
----
-title: 'Bazel Tutorial: Build an iOS App'
----
-
-
-This tutorial has been moved into the [bazelbuild/rules_apple](https://github.com/bazelbuild/rules_apple/blob/master/doc/tutorials/ios-app.md) repository.
diff --git a/7.6.1/start/java.mdx b/7.6.1/start/java.mdx
deleted file mode 100644
index 7d7cbf6..0000000
--- a/7.6.1/start/java.mdx
+++ /dev/null
@@ -1,436 +0,0 @@
----
-title: 'Bazel Tutorial: Build a Java Project'
----
-
-
-
-This tutorial covers the basics of building Java applications with
-Bazel. You will set up your workspace and build a simple Java project that
-illustrates key Bazel concepts, such as targets and `BUILD` files.
-
-Estimated completion time: 30 minutes.
-
-## What you'll learn
-
-In this tutorial you learn how to:
-
-* Build a target
-* Visualize the project's dependencies
-* Split the project into multiple targets and packages
-* Control target visibility across packages
-* Reference targets through labels
-* Deploy a target
-
-## Before you begin
-
-### Install Bazel
-
-To prepare for the tutorial, first [Install Bazel](/install) if
-you don't have it installed already.
-
-### Install the JDK
-
-1. Install Java JDK (version 11 is preferred, though versions 8 through 15 are supported).
-
-2. Set the JAVA\_HOME environment variable to point to the JDK.
-   * On Linux/macOS:
-
-        export JAVA_HOME="$(dirname $(dirname $(realpath $(which javac))))"
-   * On Windows:
-     1. Open Control Panel.
-     2. Go to "System and Security" > "System" > "Advanced System Settings" > "Advanced" tab > "Environment Variables...".
-     3. Under the "User variables" list (the one on the top), click "New...".
-     4. In the "Variable name" field, enter `JAVA_HOME`.
-     5. Click "Browse Directory...".
-     6.
Navigate to the JDK directory (for example `C:\Program Files\Java\jdk1.8.0_152`). - 7. Click "OK" on all dialog windows. - -### Get the sample project - -Retrieve the sample project from Bazel's GitHub repository: - -```posix-terminal -git clone https://github.com/bazelbuild/examples -``` - -The sample project for this tutorial is in the `examples/java-tutorial` -directory and is structured as follows: - -``` -java-tutorial -├── BUILD -├── src -│ └── main -│ └── java -│ └── com -│ └── example -│ ├── cmdline -│ │ ├── BUILD -│ │ └── Runner.java -│ ├── Greeting.java -│ └── ProjectRunner.java -└── MODULE.bazel -``` - -## Build with Bazel - -### Set up the workspace - -Before you can build a project, you need to set up its workspace. A workspace is -a directory that holds your project's source files and Bazel's build outputs. It -also contains files that Bazel recognizes as special: - -* The `MODULE.bazel` file, which identifies the directory and its contents as a - Bazel workspace and lives at the root of the project's directory structure, - -* One or more `BUILD` files, which tell Bazel how to build different parts of - the project. (A directory within the workspace that contains a `BUILD` file - is a *package*. You will learn about packages later in this tutorial.) - -To designate a directory as a Bazel workspace, create an empty file named -`MODULE.bazel` in that directory. - -When Bazel builds the project, all inputs and dependencies must be in the same -workspace. Files residing in different workspaces are independent of one -another unless linked, which is beyond the scope of this tutorial. - -### Understand the BUILD file - -A `BUILD` file contains several different types of instructions for Bazel. -The most important type is the *build rule*, which tells Bazel how to build the -desired outputs, such as executable binaries or libraries. Each instance -of a build rule in the `BUILD` file is called a *target* and points to a -specific set of source files and dependencies. A target can also point to other -targets. - -Take a look at the `java-tutorial/BUILD` file: - -```python -java_binary( - name = "ProjectRunner", - srcs = glob(["src/main/java/com/example/*.java"]), -) -``` - -In our example, the `ProjectRunner` target instantiates Bazel's built-in -[`java_binary` rule](/reference/be/java#java_binary). The rule tells Bazel to -build a `.jar` file and a wrapper shell script (both named after the target). - -The attributes in the target explicitly state its dependencies and options. -While the `name` attribute is mandatory, many are optional. For example, in the -`ProjectRunner` rule target, `name` is the name of the target, `srcs` specifies -the source files that Bazel uses to build the target, and `main_class` specifies -the class that contains the main method. (You may have noticed that our example -uses [glob](/reference/be/functions#glob) to pass a set of source files to Bazel -instead of listing them one by one.) - -### Build the project - -To build your sample project, navigate to the `java-tutorial` directory -and run: - -```posix-terminal -bazel build //:ProjectRunner -``` -In the target label, the `//` part is the location of the `BUILD` file -relative to the root of the workspace (in this case, the root itself), -and `ProjectRunner` is the target name in the `BUILD` file. (You will -learn about target labels in more detail at the end of this tutorial.) - -Bazel produces output similar to the following: - -```bash - INFO: Found 1 target... 
-
- Target //:ProjectRunner up-to-date:
-  bazel-bin/ProjectRunner.jar
-  bazel-bin/ProjectRunner
- INFO: Elapsed time: 1.021s, Critical Path: 0.83s
-```
-
-Congratulations, you just built your first Bazel target! Bazel places build
-outputs in the `bazel-bin` directory at the root of the workspace. Browse
-through its contents to get an idea for Bazel's output structure.
-
-Now test your freshly built binary:
-
-```posix-terminal
-bazel-bin/ProjectRunner
-```
-
-### Review the dependency graph
-
-Bazel requires build dependencies to be explicitly declared in `BUILD` files.
-Bazel uses those statements to create the project's dependency graph, which
-enables accurate incremental builds.
-
-To visualize the sample project's dependencies, you can generate a text
-representation of the dependency graph by running this command at the
-workspace root:
-
-```posix-terminal
-bazel query --notool_deps --noimplicit_deps "deps(//:ProjectRunner)" --output graph
-```
-
-The above command tells Bazel to look for all dependencies for the target
-`//:ProjectRunner` (excluding host and implicit dependencies) and format the
-output as a graph.
-
-Then, paste the text into [GraphViz](http://www.webgraphviz.com/).
-
-As you can see, the project has a single target that builds two source files with
-no additional dependencies:
-
-![Dependency graph of the target 'ProjectRunner'](/docs/images/tutorial_java_01.svg)
-
-After you set up your workspace, build your project, and examine its
-dependencies, you can add some complexity.
-
-## Refine your Bazel build
-
-While a single target is sufficient for small projects, you may want to split
-larger projects into multiple targets and packages to allow for fast incremental
-builds (that is, only rebuild what's changed) and to speed up your builds by
-building multiple parts of a project at once.
-
-### Specify multiple build targets
-
-You can split the sample project build into two targets. Replace the contents of
-the `java-tutorial/BUILD` file with the following:
-
-```python
-java_binary(
-    name = "ProjectRunner",
-    srcs = ["src/main/java/com/example/ProjectRunner.java"],
-    main_class = "com.example.ProjectRunner",
-    deps = [":greeter"],
-)
-
-java_library(
-    name = "greeter",
-    srcs = ["src/main/java/com/example/Greeting.java"],
-)
-```
-
-With this configuration, Bazel first builds the `greeter` library, then the
-`ProjectRunner` binary. The `deps` attribute in `java_binary` tells Bazel that
-the `greeter` library is required to build the `ProjectRunner` binary.
-
-To build this new version of the project, run the following command:
-
-```posix-terminal
-bazel build //:ProjectRunner
-```
-
-Bazel produces output similar to the following:
-
-```
-INFO: Found 1 target...
-Target //:ProjectRunner up-to-date:
-  bazel-bin/ProjectRunner.jar
-  bazel-bin/ProjectRunner
-INFO: Elapsed time: 2.454s, Critical Path: 1.58s
-```
-
-Now test your freshly built binary:
-
-```posix-terminal
-bazel-bin/ProjectRunner
-```
-
-If you now modify `ProjectRunner.java` and rebuild the project, Bazel only
-recompiles that file.
-
-Looking at the dependency graph, you can see that `ProjectRunner` depends on the
-same inputs as it did before, but the structure of the build is different:
-
-![Dependency graph of the target 'ProjectRunner' after adding a dependency](
-/docs/images/tutorial_java_02.svg)
-
-You've now built the project with two targets. The `ProjectRunner` target builds
-two source files and depends on one other target (`:greeter`), which builds
-one additional source file.
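-
-For orientation, a minimal `Greeting.java` consistent with this layout could
-look like the sketch below (illustrative only; the repository's actual file may
-differ in details such as the method name):
-
-```java
-// src/main/java/com/example/Greeting.java -- the single source of :greeter.
-package com.example;
-
-public class Greeting {
-  public static void sayHi(String who) {
-    System.out.println("Hi, " + who + "!");
-  }
-}
-```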
-
-### Use multiple packages
-
-Let’s now split the project into multiple packages. If you take a look at the
-`src/main/java/com/example/cmdline` directory, you can see that it also contains
-a `BUILD` file, plus some source files. Therefore, to Bazel, the workspace now
-contains two packages, `//src/main/java/com/example/cmdline` and `//` (since
-there is a `BUILD` file at the root of the workspace).
-
-Take a look at the `src/main/java/com/example/cmdline/BUILD` file:
-
-```python
-java_binary(
-    name = "runner",
-    srcs = ["Runner.java"],
-    main_class = "com.example.cmdline.Runner",
-    deps = ["//:greeter"],
-)
-```
-
-The `runner` target depends on the `greeter` target in the `//` package (hence
-the target label `//:greeter`) - Bazel knows this through the `deps` attribute.
-Take a look at the dependency graph:
-
-![Dependency graph of the target 'runner'](/docs/images/tutorial_java_03.svg)
-
-However, for the build to succeed, you must explicitly make the `greeter` target
-in `java-tutorial/BUILD` visible to the `runner` target in
-`//src/main/java/com/example/cmdline/BUILD` using the `visibility` attribute.
-This is because by default targets are only visible to other targets in the same
-`BUILD` file. (Bazel uses target visibility to prevent issues such as libraries
-containing implementation details leaking into public APIs.)
-
-To do this, add the `visibility` attribute to the `greeter` target in
-`java-tutorial/BUILD` as shown below:
-
-```python
-java_library(
-    name = "greeter",
-    srcs = ["src/main/java/com/example/Greeting.java"],
-    visibility = ["//src/main/java/com/example/cmdline:__pkg__"],
-)
-```
-
-Now you can build the new package by running the following command at the root
-of the workspace:
-
-```posix-terminal
-bazel build //src/main/java/com/example/cmdline:runner
-```
-
-Bazel produces output similar to the following:
-
-```
-INFO: Found 1 target...
-Target //src/main/java/com/example/cmdline:runner up-to-date:
-  bazel-bin/src/main/java/com/example/cmdline/runner.jar
-  bazel-bin/src/main/java/com/example/cmdline/runner
-INFO: Elapsed time: 1.576s, Critical Path: 0.81s
-```
-
-Now test your freshly built binary:
-
-```posix-terminal
-./bazel-bin/src/main/java/com/example/cmdline/runner
-```
-
-You've now modified the project to build as two packages, each containing one
-target, and understand the dependencies between them.
-
-
-## Use labels to reference targets
-
-In `BUILD` files and at the command line, Bazel uses target labels to reference
-targets - for example, `//:ProjectRunner` or
-`//src/main/java/com/example/cmdline:runner`. Their syntax is as follows:
-
-```
-//path/to/package:target-name
-```
-
-If the target is a rule target, then `path/to/package` is the path to the
-directory containing the `BUILD` file, and `target-name` is what you named the
-target in the `BUILD` file (the `name` attribute). If the target is a file
-target, then `path/to/package` is the path to the root of the package, and
-`target-name` is the name of the target file, including its full path.
-
-When referencing targets at the repository root, the package path is empty;
-just use `//:target-name`. When referencing targets within the same `BUILD`
-file, you can even skip the `//` workspace root identifier and just use
-`:target-name`.
-
-For example, for targets in the `java-tutorial/BUILD` file, you did not have to
-specify a package path, since the workspace root is itself a package (`//`), and
-your two target labels were simply `//:ProjectRunner` and `//:greeter`.
-
-However, for targets in the `//src/main/java/com/example/cmdline/BUILD` file you
-had to specify the full package path of `//src/main/java/com/example/cmdline`
-and your target label was `//src/main/java/com/example/cmdline:runner`.
-
-## Package a Java target for deployment
-
-Let’s now package a Java target for deployment by building the binary with all
-of its runtime dependencies. This lets you run the binary outside of your
-development environment.
-
-As you remember, the [java_binary](/reference/be/java#java_binary) build rule
-produces a `.jar` and a wrapper shell script. Take a look at the contents of
-`runner.jar` using this command:
-
-```posix-terminal
-jar tf bazel-bin/src/main/java/com/example/cmdline/runner.jar
-```
-
-The contents are:
-
-```
-META-INF/
-META-INF/MANIFEST.MF
-com/
-com/example/
-com/example/cmdline/
-com/example/cmdline/Runner.class
-```
-As you can see, `runner.jar` contains `Runner.class`, but not its dependency,
-`Greeting.class`. The `runner` script that Bazel generates adds `greeter.jar`
-to the classpath, so if you leave it like this, it will run locally, but it
-won't run standalone on another machine. Fortunately, the `java_binary` rule
-allows you to build a self-contained, deployable binary. To build it, append
-`_deploy.jar` to the target name:
-
-```posix-terminal
-bazel build //src/main/java/com/example/cmdline:runner_deploy.jar
-```
-
-Bazel produces output similar to the following:
-
-```
-INFO: Found 1 target...
-Target //src/main/java/com/example/cmdline:runner_deploy.jar up-to-date:
-  bazel-bin/src/main/java/com/example/cmdline/runner_deploy.jar
-INFO: Elapsed time: 1.700s, Critical Path: 0.23s
-```
-You have just built `runner_deploy.jar`, which you can run standalone away from
-your development environment since it contains the required runtime
-dependencies. Take a look at the contents of this standalone JAR using the
-same command as before:
-
-```posix-terminal
-jar tf bazel-bin/src/main/java/com/example/cmdline/runner_deploy.jar
-```
-
-The contents include all of the necessary classes to run:
-
-```
-META-INF/
-META-INF/MANIFEST.MF
-build-data.properties
-com/
-com/example/
-com/example/cmdline/
-com/example/cmdline/Runner.class
-com/example/Greeting.class
-```
-
-## Further reading
-
-For more details, see:
-
-* [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external) for
-  rules to manage transitive Maven dependencies.
-
-* [External Dependencies](/docs/external) to learn more about working with
-  local and remote repositories.
-
-* The [other rules](/rules) to learn more about Bazel.
-
-* The [C++ build tutorial](/start/cpp) to get started with building
-  C++ projects with Bazel.
-
-* The [Android application tutorial](/start/android-app) and
-  [iOS application tutorial](/start/ios-app) to get started with
-  building mobile applications for Android and iOS with Bazel.
-
-Happy building!
diff --git a/7.6.1/tutorials/cpp-dependency.mdx b/7.6.1/tutorials/cpp-dependency.mdx
deleted file mode 100644
index 194cc73..0000000
--- a/7.6.1/tutorials/cpp-dependency.mdx
+++ /dev/null
@@ -1,50 +0,0 @@
----
-title: 'Review the dependency graph'
----
-
-
-
-A successful build has all of its dependencies explicitly stated in the `BUILD`
-file. Bazel uses those statements to create the project's dependency graph,
-which enables accurate incremental builds.
-
-To visualize the sample project's dependencies, you can generate a text
-representation of the dependency graph by running this command at the
-workspace root:
-
-```
-bazel query --notool_deps --noimplicit_deps "deps(//main:hello-world)" \
-  --output graph
-```
-
-The above command tells Bazel to look for all dependencies for the target
-`//main:hello-world` (excluding host and implicit dependencies) and format the
-output as a graph.
-
-Then, paste the text into [GraphViz](http://www.webgraphviz.com/).
-
-On Ubuntu, you can view the graph locally by installing GraphViz and the xdot
-Dot Viewer:
-
-```
-sudo apt update && sudo apt install graphviz xdot
-```
-
-Then you can generate and view the graph by piping the text output above
-straight to xdot:
-
-```
-xdot <(bazel query --notool_deps --noimplicit_deps "deps(//main:hello-world)" \
-  --output graph)
-```
-
-As you can see, the first stage of the sample project has a single target
-that builds a single source file with no additional dependencies:
-
-![Dependency graph for 'hello-world'](/docs/images/cpp-tutorial-stage1.png "Dependency graph")
-
-**Figure 1.** Dependency graph for `hello-world` displays a single target with a single
-source file.
-
-After you set up your workspace, build your project, and examine its
-dependencies, you can add some complexity.
diff --git a/7.6.1/tutorials/cpp-labels.mdx b/7.6.1/tutorials/cpp-labels.mdx
deleted file mode 100644
index b0fc953..0000000
--- a/7.6.1/tutorials/cpp-labels.mdx
+++ /dev/null
@@ -1,27 +0,0 @@
----
-title: 'Use labels to reference targets'
----
-
-
-
-In `BUILD` files and at the command line, Bazel uses *labels* to reference
-targets - for example, `//main:hello-world` or `//lib:hello-time`. Their syntax
-is:
-
-```
-//path/to/package:target-name
-```
-
-If the target is a rule target, then `path/to/package` is the path from the
-workspace root (the directory containing the `WORKSPACE` file) to the directory
-containing the `BUILD` file, and `target-name` is what you named the target
-in the `BUILD` file (the `name` attribute). If the target is a file target,
-then `path/to/package` is the path to the root of the package, and
-`target-name` is the name of the target file, including its full
-path relative to the root of the package (the directory containing the
-package's `BUILD` file).
-
-When referencing targets at the repository root, the package path is empty;
-just use `//:target-name`. When referencing targets within the same `BUILD`
-file, you can even skip the `//` workspace root identifier and just use
-`:target-name`.
diff --git a/7.6.1/tutorials/cpp-use-cases.mdx b/7.6.1/tutorials/cpp-use-cases.mdx
deleted file mode 100644
index 4bcfaf3..0000000
--- a/7.6.1/tutorials/cpp-use-cases.mdx
+++ /dev/null
@@ -1,259 +0,0 @@
----
-title: 'Common C++ Build Use Cases'
----
-
-
-
-Here you will find some of the most common use cases for building C++ projects
-with Bazel. If you have not done so already, get started with building C++
-projects with Bazel by completing the tutorial
-[Introduction to Bazel: Build a C++ Project](/start/cpp).
-
-For information on `cc_library` and `hdrs` header files, see
-[`cc_library`](/reference/be/c-cpp#cc_library).
-
-## Including multiple files in a target
-
-You can include multiple files in a single target with
-[glob](/reference/be/functions#glob).
-For example: - -```python -cc_library( - name = "build-all-the-files", - srcs = glob(["*.cc"]), - hdrs = glob(["*.h"]), -) -``` - -With this target, Bazel will build all the `.cc` and `.h` files it finds in the -same directory as the `BUILD` file that contains this target (excluding -subdirectories). - -## Using transitive includes - -If a file includes a header, then any rule with that file as a source (that is, -having that file in the `srcs`, `hdrs`, or `textual_hdrs` attribute) should -depend on the included header's library rule. Conversely, only direct -dependencies need to be specified as dependencies. For example, suppose -`sandwich.h` includes `bread.h` and `bread.h` includes `flour.h`. `sandwich.h` -doesn't include `flour.h` (who wants flour in their sandwich?), so the `BUILD` -file would look like this: - -```python -cc_library( - name = "sandwich", - srcs = ["sandwich.cc"], - hdrs = ["sandwich.h"], - deps = [":bread"], -) - -cc_library( - name = "bread", - srcs = ["bread.cc"], - hdrs = ["bread.h"], - deps = [":flour"], -) - -cc_library( - name = "flour", - srcs = ["flour.cc"], - hdrs = ["flour.h"], -) -``` - -Here, the `sandwich` library depends on the `bread` library, which depends -on the `flour` library. - -## Adding include paths - -Sometimes you cannot (or do not want to) root include paths at the workspace -root. Existing libraries might already have an include directory that doesn't -match its path in your workspace. For example, suppose you have the following -directory structure: - -``` -└── my-project - ├── legacy - │   └── some_lib - │   ├── BUILD - │   ├── include - │   │   └── some_lib.h - │   └── some_lib.cc - └── WORKSPACE -``` - -Bazel will expect `some_lib.h` to be included as -`legacy/some_lib/include/some_lib.h`, but suppose `some_lib.cc` includes -`"some_lib.h"`. To make that include path valid, -`legacy/some_lib/BUILD` will need to specify that the `some_lib/include` -directory is an include directory: - -```python -cc_library( - name = "some_lib", - srcs = ["some_lib.cc"], - hdrs = ["include/some_lib.h"], - copts = ["-Ilegacy/some_lib/include"], -) -``` - -This is especially useful for external dependencies, as their header files -must otherwise be included with a `/` prefix. - -## Including external libraries - -Suppose you are using [Google Test](https://github.com/google/googletest). -You can use one of the repository functions in the `WORKSPACE` file to -download Google Test and make it available in your repository: - -```python -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") - -http_archive( - name = "gtest", - url = "https://github.com/google/googletest/archive/release-1.10.0.zip", - sha256 = "94c634d499558a76fa649edb13721dce6e98fb1e7018dfaeba3cd7a083945e91", - build_file = "@//:gtest.BUILD", -) -``` - -Note: If the destination already contains a `BUILD` file, you can leave -out the `build_file` attribute. - -Then create `gtest.BUILD`, a `BUILD` file used to compile Google Test. -Google Test has several "special" requirements that make its `cc_library` rule -more complicated: - -* `googletest-release-1.10.0/src/gtest-all.cc` `#include`s all other - files in `googletest-release-1.10.0/src/`: exclude it from the - compile to prevent link errors for duplicate symbols. - -* It uses header files that are relative to the -`googletest-release-1.10.0/include/` directory (`"gtest/gtest.h"`), so you must -add that directory to the include paths. - -* It needs to link in `pthread`, so add that as a `linkopt`. 
- -The final rule therefore looks like this: - -```python -cc_library( - name = "main", - srcs = glob( - ["googletest-release-1.10.0/src/*.cc"], - exclude = ["googletest-release-1.10.0/src/gtest-all.cc"] - ), - hdrs = glob([ - "googletest-release-1.10.0/include/**/*.h", - "googletest-release-1.10.0/src/*.h" - ]), - copts = [ - "-Iexternal/gtest/googletest-release-1.10.0/include", - "-Iexternal/gtest/googletest-release-1.10.0" - ], - linkopts = ["-pthread"], - visibility = ["//visibility:public"], -) -``` - -This is somewhat messy: everything is prefixed with `googletest-release-1.10.0` -as a byproduct of the archive's structure. You can make `http_archive` strip -this prefix by adding the `strip_prefix` attribute: - -```python -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") - -http_archive( - name = "gtest", - url = "https://github.com/google/googletest/archive/release-1.10.0.zip", - sha256 = "94c634d499558a76fa649edb13721dce6e98fb1e7018dfaeba3cd7a083945e91", - build_file = "@//:gtest.BUILD", - strip_prefix = "googletest-release-1.10.0", -) -``` - -Then `gtest.BUILD` would look like this: - -```python -cc_library( - name = "main", - srcs = glob( - ["src/*.cc"], - exclude = ["src/gtest-all.cc"] - ), - hdrs = glob([ - "include/**/*.h", - "src/*.h" - ]), - copts = ["-Iexternal/gtest/include"], - linkopts = ["-pthread"], - visibility = ["//visibility:public"], -) -``` - -Now `cc_` rules can depend on `@gtest//:main`. - -## Writing and running C++ tests - -For example, you could create a test `./test/hello-test.cc`, such as: - -```cpp -#include "gtest/gtest.h" -#include "main/hello-greet.h" - -TEST(HelloTest, GetGreet) { - EXPECT_EQ(get_greet("Bazel"), "Hello Bazel"); -} -``` - -Then create `./test/BUILD` file for your tests: - -```python -cc_test( - name = "hello-test", - srcs = ["hello-test.cc"], - copts = ["-Iexternal/gtest/include"], - deps = [ - "@gtest//:main", - "//main:hello-greet", - ], -) -``` - -To make `hello-greet` visible to `hello-test`, you must add -`"//test:__pkg__",` to the `visibility` attribute in `./main/BUILD`. - -Now you can use `bazel test` to run the test. - -``` -bazel test test:hello-test -``` - -This produces the following output: - -``` -INFO: Found 1 test target... -Target //test:hello-test up-to-date: - bazel-bin/test/hello-test -INFO: Elapsed time: 4.497s, Critical Path: 2.53s -//test:hello-test PASSED in 0.3s - -Executed 1 out of 1 tests: 1 test passes. -``` - - -## Adding dependencies on precompiled libraries - -If you want to use a library of which you only have a compiled version (for -example, headers and a `.so` file) wrap it in a `cc_library` rule: - -```python -cc_library( - name = "mylib", - srcs = ["mylib.so"], - hdrs = ["mylib.h"], -) -``` - -This way, other C++ targets in your workspace can depend on this rule. diff --git a/7.6.1/versions/index.mdx b/7.6.1/versions/index.mdx deleted file mode 100644 index c92a15b..0000000 --- a/7.6.1/versions/index.mdx +++ /dev/null @@ -1,17 +0,0 @@ ---- -title: 'Documentation Versions' ---- - - - -The documentation on this website represents the latest in Bazel. Documentation -is updated at head. Each major supported release will have a snapshot of the -narrative and reference documentation that follows the lifecycle of Bazel's -version support. - -For now, to see documentation for older Bazel versions, go to -[docs.bazel.build](https://docs.bazel.build/). - - - To the Archives! 
-
diff --git a/8.0.1/about/faq.mdx b/8.0.1/about/faq.mdx
deleted file mode 100644
index dd5be8a..0000000
--- a/8.0.1/about/faq.mdx
+++ /dev/null
@@ -1,209 +0,0 @@
----
-title: 'FAQ'
----
-
-
-
-If you have questions or need support, see [Getting Help](/help).
-
-## What is Bazel?
-
-Bazel is a tool that automates software builds and tests. Supported build tasks include running compilers and linkers to produce executable programs and libraries, and assembling deployable packages for Android, iOS, and other target environments. Bazel is similar to other tools like Make, Ant, Gradle, Buck, Pants, and Maven.
-
-## What is special about Bazel?
-
-Bazel was designed to fit the way software is developed at Google. It has the following features:
-
-* Multi-language support: Bazel supports [many languages](/reference/be/overview), and can be extended to support arbitrary programming languages.
-* High-level build language: Projects are described in the `BUILD` language, a concise text format that describes a project as sets of small interconnected libraries, binaries and tests. In contrast, with tools like Make, you have to describe individual files and compiler invocations.
-* Multi-platform support: The same tool and the same `BUILD` files can be used to build software for different architectures, and even different platforms. At Google, we use Bazel to build everything from server applications running on systems in our data centers to client apps running on mobile phones.
-* Reproducibility: In `BUILD` files, each library, test and binary must specify its direct dependencies completely. Bazel uses this dependency information to know what must be rebuilt when you make changes to a source file, and which tasks can run in parallel. This means that all builds are incremental and will always produce the same result.
-* Scalable: Bazel can handle large builds; at Google, it is common for a server binary to have 100k source files, and builds where no files were changed take about 200ms.
-
-## Why doesn’t Google use...?
-
-* Make, Ninja: These tools give very exact control over what commands get invoked to build files, but it’s up to the user to write rules that are correct.
-  * Users interact with Bazel on a higher level. For example, Bazel has built-in rules for “Java test”, “C++ binary”, and notions such as “target platform” and “host platform”. These rules have been battle tested to be foolproof.
-* Ant and Maven: Ant and Maven are primarily geared toward Java, while Bazel handles multiple languages. Bazel encourages subdividing codebases into smaller reusable units, and can rebuild only the ones that need rebuilding. This speeds up development when working with larger codebases.
-* Gradle: Bazel configuration files are much more structured than Gradle’s, letting Bazel understand exactly what each action does. This allows for more parallelism and better reproducibility.
-* Pants, Buck: Both tools were created and developed by ex-Googlers, at Twitter and Foursquare (Pants) and at Facebook (Buck). They have been modeled after Bazel, but their feature sets are different, so they aren’t viable alternatives for us.
-
-## Where did Bazel come from?
-
-Bazel is a flavor of the tool that Google uses to build its server software internally. It has expanded to build other software as well, like mobile apps (iOS, Android) that connect to our servers.
-
-## Did you rewrite your internal tool as open-source? Is it a fork?
- -Bazel shares most of its code with the internal tool and its rules are used for millions of builds every day. - -## Why did Google build Bazel? - -A long time ago, Google built its software using large, generated Makefiles. These led to slow and unreliable builds, which began to interfere with our developers’ productivity and the company’s agility. Bazel was a way to solve these problems. - -## Does Bazel require a build cluster? - -Bazel runs build operations locally by default. However, Bazel can also connect to a build cluster for even faster builds and tests. See our documentation on [remote execution and caching](/remote/rbe) and [remote caching](/remote/caching) for further details. - -## How does the Google development process work? - -For our server code base, we use the following development workflow: - -* All our server code is in a single, gigantic version control system. -* Everybody builds their software with Bazel. -* Different teams own different parts of the source tree, and make their components available as `BUILD` targets. -* Branching is primarily used for managing releases, so everybody develops their software at the head revision. - -Bazel is a cornerstone of this philosophy: since Bazel requires all dependencies to be fully specified, we can predict which programs and tests are affected by a change, and vet them before submission. - -More background on the development process at Google can be found on the [eng tools blog](http://google-engtools.blogspot.com/). - -## Why did you open up Bazel? - -Building software should be fun and easy. Slow and unpredictable builds take the fun out of programming. - -## Why would I want to use Bazel? - -* Bazel may give you faster build times because it can recompile only the files that need to be recompiled. Similarly, it can skip re-running tests that it knows haven’t changed. -* Bazel produces deterministic results. This eliminates skew between incremental and clean builds, laptop and CI system, etc. -* Bazel can build different client and server apps with the same tool from the same workspace. For example, you can change a client/server protocol in a single commit, and test that the updated mobile app works with the updated server, building both with the same tool, reaping all the aforementioned benefits of Bazel. - -## Can I see examples? - -Yes; see a [simple example](https://github.com/bazelbuild/bazel/blob/master/examples/cpp/BUILD) -or read the [Bazel source code](https://github.com/bazelbuild/bazel/blob/master/src/BUILD) for a more complex example. - - -## What is Bazel best at? - -Bazel shines at building and testing projects with the following properties: - -* Projects with a large codebase -* Projects written in (multiple) compiled languages -* Projects that deploy on multiple platforms -* Projects that have extensive tests - -## Where can I run Bazel? - -Bazel runs on Linux, macOS (OS X), and Windows. - -Porting to other UNIX platforms should be relatively easy, as long as a JDK is available for the platform. - -## What should I not use Bazel for? - -* Bazel tries to be smart about caching. This means that it is not good for running build operations whose outputs should not be cached. For example, the following steps should not be run from Bazel: - * A compilation step that fetches data from the internet. - * A test step that connects to the QA instance of your site. - * A deployment step that changes your site’s cloud configuration. 
-* If your build consists of a few long, sequential steps, Bazel may not be able to help much. You’ll get more speed by breaking long steps into smaller, discrete targets that Bazel can run in parallel. - -## How stable is Bazel’s feature set? - -The core features (C++, Java, and shell rules) have extensive use inside Google, so they are thoroughly tested and have very little churn. Similarly, we test new versions of Bazel across hundreds of thousands of targets every day to find regressions, and we release new versions multiple times every month. - -In short, except for features marked as experimental, Bazel should Just Work. Changes to non-experimental rules will be backward compatible. A more detailed list of feature support statuses can be found in our [support document](/contribute/support). - -## How stable is Bazel as a binary? - -Inside Google, we make sure that Bazel crashes are very rare. This should also hold for our open source codebase. - -## How can I start using Bazel? - -See [Getting Started](/start/). - -## Doesn’t Docker solve the reproducibility problems? - -With Docker you can easily create sandboxes with fixed OS releases, for example, Ubuntu 12.04, Fedora 21. This solves the problem of reproducibility for the system environment – that is, “which version of /usr/bin/c++ do I need?” - -Docker does not address reproducibility with regard to changes in the source code. Running Make with an imperfectly written Makefile inside a Docker container can still yield unpredictable results. - -Inside Google, we check tools into source control for reproducibility. In this way, we can vet changes to tools (“upgrade GCC to 4.6.1”) with the same mechanism as changes to base libraries (“fix bounds check in OpenSSL”). - -## Can I build binaries for deployment on Docker? - -With Bazel, you can build standalone, statically linked binaries in C/C++, and self-contained jar files for Java. These run with few dependencies on normal UNIX systems, and as such should be simple to install inside a Docker container. - -Bazel has conventions for structuring more complex programs, for example, a Java program that consumes a set of data files, or runs another program as a subprocess. It is possible to package up such environments as standalone archives, so they can be deployed on different systems, including Docker images. - -## Can I build Docker images with Bazel? - -Yes, you can use our [Docker rules](https://github.com/bazelbuild/rules_docker) to build reproducible Docker images. - -## Will Bazel make my builds reproducible automatically? - -For Java and C++ binaries, yes, assuming you do not change the toolchain. If you have build steps that involve custom recipes (for example, executing binaries through a shell script inside a rule), you will need to take some extra care: - -* Do not use dependencies that were not declared. Sandboxed execution (--spawn\_strategy=sandboxed, only on Linux) can help find undeclared dependencies. -* Avoid storing timestamps and user-IDs in generated files. ZIP files and other archives are especially prone to this. -* Avoid connecting to the network. Sandboxed execution can help here too. -* Avoid processes that use random numbers; in particular, dictionary traversal is randomized in many programming languages.
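- -As a quick check of these precautions, you can build twice from a clean state and compare output digests. The following is a minimal sketch only; the target label and paths are illustrative, not part of this FAQ: - -```sh -# Build in a sandbox (Linux only) so undeclared dependencies fail loudly. -bazel build --spawn_strategy=sandboxed //app:bin -sha256sum bazel-bin/app/bin > /tmp/digest_1 - -# Rebuild from scratch and compare; identical digests suggest a reproducible build. -bazel clean -bazel build --spawn_strategy=sandboxed //app:bin -sha256sum bazel-bin/app/bin > /tmp/digest_2 -diff /tmp/digest_1 /tmp/digest_2 && echo "bit-identical" -```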
- -## Do you have binary releases? - -Yes, you can find the latest [release binaries](https://github.com/bazelbuild/bazel/releases/latest) and review our [release policy](/release/). - -## I use Eclipse/IntelliJ/Xcode. How does Bazel interoperate with IDEs? - -For IntelliJ, check out the [IntelliJ with Bazel plugin](https://ij.bazel.build/). - -For Xcode, check out [Tulsi](http://tulsi.bazel.build/). - -For Eclipse, check out [E4B plugin](https://github.com/bazelbuild/e4b). - -For other IDEs, check out the [blog post](https://blog.bazel.build/2016/06/10/ide-support.html) on how these plugins work. - -## I use Jenkins/CircleCI/TravisCI. How does Bazel interoperate with CI systems? - -Bazel returns a non-zero exit code if the build or test invocation fails, and this should be enough for basic CI integration. Since Bazel does not need clean builds for correctness, the CI system should not be configured to clean before starting a build/test run. - -Further details on exit codes are in the [User Manual](/docs/user-manual). - -## What future features can we expect in Bazel? - -See our [Roadmaps](/about/roadmap). - -## Can I use Bazel for my INSERT LANGUAGE HERE project? - -Bazel is extensible. Anyone can add support for new languages. Many languages are supported: see the [build encyclopedia](/reference/be/overview) for a list of recommendations and [awesomebazel.com](https://awesomebazel.com/) for a more comprehensive list. - -If you would like to develop extensions or learn how they work, see the documentation for [extending Bazel](/extending/concepts). - -## Can I contribute to the Bazel code base? - -See our [contribution guidelines](/contribute/). - -## Why isn’t all development done in the open? - -We still have to refactor the interfaces between the public code in Bazel and our internal extensions frequently. This makes it hard to do much development in the open. - -## Are you done open sourcing Bazel? - -Open sourcing Bazel is a work-in-progress. In particular, we’re still working on open sourcing: - -* Many of our unit and integration tests (which should make contributing patches easier). -* Full IDE integration. - -Beyond code, we’d like to eventually have all code reviews, bug tracking, and design decisions happen publicly, with the Bazel community involved. We are not there yet, so some changes will simply appear in the Bazel repository without clear explanation. Despite this lack of transparency, we want to support external developers and collaborate. Thus, we are opening up the code, even though some of the development is still happening internal to Google. Please let us know if anything seems unclear or unjustified as we transition to an open model. - -## Are there parts of Bazel that will never be open sourced? - -Yes, some of the code base either integrates with Google-specific technology or is code we have been looking for an excuse to get rid of (or some combination of the two). These parts of the code base are not available on GitHub and probably never will be. - -## How do I contact the team? - -We are reachable at bazel-discuss@googlegroups.com. - -## Where do I report bugs? - -Open an issue [on GitHub](https://github.com/bazelbuild/bazel/issues). - -## What’s up with the word “Blaze” in the codebase? - -This is an internal name for the tool. Please refer to Blaze as Bazel. - -## Why do other Google projects (Android, Chrome) use other build tools? - -Until the first (Alpha) release, Bazel was not available externally, so open source projects such as Chromium and Android could not use it. In addition, the original lack of Windows support was a problem for building Windows applications, such as Chrome.
Since the project has matured and become more stable, the [Android Open Source Project](https://source.android.com/) is in the process of migrating to Bazel. - -## How do you pronounce “Bazel”? - -The same way as “basil” (the herb) in US English: “BAY-zel”. It rhymes with “hazel”. IPA: /ˈbeɪzˌəl/ diff --git a/8.0.1/about/intro.mdx b/8.0.1/about/intro.mdx deleted file mode 100644 index a531ac2..0000000 --- a/8.0.1/about/intro.mdx +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: 'Intro to Bazel' ---- - - - -Bazel is an open-source build and test tool similar to Make, Maven, and Gradle. -It uses a human-readable, high-level build language. Bazel supports projects in -multiple languages and builds outputs for multiple platforms. Bazel supports -large codebases across multiple repositories, and large numbers of users. - -## Benefits - -Bazel offers the following advantages: - -* **High-level build language.** Bazel uses an abstract, human-readable - language to describe the build properties of your project at a high - semantic level. Unlike other tools, Bazel operates on the *concepts* - of libraries, binaries, scripts, and data sets, shielding you from the - complexity of writing individual calls to tools such as compilers and - linkers. - -* **Bazel is fast and reliable.** Bazel caches all previously done work and - tracks changes to both file content and build commands. This way, Bazel - knows when something needs to be rebuilt, and rebuilds only that. To further - speed up your builds, you can set up your project to build in a highly - parallel and incremental fashion. - -* **Bazel is multi-platform.** Bazel runs on Linux, macOS, and Windows. Bazel - can build binaries and deployable packages for multiple platforms, including - desktop, server, and mobile, from the same project. - -* **Bazel scales.** Bazel maintains agility while handling builds with 100k+ - source files. It works with multiple repositories and user bases in the tens - of thousands. - -* **Bazel is extensible.** Many [languages](/rules) are - supported, and you can extend Bazel to support any other language or - framework. - -## Using Bazel - -To build or test a project with Bazel, you typically do the following: - -1. **Set up Bazel.** Download and [install Bazel](/install). - -2. **Set up a project [workspace](/concepts/build-ref#workspaces)**, which is a - directory where Bazel looks for build inputs and `BUILD` files, and where it - stores build outputs. - -3. **Write a `BUILD` file**, which tells Bazel what to build and how to - build it. - - You write your `BUILD` file by declaring build targets using - [Starlark](/rules/language), a domain-specific language. (See example - [here](https://github.com/bazelbuild/bazel/blob/master/examples/cpp/BUILD).) - - A build target specifies a set of input artifacts that Bazel will build plus - their dependencies, the build rule Bazel will use to build it, and options - that configure the build rule. - - A build rule specifies the build tools Bazel will use, such as compilers and - linkers, and their configurations. Bazel ships with a number of build rules - covering the most common artifact types in the supported languages on - supported platforms. - -4. **Run Bazel** from the [command line](/reference/command-line-reference). Bazel - places your outputs within the workspace. - -In addition to building, you can also use Bazel to run -[tests](/reference/test-encyclopedia) and [query](/query/guide) the build -to trace dependencies in your code.
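- -To make steps 3 and 4 concrete, here is a minimal sketch; the target name and source file are hypothetical, not part of this guide: - -```sh -# A tiny BUILD file declaring one C++ binary target (contents are illustrative). -cat > BUILD <<'EOF' -cc_binary( -    name = "hello", -    srcs = ["hello.cc"], -) -EOF - -# Build the target; outputs are placed under bazel-bin/ in the workspace. -bazel build //:hello - -# Build (if needed) and execute it. -bazel run //:hello -```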
- -## Bazel build process - -When running a build or a test, Bazel does the following: - -1. **Loads** the `BUILD` files relevant to the target. - -2. **Analyzes** the inputs and their - [dependencies](/concepts/dependencies), applies the specified build - rules, and produces an [action](/extending/concepts#evaluation-model) - graph. - -3. **Executes** the build actions on the inputs until the final build outputs - are produced. - -Since all previous build work is cached, Bazel can identify and reuse cached -artifacts and only rebuild or retest what's changed. To further enforce -correctness, you can set up Bazel to run builds and tests -[hermetically](/basics/hermeticity) through sandboxing, minimizing skew -and maximizing [reproducibility](/run/build#correct-incremental-rebuilds). - -### Action graph - -The action graph represents the build artifacts, the relationships between them, -and the build actions that Bazel will perform. Thanks to this graph, Bazel can -[track](/run/build#build-consistency) changes to -file content as well as changes to actions, such as build or test commands, and -know what build work has previously been done. The graph also enables you to -easily [trace dependencies](/query/guide) in your code. - -## Getting started tutorials - -To get started with Bazel, see [Getting Started](/start/) or jump -directly to the Bazel tutorials: - -* [Tutorial: Build a C++ Project](/start/cpp) -* [Tutorial: Build a Java Project](/start/java) -* [Tutorial: Build an Android Application](/start/android-app) -* [Tutorial: Build an iOS Application](/start/ios-app) diff --git a/8.0.1/about/roadmap.mdx b/8.0.1/about/roadmap.mdx deleted file mode 100644 index 2e18b78..0000000 --- a/8.0.1/about/roadmap.mdx +++ /dev/null @@ -1,147 +0,0 @@ ---- -title: 'Bazel roadmap' ---- - - - -## Overview - -As the Bazel project continues to evolve in response to your needs, we want to -share our 2024 update. - -This roadmap describes current initiatives and predictions for the future of -Bazel development, giving you visibility into current priorities and ongoing -projects. - -## Bazel 8.0 Release - -We plan to bring Bazel 8.0 [long term support -(LTS)](https://bazel.build/release/versioning) to you in late 2024. -The following features are planned to be implemented. - -### Bzlmod: external dependency management system - -[Bzlmod](https://bazel.build/docs/bzlmod) automatically resolves transitive -dependencies, allowing projects to scale while staying fast and -resource-efficient. - -With Bazel 8, we will disable WORKSPACE support by default (it will still be -possible to enable it using `--enable_workspace`); with Bazel 9 WORKSPACE -support will be removed. Starting with Bazel 7.1, you can set -`--noenable_workspace` to opt into the new behavior. - -Bazel 8.0 will contain a number of enhancements to -[Bazel's external dependency management](https://docs.google.com/document/d/1moQfNcEIttsk6vYanNKIy3ZuK53hQUFq1b1r0rmsYVg/edit#heading=h.lgyp7ubwxmjc) -functionality, including: - -* The new flag `--enable_workspace` can be set to `false` to completely - disable WORKSPACE functionality. -* New directory watching API (see - [#21435](https://github.com/bazelbuild/bazel/pull/21435), shipped in Bazel - 7.1). -* Improved scheme for generating canonical repository names for better - cacheability of actions across dependency version updates.
- ([#21316](https://github.com/bazelbuild/bazel/pull/21316), shipped in Bazel - 7.1). -* An improved shared repository cache (see - [#12227](https://github.com/bazelbuild/bazel/issues/12227)). -* Vendor and offline mode support — allows users to run builds with - pre-downloaded dependencies (see - [#19563](https://github.com/bazelbuild/bazel/issues/19563)). -* Reduced merge conflicts in lock files - ([#20369](https://github.com/bazelbuild/bazel/issues/20369)). -* Segmented MODULE.bazel - ([#17880](https://github.com/bazelbuild/bazel/issues/17880)). -* Allow overriding module extension generated repository - ([#19301](https://github.com/bazelbuild/bazel/issues/19301)). -* Improved documentation (e.g. - [#18030](https://github.com/bazelbuild/bazel/issues/18030), - [#15821](https://github.com/bazelbuild/bazel/issues/15821)) and migration - guide and migration tooling. - -### Remote execution improvements - -* Add support for asynchronous execution, speeding up remote execution by - increasing parallelism via the `--jobs` flag. -* Make it easier to debug cache misses with a new compact execution log, - reducing its size by 100x and its runtime overhead significantly (see - [#18643](https://github.com/bazelbuild/bazel/issues/18643)). -* Implement garbage collection for the disk cache (see - [#5139](https://github.com/bazelbuild/bazel/issues/5139)). -* Implement remote output service to allow lazy downloading of arbitrary build - outputs (see - [#20933](https://github.com/bazelbuild/bazel/discussions/20933)). - -### Migration of Android, C++, Java, Python, and Proto rules - -Complete migration of Android, C++, Java, and Python rulesets to dedicated -repositories and decoupling them from the Bazel releases. This effort allows -Bazel users and rule authors to - -* Update rules independently of Bazel. -* Update and customize rules as needed. - -The new location of the rulesets is going to be `bazelbuild/rules_android`, -`rules_cc`, `rules_java`, `rules_python` and `google/protobuf`. `rules_proto` is -going to be deprecated. - -Bazel 8 will provide a temporary migration flag that will automatically use the -rulesets that were previously part of the binary from their repositories. All -the users of those rulesets are expected to eventually depend on their -repositories and load them similarly to other rulesets that were never part of -Bazel. - -Bazel 8 will also improve the existing rule extension and subrule APIs and -mark them as non-experimental. - -### Starlark improvements - -* Symbolic Macros are a new way of writing macros that is friendlier to - `BUILD` users, macro authors, and tooling. Compared to legacy macros, which - Bazel has only limited insight into, symbolic macros help users avoid common - pitfalls and enforce best practices. -* Package finalizers are a proposed feature for adding first-class support for - custom package validation logic. They are intended to help us deprecate - `native.existing_rules()`. - -### Configurability - -* Output path mapping continues to stabilize, promising better remote cache - performance and build speed for rule designers who use transitions. -* Automatically set build flags suitable for a given `--platforms`. -* Define project-supported flag combinations and automatically build targets - with default flags without having to set bazelrcs. -* Don't redo build analysis every time build flags change. - -### Project Skyfocus - minimize retained data structures - -Bazel holds a lot of state in RAM for fast incremental builds.
However, -developers often change a small subset of the source files (e.g. almost never -one of the external dependencies). With Skyfocus, Bazel will provide an -experimental way to drop unnecessary incremental state and reduce Bazel's memory -footprint, while still providing the same fast incremental build experience. - -The initial scope aims to improve the retained heap metric only. Peak heap -reduction is a possibility, but not included in the initial scope. - -### Misc - -* Mobile install v3, a simpler and better-maintained approach to incrementally - deploying Android applications. -* Garbage collection for repository caches and Bazel's `install_base`. -* Reduced sandboxing overhead. - -### Bazel-JetBrains* IntelliJ IDEA support - -Incremental IntelliJ plugin updates to support the latest JetBrains plugin -release. - -*This roadmap is a snapshot of current targets and should not be taken as a -guarantee. Priorities are subject to change in response to developer and -customer feedback, or new market opportunities.* - -*To be notified of new features — including updates to this roadmap — join the -[Google Group](https://groups.google.com/g/bazel-discuss) community.* - -*Copyright © 2022 JetBrains s.r.o. JetBrains and IntelliJ are registered trademarks of JetBrains s.r.o. diff --git a/8.0.1/about/vision.mdx b/8.0.1/about/vision.mdx deleted file mode 100644 index da0ed02..0000000 --- a/8.0.1/about/vision.mdx +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: 'Bazel Vision' ---- - - - -Any software developer can efficiently build, test, and package -any project, of any size or complexity, with tooling that's easy to adopt and -extend. - -* **Engineers can take build fundamentals for granted.** Software developers - focus on the creative process of authoring code because the mechanical - process of build and test is solved. When customizing the build system to - support new languages or unique organizational needs, users focus on the - aspects of extensibility that are unique to their use case, without having - to reinvent the basic plumbing. - -* **Engineers can easily contribute to any project.** A developer who wants to - start working on a new project can simply clone the project and run the - build. There's no need for local configuration - it just works. With - cross-platform remote execution, they can work on any machine anywhere and - fully test their changes against all platforms the project targets. - Engineers can quickly configure the build for a new project or incrementally - migrate an existing build. - -* **Projects can scale to any size codebase, any size team.** Fast, - incremental testing allows teams to fully validate every change before it is - committed. This remains true even as repos grow, projects span multiple - repos, and multiple languages are introduced. Infrastructure does not force - developers to trade test coverage for build speed. - -**We believe Bazel has the potential to fulfill this vision.** - -Bazel was built from the ground up to enable builds that are reproducible (a -given set of inputs will always produce the same outputs) and portable (a build -can be run on any machine without affecting the output). - -These characteristics support safe incrementality (rebuilding only changed -inputs doesn't introduce the risk of corruption) and distributability (build -actions are isolated and can be offloaded). By minimizing the work needed to do -a correct build and parallelizing that work across multiple cores and remote -systems, Bazel can make any build fast.
- -Bazel's abstraction layer — instructions specific to languages, platforms, and -toolchains implemented in a simple extensibility language — allows it to be -easily applied to any context. - -## Bazel core competencies - -1. Bazel supports **multi-language, multi-platform** builds and tests. You can - run a single command to build and test your entire source tree, no matter - which combination of languages and platforms you target. -1. Bazel builds are **fast and correct**. Every build and test run is - incremental, on your developers' machines and on CI. -1. Bazel provides a **uniform, extensible language** to define builds for any - language or platform. -1. Bazel allows your builds **to scale** by connecting to remote execution and - caching services. -1. Bazel works across **all major development platforms** (Linux, macOS, and - Windows). -1. We accept that adopting Bazel requires effort, but **gradual adoption** is - possible. Bazel interfaces with de-facto standard tools for a given - language/platform. - -## Serving language communities - -Software engineering evolves in the context of language communities — typically, -self-organizing groups of people who use common tools and practices. - -To be of use to members of a language community, high-quality Bazel rules must be -available that integrate with the workflows and conventions of that community. - -Bazel is committed to being extensible and open, and to supporting good rulesets -for any language. - -### Requirements of a good ruleset - -1. The rules need to support efficient **building and testing** for the - language, including code coverage. -1. The rules need to **interface with a widely-used "package manager"** for the - language (such as Maven for Java), and support incremental migration paths - from other widely-used build systems. -1. The rules need to be **extensible and interoperable**, following - ["Bazel sandwich"](https://github.com/bazelbuild/bazel-website/blob/master/designs/_posts/2016-08-04-extensibility-for-native-rules.md) - principles. -1. The rules need to be **remote-execution ready**. In practice, this means - **configurable using the [toolchains](/extending/toolchains) mechanism**. -1. The rules (and Bazel) need to interface with a **widely-used IDE** for the - language, if there is one. -1. The rules need to have **thorough, usable documentation,** with introductory - material for new users and comprehensive docs for expert users. - -Each of these items is essential and only together do they deliver on Bazel's -competencies for their particular ecosystem. - -They are also, by and large, sufficient - once all are fulfilled, Bazel fully -delivers its value to members of that language community. diff --git a/8.0.1/about/why.mdx b/8.0.1/about/why.mdx deleted file mode 100644 index 97cfa36..0000000 --- a/8.0.1/about/why.mdx +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: 'Why Bazel?' ---- - - - -Bazel is a [fast](#fast), [correct](#correct), and [extensible](#extensible) -build tool with [integrated testing](#integrated-testing) that supports multiple -[languages](#multi-language), [repositories](#multi-repository), and -[platforms](#multi-platform) in an industry-leading [ecosystem](#ecosystem). - -## Bazel is fast - -Bazel knows exactly what input files each build command needs, avoiding -unnecessary work by re-running only when the set of input files has -changed between each build. -It runs build commands with as much parallelism as possible, either within the -same computer or on [remote build nodes](/remote/rbe).
If the structure of the build -allows for it, it can run thousands of build or test commands at the same time. - -This is supported by multiple caching layers, in memory, on disk and on the -remote build farm, if available. At Google, we routinely achieve cache hit rates -north of 99%. - -## Bazel is correct - -Bazel ensures that your binaries are built *only* from your own -source code. Bazel actions run in individual sandboxes and Bazel tracks -every input file of the build, only and always re-running build -commands when it needs to. This keeps your binaries up-to-date so that the -[same source code always results in the same binary](/basics/hermeticity), bit -by bit. - -Say goodbye to endless `make clean` invocations and to chasing phantom bugs -that were in fact resolved in source code that never got built. - -## Bazel is extensible - -Harness the full power of Bazel by writing your own rules and macros to -customize Bazel for your specific needs across a wide range of projects. - -Bazel rules are written in [Starlark](/rules/language), our -in-house programming language that's a subset of Python. Starlark makes -rule-writing accessible to most developers, while also creating rules that can -be used across the ecosystem. - -## Integrated testing - -Bazel's [integrated test runner](/docs/user-manual#running-tests) -knows and runs only those tests that need to be re-run, using remote execution -(if available) to run them in parallel. Detect flakes early by using remote -execution to quickly run a test thousands of times. - -Bazel [provides facilities](/remote/bep) to upload test results to a central -location, thereby facilitating efficient communication of test outcomes, be it -on CI or by individual developers. - -## Multi-language support - -Bazel supports many common programming languages including C++, Java, -Kotlin, Python, Go, and Rust. You can build multiple binaries (for example, -backend, web UI and mobile app) in the same Bazel invocation without being -constrained to one language's idiomatic build tool. - -## Multi-repository support - -Bazel can [gather source code from multiple locations](/external/overview): you -don't need to vendor your dependencies (but you can!), you can instead point -Bazel to the location of your source code or prebuilt artifacts (e.g. a git -repository or Maven Central), and it takes care of the rest. - -## Multi-platform support - -Bazel can simultaneously build projects for multiple platforms including Linux, -macOS, Windows, and Android. It also provides powerful -[cross-compilation capabilities](/extending/platforms) to build code for one -platform while running the build on another. - -## Wide ecosystem - -[Industry leaders](/community/users) love Bazel, building a large -community of developers who use and contribute to Bazel. Find tools, services, -and documentation, including [consulting and SaaS offerings](/community/experts), -that you can use with Bazel. Explore extensions like support for programming -languages in our [open source software repositories](/rules). diff --git a/8.0.1/advanced/performance/build-performance-breakdown.mdx b/8.0.1/advanced/performance/build-performance-breakdown.mdx deleted file mode 100644 index 477e757..0000000 --- a/8.0.1/advanced/performance/build-performance-breakdown.mdx +++ /dev/null @@ -1,235 +0,0 @@ ---- -title: 'Breaking down build performance' ---- - - - -Bazel is complex and does a lot of different things over the course of a build, -some of which can have an impact on build performance.
This page attempts to map -some of these Bazel concepts to their implications on build performance. While -not exhaustive, we have included some examples of how to detect build performance -issues through [extracting metrics](/configure/build-performance-metrics) -and what you can do to fix them. With this, we hope you can apply these concepts -when investigating build performance regressions. - -### Clean vs Incremental builds - -A clean build is one that builds everything from scratch, while an incremental -build reuses some already completed work. - -We suggest looking at clean and incremental builds separately, especially when -you are collecting / aggregating metrics that are dependent on the state of -Bazel’s caches (for example -[build request size metrics](#deterministic-build-metrics-as-a-proxy-for-build-performance) -). They also represent two different user experiences. As compared to starting -a clean build from scratch (which takes longer due to a cold cache), incremental -builds happen far more frequently as developers iterate on code (typically -faster since the cache is usually already warm). - -You can use the `CumulativeMetrics.num_analyses` field in the BEP to classify -builds. If `num_analyses <= 1`, it is a clean build; otherwise, we can broadly -categorize it as likely being an incremental build - the user could have switched -to different flags or different targets causing an effectively clean build. Any -more rigorous definition of incrementality will likely have to come in the form -of a heuristic, for example looking at the number of packages loaded -(`PackageMetrics.packages_loaded`). - -### Deterministic build metrics as a proxy for build performance - -Measuring build performance can be difficult due to the non-deterministic nature -of certain metrics (for example Bazel’s CPU time or queue times on a remote -cluster). As such, it can be useful to use deterministic metrics as a proxy for -the amount of work done by Bazel, which in turn affects its performance. - -The size of a build request can have a significant implication on build -performance. A larger build could represent more work in analyzing and -constructing the build graphs. Organic growth of builds comes naturally with -development: as more dependencies are added/created, builds grow in complexity -and become more expensive. - -We can slice this problem into the various build phases, and use the following -metrics as proxy metrics for work done at each phase: - -1. `PackageMetrics.packages_loaded`: the number of packages successfully loaded. - A regression here represents more work that needs to be done to read and parse - each additional BUILD file in the loading phase. - - This is often due to the addition of dependencies and having to load their - transitive closure. - - Use [query](/query/quickstart) / [cquery](/query/cquery) to find - where new dependencies might have been added. - -2. `TargetMetrics.targets_configured`: representing the number of targets and - aspects configured in the build. A regression represents more work in - constructing and traversing the configured target graph. - - This is often due to the addition of dependencies and having to construct - the graph of their transitive closure. - - Use [cquery](/query/cquery) to find where new - dependencies might have been added. - -3. `ActionSummary.actions_created`: represents the actions created in the build, - and a regression represents more work in constructing the action graph.
Note - that this also includes unused actions that might not have been executed. - - Use [aquery](/query/aquery) for debugging regressions; - we suggest starting with - [`--output=summary`](/reference/command-line-reference#flag--output) - before further drilling down with - [`--skyframe_state`](/reference/command-line-reference#flag--skyframe_state). - -4. `ActionSummary.actions_executed`: the number of actions executed, a - regression directly represents more work in executing these actions. - - The [BEP](/remote/bep) writes out the action statistics - `ActionData` that shows the most executed action types. By default, it - collects the top 20 action types, but you can pass in the - [`--experimental_record_metrics_for_all_mnemonics`](/reference/command-line-reference#flag--experimental_record_metrics_for_all_mnemonics) - flag to collect this data for all action types that were executed. - - This should help you to figure out what kind of actions were executed - (additionally). - -5. `BuildGraphSummary.outputArtifactCount`: the number of artifacts created by - the executed actions. - - If the number of actions executed did not increase, then it is likely that - a rule implementation was changed. - - -These metrics are all affected by the state of the local cache, hence you will -want to ensure that the builds you extract these metrics from are -**clean builds**. - -We have noted that a regression in any of these metrics can be accompanied by -regressions in wall time, CPU time, and memory usage.
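- -One lightweight way to capture these proxy metrics is through the BEP's JSON output. This is a sketch rather than an official recipe: it assumes the field names that protobuf's JSON mapping produces for `BuildMetrics`, and the target label is illustrative: - -```sh -# Clean build that writes the BEP as newline-delimited JSON. -bazel clean -bazel build //app:bin --build_event_json_file=/tmp/bep.json - -# Extract the deterministic proxy metrics discussed above. -jq -c 'select(.buildMetrics != null) | -  {packages_loaded: .buildMetrics.packageMetrics.packagesLoaded, -   targets_configured: .buildMetrics.targetMetrics.targetsConfigured, -   actions_created: .buildMetrics.actionSummary.actionsCreated, -   actions_executed: .buildMetrics.actionSummary.actionsExecuted}' /tmp/bep.json -```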
- -### Usage of local resources - -Bazel consumes a variety of resources on your local machine (both for analyzing -the build graph and driving the execution, and for running local actions); this -can affect the performance / availability of your machine in performing the -build, and also other tasks. - -#### Time spent - -Perhaps the metric most susceptible to noise (and one that can vary greatly from -build to build) is time; in particular, wall time, CPU time, and system time. You -can use [bazel-bench](https://github.com/bazelbuild/bazel-bench) to get -a benchmark for these metrics, and with a sufficient number of `--runs`, you can -increase the statistical significance of your measurement. - -- **Wall time** is the real world time elapsed. - - If _only_ wall time regresses, we suggest collecting a - [JSON trace profile](/advanced/performance/json-trace-profile) and looking - for differences. Otherwise, it would likely be more efficient to - investigate other regressed metrics as they could have affected the wall - time. - -- **CPU time** is the time spent by the CPU executing user code. - - If the CPU time regresses across two project commits, we suggest collecting - a Starlark CPU profile. You should probably also use `--nobuild` to - restrict the build to the analysis phase since that is where most of the - CPU heavy work is done. - -- **System time** is the time spent by the CPU in the kernel. - - If system time regresses, it is mostly correlated with I/O when Bazel reads - files from your file system. - -#### System-wide load profiling - -Using the -[`--experimental_collect_load_average_in_profiler`](https://github.com/bazelbuild/bazel/blob/6.0.0/src/main/java/com/google/devtools/build/lib/runtime/CommonCommandOptions.java#L306-L312) -flag introduced in Bazel 6.0, the -[JSON trace profiler](/advanced/performance/json-trace-profile) collects the -system load average during the invocation. - -![Profile that includes system load average](/docs/images/json-trace-profile-system-load-average.png "Profile that includes system load average") - -**Figure 1.** Profile that includes system load average. - -A high load during a Bazel invocation can be an indication that Bazel schedules -too many local actions in parallel for your machine. You might want to look into -adjusting -[`--local_cpu_resources`](/reference/command-line-reference#flag--local_cpu_resources) -and [`--local_ram_resources`](/reference/command-line-reference#flag--local_ram_resources), -especially in container environments (at least until -[#16512](https://github.com/bazelbuild/bazel/pull/16512) is merged). - - -#### Monitoring Bazel memory usage - -There are two main sources for Bazel’s memory usage: Bazel `info` and the -[BEP](/remote/bep). - -- `bazel info used-heap-size-after-gc`: The amount of used memory in bytes after - a call to `System.gc()`. - - [Bazel bench](https://github.com/bazelbuild/bazel-bench) - provides benchmarks for this metric as well. - - Additionally, there are `peak-heap-size`, `max-heap-size`, `used-heap-size` - and `committed-heap-size` (see - [documentation](/docs/user-manual#configuration-independent-data)), but these - are less relevant. - -- [BEP](/remote/bep)’s - `MemoryMetrics.peak_post_gc_heap_size`: Size of the peak JVM heap size in - bytes post GC (requires setting - [`--memory_profile`](/reference/command-line-reference#flag--memory_profile) - that attempts to force a full GC). - -A regression in memory usage is usually a result of a regression in -[build request size metrics](#deterministic_build_metrics_as_a_proxy_for_build_performance), -which are often due to addition of dependencies or a change in the rule -implementation. - -To analyze Bazel’s memory footprint on a more granular level, we recommend using -the [built-in memory profiler](/rules/performance#memory-profiling) -for rules. - -#### Memory profiling of persistent workers - -While [persistent workers](/remote/persistent) can help to speed up builds -significantly (especially for interpreted languages), their memory footprint can -be problematic. Bazel collects metrics on its workers, in particular, the -`WorkerMetrics.WorkerStats.worker_memory_in_kb` field tells how much memory -workers use (by mnemonic). - -The [JSON trace profiler](/advanced/performance/json-trace-profile) also -collects persistent worker memory usage during the invocation by passing in the -[`--experimental_collect_worker_data_in_profiler`](https://github.com/bazelbuild/bazel/blob/6.0.0/src/main/java/com/google/devtools/build/lib/runtime/CommonCommandOptions.java#L314-L320) -flag (new in Bazel 6.0). - -![Profile that includes workers memory usage](/docs/images/json-trace-profile-workers-memory-usage.png "Profile that includes workers memory usage") - -**Figure 2.** Profile that includes workers memory usage. - -Lowering the value of -[`--worker_max_instances`](/reference/command-line-reference#flag--worker_max_instances) -(default 4) might help to reduce -the amount of memory used by persistent workers. We are actively working on -making Bazel’s resource manager and scheduler smarter so that such fine tuning -will be required less often in the future.
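- -If profiles show overload or worker memory pressure, one hedged starting point is to cap workers and local resources explicitly; the target label is illustrative, and the right values depend on your machine: - -```sh -bazel build //app:bin \ -  --worker_max_instances=2 \ -  --local_cpu_resources=HOST_CPUS-2 \ -  --local_ram_resources=HOST_RAM*.5 -```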
- -### Monitoring network traffic for remote builds - -In remote execution, Bazel downloads artifacts that were built as a result of -executing actions. As such, your network bandwidth can affect the performance -of your build. - -If you are using remote execution for your builds, you might want to consider -monitoring the network traffic during the invocation using the -`NetworkMetrics.SystemNetworkStats` proto from the [BEP](/remote/bep) -(requires passing `--experimental_collect_system_network_usage`). - -Furthermore, [JSON trace profiles](/advanced/performance/json-trace-profile) -allow you to view system-wide network usage throughout the course of the build -by passing the `--experimental_collect_system_network_usage` flag (new in Bazel -6.0). - -![Profile that includes system-wide network usage](/docs/images/json-trace-profile-network-usage.png "Profile that includes system-wide network usage") - -**Figure 3.** Profile that includes system-wide network usage. - -A high but rather flat network usage when using remote execution might indicate -that network is the bottleneck in your build; if you are not using it already, -consider turning on Build without the Bytes by passing -[`--remote_download_minimal`](/reference/command-line-reference#flag--remote_download_minimal). -This will speed up your builds by avoiding the download of unnecessary intermediate artifacts. - -Another option is to configure a local -[disk cache](/reference/command-line-reference#flag--disk_cache) to save on -download bandwidth. diff --git a/8.0.1/advanced/performance/build-performance-metrics.mdx b/8.0.1/advanced/performance/build-performance-metrics.mdx deleted file mode 100644 index 8391ea8..0000000 --- a/8.0.1/advanced/performance/build-performance-metrics.mdx +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: 'Extracting build performance metrics' ---- - - - -Probably every Bazel user has experienced builds that were slow or slower than -anticipated. Improving the performance of individual builds has particular value -for targets with significant impact, such as: - -1. Core developer targets that are frequently iterated on and (re)built. - -2. Common libraries widely depended upon by other targets. - -3. A representative target from a class of targets (e.g. custom rules), where - diagnosing and fixing issues in one build might help to resolve issues at the - larger scale. - -An important step to improving the performance of builds is to understand where -resources are spent. This page lists different metrics you can collect. -[Breaking down build performance](/configure/build-performance-breakdown) showcases -how you can use these metrics to detect and fix build performance issues. - -There are a few main ways to extract metrics from your Bazel builds, namely: - -## Build Event Protocol (BEP) - -Bazel outputs a variety of protocol buffers -[`build_event_stream.proto`](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto) -through the [Build Event Protocol (BEP)](/remote/bep), which -can be aggregated by a backend specified by you. Depending on your use cases, -you might decide to aggregate the metrics in various ways, but here we go -over some concepts and proto fields that are generally useful to consider. - -## Bazel’s query / cquery / aquery commands - -Bazel provides 3 different query modes ([query](/query/quickstart), -[cquery](/query/cquery) and [aquery](/query/aquery)) that allow users -to query the target graph, configured target graph and action graph -respectively.
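- -As a sketch of how the three modes differ in practice (the target label is illustrative): - -```sh -# Target graph: transitive dependencies before any configuration is applied. -bazel query 'deps(//app:bin)' | wc -l - -# Configured target graph: the same query after configuration. -bazel cquery 'deps(//app:bin)' | wc -l - -# Action graph: a summary of the actions Bazel would run. -bazel aquery 'deps(//app:bin)' --output=summary -```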
The query language provides a -[suite of functions](/query/language#functions) usable across the different -query modes, allowing you to customize your queries according to your needs. - -## JSON Trace Profiles - -For every build-like Bazel invocation, Bazel writes a trace profile in JSON -format. The [JSON trace profile](/advanced/performance/json-trace-profile) can -be very useful to quickly understand what Bazel spent time on during the -invocation. - -## Execution Log - -The [execution log](/remote/cache-remote) can help you to troubleshoot and fix -missing remote cache hits due to machine and environment differences or -non-deterministic actions. If you pass the flag -[`--experimental_execution_log_spawn_metrics`](/reference/command-line-reference#flag--experimental_execution_log_spawn_metrics) -(available from Bazel 5.2) it will also contain detailed spawn metrics, both for -locally and remotely executed actions. You can use these metrics for example to -make comparisons between local and remote machine performance or to find out -which part of the spawn execution is consistently slower than expected (for -example due to queuing). - -## Execution Graph Log - -While the JSON trace profile contains the critical path information, sometimes -you need additional information on the dependency graph of the executed actions. -Starting with Bazel 6.0, you can pass the flags -`--experimental_execution_graph_log` and -`--experimental_execution_graph_log_dep_type=all` to write out a log about the -executed actions and their inter-dependencies. - -This information can be used to understand the drag that is added by a node on -the critical path. The drag is the amount of time that can potentially be saved -by removing a particular node from the execution graph. - -The data helps you predict the impact of changes to the build and action graph -before you actually do them. - -## Benchmarking with bazel-bench - -[Bazel bench](https://github.com/bazelbuild/bazel-bench) is a -benchmarking tool for Git projects to benchmark build performance in the -following cases: - -* **Project benchmark:** Benchmarking two git commits against each other at a - single Bazel version. Used to detect regressions in your build (often through - the addition of dependencies). - -* **Bazel benchmark:** Benchmarking two versions of Bazel against each other at - a single git commit. Used to detect regressions within Bazel itself (if you - happen to maintain / fork Bazel). - -Benchmarks monitor wall time, CPU time, system time, and Bazel’s retained -heap size. - -It is also recommended to run Bazel bench on dedicated, physical machines that -are not running other processes so as to reduce sources of variability. diff --git a/8.0.1/advanced/performance/iteration-speed.mdx b/8.0.1/advanced/performance/iteration-speed.mdx deleted file mode 100644 index 2bbf839..0000000 --- a/8.0.1/advanced/performance/iteration-speed.mdx +++ /dev/null @@ -1,93 +0,0 @@ ---- -title: 'Optimize Iteration Speed' ---- - - - -This page describes how to optimize Bazel's build performance when running Bazel -repeatedly. - -## Bazel's Runtime State - -A Bazel invocation involves several interacting parts. - -* The `bazel` command line interface (CLI) is the user-facing front-end tool - and receives commands from the user. - -* The CLI tool starts a [*Bazel server*](https://bazel.build/run/client-server) - for each distinct [output base](https://bazel.build/remote/output-directories).
- The Bazel server is generally persistent, but will shut down after some idle - time so as to not waste resources. - -* The Bazel server performs the loading and analysis steps for a given command - (`build`, `run`, `cquery`, etc.), in which it constructs the necessary parts - of the build graph in memory. The resulting data structures are retained in - the Bazel server as part of the *analysis cache*. - -* The Bazel server can also perform the action execution, or it can send - actions off for remote execution if it is set up to do so. The results of - action executions are also cached, namely in the *action cache* (or - *execution cache*, which may be either local or remote, and it may be shared - among Bazel servers). - -* The result of the Bazel invocation is made available in the output tree. - -## Running Bazel Iteratively - -In a typical developer workflow, it is common to build (or run) a piece of code -repeatedly, often at a very high frequency (e.g. to resolve some compilation -error or investigate a failing test). In this situation, it is important that -repeated invocations of `bazel` have as little overhead as possible relative to -the underlying, repeated action (e.g. invoking a compiler, or executing a test). - -With this in mind, we take another look at Bazel's runtime state: - -The analysis cache is a critical piece of data. A significant amount of time can -be spent just on the loading and analysis phases of a cold run (i.e. a run just -after the Bazel server was started or when the analysis cache was discarded). -For a single, successful cold build (e.g. for a production release) this cost is -bearable, but for repeatedly building the same target it is important that this -cost be amortized and not repeated on each invocation. - -The analysis cache is rather volatile. First off, it is part of the in-process -state of the Bazel server, so losing the server loses the cache. But the cache -is also *invalidated* very easily: for example, many `bazel` command line flags -cause the cache to be discarded. This is because many flags affect the build -graph (e.g. because of -[configurable attributes](https://bazel.build/configure/attributes)). Some flag -changes can also cause the Bazel server to be restarted (e.g. changing -[startup options](https://bazel.build/docs/user-manual#startup-options)). - -A good execution cache is also valuable for build performance. An execution -cache can be kept locally -[on disk](https://bazel.build/remote/caching#disk-cache), or -[remotely](https://bazel.build/remote/caching). The cache can be shared among -Bazel servers, and indeed among developers. - -## Avoid discarding the analysis cache - -Bazel will print a warning if either the analysis cache was discarded or the -server was restarted. Either of these should be avoided during iterative use: - -* Be mindful of changing `bazel` flags in the middle of an iterative - workflow. For example, mixing a `bazel build -c opt` with a `bazel cquery` - causes each command to discard the analysis cache of the other. In general, - try to use a fixed set of flags for the duration of a particular workflow. - -* Losing the Bazel server loses the analysis cache. The Bazel server has a - [configurable](https://bazel.build/docs/user-manual#max-idle-secs) idle - time, after which it shuts down. You can configure this time via your - bazelrc file to suit your needs. The server is also restarted when startup - flags change, so, again, avoid changing those flags if possible. - -* Beware that the Bazel server is killed if you press - Ctrl-C repeatedly while Bazel is running. It is tempting to try to save time - by interrupting a running build that is no longer needed, but only press - Ctrl-C once to request a graceful end of the current invocation. - -* If you want to use multiple sets of flags from the same workspace, you can - use multiple, distinct output bases, switched with the `--output_base` - flag, as shown in the sketch below. Each output base gets its own Bazel - server. - -To make this condition an error rather than a warning, you can use the -`--noallow_analysis_cache_discard` flag (introduced in Bazel 6.4.0).
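- -A minimal sketch of the multiple-output-base approach (paths and the target label are illustrative); each output base keeps its own server and analysis cache, so switching between the two flag sets no longer discards state: - -```sh -# --output_base is a startup option, so it goes before the command. -bazel --output_base="$HOME/.cache/bazel-opt" build -c opt //app:bin -bazel --output_base="$HOME/.cache/bazel-dbg" build -c dbg //app:bin -```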
diff --git a/8.0.1/advanced/performance/json-trace-profile.mdx b/8.0.1/advanced/performance/json-trace-profile.mdx deleted file mode 100644 index 56e278c..0000000 --- a/8.0.1/advanced/performance/json-trace-profile.mdx +++ /dev/null @@ -1,200 +0,0 @@ ---- -title: 'JSON Trace Profile' ---- - - - -The JSON trace profile can be very useful to quickly understand what Bazel spent -time on during the invocation. - -By default, for all build-like commands and query, Bazel writes a profile into -the output base named `command-$INVOCATION_ID.profile.gz`, where -`$INVOCATION_ID` is the invocation identifier of the command. Bazel also creates -a symlink called `command.profile.gz` in the output base that points to the profile -of the latest command. You can configure whether a profile is written with the -[`--generate_json_trace_profile`](/reference/command-line-reference#flag--generate_json_trace_profile) -flag, and the location it is written to with the -[`--profile`](/docs/user-manual#profile) flag. Locations ending with `.gz` are -compressed with GZIP. Bazel keeps the last 5 profiles, configurable by -[`--profiles_to_retain`](/reference/command-line-reference#flag--profiles_to_retain), -in the output base by default for post-build analysis. Explicitly passing a -profile path with `--profile` disables automatic garbage collection. - -## Tools - -You can load this profile into `chrome://tracing` or analyze and -post-process it with other tools. - -### `chrome://tracing` - -To visualize the profile, open `chrome://tracing` in a Chrome browser tab, -click "Load" and pick the (potentially compressed) profile file. For more -detailed results, click the boxes in the lower left corner. - -Example profile: - -![Example profile](/docs/images/json-trace-profile.png "Example profile") - -**Figure 1.** Example profile. - -You can use these keyboard controls to navigate: - -* Press `1` for "select" mode. In this mode, you can select - particular boxes to inspect the event details (see lower left corner). - Select multiple events to get a summary and aggregated statistics. -* Press `2` for "pan" mode. Then drag the mouse to move the view. You - can also use `a`/`d` to move left/right. -* Press `3` for "zoom" mode. Then drag the mouse to zoom. You can - also use `w`/`s` to zoom in/out. -* Press `4` for "timing" mode where you can measure the distance - between two events. -* Press `?` to learn about all controls. - -### `bazel analyze-profile` - -The Bazel subcommand [`analyze-profile`](/docs/user-manual#analyze-profile) -consumes a profile format and prints cumulative statistics for -different task types for each build phase and an analysis of the critical path. - -For example, the commands - -``` -$ bazel build --profile=/tmp/profile.gz //path/to:target -...
-$ bazel analyze-profile /tmp/profile.gz -``` - -may yield output of this form: - -``` -INFO: Profile created on Tue Jun 16 08:59:40 CEST 2020, build ID: 0589419c-738b-4676-a374-18f7bbc7ac23, output base: /home/johndoe/.cache/bazel/_bazel_johndoe/d8eb7a85967b22409442664d380222c0 - -=== PHASE SUMMARY INFORMATION === - -Total launch phase time 1.070 s 12.95% -Total init phase time 0.299 s 3.62% -Total loading phase time 0.878 s 10.64% -Total analysis phase time 1.319 s 15.98% -Total preparation phase time 0.047 s 0.57% -Total execution phase time 4.629 s 56.05% -Total finish phase time 0.014 s 0.18% ------------------------------------------------- -Total run time 8.260 s 100.00% - -Critical path (4.245 s): - Time Percentage Description - 8.85 ms 0.21% _Ccompiler_Udeps for @local_config_cc// compiler_deps - 3.839 s 90.44% action 'Compiling external/com_google_protobuf/src/google/protobuf/compiler/php/php_generator.cc [for host]' - 270 ms 6.36% action 'Linking external/com_google_protobuf/protoc [for host]' - 0.25 ms 0.01% runfiles for @com_google_protobuf// protoc - 126 ms 2.97% action 'ProtoCompile external/com_google_protobuf/python/google/protobuf/compiler/plugin_pb2.py' - 0.96 ms 0.02% runfiles for //tools/aquery_differ aquery_differ -``` - -### Bazel Invocation Analyzer - -The open-source -[Bazel Invocation Analyzer](https://github.com/EngFlow/bazel_invocation_analyzer) -consumes a profile format and prints suggestions on how to improve -the build’s performance. This analysis can be performed using its CLI or on -[https://analyzer.engflow.com](https://analyzer.engflow.com). - -### `jq` - -`jq` is like `sed` for JSON data. An example usage of `jq` to extract all -durations of the sandbox creation step in local action execution: - -``` -$ zcat $(../bazel-6.0.0rc1-linux-x86_64 info output_base)/command.profile.gz | jq '.traceEvents | .[] | select(.name == "sandbox.createFileSystem") | .dur' -6378 -7247 -11850 -13756 -6555 -7445 -8487 -15520 -[...] -``` - -## Profile information - -The profile contains multiple rows. Usually the bulk of rows represent Bazel -threads and their corresponding events, but some special rows are also included. - -The special rows included depend on the version of Bazel invoked when the -profile was created, and may be customized by different flags. - -Figure 1 shows a profile created with Bazel v5.3.1 and includes these rows: - -* `action count`: Displays how many concurrent actions were in flight. Click - on it to see the actual value. Should go up to the value of - [`--jobs`](/reference/command-line-reference#flag--jobs) in clean - builds. -* `CPU usage (Bazel)`: For each second of the build, displays the amount of - CPU that was used by Bazel (a value of 1 equals one core being 100% busy). -* `Critical Path`: Displays one block for each action on the critical path. -* `Main Thread`: Bazel’s main thread. Useful to get a high-level picture of - what Bazel is doing, for example "Launch Blaze", "evaluateTargetPatterns", - and "runAnalysisPhase". -* `Garbage Collector`: Displays minor and major Garbage Collection (GC) - pauses. - -## Common performance issues - -When analyzing performance profiles, look for: - -* Slower than expected analysis phase (`runAnalysisPhase`), especially on - incremental builds. This can be a sign of a poor rule implementation, for - example one that flattens depsets. Package loading can be slowed down by an - excessive number of targets, complex macros, or recursive globs. -* Individual slow actions, especially those on the critical path.
It might be - possible to split large actions into multiple smaller actions or reduce the - set of (transitive) dependencies to speed them up. Also check for an unusually - high non-`PROCESS_TIME` (such as `REMOTE_SETUP` or `FETCH`). -* Bottlenecks, that is, a small number of threads are busy while all others are - idle / waiting for the result (see around 22s and 29s in Figure 1). - Optimizing this will most likely require touching the rule implementations - or Bazel itself to introduce more parallelism. This can also happen when - there is an unusual amount of GC. - -## Profile file format - -The top-level object contains metadata (`otherData`) and the actual tracing data -(`traceEvents`). The metadata contains extra info, for example the invocation ID -and date of the Bazel invocation. - -Example: - -```json -{ - "otherData": { - "build_id": "101bff9a-7243-4c1a-8503-9dc6ae4c3b05", - "date": "Wed Oct 26 08:22:35 CEST 2022", - "profile_finish_ts": "1677666095162000", - "output_base": "/usr/local/google/_bazel_johndoe/573d4be77eaa72b91a3dfaa497bf8cd0" - }, - "traceEvents": [ - {"name":"thread_name","ph":"M","pid":1,"tid":0,"args":{"name":"Critical Path"}}, - ... - {"cat":"build phase marker","name":"Launch Blaze","ph":"X","ts":-1306000,"dur":1306000,"pid":1,"tid":21}, - ... - {"cat":"package creation","name":"foo","ph":"X","ts":2685358,"dur":784,"pid":1,"tid":246}, - ... - {"name":"thread_name","ph":"M","pid":1,"tid":11,"args":{"name":"Garbage Collector"}}, - {"cat":"gc notification","name":"minor GC","ph":"X","ts":825986,"dur":11000,"pid":1,"tid":11}, - ... - {"cat":"action processing","name":"Compiling foo/bar.c","ph":"X","ts":54413389,"dur":357594,"pid":1,"args":{"mnemonic":"CppCompile"},"tid":341}, - ] -} -``` - -Timestamps (`ts`) and durations (`dur`) in the trace events are given in -microseconds. The category (`cat`) is one of the enum values of `ProfilerTask`. -Note that some events are merged together if they are very short and close to -each other; pass -[`--noslim_profile`](/reference/command-line-reference#flag--slim_profile) -if you would like to prevent event merging. - -See also the -[Chrome Trace Event Format Specification](https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview).
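- -Building on this format, here is a small sketch that lists the longest events in the most recent profile, using the `dur` and `name` fields described above (the output base is resolved via `bazel info`): - -```sh -zcat "$(bazel info output_base)/command.profile.gz" | -  jq -r '[.traceEvents[] | select(.dur != null)] -         | sort_by(.dur) | reverse | .[:10][] -         | "\(.dur) us\t\(.name)"' -```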
-
-You can also pass any one of these flags individually:
-
- * `--discard_analysis_cache` will reduce the memory used during execution (not
-analysis). Incremental builds will not have to redo package loading, but will
-have to redo analysis and execution (although the on-disk action cache can
-prevent most re-execution).
- * `--notrack_incremental_state` will not store any edges in Bazel's internal
-  dependency graph, so that it is unusable for incremental builds. The next
-  build will discard that data, but it is preserved until then, for internal
-  debugging, unless `--nokeep_state_after_build` is specified.
- * `--nokeep_state_after_build` will discard all data after the build, so that
-  incremental builds have to build from scratch (except for the on-disk action
-  cache). Alone, it does not affect the high-water mark of the current build.
-
-### Trade build flexibility for memory with Skyfocus (Experimental)
-
-If you want to make Bazel use less memory *and* retain incremental build speeds,
-you can tell Bazel the working set of files that you will be modifying, and
-Bazel will only keep state needed to correctly incrementally rebuild changes to
-those files. This feature is called **Skyfocus**.
-
-To use Skyfocus, pass the `--experimental_enable_skyfocus` flag:
-
-```sh
-bazel build //pkg:target --experimental_enable_skyfocus
-```
-
-By default, the working set will be the set of files next to the target being
-built. In the example, all files in `//pkg` will be kept in the working set, and
-changes to files outside of the working set will be disallowed until you issue
-`bazel clean` or restart the Bazel server.
-
-If you want to specify an exact set of files or directories, use the
-`--experimental_working_set` flag, like so:
-
-```sh
-bazel build //pkg:target --experimental_enable_skyfocus \
-  --experimental_working_set=path/to/another/dir,path/to/tests/dir
-```
-
-You can also pass `--experimental_skyfocus_dump_post_gc_stats` to show the
-amount of memory reclaimed. Putting it all together, you should see something
-like this:
-
-```none
-$ bazel test //pkg:target //tests/... --experimental_enable_skyfocus --experimental_working_set dir1,dir2,dir3/subdir --experimental_skyfocus_dump_post_gc_stats
-INFO: --experimental_enable_skyfocus is enabled. Blaze will reclaim memory not needed to build the working set. Run 'blaze dump --skyframe=working_set' to show the working set, after this command.
-WARNING: Changes outside of the working set will cause a build error.
-INFO: Analyzed 149 targets (4533 packages loaded, 169438 targets configured).
-INFO: Found 25 targets and 124 test targets...
-INFO: Updated working set successfully.
-INFO: Focusing on 334 roots, 3 leafs... (use --experimental_skyfocus_dump_keys to show them)
-INFO: Heap: 1237MB -> 676MB (-45.31%)
-INFO: Elapsed time: 192.670s ...
-INFO: Build completed successfully, 62303 total actions
-```
-
-In this example, Skyfocus allowed Bazel to drop 561MB (45%) of memory.
-Incremental builds for changes to files under `dir1`, `dir2`, and `dir3/subdir`
-remain fast, with the tradeoff that Bazel cannot rebuild changed files outside
-of these directories.
-
-## Memory Profiling
-
-Bazel comes with a built-in memory profiler that can help you check your rule’s
-memory use. Read more about this process on the
-[Memory Profiling section](/rules/performance#memory-profiling) of our
-documentation on how to improve the performance of custom rules.
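-
-As a quick start, the `--memory_profile` flag writes heap data gathered during
-the build to a file you can inspect (the output path here is illustrative):
-
-```sh
-bazel build --memory_profile=/tmp/memory_profile.log //pkg:target
-```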
diff --git a/8.0.1/basics/artifact-based-builds.mdx b/8.0.1/basics/artifact-based-builds.mdx
deleted file mode 100644
index 79f3514..0000000
--- a/8.0.1/basics/artifact-based-builds.mdx
+++ /dev/null
@@ -1,279 +0,0 @@
----
-title: 'Artifact-Based Build Systems'
----
-
-
-
-This page covers artifact-based build systems and the philosophy behind their
-creation. Bazel is an artifact-based build system. While task-based build
-systems are a good step above build scripts, they give too much power to
-individual engineers by letting them define their own tasks.
-
-Artifact-based build systems have a small number of tasks defined by the system
-that engineers can configure in a limited way. Engineers still tell the system
-**what** to build, but the build system determines **how** to build it. As with
-task-based build systems, artifact-based build systems, such as Bazel, still
-have buildfiles, but the contents of those buildfiles are very different. Rather
-than being an imperative set of commands in a Turing-complete scripting language
-describing how to produce an output, buildfiles in Bazel are a declarative
-manifest describing a set of artifacts to build, their dependencies, and a
-limited set of options that affect how they’re built. When engineers run `bazel`
-on the command line, they specify a set of targets to build (the **what**), and
-Bazel is responsible for configuring, running, and scheduling the compilation
-steps (the **how**). Because the build system now has full control over what
-tools to run when, it can make much stronger guarantees that allow it to be far
-more efficient while still guaranteeing correctness.
-
-## A functional perspective
-
-It’s easy to make an analogy between artifact-based build systems and functional
-programming. Traditional imperative programming languages (such as Java, C, and
-Python) specify lists of statements to be executed one after another, in the
-same way that task-based build systems let programmers define a series of steps
-to execute. Functional programming languages (such as Haskell and ML), in
-contrast, are structured more like a series of mathematical equations. In
-functional languages, the programmer describes a computation to perform, but
-leaves the details of when and exactly how that computation is executed to the
-compiler.
-
-This maps to the idea of declaring a manifest in an artifact-based build system
-and letting the system figure out how to execute the build. Many problems can't
-be easily expressed using functional programming, but the ones that can benefit
-greatly from it: the language is often able to trivially parallelize such
-programs and make strong guarantees about their correctness that would be
-impossible in an imperative language. The easiest problems to express using
-functional programming are the ones that simply involve transforming one piece
-of data into another using a series of rules or functions. And that’s exactly
-what a build system is: the whole system is effectively a mathematical function
-that takes source files (and tools like the compiler) as inputs and produces
-binaries as outputs. So, it’s not surprising that it works well to base a build
-system around the tenets of functional programming.
-
-## Understanding artifact-based build systems
-
-Google's build system, Blaze, was the first artifact-based build system. Bazel
-is the open-sourced version of Blaze.
-
-Here’s what a buildfile (normally named `BUILD`) looks like in Bazel:
-
-```python
-java_binary(
-    name = "MyBinary",
-    srcs = ["MyBinary.java"],
-    deps = [
-        ":mylib",
-    ],
-)
-java_library(
-    name = "mylib",
-    srcs = ["MyLibrary.java", "MyHelper.java"],
-    visibility = ["//java/com/example/myproduct:__subpackages__"],
-    deps = [
-        "//java/com/example/common",
-        "//java/com/example/myproduct/otherlib",
-    ],
-)
-```
-
-In Bazel, `BUILD` files define targets—the two types of targets here are
-`java_binary` and `java_library`. Every target corresponds to an artifact that
-can be created by the system: binary targets produce binaries that can be
-executed directly, and library targets produce libraries that can be used by
-binaries or other libraries. Every target has:
-
-* `name`: how the target is referenced on the command line and by other
-  targets
-* `srcs`: the source files to be compiled to create the artifact for the target
-* `deps`: other targets that must be built before this target and linked into
-  it
-
-Dependencies can either be within the same package (such as `MyBinary`’s
-dependency on `:mylib`) or on a different package in the same source hierarchy
-(such as `mylib`’s dependency on `//java/com/example/common`).
-
-As with task-based build systems, you perform builds using Bazel’s command-line
-tool. To build the `MyBinary` target, you run `bazel build :MyBinary`. After
-entering that command for the first time in a clean repository, Bazel:
-
-1. Parses every `BUILD` file in the workspace to create a graph of dependencies
-   among artifacts.
-1. Uses the graph to determine the transitive dependencies of `MyBinary`; that
-   is, every target that `MyBinary` depends on and every target that those
-   targets depend on, recursively.
-1. Builds each of those dependencies, in order. Bazel starts by building each
-   target that has no other dependencies and keeps track of which dependencies
-   still need to be built for each target. As soon as all of a target’s
-   dependencies are built, Bazel starts building that target. This process
-   continues until every one of `MyBinary`’s transitive dependencies has been
-   built.
-1. Builds `MyBinary` to produce a final executable binary that links in all of
-   the dependencies that were built in step 3.
-
-Fundamentally, it might not seem like what’s happening here is that much
-different from what happened when using a task-based build system. Indeed, the
-end result is the same binary, and the process for producing it involved
-analyzing a bunch of steps to find dependencies among them, and then running
-those steps in order. But there are critical differences. The first one appears
-in step 3: because Bazel knows that each target only produces a Java library, it
-knows that all it has to do is run the Java compiler rather than an arbitrary
-user-defined script, so it knows that it’s safe to run these steps in parallel.
-This can produce an order of magnitude performance improvement over building
-targets one at a time on a multicore machine, and is only possible because the
-artifact-based approach leaves the build system in charge of its own execution
-strategy so that it can make stronger guarantees about parallelism.
-
-The benefits extend beyond parallelism, though. The next thing that this
-approach gives us becomes apparent when the developer types `bazel
-build :MyBinary` a second time without making any changes: Bazel exits in less
-than a second with a message saying that the target is up to date.
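-
-For illustration, that second invocation might look something like this (the
-exact wording and timings vary by Bazel version):
-
-```none
-$ bazel build :MyBinary
-INFO: Analyzed target //:MyBinary (0 packages loaded, 0 targets configured).
-INFO: Found 1 target...
-Target //:MyBinary up-to-date:
-  bazel-bin/MyBinary.jar
-  bazel-bin/MyBinary
-INFO: Elapsed time: 0.124s, Critical Path: 0.00s
-INFO: 1 process: 1 internal.
-INFO: Build completed successfully, 1 total action
-```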
This is -possible due to the functional programming paradigm we talked about -earlier—Bazel knows that each target is the result only of running a Java -compiler, and it knows that the output from the Java compiler depends only on -its inputs, so as long as the inputs haven’t changed, the output can be reused. -And this analysis works at every level; if `MyBinary.java` changes, Bazel knows -to rebuild `MyBinary` but reuse `mylib`. If a source file for -`//java/com/example/common` changes, Bazel knows to rebuild that library, -`mylib`, and `MyBinary`, but reuse `//java/com/example/myproduct/otherlib`. -Because Bazel knows about the properties of the tools it runs at every step, -it’s able to rebuild only the minimum set of artifacts each time while -guaranteeing that it won’t produce stale builds. - -Reframing the build process in terms of artifacts rather than tasks is subtle -but powerful. By reducing the flexibility exposed to the programmer, the build -system can know more about what is being done at every step of the build. It can -use this knowledge to make the build far more efficient by parallelizing build -processes and reusing their outputs. But this is really just the first step, and -these building blocks of parallelism and reuse form the basis for a distributed -and highly scalable build system. - -## Other nifty Bazel tricks - -Artifact-based build systems fundamentally solve the problems with parallelism -and reuse that are inherent in task-based build systems. But there are still a -few problems that came up earlier that we haven’t addressed. Bazel has clever -ways of solving each of these, and we should discuss them before moving on. - -### Tools as dependencies - -One problem we ran into earlier was that builds depended on the tools installed -on our machine, and reproducing builds across systems could be difficult due to -different tool versions or locations. The problem becomes even more difficult -when your project uses languages that require different tools based on which -platform they’re being built on or compiled for (such as, Windows versus Linux), -and each of those platforms requires a slightly different set of tools to do the -same job. - -Bazel solves the first part of this problem by treating tools as dependencies to -each target. Every `java_library` in the workspace implicitly depends on a Java -compiler, which defaults to a well-known compiler. Whenever Bazel builds a -`java_library`, it checks to make sure that the specified compiler is available -at a known location. Just like any other dependency, if the Java compiler -changes, every artifact that depends on it is rebuilt. - -Bazel solves the second part of the problem, platform independence, by setting -[build configurations](/run/build#build-config-cross-compilation). Rather than -targets depending directly on their tools, they depend on types of configurations: - -* **Host configuration**: building tools that run during the build -* **Target configuration**: building the binary you ultimately requested - -### Extending the build system - -Bazel comes with targets for several popular programming languages out of the -box, but engineers will always want to do more—part of the benefit of task-based -systems is their flexibility in supporting any kind of build process, and it -would be better not to give that up in an artifact-based build system. -Fortunately, Bazel allows its supported target types to be extended by -[adding custom rules](/extending/rules). 
- -To define a rule in Bazel, the rule author declares the inputs that the rule -requires (in the form of attributes passed in the `BUILD` file) and the fixed -set of outputs that the rule produces. The author also defines the actions that -will be generated by that rule. Each action declares its inputs and outputs, -runs a particular executable or writes a particular string to a file, and can be -connected to other actions via its inputs and outputs. This means that actions -are the lowest-level composable unit in the build system—an action can do -whatever it wants so long as it uses only its declared inputs and outputs, and -Bazel takes care of scheduling actions and caching their results as appropriate. - -The system isn’t foolproof given that there’s no way to stop an action developer -from doing something like introducing a nondeterministic process as part of -their action. But this doesn’t happen very often in practice, and pushing the -possibilities for abuse all the way down to the action level greatly decreases -opportunities for errors. Rules supporting many common languages and tools are -widely available online, and most projects will never need to define their own -rules. Even for those that do, rule definitions only need to be defined in one -central place in the repository, meaning most engineers will be able to use -those rules without ever having to worry about their implementation. - -### Isolating the environment - -Actions sound like they might run into the same problems as tasks in other -systems—isn’t it still possible to write actions that both write to the same -file and end up conflicting with one another? Actually, Bazel makes these -conflicts impossible by using _[sandboxing](/docs/sandboxing)_. On supported -systems, every action is isolated from every other action via a filesystem -sandbox. Effectively, each action can see only a restricted view of the -filesystem that includes the inputs it has declared and any outputs it has -produced. This is enforced by systems such as LXC on Linux, the same technology -behind Docker. This means that it’s impossible for actions to conflict with one -another because they are unable to read any files they don’t declare, and any -files that they write but don’t declare will be thrown away when the action -finishes. Bazel also uses sandboxes to restrict actions from communicating via -the network. - -### Making external dependencies deterministic - -There’s still one problem remaining: build systems often need to download -dependencies (whether tools or libraries) from external sources rather than -directly building them. This can be seen in the example via the -`@com_google_common_guava_guava//jar` dependency, which downloads a `JAR` file -from Maven. - -Depending on files outside of the current workspace is risky. Those files could -change at any time, potentially requiring the build system to constantly check -whether they’re fresh. If a remote file changes without a corresponding change -in the workspace source code, it can also lead to unreproducible builds—a build -might work one day and fail the next for no obvious reason due to an unnoticed -dependency change. Finally, an external dependency can introduce a huge security -risk when it is owned by a third party: if an attacker is able to infiltrate -that third-party server, they can replace the dependency file with something of -their own design, potentially giving them full control over your build -environment and its output. 
- -The fundamental problem is that we want the build system to be aware of these -files without having to check them into source control. Updating a dependency -should be a conscious choice, but that choice should be made once in a central -place rather than managed by individual engineers or automatically by the -system. This is because even with a “Live at Head” model, we still want builds -to be deterministic, which implies that if you check out a commit from last -week, you should see your dependencies as they were then rather than as they are -now. - -Bazel and some other build systems address this problem by requiring a -workspacewide manifest file that lists a _cryptographic hash_ for every external -dependency in the workspace. The hash is a concise way to uniquely represent the -file without checking the entire file into source control. Whenever a new -external dependency is referenced from a workspace, that dependency’s hash is -added to the manifest, either manually or automatically. When Bazel runs a -build, it checks the actual hash of its cached dependency against the expected -hash defined in the manifest and redownloads the file only if the hash differs. - -If the artifact we download has a different hash than the one declared in the -manifest, the build will fail unless the hash in the manifest is updated. This -can be done automatically, but that change must be approved and checked into -source control before the build will accept the new dependency. This means that -there’s always a record of when a dependency was updated, and an external -dependency can’t change without a corresponding change in the workspace source. -It also means that, when checking out an older version of the source code, the -build is guaranteed to use the same dependencies that it was using at the point -when that version was checked in (or else it will fail if those dependencies are -no longer available). - -Of course, it can still be a problem if a remote server becomes unavailable or -starts serving corrupt data—this can cause all of your builds to begin failing -if you don’t have another copy of that dependency available. To avoid this -problem, we recommend that, for any nontrivial project, you mirror all of its -dependencies onto servers or services that you trust and control. Otherwise you -will always be at the mercy of a third party for your build system’s -availability, even if the checked-in hashes guarantee its security. diff --git a/8.0.1/basics/build-systems.mdx b/8.0.1/basics/build-systems.mdx deleted file mode 100644 index b3c6338..0000000 --- a/8.0.1/basics/build-systems.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: 'Why a Build System?' ---- - - - -This page discusses what build systems are, what they do, why you should use a -build system, and why compilers and build scripts aren't the best choice as your -organization starts to scale. It's intended for developers who don't have much -experience with a build system. - -## What is a build system? - -Fundamentally, all build systems have a straightforward purpose: they transform -the source code written by engineers into executable binaries that can be read -by machines. Build systems aren't just for human-authored code; they also allow -machines to create builds automatically, whether for testing or for releases to -production. In an organization with thousands of engineers, it's common that -most builds are triggered automatically rather than directly by engineers. - -### Can't I just use a compiler? 
- -The need for a build system might not be immediately obvious. Most engineers -don't use a build system while learning to code: most start by invoking tools -like `gcc` or `javac` directly from the command line, or the equivalent in an -integrated development environment (IDE). As long as all the source code is in -the same directory, a command like this works fine: - -```posix-terminal -javac *.java -``` - -This instructs the Java compiler to take every Java source file in the current -directory and turn it into a binary class file. In the simplest case, this is -all you need. - -However, as soon as code expands, the complications begin. `javac` is smart -enough to look in subdirectories of the current directory to find code to -import. But it has no way of finding code stored in _other parts_ of the -filesystem (perhaps a library shared by several projects). It also only knows -how to build Java code. Large systems often involve different pieces written in -a variety of programming languages with webs of dependencies among those pieces, -meaning no compiler for a single language can possibly build the entire system. - -Once you're dealing with code from multiple languages or multiple compilation -units, building code is no longer a one-step process. Now you must evaluate what -your code depends on and build those pieces in the proper order, possibly using -a different set of tools for each piece. If any dependencies change, you must -repeat this process to avoid depending on stale binaries. For a codebase of even -moderate size, this process quickly becomes tedious and error-prone. - -The compiler also doesn’t know anything about how to handle external -dependencies, such as third-party `JAR` files in Java. Without a build system, -you could manage this by downloading the dependency from the internet, sticking -it in a `lib` folder on the hard drive, and configuring the compiler to read -libraries from that directory. Over time, it's difficult to maintain the -updates, versions, and source of these external dependencies. - -### What about shell scripts? - -Suppose that your hobby project starts out simple enough that you can build it -using just a compiler, but you begin running into some of the problems described -previously. Maybe you still don’t think you need a build system and can automate -away the tedious parts using some simple shell scripts that take care of -building things in the correct order. This helps out for a while, but pretty -soon you start running into even more problems: - -* It becomes tedious. As your system grows more complex, you begin spending - almost as much time working on your build scripts as on real code. Debugging - shell scripts is painful, with more and more hacks being layered on top of - one another. - -* It’s slow. To make sure you weren’t accidentally relying on stale libraries, - you have your build script build every dependency in order every time you - run it. You think about adding some logic to detect which parts need to be - rebuilt, but that sounds awfully complex and error prone for a script. Or - you think about specifying which parts need to be rebuilt each time, but - then you’re back to square one. - -* Good news: it’s time for a release! Better go figure out all the arguments - you need to pass to the jar command to make your final build. And remember - how to upload it and push it out to the central repository. And build and - push the documentation updates, and send out a notification to users. 
Hmm, - maybe this calls for another script... - -* Disaster! Your hard drive crashes, and now you need to recreate your entire - system. You were smart enough to keep all of your source files in version - control, but what about those libraries you downloaded? Can you find them - all again and make sure they were the same version as when you first - downloaded them? Your scripts probably depended on particular tools being - installed in particular places—can you restore that same environment so that - the scripts work again? What about all those environment variables you set a - long time ago to get the compiler working just right and then forgot about? - -* Despite the problems, your project is successful enough that you’re able to - begin hiring more engineers. Now you realize that it doesn’t take a disaster - for the previous problems to arise—you need to go through the same painful - bootstrapping process every time a new developer joins your team. And - despite your best efforts, there are still small differences in each - person’s system. Frequently, what works on one person’s machine doesn’t work - on another’s, and each time it takes a few hours of debugging tool paths or - library versions to figure out where the difference is. - -* You decide that you need to automate your build system. In theory, this is - as simple as getting a new computer and setting it up to run your build - script every night using cron. You still need to go through the painful - setup process, but now you don’t have the benefit of a human brain being - able to detect and resolve minor problems. Now, every morning when you get - in, you see that last night’s build failed because yesterday a developer - made a change that worked on their system but didn’t work on the automated - build system. Each time it’s a simple fix, but it happens so often that you - end up spending a lot of time each day discovering and applying these simple - fixes. - -* Builds become slower and slower as the project grows. One day, while waiting - for a build to complete, you gaze mournfully at the idle desktop of your - coworker, who is on vacation, and wish there were a way to take advantage of - all that wasted computational power. - -You’ve run into a classic problem of scale. For a single developer working on at -most a couple hundred lines of code for at most a week or two (which might have -been the entire experience thus far of a junior developer who just graduated -university), a compiler is all you need. Scripts can maybe take you a little bit -farther. But as soon as you need to coordinate across multiple developers and -their machines, even a perfect build script isn’t enough because it becomes very -difficult to account for the minor differences in those machines. At this point, -this simple approach breaks down and it’s time to invest in a real build system. diff --git a/8.0.1/basics/dependencies.mdx b/8.0.1/basics/dependencies.mdx deleted file mode 100644 index 1d3bf8f..0000000 --- a/8.0.1/basics/dependencies.mdx +++ /dev/null @@ -1,301 +0,0 @@ ---- -title: 'Dependency Management' ---- - - - -In looking through the previous pages, one theme repeats over and over: managing -your own code is fairly straightforward, but managing its dependencies is much -more difficult. 
There are all sorts of dependencies: sometimes there’s a -dependency on a task (such as “push the documentation before I mark a release as -complete”), and sometimes there’s a dependency on an artifact (such as “I need -to have the latest version of the computer vision library to build my code”). -Sometimes, you have internal dependencies on another part of your codebase, and -sometimes you have external dependencies on code or data owned by another team -(either in your organization or a third party). But in any case, the idea of “I -need that before I can have this” is something that recurs repeatedly in the -design of build systems, and managing dependencies is perhaps the most -fundamental job of a build system. - -## Dealing with Modules and Dependencies - -Projects that use artifact-based build systems like Bazel are broken into a set -of modules, with modules expressing dependencies on one another via `BUILD` -files. Proper organization of these modules and dependencies can have a huge -effect on both the performance of the build system and how much work it takes to -maintain. - -## Using Fine-Grained Modules and the 1:1:1 Rule - -The first question that comes up when structuring an artifact-based build is -deciding how much functionality an individual module should encompass. In Bazel, -a _module_ is represented by a target specifying a buildable unit like a -`java_library` or a `go_binary`. At one extreme, the entire project could be -contained in a single module by putting one `BUILD` file at the root and -recursively globbing together all of that project’s source files. At the other -extreme, nearly every source file could be made into its own module, effectively -requiring each file to list in a `BUILD` file every other file it depends on. - -Most projects fall somewhere between these extremes, and the choice involves a -trade-off between performance and maintainability. Using a single module for the -entire project might mean that you never need to touch the `BUILD` file except -when adding an external dependency, but it means that the build system must -always build the entire project all at once. This means that it won’t be able to -parallelize or distribute parts of the build, nor will it be able to cache parts -that it’s already built. One-module-per-file is the opposite: the build system -has the maximum flexibility in caching and scheduling steps of the build, but -engineers need to expend more effort maintaining lists of dependencies whenever -they change which files reference which. - -Though the exact granularity varies by language (and often even within -language), Google tends to favor significantly smaller modules than one might -typically write in a task-based build system. A typical production binary at -Google often depends on tens of thousands of targets, and even a moderate-sized -team can own several hundred targets within its codebase. For languages like -Java that have a strong built-in notion of packaging, each directory usually -contains a single package, target, and `BUILD` file (Pants, another build system -based on Bazel, calls this the 1:1:1 rule). Languages with weaker packaging -conventions frequently define multiple targets per `BUILD` file. - -The benefits of smaller build targets really begin to show at scale because they -lead to faster distributed builds and a less frequent need to rebuild targets. 
-The advantages become even more compelling after testing enters the picture, as -finer-grained targets mean that the build system can be much smarter about -running only a limited subset of tests that could be affected by any given -change. Because Google believes in the systemic benefits of using smaller -targets, we’ve made some strides in mitigating the downside by investing in -tooling to automatically manage `BUILD` files to avoid burdening developers. - -Some of these tools, such as `buildifier` and `buildozer`, are available with -Bazel in the [`buildtools` -directory](https://github.com/bazelbuild/buildtools). - -## Minimizing Module Visibility - -Bazel and other build systems allow each target to specify a visibility — a -property that determines which other targets may depend on it. A private target -can only be referenced within its own `BUILD` file. A target may grant broader -visibility to the targets of an explicitly defined list of `BUILD` files, or, in -the case of public visibility, to every target in the workspace. - -As with most programming languages, it is usually best to minimize visibility as -much as possible. Generally, teams at Google will make targets public only if -those targets represent widely used libraries available to any team at Google. -Teams that require others to coordinate with them before using their code will -maintain an allowlist of customer targets as their target’s visibility. Each -team’s internal implementation targets will be restricted to only directories -owned by the team, and most `BUILD` files will have only one target that isn’t -private. - -## Managing Dependencies - -Modules need to be able to refer to one another. The downside of breaking a -codebase into fine-grained modules is that you need to manage the dependencies -among those modules (though tools can help automate this). Expressing these -dependencies usually ends up being the bulk of the content in a `BUILD` file. - -### Internal dependencies - -In a large project broken into fine-grained modules, most dependencies are -likely to be internal; that is, on another target defined and built in the same -source repository. Internal dependencies differ from external dependencies in -that they are built from source rather than downloaded as a prebuilt artifact -while running the build. This also means that there’s no notion of “version” for -internal dependencies—a target and all of its internal dependencies are always -built at the same commit/revision in the repository. One issue that should be -handled carefully with regard to internal dependencies is how to treat -transitive dependencies (Figure 1). Suppose target A depends on target B, which -depends on a common library target C. Should target A be able to use classes -defined in target C? - -[![Transitive -dependencies](/images/transitive-dependencies.png)](/images/transitive-dependencies.png) - -**Figure 1**. Transitive dependencies - -As far as the underlying tools are concerned, there’s no problem with this; both -B and C will be linked into target A when it is built, so any symbols defined in -C are known to A. Bazel allowed this for many years, but as Google grew, we -began to see problems. Suppose that B was refactored such that it no longer -needed to depend on C. If B’s dependency on C was then removed, A and any other -target that used C via a dependency on B would break. Effectively, a target’s -dependencies became part of its public contract and could never be safely -changed. 
This meant that dependencies accumulated over time and builds at Google
-started to slow down.
-
-Google eventually solved this issue by introducing a "strict transitive
-dependency mode" in Bazel. In this mode, Bazel detects whether a target tries to
-reference a symbol without depending on it directly and, if so, fails with an
-error and a shell command that can be used to automatically insert the
-dependency. Rolling this change out across Google's entire codebase and
-refactoring every one of our millions of build targets to explicitly list their
-dependencies was a multiyear effort, but it was well worth it. Our builds are
-now much faster given that targets have fewer unnecessary dependencies, and
-engineers are empowered to remove dependencies they don't need without worrying
-about breaking targets that depend on them.
-
-As usual, enforcing strict transitive dependencies involved a trade-off. It made
-build files more verbose, as frequently used libraries now need to be listed
-explicitly in many places rather than pulled in incidentally, and engineers
-needed to spend more effort adding dependencies to `BUILD` files. We've since
-developed tools that reduce this toil by automatically detecting many missing
-dependencies and adding them to `BUILD` files without any developer
-intervention. But even without such tools, we've found the trade-off to be well
-worth it as the codebase scales: explicitly adding a dependency to a `BUILD`
-file is a one-time cost, but dealing with implicit transitive dependencies can
-cause ongoing problems as long as the build target exists. Bazel [enforces
-strict transitive
-dependencies](https://blog.bazel.build/2017/06/28/sjd-unused_deps.html)
-on Java code by default.
-
-### External dependencies
-
-If a dependency isn't internal, it must be external. External dependencies are
-those on artifacts that are built and stored outside of the build system. The
-dependency is imported directly from an artifact repository (typically accessed
-over the internet) and used as-is rather than being built from source. One of
-the biggest differences between external and internal dependencies is that
-external dependencies have versions, and those versions exist independently of
-the project's source code.
-
-### Automatic versus manual dependency management
-
-Build systems can allow the versions of external dependencies to be managed
-either manually or automatically. When managed manually, the buildfile
-explicitly lists the version it wants to download from the artifact repository,
-often using a [semantic version string](https://semver.org/) such
-as `1.1.4`. When managed automatically, the source file specifies a range of
-acceptable versions, and the build system always downloads the latest one. For
-example, Gradle allows a dependency version to be declared as "1.+" to specify
-that any minor or patch version of a dependency is acceptable so long as the
-major version is 1.
-
-Automatically managed dependencies can be convenient for small projects, but
-they're usually a recipe for disaster on projects of nontrivial size or that are
-being worked on by more than one engineer. The problem with automatically
-managed dependencies is that you have no control over when the version is
-updated. There's no way to guarantee that external parties won't make breaking
-updates (even when they claim to use semantic versioning), so a build that
-worked one day might be broken the next with no easy way to detect what changed
-or to roll it back to a working state.
Even if the build doesn't break, there
-can be subtle behavior or performance changes that are impossible to track down.
-
-In contrast, because manually managed dependencies require a change in source
-control, they can be easily discovered and rolled back, and it's possible to
-check out an older version of the repository to build with older dependencies.
-Bazel requires that versions of all dependencies be specified manually. At even
-moderate scales, the overhead of manual version management is well worth it for
-the stability it provides.
-
-### The One-Version Rule
-
-Different versions of a library are usually represented by different artifacts,
-so in theory there's no reason that different versions of the same external
-dependency couldn't both be declared in the build system under different names.
-That way, each target could choose which version of the dependency it wanted to
-use. This causes a lot of problems in practice, so Google enforces a strict
-[One-Version
-Rule](https://opensource.google/docs/thirdparty/oneversion/) for
-all third-party dependencies in our codebase.
-
-The biggest problem with allowing multiple versions is the diamond dependency
-issue. Suppose that target A depends on target B and on v1 of an external
-library. If target B is later refactored to add a dependency on v2 of the same
-external library, target A will break because it now depends implicitly on two
-different versions of the same library. Effectively, it's never safe to add a
-new dependency from a target to any third-party library with multiple versions,
-because any of that target's users could already be depending on a different
-version. Following the One-Version Rule makes this conflict impossible—if a
-target adds a dependency on a third-party library, any existing dependencies
-will already be on that same version, so they can happily coexist.
-
-### Transitive external dependencies
-
-Dealing with the transitive dependencies of an external dependency can be
-particularly difficult. Many artifact repositories, such as Maven Central, allow
-artifacts to specify dependencies on particular versions of other artifacts in
-the repository. Build tools like Maven or Gradle often recursively download each
-transitive dependency by default, meaning that adding a single dependency in
-your project could potentially cause dozens of artifacts to be downloaded in
-total.
-
-This is very convenient: when adding a dependency on a new library, it would be
-a big pain to have to track down each of that library's transitive dependencies
-and add them all manually. But there's also a huge downside: because different
-libraries can depend on different versions of the same third-party library, this
-strategy necessarily violates the One-Version Rule and leads to the diamond
-dependency problem. If your target depends on two external libraries that use
-different versions of the same dependency, there's no telling which one you'll
-get. This also means that updating an external dependency could cause seemingly
-unrelated failures throughout the codebase if the new version begins pulling in
-conflicting versions of some of its dependencies.
-
-Bazel did not originally download transitive dependencies automatically. It
-employed a `WORKSPACE` file that required all transitive dependencies to be
-listed, which led to a lot of pain when managing external dependencies. Bazel
-has since added support for automatic transitive external dependency management
-in the form of the `MODULE.bazel` file.
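-
-Once dependencies are resolved through `MODULE.bazel`, you can inspect the full
-transitive module graph from the command line (the `mod` subcommand is
-available in recent Bazel releases; output shape varies by version):
-
-```sh
-bazel mod graph
-```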
See [external dependency -overview](/external/overview) for more details. - -Yet again, the choice here is one between convenience and scalability. Small -projects might prefer not having to worry about managing transitive dependencies -themselves and might be able to get away with using automatic transitive -dependencies. This strategy becomes less and less appealing as the organization -and codebase grows, and conflicts and unexpected results become more and more -frequent. At larger scales, the cost of manually managing dependencies is much -less than the cost of dealing with issues caused by automatic dependency -management. - -### Caching build results using external dependencies - -External dependencies are most often provided by third parties that release -stable versions of libraries, perhaps without providing source code. Some -organizations might also choose to make some of their own code available as -artifacts, allowing other pieces of code to depend on them as third-party rather -than internal dependencies. This can theoretically speed up builds if artifacts -are slow to build but quick to download. - -However, this also introduces a lot of overhead and complexity: someone needs to -be responsible for building each of those artifacts and uploading them to the -artifact repository, and clients need to ensure that they stay up to date with -the latest version. Debugging also becomes much more difficult because different -parts of the system will have been built from different points in the -repository, and there is no longer a consistent view of the source tree. - -A better way to solve the problem of artifacts taking a long time to build is to -use a build system that supports remote caching, as described earlier. Such a -build system saves the resulting artifacts from every build to a location that -is shared across engineers, so if a developer depends on an artifact that was -recently built by someone else, the build system automatically downloads it -instead of building it. This provides all of the performance benefits of -depending directly on artifacts while still ensuring that builds are as -consistent as if they were always built from the same source. This is the -strategy used internally by Google, and Bazel can be configured to use a remote -cache. - -### Security and reliability of external dependencies - -Depending on artifacts from third-party sources is inherently risky. There’s an -availability risk if the third-party source (such as an artifact repository) -goes down, because your entire build might grind to a halt if it’s unable to -download an external dependency. There’s also a security risk: if the -third-party system is compromised by an attacker, the attacker could replace the -referenced artifact with one of their own design, allowing them to inject -arbitrary code into your build. Both problems can be mitigated by mirroring any -artifacts you depend on onto servers you control and blocking your build system -from accessing third-party artifact repositories like Maven Central. The -trade-off is that these mirrors take effort and resources to maintain, so the -choice of whether to use them often depends on the scale of the project. The -security issue can also be completely prevented with little overhead by -requiring the hash of each third-party artifact to be specified in the source -repository, causing the build to fail if the artifact is tampered with. Another -alternative that completely sidesteps the issue is to vendor your project’s -dependencies. 
When a project vendors its dependencies, it checks them into -source control alongside the project’s source code, either as source or as -binaries. This effectively means that all of the project’s external dependencies -are converted to internal dependencies. Google uses this approach internally, -checking every third-party library referenced throughout Google into a -`third_party` directory at the root of Google’s source tree. However, this works -at Google only because Google’s source control system is custom built to handle -an extremely large monorepo, so vendoring might not be an option for all -organizations. diff --git a/8.0.1/basics/distributed-builds.mdx b/8.0.1/basics/distributed-builds.mdx deleted file mode 100644 index c32f44f..0000000 --- a/8.0.1/basics/distributed-builds.mdx +++ /dev/null @@ -1,137 +0,0 @@ ---- -title: 'Distributed Builds' ---- - - - -When you have a large codebase, chains of dependencies can become very deep. -Even simple binaries can often depend on tens of thousands of build targets. At -this scale, it’s simply impossible to complete a build in a reasonable amount -of time on a single machine: no build system can get around the fundamental -laws of physics imposed on a machine’s hardware. The only way to make this work -is with a build system that supports distributed builds wherein the units of -work being done by the system are spread across an arbitrary and scalable -number of machines. Assuming we’ve broken the system’s work into small enough -units (more on this later), this would allow us to complete any build of any -size as quickly as we’re willing to pay for. This scalability is the holy grail -we’ve been working toward by defining an artifact-based build system. - -## Remote caching - -The simplest type of distributed build is one that only leverages _remote -caching_, which is shown in Figure 1. - -[![Distributed build with remote caching](/images/distributed-build-remote-cache.png)](/images/distributed-build-remote-cache.png) - -**Figure 1**. A distributed build showing remote caching - -Every system that performs builds, including both developer workstations and -continuous integration systems, shares a reference to a common remote cache -service. This service might be a fast and local short-term storage system like -Redis or a cloud service like Google Cloud Storage. Whenever a user needs to -build an artifact, whether directly or as a dependency, the system first checks -with the remote cache to see if that artifact already exists there. If so, it -can download the artifact instead of building it. If not, the system builds the -artifact itself and uploads the result back to the cache. This means that -low-level dependencies that don’t change very often can be built once and shared -across users rather than having to be rebuilt by each user. At Google, many -artifacts are served from a cache rather than built from scratch, vastly -reducing the cost of running our build system. - -For a remote caching system to work, the build system must guarantee that builds -are completely reproducible. That is, for any build target, it must be possible -to determine the set of inputs to that target such that the same set of inputs -will produce exactly the same output on any machine. This is the only way to -ensure that the results of downloading an artifact are the same as the results -of building it oneself. 
Note that this requires that each artifact in the cache -be keyed on both its target and a hash of its inputs—that way, different -engineers could make different modifications to the same target at the same -time, and the remote cache would store all of the resulting artifacts and serve -them appropriately without conflict. - -Of course, for there to be any benefit from a remote cache, downloading an -artifact needs to be faster than building it. This is not always the case, -especially if the cache server is far from the machine doing the build. Google’s -network and build system is carefully tuned to be able to quickly share build -results. - -## Remote execution - -Remote caching isn’t a true distributed build. If the cache is lost or if you -make a low-level change that requires everything to be rebuilt, you still need -to perform the entire build locally on your machine. The true goal is to support -remote execution, in which the actual work of doing the build can be spread -across any number of workers. Figure 2 depicts a remote execution system. - -[![Remote execution system](/images/remote-execution-system.png)](/images/remote-execution-system.png) - -**Figure 2**. A remote execution system - -The build tool running on each user’s machine (where users are either human -engineers or automated build systems) sends requests to a central build master. -The build master breaks the requests into their component actions and schedules -the execution of those actions over a scalable pool of workers. Each worker -performs the actions asked of it with the inputs specified by the user and -writes out the resulting artifacts. These artifacts are shared across the other -machines executing actions that require them until the final output can be -produced and sent to the user. - -The trickiest part of implementing such a system is managing the communication -between the workers, the master, and the user’s local machine. Workers might -depend on intermediate artifacts produced by other workers, and the final output -needs to be sent back to the user’s local machine. To do this, we can build on -top of the distributed cache described previously by having each worker write -its results to and read its dependencies from the cache. The master blocks -workers from proceeding until everything they depend on has finished, in which -case they’ll be able to read their inputs from the cache. The final product is -also cached, allowing the local machine to download it. Note that we also need a -separate means of exporting the local changes in the user’s source tree so that -workers can apply those changes before building. - -For this to work, all of the parts of the artifact-based build systems described -earlier need to come together. Build environments must be completely -self-describing so that we can spin up workers without human intervention. Build -processes themselves must be completely self-contained because each step might -be executed on a different machine. Outputs must be completely deterministic so -that each worker can trust the results it receives from other workers. Such -guarantees are extremely difficult for a task-based system to provide, which -makes it nigh-impossible to build a reliable remote execution system on top of -one. - -## Distributed builds at Google - -Since 2008, Google has been using a distributed build system that employs both -remote caching and remote execution, which is illustrated in Figure 3. 
-
-[![High-level build system](/images/high-level-build-system.png)](/images/high-level-build-system.png)
-
-**Figure 3**. Google's distributed build system
-
-Google's remote cache is called ObjFS. It consists of a backend that stores
-build outputs in Bigtables distributed throughout our fleet of production
-machines and a frontend FUSE daemon named objfsd that runs on each developer's
-machine. The FUSE daemon allows engineers to browse build outputs as if they
-were normal files stored on the workstation, but with the file content
-downloaded on-demand only for the few files that are directly requested by the
-user. Serving file contents on-demand greatly reduces both network and disk
-usage, and the system is able to build twice as fast compared to when we stored
-all build output on the developer's local disk.
-
-Google's remote execution system is called Forge. A Forge client in Blaze
-(Bazel's internal equivalent) called
-the Distributor sends requests for each action to a job running in our
-datacenters called the Scheduler. The Scheduler maintains a cache of action
-results, allowing it to return a response immediately if the action has already
-been performed by any other user of the system. If not, it places the action
-into a queue. A large pool of Executor jobs continually read actions from this
-queue, execute them, and store the results directly in the ObjFS Bigtables.
-These results are available to the executors for future actions, or to be
-downloaded by the end user via objfsd.
-
-The end result is a system that scales to efficiently support all builds
-performed at Google. And the scale of Google's builds is truly massive: Google
-runs millions of builds executing millions of test cases and producing petabytes
-of build outputs from billions of lines of source code every day. Not only does
-such a system let our engineers build complex codebases quickly, it also allows
-us to implement a huge number of automated tools and systems that rely on our
-build.
diff --git a/8.0.1/basics/hermeticity.mdx b/8.0.1/basics/hermeticity.mdx
deleted file mode 100644
index 282aad8..0000000
--- a/8.0.1/basics/hermeticity.mdx
+++ /dev/null
@@ -1,109 +0,0 @@
----
-title: 'Hermeticity'
----
-
-
-
-This page covers hermeticity, the benefits of using hermetic builds, and
-strategies for identifying non-hermetic behavior in your builds.
-
-## Overview
-
-When given the same input source code and product configuration, a hermetic
-build system always returns the same output by isolating the build from changes
-to the host system.
-
-In order to isolate the build, hermetic builds are insensitive to libraries and
-other software installed on the local or remote host machine. They depend on
-specific versions of build tools, such as compilers, and dependencies, such as
-libraries. This makes the build process self-contained as it doesn't rely on
-services external to the build environment.
-
-The two important aspects of hermeticity are:
-
-* **Isolation**: Hermetic build systems treat tools as source code. They
-  download copies of tools and manage their storage and use inside managed file
-  trees. This creates isolation between the host machine and local user,
-  including installed versions of languages.
-* **Source identity**: Hermetic build systems try to ensure the sameness of
-  inputs. Code repositories, such as Git, identify sets of code mutations with a
-  unique hash code. Hermetic build systems use this hash to identify changes to
-  the build's input.
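-
-As an illustration of source identity, a version-control hash is a compact
-stand-in for the entire input tree (commands illustrative; any
-content-addressed scheme works the same way):
-
-```sh
-git rev-parse HEAD      # identifies the exact source snapshot being built
-git status --porcelain  # must print nothing, or the hash understates the inputs
-```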
-
-## Benefits
-
-The major benefits of hermetic builds are:
-
-* **Speed**: The output of an action can be cached, and the action need not be
-  run again unless inputs change.
-* **Parallel execution**: For a given set of inputs and outputs, the build
-  system can construct a graph of all actions to calculate efficient and
-  parallel execution. The build system loads the rules, calculates an action
-  graph, and hashes the inputs to look up results in the cache.
-* **Multiple builds**: You can run multiple hermetic builds on the same
-  machine, each build using different tools and versions.
-* **Reproducibility**: Hermetic builds are good for troubleshooting because you
-  know the exact conditions that produced the build.
-
-## Identifying non-hermeticity
-
-If you are preparing to switch to Bazel, migration is easier if you improve
-your existing builds' hermeticity in advance. Some common sources of
-non-hermeticity in builds are:
-
-* Arbitrary processing in `.mk` files
-* Actions or tooling that create files non-deterministically, usually involving
-  build IDs or timestamps
-* System binaries that differ across hosts (such as `/usr/bin` binaries, absolute
-  paths, system C++ compilers for native C++ rules autoconfiguration)
-* Writing to the source tree during the build. This prevents the same source
-  tree from being used for another target. The first build writes to the source
-  tree, fixing the source tree for target A. Then trying to build target B may
-  fail.
-
-## Troubleshooting non-hermetic builds
-
-Starting with local execution, issues that affect local cache hits reveal
-non-hermetic actions.
-
-* Ensure null sequential builds: If you run `make` and get a successful build,
-  running the build again should not rebuild any targets. If you run each build
-  step twice or on different systems and a hash of the file contents differs,
-  the build is not reproducible (a minimal check is sketched at the end of this
-  page).
-* Run steps to
-  [debug local cache hits](/remote/cache-remote#troubleshooting-cache-hits)
-  from a variety of potential client machines to ensure that you catch any
-  cases of client environment leaking into the actions.
-* Execute a build within a Docker container that contains nothing but the
-  checked-out source tree and an explicit list of host tools. Build breakages
-  and error messages will catch implicit system dependencies.
-* Discover and fix hermeticity problems using
-  [remote execution rules](/remote/rules#overview).
-* Enable strict [sandboxing](/docs/sandboxing)
-  at the per-action level, since actions in a build can be stateful and affect
-  the build or the output.
-* [Workspace rules](/remote/workspace)
-  allow developers to add dependencies to external workspaces, but they are
-  rich enough to allow arbitrary processing to happen in the process. You can
-  get a log of some potentially non-hermetic actions in Bazel workspace rules by
-  adding the flag `--experimental_workspace_rules_log_file=PATH` to
-  your Bazel command.
-
-Note: Make your build fully hermetic when mixing remote and local execution,
-using Bazel’s “dynamic strategy” functionality. Running Bazel inside the remote
-Docker container will enable the build to execute the same in both environments.
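-
-A minimal version of the reproducibility check mentioned above, assuming a
-single output artifact (target and paths are illustrative; disable remote and
-disk caches for a stricter check):
-
-```sh
-bazel build //pkg:target
-sha256sum bazel-bin/pkg/target.jar > checksums.txt   # record the output hash
-bazel clean                                          # drop previous outputs
-bazel build //pkg:target
-sha256sum -c checksums.txt   # any mismatch points to a non-hermetic action
-```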
- -## Hermeticity with Bazel - -For more information about how other projects have had success using hermetic -builds with Bazel, see these BazelCon talks: - -* [Building Real-time Systems with Bazel](https://www.youtube.com/watch?v=t_3bckhV_YI) (SpaceX) -* [Bazel Remote Execution and Remote Caching](https://www.youtube.com/watch?v=_bPyEbAyC0s) (Uber and TwoSigma) -* [Faster Builds With Remote Execution and Caching](https://www.youtube.com/watch?v=MyuJRUwT5LI) -* [Fusing Bazel: Faster Incremental Builds](https://www.youtube.com/watch?v=rQd9Zd1ONOw) -* [Remote Execution vs Local Execution](https://www.youtube.com/watch?v=C8wHmIln--g) -* [Improving the Usability of Remote Caching](https://www.youtube.com/watch?v=u5m7V3ZRHLA) (IBM) -* [Building Self Driving Cars with Bazel](https://www.youtube.com/watch?v=Gh4SJuYUoQI&list=PLxNYxgaZ8Rsf-7g43Z8LyXct9ax6egdSj&index=4&t=0s) (BMW) -* [Building Self Driving Cars with Bazel + Q&A](https://www.youtube.com/watch?v=fjfFe98LTm8&list=PLxNYxgaZ8Rsf-7g43Z8LyXct9ax6egdSj&index=29) (GM Cruise) diff --git a/8.0.1/basics/index.mdx b/8.0.1/basics/index.mdx deleted file mode 100644 index f3c833f..0000000 --- a/8.0.1/basics/index.mdx +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: 'Build Basics' ---- - - - -A build system is one of the most important parts of an engineering organization -because each developer interacts with it potentially dozens or hundreds of times -per day. A fully featured build system is necessary to enable developer -productivity as an organization scales. For individual developers, it's -straightforward to just compile your code and so a build system might seem -excessive. But at a larger scale, having a build system helps with managing -shared dependencies, such as relying on another part of the code base, or an -external resource, such as a library. Build systems help to make sure that you -have everything you need to build your code before it starts building. Build -systems also increase velocity when they're set up to help engineers share -resources and results. - -This section covers some history and basics of building and build systems, -including design decisions that went into making Bazel. If you're -familiar with artifact-based build systems, such as Bazel, Buck, and Pants, you -can skip this section, but it's a helpful overview to understand why -artifact-based build systems are excellent at enabling scale. - -Note: Much of this section's content comes from the _Build Systems and -Build Philosophy_ chapter of the -[_Software Engineering at Google_ book](https://abseil.io/resources/swe-book/html/ch18.html). -Thank you to the original author, Erik Kuefler, for allowing its reuse and -modification here! - -* **[Why a Build System?](/basics/build-systems)** - - If you haven't used a build system before, start here. This page covers why - you should use a build system, and why compilers and build scripts aren't - the best choice once your organization starts to scale beyond a few - developers. - -* **[Task-Based Build Systems](/basics/task-based-builds)** - - This page discusses task-based build systems (such as Make, Maven, and - Gradle) and some of their challenges. - -* **[Artifact-Based Build Systems](/basics/artifact-based-builds)** - - This page discusses artifact-based build systems in response to the pain - points of task-based build systems. - -* **[Distributed Builds](/basics/distributed-builds)** - - This page covers distributed builds, or builds that are executed outside of - your local machine. 
your local machine. This requires more robust infrastructure to share
-  resources and build results (and is where the true wizardry happens!)
-
-* **[Dependency Management](/basics/dependencies)**
-
-  This page covers some complications of dependencies at a large scale and
-  strategies to counteract those complications.
diff --git a/8.0.1/basics/task-based-builds.mdx b/8.0.1/basics/task-based-builds.mdx
deleted file mode 100644
index 9dd3f8c..0000000
--- a/8.0.1/basics/task-based-builds.mdx
+++ /dev/null
@@ -1,216 +0,0 @@
----
-title: 'Task-Based Build Systems'
----
-
-
-
-This page covers task-based build systems: how they work and some of the
-complications that can occur with them. After shell scripts,
-task-based build systems are the next logical evolution of building.
-
-
-## Understanding task-based build systems
-
-In a task-based build system, the fundamental unit of work is the task. Each
-task is a script that can execute any sort of logic, and tasks specify other
-tasks as dependencies that must run before them. Most major build systems in use
-today, such as Ant, Maven, Gradle, Grunt, and Rake, are task based. Instead of
-shell scripts, most modern build systems require engineers to create build files
-that describe how to perform the build.
-
-Take this example from the
-[Ant manual](https://ant.apache.org/manual/using.html):
-
-```xml
-<project name="MyProject" default="dist" basedir=".">
-  <description>
-    simple example build file
-  </description>
-  <!-- set global properties for this build -->
-  <property name="src" location="src"/>
-  <property name="build" location="build"/>
-  <property name="dist" location="dist"/>
-
-  <target name="init">
-    <!-- Create the time stamp -->
-    <tstamp/>
-    <!-- Create the build directory structure used by compile -->
-    <mkdir dir="${build}"/>
-  </target>
-  <target name="compile" depends="init"
-      description="compile the source">
-    <!-- Compile the Java code from ${src} into ${build} -->
-    <javac srcdir="${src}" destdir="${build}"/>
-  </target>
-  <target name="dist" depends="compile"
-      description="generate the distribution">
-    <!-- Create the distribution directory -->
-    <mkdir dir="${dist}/lib"/>
-    <!-- Put everything in ${build} into the MyProject-${DSTAMP}.jar file -->
-    <jar jarfile="${dist}/lib/MyProject-${DSTAMP}.jar" basedir="${build}"/>
-  </target>
-  <target name="clean" description="clean up">
-    <!-- Delete the ${build} and ${dist} directory trees -->
-    <delete dir="${build}"/>
-    <delete dir="${dist}"/>
-  </target>
-</project>
-```
-
-The buildfile is written in XML and defines some simple metadata about the build
-along with a list of tasks (the `<target>` tags in the XML). (Ant uses the word
-_target_ to represent a _task_, and it uses the word _task_ to refer to
-_commands_.) Each task executes a list of possible commands defined by Ant,
-which here include creating and deleting directories, running `javac`, and
-creating a JAR file. This set of commands can be extended by user-provided
-plug-ins to cover any sort of logic. Each task can also define the tasks it
-depends on via the depends attribute. These dependencies form an acyclic graph,
-as seen in Figure 1.
-
-[![Acyclic graph showing dependencies](/images/task-dependencies.png)](/images/task-dependencies.png)
-
-Figure 1. An acyclic graph showing dependencies
-
-Users perform builds by providing tasks to Ant's command-line tool. For example,
-when a user types `ant dist`, Ant takes the following steps:
-
-1. Loads a file named `build.xml` in the current directory and parses it to
-   create the graph structure shown in Figure 1.
-1. Looks for the task named `dist` that was provided on the command line and
-   discovers that it has a dependency on the task named `compile`.
-1. Looks for the task named `compile` and discovers that it has a dependency on
-   the task named `init`.
-1. Looks for the task named `init` and discovers that it has no dependencies.
-1. Executes the commands defined in the `init` task.
-1. Executes the commands defined in the `compile` task given that all of that
-   task's dependencies have been run.
-1. Executes the commands defined in the `dist` task given that all of that
-   task's dependencies have been run.
-
-In the end, the code executed by Ant when running the `dist` task is equivalent
-to the following shell script:
-
-```posix-terminal
-./createTimestamp.sh
-
-mkdir build/
-
-javac src/* -d build/
-
-mkdir -p dist/lib/
-
-jar cf dist/lib/MyProject-$(date --iso-8601).jar build/*
-```
-
-When the syntax is stripped away, the buildfile and the build script actually
-aren't too different.
But we’ve already gained a lot by doing this. We can -create new buildfiles in other directories and link them together. We can easily -add new tasks that depend on existing tasks in arbitrary and complex ways. We -need only pass the name of a single task to the `ant` command-line tool, and it -determines everything that needs to be run. - -Ant is an old piece of software, originally released in 2000. Other tools like -Maven and Gradle have improved on Ant in the intervening years and essentially -replaced it by adding features like automatic management of external -dependencies and a cleaner syntax without any XML. But the nature of these newer -systems remains the same: they allow engineers to write build scripts in a -principled and modular way as tasks and provide tools for executing those tasks -and managing dependencies among them. - -## The dark side of task-based build systems - -Because these tools essentially let engineers define any script as a task, they -are extremely powerful, allowing you to do pretty much anything you can imagine -with them. But that power comes with drawbacks, and task-based build systems can -become difficult to work with as their build scripts grow more complex. The -problem with such systems is that they actually end up giving _too much power to -engineers and not enough power to the system_. Because the system has no idea -what the scripts are doing, performance suffers, as it must be very conservative -in how it schedules and executes build steps. And there’s no way for the system -to confirm that each script is doing what it should, so scripts tend to grow in -complexity and end up being another thing that needs debugging. - -### Difficulty of parallelizing build steps - -Modern development workstations are quite powerful, with multiple cores that are -capable of executing several build steps in parallel. But task-based systems are -often unable to parallelize task execution even when it seems like they should -be able to. Suppose that task A depends on tasks B and C. Because tasks B and C -have no dependency on each other, is it safe to run them at the same time so -that the system can more quickly get to task A? Maybe, if they don’t touch any -of the same resources. But maybe not—perhaps both use the same file to track -their statuses and running them at the same time causes a conflict. There’s no -way in general for the system to know, so either it has to risk these conflicts -(leading to rare but very difficult-to-debug build problems), or it has to -restrict the entire build to running on a single thread in a single process. -This can be a huge waste of a powerful developer machine, and it completely -rules out the possibility of distributing the build across multiple machines. - -### Difficulty performing incremental builds - -A good build system allows engineers to perform reliable incremental builds such -that a small change doesn’t require the entire codebase to be rebuilt from -scratch. This is especially important if the build system is slow and unable to -parallelize build steps for the aforementioned reasons. But unfortunately, -task-based build systems struggle here, too. Because tasks can do anything, -there’s no way in general to check whether they’ve already been done. Many tasks -simply take a set of source files and run a compiler to create a set of -binaries; thus, they don’t need to be rerun if the underlying source files -haven’t changed. 
But without additional information, the system can’t say this -for sure—maybe the task downloads a file that could have changed, or maybe it -writes a timestamp that could be different on each run. To guarantee -correctness, the system typically must rerun every task during each build. Some -build systems try to enable incremental builds by letting engineers specify the -conditions under which a task needs to be rerun. Sometimes this is feasible, but -often it’s a much trickier problem than it appears. For example, in languages -like C++ that allow files to be included directly by other files, it’s -impossible to determine the entire set of files that must be watched for changes -without parsing the input sources. Engineers often end up taking shortcuts, and -these shortcuts can lead to rare and frustrating problems where a task result is -reused even when it shouldn’t be. When this happens frequently, engineers get -into the habit of running clean before every build to get a fresh state, -completely defeating the purpose of having an incremental build in the first -place. Figuring out when a task needs to be rerun is surprisingly subtle, and is -a job better handled by machines than humans. - -### Difficulty maintaining and debugging scripts - -Finally, the build scripts imposed by task-based build systems are often just -difficult to work with. Though they often receive less scrutiny, build scripts -are code just like the system being built, and are easy places for bugs to hide. -Here are some examples of bugs that are very common when working with a -task-based build system: - -* Task A depends on task B to produce a particular file as output. The owner - of task B doesn’t realize that other tasks rely on it, so they change it to - produce output in a different location. This can’t be detected until someone - tries to run task A and finds that it fails. -* Task A depends on task B, which depends on task C, which is producing a - particular file as output that’s needed by task A. The owner of task B - decides that it doesn’t need to depend on task C any more, which causes task - A to fail even though task B doesn’t care about task C at all! -* The developer of a new task accidentally makes an assumption about the - machine running the task, such as the location of a tool or the value of - particular environment variables. The task works on their machine, but fails - whenever another developer tries it. -* A task contains a nondeterministic component, such as downloading a file - from the internet or adding a timestamp to a build. Now, people get - potentially different results each time they run the build, meaning that - engineers won’t always be able to reproduce and fix one another’s failures - or failures that occur on an automated build system. -* Tasks with multiple dependencies can create race conditions. If task A - depends on both task B and task C, and task B and C both modify the same - file, task A gets a different result depending on which one of tasks B and C - finishes first. - -There’s no general-purpose way to solve these performance, correctness, or -maintainability problems within the task-based framework laid out here. So long -as engineers can write arbitrary code that runs during the build, the system -can’t have enough information to always be able to run builds quickly and -correctly. 
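-
-The race condition in the last bullet is easy to demonstrate outside of any
-build tool. In this sketch (hypothetical tasks B and C, invented file name),
-both tasks write the same status file, so the result task A would read depends
-entirely on scheduling order:
-
-```python
-import threading
-
-def task_b():
-    with open("status.txt", "w") as f:
-        f.write("built by B")
-
-def task_c():
-    with open("status.txt", "w") as f:
-        f.write("built by C")
-
-# A task-based system running B and C in parallel has no way to know they
-# conflict, because it cannot see inside the scripts.
-threads = [threading.Thread(target=task_b), threading.Thread(target=task_c)]
-for t in threads:
-    t.start()
-for t in threads:
-    t.join()
-
-print(open("status.txt").read())  # Nondeterministic: "built by B" or "built by C".
-```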
To solve the problem, we need to take some power out of the hands of -engineers and put it back in the hands of the system and reconceptualize the -role of the system not as running tasks, but as producing artifacts. - -This approach led to the creation of artifact-based build systems, like Blaze -and Bazel. diff --git a/8.0.1/brand/index.mdx b/8.0.1/brand/index.mdx deleted file mode 100644 index 2a21cd4..0000000 --- a/8.0.1/brand/index.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: 'Bazel Brand Guidelines' ---- - - - -The Bazel trademark and logo ("Bazel Trademarks") are trademarks of Google, and -are treated separately from the copyright or patent license grants contained in -the Apache-licensed Bazel repositories on GitHub. Any use of the Bazel -Trademarks other than those permitted in these guidelines must be approved in -advance. - -## Purpose of the Brand Guidelines - -These guidelines exist to ensure that the Bazel project can share its technology -under open source licenses while making sure that the "Bazel" brand is protected -as a meaningful source identifier in a way that's consistent with trademark law. -By adhering to these guidelines, you help to promote the freedom to use and -develop high-quality Bazel technology. - -## Acceptable Uses - -Given the open nature of Bazel, you may use the Bazel trademark to refer to the -project without prior written permission. Examples of these approved references -include the following: - -* To refer to the Bazel Project itself; -* To link to bazel.build; -* To refer to unmodified source code or other files shared by the Bazel - repositories on GitHub; -* In blog posts, news articles, or educational materials about Bazel; -* To accurately identify that your design or implementation is based on, is - for use with, or is compatible with Bazel technology. - -Examples: - -* \[Your Product\] for Bazel -* \[Your Product\] is compatible with Bazel -* \[XYZ\] Conference for Bazel Users - -## General Guidelines - -* The Bazel name may never be used or registered in a manner that would cause - confusion as to Google's sponsorship, affiliation, or endorsement. -* Don't use the Bazel name as part of your company name, product name, domain - name, or social media profile. -* Other than as permitted by these guidelines, the Bazel name should not be - combined with other trademarks, terms, or source identifiers. -* Don't remove, distort or alter any element of the Bazel Trademarks. That - includes modifying the Bazel Trademark, for example, through hyphenation, - combination or abbreviation. Do not shorten, abbreviate, or create acronyms - out of the Bazel Trademarks. -* Don't display the word Bazel using any different stylization, color, or font - from the surrounding text. -* Don't use the term Bazel as a verb or use it in possessive form. -* Don't use the Bazel logo on any website, product UI, or promotional - materials without prior written permission from - [product@bazel.build](mailto:product@bazel.build). - -## Usage for Events and Community Groups - -The Bazel word mark may be used referentially in events, community groups, or -other gatherings related to the Bazel build system, but it may not be used in a -manner that implies official status or endorsement. - -Examples of appropriate naming conventions are: - -* \[XYZ\] Bazel User Group -* Bazel Community Day at \[XYZ\] -* \[XYZ\] Conference for Bazel Users - -where \[XYZ\] represents the location and optionally other wordings. 
- -Any naming convention that may imply official status or endorsement requires -review for approval from [product@bazel.build](mailto:product@bazel.build). - -Examples of naming conventions that require prior written permission: - -* BazelCon -* Bazel Conference - -## Contact Us - -Please do not hesitate to contact us at -[product@bazel.build](mailto:product@bazel.build) if you are unsure whether your -intended use of the Bazel Trademarks is in compliance with these guidelines, or -to ask for permission to use the Bazel Trademarks, clearly describing the -intended usage and duration. diff --git a/8.0.1/build/share-variables.mdx b/8.0.1/build/share-variables.mdx deleted file mode 100644 index b248034..0000000 --- a/8.0.1/build/share-variables.mdx +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: 'Sharing Variables' ---- - - - -`BUILD` files are intended to be simple and declarative. They will typically -consist of a series of target declarations. As your code base and your `BUILD` -files get larger, you will probably notice some duplication, such as: - -``` python -cc_library( - name = "foo", - copts = ["-DVERSION=5"], - srcs = ["foo.cc"], -) - -cc_library( - name = "bar", - copts = ["-DVERSION=5"], - srcs = ["bar.cc"], - deps = [":foo"], -) -``` - -Code duplication in `BUILD` files is usually fine. This can make the file more -readable: each declaration can be read and understood without any context. This -is important, not only for humans, but also for external tools. For example, a -tool might be able to read and update `BUILD` files to add missing dependencies. -Code refactoring and code reuse might prevent this kind of automated -modification. - -If it is useful to share values (for example, if values must be kept in sync), -you can introduce a variable: - -``` python -COPTS = ["-DVERSION=5"] - -cc_library( - name = "foo", - copts = COPTS, - srcs = ["foo.cc"], -) - -cc_library( - name = "bar", - copts = COPTS, - srcs = ["bar.cc"], - deps = [":foo"], -) -``` - -Multiple declarations now use the value `COPTS`. By convention, use uppercase -letters to name global constants. - -## Sharing variables across multiple BUILD files - -If you need to share a value across multiple `BUILD` files, you have to put it -in a `.bzl` file. `.bzl` files contain definitions (variables and functions) -that can be used in `BUILD` files. - -In `path/to/variables.bzl`, write: - -``` python -COPTS = ["-DVERSION=5"] -``` - -Then, you can update your `BUILD` files to access the variable: - -``` python -load("//path/to:variables.bzl", "COPTS") - -cc_library( - name = "foo", - copts = COPTS, - srcs = ["foo.cc"], -) - -cc_library( - name = "bar", - copts = COPTS, - srcs = ["bar.cc"], - deps = [":foo"], -) -``` diff --git a/8.0.1/build/style-guide.mdx b/8.0.1/build/style-guide.mdx deleted file mode 100644 index 19a5216..0000000 --- a/8.0.1/build/style-guide.mdx +++ /dev/null @@ -1,232 +0,0 @@ ---- -title: 'BUILD Style Guide' ---- - - - -`BUILD` file formatting follows the same approach as Go, where a standardized -tool takes care of most formatting issues. -[Buildifier](https://github.com/bazelbuild/buildifier) is a tool that parses and -emits the source code in a standard style. Every `BUILD` file is therefore -formatted in the same automated way, which makes formatting a non-issue during -code reviews. It also makes it easier for tools to understand, edit, and -generate `BUILD` files. - -`BUILD` file formatting must match the output of `buildifier`. 
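-
-For example, assuming a recent Buildifier release (check `buildifier --help`
-for your version's exact flags), formatting is typically applied and verified
-like this:
-
-```posix-terminal
-# Rewrite all BUILD, WORKSPACE, and .bzl files under the current directory.
-buildifier -r .
-
-# Verify formatting without rewriting files, for use as a CI or presubmit check.
-buildifier -r -mode=check .
-```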
-
-## Formatting example
-
-```python
-# Test code implementing the Foo controller.
-package(default_testonly = True)
-
-py_test(
-    name = "foo_test",
-    srcs = glob(["*.py"]),
-    data = [
-        "//data/production/foo:startfoo",
-        "//foo",
-        "//third_party/java/jdk:jdk-k8",
-    ],
-    flaky = True,
-    deps = [
-        ":check_bar_lib",
-        ":foo_data_check",
-        ":pick_foo_port",
-        "//pyglib",
-        "//testing/pybase",
-    ],
-)
-```
-
-## File structure
-
-**Recommendation**: Use the following order (every element is optional):
-
-* Package description (a comment)
-
-* All `load()` statements
-
-* The `package()` function
-
-* Calls to rules and macros
-
-Buildifier makes a distinction between a standalone comment and a comment
-attached to an element. If a comment is not attached to a specific element, use
-an empty line after it. The distinction is important when doing automated
-changes (for example, to keep or remove a comment when deleting a rule).
-
-```python
-# Standalone comment (such as to make a section in a file)
-
-# Comment for the cc_library below
-cc_library(name = "cc")
-```
-
-## References to targets in the current package
-
-Files should be referred to by their paths relative to the package directory
-(without ever using up-references, such as `..`). Generated files should be
-prefixed with "`:`" to indicate that they are not sources. Source files
-should not be prefixed with `:`. Rules should be prefixed with `:`. For
-example, assuming `x.cc` is a source file:
-
-```python
-cc_library(
-    name = "lib",
-    srcs = ["x.cc"],
-    hdrs = [":gen_header"],
-)
-
-genrule(
-    name = "gen_header",
-    srcs = [],
-    outs = ["x.h"],
-    cmd = "echo 'int x();' > $@",
-)
-```
-
-## Target naming
-
-Target names should be descriptive. If a target contains one source file,
-the target should generally have a name derived from that source (for example, a
-`cc_library` for `chat.cc` could be named `chat`, or a `java_library` for
-`DirectMessage.java` could be named `direct_message`).
-
-The eponymous target for a package (the target with the same name as the
-containing directory) should provide the functionality described by the
-directory name. If there is no such target, do not create an eponymous
-target.
-
-Prefer using the short name when referring to an eponymous target (`//x`
-instead of `//x:x`). If you are in the same package, prefer the local
-reference (`:x` instead of `//x`).
-
-Avoid using "reserved" target names which have special meaning. This includes
-`all`, `__pkg__`, and `__subpackages__`; these names have special
-semantics and can cause confusion and unexpected behaviors when they are used.
-
-In the absence of a prevailing team convention, these are some non-binding
-recommendations that are broadly used at Google:
-
-* In general, use ["snake_case"](https://en.wikipedia.org/wiki/Snake_case)
-  * For a `java_library` with one `src` this means using a name that is not
-    the same as the filename without the extension
-  * For Java `*_binary` and `*_test` rules, use
-    ["Upper CamelCase"](https://en.wikipedia.org/wiki/Camel_case).
-    This allows for the target name to match one of the `src`s. For
-    `java_test`, this makes it possible for the `test_class` attribute to be
-    inferred from the name of the target.
-* If there are multiple variants of a particular target then add a suffix to
-  disambiguate (such as `:foo_dev`, `:foo_prod` or `:bar_x86`, `:bar_x64`)
-* Suffix `_test` targets with `_test`, `_unittest`, `Test`, or `Tests`
-* Avoid meaningless suffixes like `_lib` or `_library` (unless necessary to
-  avoid conflicts between a `_library` target and its corresponding `_binary`)
-* For proto-related targets:
-  * `proto_library` targets should have names ending in `_proto`
-  * Language-specific `*_proto_library` rules should match the underlying
-    proto but replace `_proto` with a language-specific suffix such as:
-    * **`cc_proto_library`**: `_cc_proto`
-    * **`java_proto_library`**: `_java_proto`
-    * **`java_lite_proto_library`**: `_java_proto_lite`
-
-## Visibility
-
-Visibility should be scoped as tightly as possible, while still allowing access
-by tests and reverse dependencies. Use `__pkg__` and `__subpackages__` as
-appropriate.
-
-Avoid setting package `default_visibility` to `//visibility:public`.
-`//visibility:public` should be individually set only for targets in the
-project's public API. These could be libraries that are designed to be depended
-on by external projects or binaries that could be used by an external project's
-build process.
-
-## Dependencies
-
-Dependencies should be restricted to direct dependencies (dependencies
-needed by the sources listed in the rule). Do not list transitive dependencies.
-
-Package-local dependencies should be listed first and referred to in a way
-compatible with the
-[References to targets in the current package](#targets-current-package)
-section above (not by their absolute package name).
-
-Prefer to list dependencies directly, as a single list. Putting the "common"
-dependencies of several targets into a variable reduces maintainability, makes
-it impossible for tools to change the dependencies of a target, and can lead to
-unused dependencies.
-
-## Globs
-
-Indicate "no targets" with `[]`. Do not use a glob that matches nothing: it
-is more error-prone and less obvious than an empty list.
-
-### Recursive
-
-Do not use recursive globs to match source files (for example,
-`glob(["**/*.java"])`).
-
-Recursive globs make `BUILD` files difficult to reason about because they skip
-subdirectories containing `BUILD` files.
-
-Recursive globs are generally less efficient than having a `BUILD` file per
-directory with a dependency graph defined between them, as this enables better
-remote caching and parallelism.
-
-It is good practice to author a `BUILD` file in each directory and define a
-dependency graph between them.
-
-### Non-recursive
-
-Non-recursive globs are generally acceptable.
-
-## Other conventions
-
- * Use uppercase and underscores to declare constants (such as `GLOBAL_CONSTANT`),
-   use lowercase and underscores to declare variables (such as `my_variable`).
-
- * Labels should never be split, even if they are longer than 79 characters.
-   Labels should be string literals whenever possible. *Rationale*: It makes
-   find and replace easy. It also improves readability.
-
- * The value of the name attribute should be a literal constant string (except
-   in macros). *Rationale*: External tools use the name attribute to refer to a
-   rule. They need to find rules without having to interpret code.
-
- * When setting boolean-type attributes, use boolean values, not integer values.
-   For legacy reasons, rules still convert integers to booleans as needed,
-   but this is discouraged. *Rationale*: `flaky = 1` could be misread as saying
-   "deflake this target by rerunning it once". `flaky = True` unambiguously says
-   "this test is flaky" (see the sketch below).
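-
-A short sketch pulling several of these conventions together (hypothetical
-targets and files):
-
-```python
-java_library(
-    name = "direct_message",  # snake_case name derived from DirectMessage.java.
-    srcs = ["DirectMessage.java"],
-)
-
-java_test(
-    name = "DirectMessageTest",  # Upper CamelCase lets test_class be inferred.
-    srcs = ["DirectMessageTest.java"],
-    flaky = True,  # Boolean literal, not the legacy integer 1.
-    deps = [":direct_message"],
-)
-```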
-
-## Differences with Python style guide
-
-Although compatibility with the
-[Python style guide](https://www.python.org/dev/peps/pep-0008/)
-is a goal, there are a few differences:
-
- * No strict line length limit. Long comments and long strings are often split
-   to 79 columns, but it is not required. It should not be enforced in code
-   reviews or presubmit scripts. *Rationale*: Labels can be long and exceed this
-   limit. It is common for `BUILD` files to be generated or edited by tools,
-   which does not go well with a line length limit.
-
- * Implicit string concatenation is not supported. Use the `+` operator.
-   *Rationale*: `BUILD` files contain many string lists. It is easy to forget a
-   comma, which leads to a completely different result. This has created many
-   bugs in the past. [See also this discussion.](https://lwn.net/Articles/551438/)
-
- * Use spaces around the `=` sign for keyword arguments in rules. *Rationale*:
-   Named arguments are much more frequent than in Python and are always on a
-   separate line. Spaces improve readability. This convention has been around
-   for a long time, and it is not worth modifying all existing `BUILD` files.
-
- * By default, use double quotation marks for strings. *Rationale*: This is not
-   specified in the Python style guide, but it recommends consistency. So we
-   decided to use only double-quoted strings. Many languages use double-quotes
-   for string literals.
-
- * Use a single blank line between two top-level definitions. *Rationale*: The
-   structure of a `BUILD` file is not like a typical Python file. It has only
-   top-level statements. Using a single blank line makes `BUILD` files shorter.
diff --git a/8.0.1/community/recommended-rules.mdx b/8.0.1/community/recommended-rules.mdx
deleted file mode 100644
index 86daa05..0000000
--- a/8.0.1/community/recommended-rules.mdx
+++ /dev/null
@@ -1,54 +0,0 @@
----
-title: 'Recommended Rules'
----
-
-
-
-In the documentation, we provide a list of
-[recommended rules](/rules).
-
-This is a set of high-quality rules, which will provide a good experience to our
-users. We make a distinction between the supported rules and the hundreds of
-rules you can find on the Internet.
-
-## Nomination
-
-If a ruleset meets the requirements below, a rule maintainer can nominate it
-to be part of the _recommended rules_ by filing a
-[GitHub issue](https://github.com/bazelbuild/bazel/).
-
-After a review by the [Bazel core team](/contribute/policy), it
-will be recommended on the Bazel website.
-
-## Requirements for the rule maintainers
-
-* The ruleset provides an important feature, useful to a large number of Bazel
-  users (for example, support for a widely popular language).
-* The ruleset is well maintained. There must be at least two active maintainers.
-* The ruleset is well documented, with examples, and easy to use.
-* The ruleset follows the best practices and is performant (see
-  [the performance guide](/rules/performance)).
-* The ruleset has sufficient test coverage.
-* The ruleset is tested on
-  [BuildKite](https://github.com/bazelbuild/continuous-integration/blob/master/buildkite/README.md)
-  with the latest version of Bazel. Tests should always pass (when used as a
-  presubmit check).
-* The ruleset is also tested with the upcoming incompatible changes. Breakages
-  should be fixed within two weeks. Migration issues should be reported to the
-  Bazel team quickly.
-
-## Requirements for Bazel developers
-
-* Recommended rules are frequently tested with Bazel at head (at least once a
-  day).
-* No change in Bazel may break a recommended rule (with the default set of
-  flags). If it happens, the change should be fixed or rolled back.
-
-## Demotion
-
-If there is a concern that a particular ruleset is no longer meeting the
-requirements, a [GitHub issue](https://github.com/bazelbuild/bazel/) should be
-filed.
-
-Rule maintainers will be contacted and need to respond within 2 weeks. Based on
-the outcome, the Bazel core team might decide to demote the ruleset.
diff --git a/8.0.1/community/remote-execution-services.mdx b/8.0.1/community/remote-execution-services.mdx
deleted file mode 100644
index bede2b8..0000000
--- a/8.0.1/community/remote-execution-services.mdx
+++ /dev/null
@@ -1,28 +0,0 @@
----
-title: 'Remote Execution Services'
----
-
-
-
-Use the following services to run Bazel with remote execution:
-
-* Manual
-
-  * Use the [gRPC protocol](https://github.com/bazelbuild/remote-apis)
-    directly to create your own remote execution service.
-
-* Self-service
-
-  * [Buildbarn](https://github.com/buildbarn)
-  * [Buildfarm](https://github.com/bazelbuild/bazel-buildfarm)
-  * [BuildGrid](https://gitlab.com/BuildGrid/buildgrid)
-  * [NativeLink](https://github.com/TraceMachina/nativelink)
-
-* Commercial
-
-  * [Aspect Build](https://www.aspect.build/) – Self-hosted remote cache and remote execution services.
-  * [Bitrise](https://bitrise.io/why/features/mobile-build-caching-for-better-build-test-performance) - Providing the world's leading mobile-first CI/CD and remote build caching platform.
-  * [BuildBuddy](https://www.buildbuddy.io) - Remote build execution,
-    caching, and results UI.
-  * [EngFlow Remote Execution](https://www.engflow.com) - Remote execution
-    and remote caching service with Build and Test UI. Can be self-hosted or hosted.
diff --git a/8.0.1/community/roadmaps-starlark.mdx b/8.0.1/community/roadmaps-starlark.mdx
deleted file mode 100644
index 5ce476d..0000000
--- a/8.0.1/community/roadmaps-starlark.mdx
+++ /dev/null
@@ -1,91 +0,0 @@
----
-title: 'Starlark Roadmap'
----
-
-
-
-*Last verified: 2020-04-21*
-([update history](https://github.com/bazelbuild/bazel-website/commits/master/roadmaps/starlark.md))
-
-*Point of contact:* [laurentlb](https://github.com/laurentlb)
-
-## Goal
-
-Our goal is to make Bazel more extensible. Users should be able to easily
-implement their own rules, and support new languages and tools. We want to
-improve the experience of writing and maintaining those rules.
-
-We focus on two areas:
-
-* Make the language and API simple, yet powerful.
-* Provide better tooling for reading, writing, updating, debugging, and testing the code.
-
-
-## Q2 2020
-
-Build health and Best practices:
-
-* P0. Discourage macros that don't have a name, and ensure the name is a unique
-  string literal. This work is focused on the Google codebase, but may impact
-  tooling available publicly.
-* P0. Make Buildozer commands reliable with regard to selects and variables.
-* P1. Make Buildifier remove duplicates in lists that we don't sort because of
-  comments.
-* P1. Update the Buildifier linter to recommend inlining trivial expressions.
-* P2. Study use cases for native.existing_rule[s]() and propose alternatives.
-* P2. Study use cases for the prelude file and propose alternatives.
-
-Performance:
-
-* P1. Optimize the Starlark interpreter using flat environments and bytecode
-  compilation.
-
-Technical debt reduction:
-
-* P0. Add the ability to port native symbols to Starlark underneath @bazel_tools.
-* P1. Delete obsolete flags (some of them are still used at Google, so we need to
-  clean the codebase first): `incompatible_always_check_depset_elements`,
-  `incompatible_disable_deprecated_attr_params`,
-  `incompatible_no_support_tools_in_action_inputs`, `incompatible_new_actions_api`.
-* P1. Ensure the following flags can be flipped in Bazel 4.0:
-  `incompatible_disable_depset_items`, `incompatible_no_implicit_file_export`,
-  `incompatible_run_shell_command_string`,
-  `incompatible_restrict_string_escapes`.
-* P1. Finish lib.syntax work (API cleanup, separation from Bazel).
-* P2. Reduce by 50% the build+test latency of a trivial edit to Bazel's Java packages.
-
-Community:
-
-* `rules_python` is active and well-maintained by the community.
-* Continuous support for rules_jvm_external (no outstanding pull requests, issue
-  triage, making releases).
-* Maintain Bazel documentation infrastructure: centralize and canonicalize CSS
-  styles across bazel-website, bazel-blog, docs.
-* Bazel docs: add CI tests for the e2e doc site build to prevent regressions.
-
-## Q1 2020
-
-Build health and Best practices:
-
-* Allow targets to track their macro call stack, for exporting via `bazel query`
-* Implement `--incompatible_no_implicit_file_export`
-* Remove the deprecated depset APIs (#5817, #10313, #9017).
-* Add a cross-file analyzer in Buildifier, implement a check for deprecated
-  functions.
-
-Performance:
-
-* Make Bazel's own Java-based tests 2x faster.
-* Implement a Starlark CPU profiler.
-
-Technical debt reduction:
-
-* Remove 8 incompatible flags (after flipping them).
-* Finish lib.syntax cleanup work (break dependencies).
-* Starlark optimization: flat environment, bytecode compilation
-* Delete all serialization from the analysis phase, if possible
-* Make a plan for simplifying/optimizing lib.packages
-
-Community:
-
-* Publish a Glossary containing definitions for all the Bazel-specific terms
diff --git a/8.0.1/community/sig.mdx b/8.0.1/community/sig.mdx
deleted file mode 100644
index ae5f918..0000000
--- a/8.0.1/community/sig.mdx
+++ /dev/null
@@ -1,158 +0,0 @@
----
-title: 'Bazel Special Interest Groups'
----
-
-
-
-Bazel hosts Special Interest Groups (SIGs) to focus collaboration on particular
-areas and to support communication and coordination between [Bazel owners,
-maintainers, and contributors](/contribute/policy). This policy
-applies to [`bazelbuild`](http://github.com/bazelbuild).
-
-SIGs do their work in public. The ideal scope for a SIG covers a well-defined
-domain, where the majority of participation is from the community. SIGs may
-focus on community-maintained repositories in `bazelbuild` (such as language
-rules) or focus on areas of code in the Bazel repository (such as Remote
-Execution).
-
-While not all SIGs will have the same level of energy, breadth of scope, or
-governance models, there should be sufficient evidence that there are community
-members willing to engage and contribute should the interest group be
-established. Before joining, review the group's work, and then get in touch
-with the SIG leader. Membership policies vary on a per-SIG basis.
-
-See the complete list of
-[Bazel SIGs](https://github.com/bazelbuild/community/tree/main/sigs).
-
-### Non-goals: What a SIG is not
-
-SIGs are intended to facilitate collaboration on shared work.
A SIG is -therefore: - -- *Not a support forum:* a mailing list and a SIG is not the same thing -- *Not immediately required:* early on in a project's life, you may not know - if you have shared work or collaborators -- *Not free labor:* energy is required to grow and coordinate the work - collaboratively - -Bazel Owners take a conservative approach to SIG creation—thanks to the ease of -starting projects on GitHub, there are many avenues where collaboration can -happen without the need for a SIG. - -## SIG lifecycle - -This section covers how to create a SIG. - -### Research and consultation - -To propose a new SIG group, first gather evidence for approval, as specified -below. Some possible avenues to consider are: - -- A well-defined problem or set of problems the group would solve -- Consultation with community members who would benefit, assessing both the - benefit and their willingness to commit -- For existing projects, evidence from issues and PRs that contributors care - about the topic -- Potential goals for the group to achieve -- Resource requirements of running the group - -Even if the need for a SIG seems self-evident, the research and consultation is -still important to the success of the group. - -### Create the new group - -The new group should follow the below process for chartering. In particular, it -must demonstrate: - -- A clear purpose and benefit to Bazel (either around a sub-project or - application area) -- Two or more contributors willing to act as group leads, existence of other - contributors, and evidence of demand for the group -- Each group needs to use at least one publicly accessible mailing list. A SIG - may reuse one of the public lists, such as - [bazel-discuss](https://groups.google.com/g/bazel-discuss), ask for a list - for @bazel.build, or create their own list -- Resources the SIG initially requires (usually, mailing list and regular - video call.) -- SIGs can serve documents and files from their directory in - [`bazelbuild/community`](https://github.com/bazelbuild/community) - or from their own repository in the - [`bazelbuild`](https://github.com/bazelbuild) GitHub - organization. SIGs may link to external resources if they choose to organize - their work outside of the `bazelbuild` GitHub organization -- Bazel Owners approve or reject SIG applications and consult other - stakeholders as necessary - -Before entering the formal parts of the process, you should consult with -the Bazel product team, at product@bazel.build. Most SIGs require conversation -and iteration before approval. - -The formal request for the new group is done by submitting a charter as a PR to -[`bazelbuild/community`](https://github.com/bazelbuild/community), -and including the request in the comments on the PR following the template -below. On approval, the PR for the group is merged and the required resources -created. - -### Template Request for New SIG - -To request a new SIG, use the template in the community repo: -[SIG-request-template.md](https://github.com/bazelbuild/community/blob/main/governance/SIG-request-template.md). - -### Chartering - -To establish a group, you need a charter and must follow the Bazel -[code of conduct](https://github.com/bazelbuild/bazel/blob/HEAD/CODE_OF_CONDUCT.md). -Archives of the group will be public. Membership may either be open to all -without approval, or available on request, pending approval of the group -administrator. - -The charter must nominate an administrator. 
As well as an administrator, the -group must include at least one person as lead (these may be the same person), -who serves as point of contact for coordination as required with the Bazel -product team. - -Group creators must post their charter to the group mailing list. The community -repository in the Bazel GitHub organization archives such documents and -policies. As groups evolve their practices and conventions, they should update -their charters within the relevant part of the community repository. - -### Collaboration and inclusion - -While not mandated, the group should choose to make use of collaboration -via scheduled conference calls or chat channels to conduct meetings. Any such -meetings should be advertised on the mailing list, and notes posted to the -mailing list afterwards. Regular meetings help drive accountability and progress -in a SIG. - -Bazel product team members may proactively monitor and encourage the group to -discussion and action as appropriate. - -### Launch a SIG - -Required activities: - -- Notify Bazel general discussion groups - ([bazel-discuss](https://groups.google.com/g/bazel-discuss), - [bazel-dev](https://groups.google.com/g/bazel-dev)). - -Optional activities: - -- Create a blog post for the Bazel blog - -### Health and termination of SIGs - -The Bazel owners make a best effort to ensure the health of SIGs. Bazel owners -occasionally request the SIG lead to report on the SIG's work, to inform the -broader Bazel community of the group's activity. - -If a SIG no longer has a useful purpose or interested community, it may be -archived and cease operation. The Bazel product team reserves the right to -archive such inactive SIGs to maintain the overall health of the project, -though it is a less preferable outcome. A SIG may also opt to disband if -it recognizes it has reached the end of its useful life. - -## Note - -*This content has been adopted from Tensorflow’s -[SIG playbook](https://www.tensorflow.org/community/sig_playbook) -with modifications.* diff --git a/8.0.1/community/update.mdx b/8.0.1/community/update.mdx deleted file mode 100644 index be0e07d..0000000 --- a/8.0.1/community/update.mdx +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: 'Community updates' ---- - - - -Join Bazel developer relations engineers for the monthly community update -livestream, or catch up on past ones. - -Title | Date | Description | Speakers --------- | -------- | -------- | -------- -[Roadmap Introduction](https://www.youtube.com/watch?v=gYrZDl7K9JM) | 5/19/2022 | The inaugural Bazel Community Update, introducing the community to some of Google's Bazel leadership to talk about the general state of the project and its upcoming roadmap | Sven Tiffe, Tony Aiuto, Radhika Advani -[Hands-On with Bzlmod](https://www.youtube.com/watch?v=MuW5XNcFukE) | 6/23/2022 | This month, we're joined by Google engineers Yun Peng and Xudong Yang to talk about Bzlmod, the new dependency system that is expected to go GA later this year. We'll cover the motivation behind the change, the new capabilities it brings to the table, and walk through some examples of it in action. | Yun Peng, Xudong Yang -[Extending Gazelle to generate BUILD files](https://www.youtube.com/watch?v=E1-U7EAfhXw) | 7/21/2022 | This month we're joined by Son Luong Ngoc who will be showing the Gazelle language extension system. We'll briefly touch on how it works under the covers, existing extensions, and how to go about writing your own extensions to ease the migration to Bazel. 
| Son Luong Ngoc -[Using Bazel for JavaScript Projects](https://www.youtube.com/watch?v=RIfYqX0JJYk) | 8/18/2022 | In this update, Alex Eagle joins us to talk about running JavaScript build tooling under Bazel. We'll look at a couple of examples: a Vue.js frontend and Nest backend. We'll cover the migration to newer rules_js provided by Aspect, and study how the tooling allows for fetching third-party dependencies and resolving them in the Node.js runtime. | Alex Eagle -[Like Peanut Butter & Jelly: Integrating Bazel with JetBrains IntelliJ](https://www.youtube.com/watch?v=wMrua-W-LC4) | 9/15/2022 | Bazel is awesome. IntelliJ is awesome. Naturally, they are more awesome together. Bazel IntelliJ plugin gurus Mai Hussien from Google and Justin Kaeser from JetBrains join us this month to give a live demo and walkthrough of the plugin's capabilities. Both new and experienced plugin users are welcome to come with questions. | Mai Hussien, Justin Kaeser -[Bazel at scale for surgical robots](https://www.youtube.com/watch?v=kCs1xa45yjM) | 10/27/2022 | What do you do when CMake CI runs for four hours? Join Guillaume Maudoux of Tweag to learn about how they migrated large, embedded robotic applications to Bazel. Topics include configuring toolchains for cross compilation, improving CI performance, managing third-party dependencies, and creating a positive developer experience — everything needed to ensure that Bazel lives up to “{Fast, Correct} — Choose Two”. | Guillaume Maudoux -[The Ghosts of Bazel Past, Present, and Future](https://www.youtube.com/watch?v=uRjSghJQlsw) | 12/22/2022 | For our special holiday Community Update and last of 2022, I'll be joined by Google's Sven Tiffe and Radhika Advani where we'll be visited by the ghosts of Bazel Past (2022 year in review), Present (Bazel 6.0 release), and Future (what to expect in 2023). | Sven Tiffe, Radhika Advani diff --git a/8.0.1/concepts/build-ref.mdx b/8.0.1/concepts/build-ref.mdx deleted file mode 100644 index e8839d4..0000000 --- a/8.0.1/concepts/build-ref.mdx +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: 'Repositories, workspaces, packages, and targets' ---- - - - -Bazel builds software from source code organized in directory trees called -repositories. A defined set of repositories comprises the workspace. Source -files in repositories are organized in a nested hierarchy of packages, where -each package is a directory that contains a set of related source files and one -`BUILD` file. The `BUILD` file specifies what software outputs can be built from -the source. - -### Repositories - -Source files used in a Bazel build are organized in _repositories_ (often -shortened to _repos_). A repo is a directory tree with a boundary marker file at -its root; such a boundary marker file could be `MODULE.bazel`, `REPO.bazel`, or -in legacy contexts, `WORKSPACE` or `WORKSPACE.bazel`. - -The repo in which the current Bazel command is being run is called the _main -repo_. Other, (external) repos are defined by _repo rules_; see [external -dependencies overview](/external/overview) for more information. - -## Workspace - -A _workspace_ is the environment shared by all Bazel commands run from the same -main repo. It encompasses the main repo and the set of all defined external -repos. - -Note that historically the concepts of "repository" and "workspace" have been -conflated; the term "workspace" has often been used to refer to the main -repository, and sometimes even used as a synonym of "repository". 
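-
-For example, the root of the main repo is typically marked by a `MODULE.bazel`
-file that can be as small as this (module name invented for the example):
-
-```python
-module(
-    name = "my_project",
-    version = "1.0",
-)
-```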
- -## Packages - -The primary unit of code organization in a repository is the _package_. A -package is a collection of related files and a specification of how they can be -used to produce output artifacts. - -A package is defined as a directory containing a -[`BUILD` file](/concepts/build-files) named either `BUILD` or `BUILD.bazel`. A -package includes all files in its directory, plus all subdirectories beneath it, -except those which themselves contain a `BUILD` file. From this definition, no -file or directory may be a part of two different packages. - -For example, in the following directory tree there are two packages, `my/app`, -and the subpackage `my/app/tests`. Note that `my/app/data` is not a package, but -a directory belonging to package `my/app`. - -``` -src/my/app/BUILD -src/my/app/app.cc -src/my/app/data/input.txt -src/my/app/tests/BUILD -src/my/app/tests/test.cc -``` - -## Targets - -A package is a container of _targets_, which are defined in the package's -`BUILD` file. Most targets are one of two principal kinds, _files_ and _rules_. - -Files are further divided into two kinds. _Source files_ are usually written by -the efforts of people, and checked in to the repository. _Generated files_, -sometimes called derived files or output files, are not checked in, but are -generated from source files. - -The second kind of target is declared with a _rule_. Each rule instance -specifies the relationship between a set of input and a set of output files. The -inputs to a rule may be source files, but they also may be the outputs of other -rules. - -Whether the input to a rule is a source file or a generated file is in most -cases immaterial; what matters is only the contents of that file. This fact -makes it easy to replace a complex source file with a generated file produced by -a rule, such as happens when the burden of manually maintaining a highly -structured file becomes too tiresome, and someone writes a program to derive it. -No change is required to the consumers of that file. Conversely, a generated -file may easily be replaced by a source file with only local changes. - -The inputs to a rule may also include _other rules_. The precise meaning of such -relationships is often quite complex and language- or rule-dependent, but -intuitively it is simple: a C++ library rule A might have another C++ library -rule B for an input. The effect of this dependency is that B's header files are -available to A during compilation, B's symbols are available to A during -linking, and B's runtime data is available to A during execution. - -An invariant of all rules is that the files generated by a rule always belong to -the same package as the rule itself; it is not possible to generate files into -another package. It is not uncommon for a rule's inputs to come from another -package, though. - -Package groups are sets of packages whose purpose is to limit accessibility of -certain rules. Package groups are defined by the `package_group` function. They -have three properties: the list of packages they contain, their name, and other -package groups they include. The only allowed ways to refer to them are from the -`visibility` attribute of rules or from the `default_visibility` attribute of -the `package` function; they do not generate or consume files. For more -information, refer to the [`package_group` -documentation](/reference/be/functions#package_group). 
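-
-For example, a `package_group` using all three properties might look like this
-(names are illustrative):
-
-```python
-package_group(
-    name = "tests_and_tools",
-    packages = [
-        "//my/app/tests/...",
-    ],
-    includes = [
-        ":internal_tools",  # Another package_group defined elsewhere.
-    ],
-)
-```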
-
-
diff --git a/8.0.1/concepts/platforms.mdx b/8.0.1/concepts/platforms.mdx
deleted file mode 100644
index e560ea4..0000000
--- a/8.0.1/concepts/platforms.mdx
+++ /dev/null
@@ -1,429 +0,0 @@
----
-title: 'Migrating to Platforms'
----
-
-
-
-Bazel has sophisticated [support](#background) for modeling
-[platforms][Platforms] and [toolchains][Toolchains] for multi-architecture and
-cross-compiled builds.
-
-This page summarizes the state of this support.
-
-Key Point: Bazel's platform and toolchain APIs are available today. Not all
-languages support them. Use these APIs with your project if you can. Bazel is
-migrating all major languages so eventually all builds will be platform-based.
-
-See also:
-
-* [Platforms][Platforms]
-* [Toolchains][Toolchains]
-* [Background][Background]
-
-## Status
-
-### C++
-
-C++ rules use platforms to select toolchains when
-`--incompatible_enable_cc_toolchain_resolution` is set.
-
-This means you can configure a C++ project with:
-
-```posix-terminal
-bazel build //:my_cpp_project --platforms=//:myplatform
-```
-
-instead of the legacy:
-
-```posix-terminal
-bazel build //:my_cpp_project --cpu=... --crosstool_top=... --compiler=...
-```
-
-This will be enabled by default in Bazel 7.0 ([#7260](https://github.com/bazelbuild/bazel/issues/7260)).
-
-To test your C++ project with platforms, see
-[Migrating Your Project](#migrating-your-project) and
-[Configuring C++ toolchains].
-
-### Java
-
-Java rules use platforms to select toolchains.
-
-This replaces the legacy flags `--java_toolchain`, `--host_java_toolchain`,
-`--javabase`, and `--host_javabase`.
-
-See [Java and Bazel](/docs/bazel-and-java) for details.
-
-### Android
-
-Android rules use platforms to select toolchains when
-`--incompatible_enable_android_toolchain_resolution` is set.
-
-This means you can configure an Android project with:
-
-```posix-terminal
-bazel build //:my_android_project --android_platforms=//:my_android_platform
-```
-
-instead of with legacy flags like `--android_crosstool_top`, `--android_cpu`,
-and `--fat_apk_cpu`.
-
-This will be enabled by default in Bazel 7.0 ([#16285](https://github.com/bazelbuild/bazel/issues/16285)).
-
-To test your Android project with platforms, see
-[Migrating Your Project](#migrating-your-project).
-
-### Apple
-
-[Apple rules] do not support platforms and are not yet scheduled
-for support.
-
-You can still use platform APIs with Apple builds (for example, when building
-with a mixture of Apple rules and pure C++) with [platform
-mappings](#platform-mappings).
-
-### Other languages
-
-* [Go rules] fully support platforms.
-* [Rust rules] fully support platforms.
-
-If you own a language rule set, see [Migrating your rule set] for adding
-support.
-
-## Background
-
-*Platforms* and *toolchains* were introduced to standardize how software
-projects target different architectures and cross-compile.
-
-This was
-[inspired][Inspiration]
-by the observation that language maintainers were already doing this in ad
-hoc, incompatible ways. For example, C++ rules used `--cpu` and
-`--crosstool_top` to declare a target CPU and toolchain. Neither of these
-correctly models a "platform". This produced awkward and incorrect builds.
-
-Java, Android, and other languages evolved their own flags for similar purposes,
-none of which interoperated with each other. This made cross-language builds
-confusing and complicated.
-
-Bazel is intended for large, multi-language, multi-platform projects.
This -demands more principled support for these concepts, including a clear -standard API. - -### Need for migration - -Upgrading to the new API requires two efforts: releasing the API and upgrading -rule logic to use it. - -The first is done but the second is ongoing. This consists of ensuring -language-specific platforms and toolchains are defined, language logic reads -toolchains through the new API instead of old flags like `--crosstool_top`, and -`config_setting`s select on the new API instead of old flags. - -This work is straightforward but requires a distinct effort for each language, -plus fair warning for project owners to test against upcoming changes. - -This is why this is an ongoing migration. - -### Goal - -This migration is complete when all projects build with the form: - -```posix-terminal -bazel build //:myproject --platforms=//:myplatform -``` - -This implies: - -1. Your project's rules choose the right toolchains for `//:myplatform`. -1. Your project's dependencies choose the right toolchains for `//:myplatform`. -1. `//:myplatform` references -[common declarations][Common Platform Declarations] -of `CPU`, `OS`, and other generic, language-independent properties -1. All relevant [`select()`s][select()] properly match `//:myplatform`. -1. `//:myplatform` is defined in a clear, accessible place: in your project's -repo if the platform is unique to your project, or some common place all -consuming projects can find it - -Old flags like `--cpu`, `--crosstool_top`, and `--fat_apk_cpu` will be -deprecated and removed as soon as it's safe to do so. - -Ultimately, this will be the *sole* way to configure architectures. - - -## Migrating your project - -If you build with languages that support platforms, your build should already -work with an invocation like: - -```posix-terminal -bazel build //:myproject --platforms=//:myplatform -``` - -See [Status](#status) and your language's documentation for precise details. - -If a language requires a flag to enable platform support, you also need to set -that flag. See [Status](#status) for details. - -For your project to build, you need to check the following: - -1. `//:myplatform` must exist. It's generally the project owner's responsibility - to define platforms because different projects target different machines. - See [Default platforms](#default-platforms). - -1. The toolchains you want to use must exist. If using stock toolchains, the - language owners should include instructions for how to register them. If - writing your own custom toolchains, you need to [register](https://bazel.build/extending/toolchains#registering-building-toolchains) them in your - `MODULE.bazel` file or with [`--extra_toolchains`](https://bazel.build/reference/command-line-reference#flag--extra_toolchains). - -1. `select()`s and [configuration transitions][Starlark transitions] must - resolve properly. See [select()](#select) and [Transitions](#transitions). - -1. If your build mixes languages that do and don't support platforms, you may - need platform mappings to help the legacy languages work with the new API. - See [Platform mappings](#platform-mappings) for details. - -If you still have problems, [reach out](#questions) for support. - -### Default platforms - -Project owners should define explicit -[platforms][Defining Constraints and Platforms] to describe the architectures -they want to build for. These are then triggered with `--platforms`. - -When `--platforms` isn't set, Bazel defaults to a `platform` representing the -local build machine. 
This is auto-generated at `@platforms//host` (aliased as -`@bazel_tools//tools:host_platform`) -so there's no need to explicitly define it. It maps the local machine's `OS` -and `CPU` with `constraint_value`s declared in -[`@platforms`](https://github.com/bazelbuild/platforms). - -### `select()` - -Projects can [`select()`][select()] on -[`constraint_value` targets][constraint_value Rule] but not complete -platforms. This is intentional so `select()` supports as wide a variety of -machines as possible. A library with `ARM`-specific sources should support *all* -`ARM`-powered machines unless there's reason to be more specific. - -To select on one or more `constraint_value`s, use: - -```python -config_setting( - name = "is_arm", - constraint_values = [ - "@platforms//cpu:arm", - ], -) -``` - -This is equivalent to traditionally selecting on `--cpu`: - -```python -config_setting( - name = "is_arm", - values = { - "cpu": "arm", - }, -) -``` - -More details [here][select() Platforms]. - -`select`s on `--cpu`, `--crosstool_top`, etc. don't understand `--platforms`. -When migrating your project to platforms, you must either convert them to -`constraint_values` or use [platform mappings](#platform-mappings) to support -both styles during migration. - -### Transitions - -[Starlark transitions][Starlark transitions] change -flags down parts of your build graph. If your project uses a transition that -sets `--cpu`, `--crosstool_top`, or other legacy flags, rules that read -`--platforms` won't see these changes. - -When migrating your project to platforms, you must either convert changes like -`return { "//command_line_option:cpu": "arm" }` to `return { -"//command_line_option:platforms": "//:my_arm_platform" }` or use [platform -mappings](#platform-mappings) to support both styles during migration. - -## Migrating your rule set - -If you own a rule set and want to support platforms, you need to: - -1. Have rule logic resolve toolchains with the toolchain API. See - [toolchain API][Toolchains] (`ctx.toolchains`). - -1. Optional: define an `--incompatible_enable_platforms_for_my_language` flag so - rule logic alternately resolves toolchains through the new API or old flags - like `--crosstool_top` during migration testing. - -1. Define the relevant properties that make up platform components. See - [Common platform properties](#common-platform-properties). - -1. Define standard toolchains and make them accessible to users through your - rule's registration instructions ([details](https://bazel.build/extending/toolchains#registering-building-toolchains)). - -1. Ensure [`select()`s](#select) and - [configuration transitions](#transitions) support platforms. This is the - biggest challenge. It's particularly challenging for multi-language projects - (which may fail if *all* languages can't read `--platforms`). - -If you need to mix with rules that don't support platforms, you may need -[platform mappings](#platform-mappings) to bridge the gap. - -### Common platform properties - -Common, cross-language platform properties like `OS` and `CPU` should be -declared in [`@platforms`](https://github.com/bazelbuild/platforms). -This encourages sharing, standardization, and cross-language compatibility. - -Properties unique to your rules should be declared in your rule's repo. This -lets you maintain clear ownership over the specific concepts your rules are -responsible for. - -If your rules use custom-purpose OSes or CPUs, these should be declared in your -rule's repo vs.
-[`@platforms`](https://github.com/bazelbuild/platforms). - -## Platform mappings - -*Platform mappings* is a temporary API that lets platform-aware logic mix with -legacy logic in the same build. This is a blunt tool that's only intended to -smooth incompatibilities with different migration timeframes. - -Caution: Only use this if necessary, and expect to eventually eliminate it. - -A platform mapping is a map of either a `platform()` to a -corresponding set of legacy flags or the reverse. For example: - -```python -platforms: - # Maps "--platforms=//platforms:ios" to "--ios_multi_cpus=x86_64 --apple_platform_type=ios". - //platforms:ios - --ios_multi_cpus=x86_64 - --apple_platform_type=ios - -flags: - # Maps "--ios_multi_cpus=x86_64 --apple_platform_type=ios" to "--platforms=//platforms:ios". - --ios_multi_cpus=x86_64 - --apple_platform_type=ios - //platforms:ios - - # Maps "--cpu=darwin_x86_64 --apple_platform_type=macos" to "//platform:macos". - --cpu=darwin_x86_64 - --apple_platform_type=macos - //platforms:macos -``` - -Bazel uses this to guarantee all settings, both platform-based and -legacy, are consistently applied throughout the build, including through -[transitions](#transitions). - -By default Bazel reads mappings from the `platform_mappings` file in your -workspace root. You can also set -`--platform_mappings=//:my_custom_mapping`. - -See the [platform mappings design] for details. - -## API review - -A [`platform`][platform Rule] is a collection of -[`constraint_value` targets][constraint_value Rule]: - -```python -platform( - name = "myplatform", - constraint_values = [ - "@platforms//os:linux", - "@platforms//cpu:arm", - ], -) -``` - -A [`constraint_value`][constraint_value Rule] is a machine -property. Values of the same "kind" are grouped under a common -[`constraint_setting`][constraint_setting Rule]: - -```python -constraint_setting(name = "os") -constraint_value( - name = "linux", - constraint_setting = ":os", -) -constraint_value( - name = "mac", - constraint_setting = ":os", -) -``` - -A [`toolchain`][Toolchains] is a [Starlark rule][Starlark rule]. Its -attributes declare a language's tools (like `compiler = -"//mytoolchain:custom_gcc"`). Its [providers][Starlark Provider] pass -this information to rules that need to build with these tools. - -Toolchains declare the `constraint_value`s of machines they can -[target][target_compatible_with Attribute] -(`target_compatible_with = ["@platforms//os:linux"]`) and machines their tools can -[run on][exec_compatible_with Attribute] -(`exec_compatible_with = ["@platforms//os:mac"]`). - -When building `$ bazel build //:myproject --platforms=//:myplatform`, Bazel -automatically selects a toolchain that can run on the build machine and -build binaries for `//:myplatform`. This is known as *toolchain resolution*. - -The set of available toolchains can be registered in the `MODULE.bazel` file -with [`register_toolchains`][register_toolchains Function] or at the -command line with [`--extra_toolchains`][extra_toolchains Flag]. - -For more information see [here][Toolchains]. - -## Questions - -For general support and questions about the migration timeline, contact -[bazel-discuss] or the owners of the appropriate rules. - -For discussions on the design and evolution of the platform/toolchain APIs, -contact [bazel-dev]. 
- -## See also - -* [Configurable Builds - Part 1] -* [Platforms] -* [Toolchains] -* [Bazel Platforms Cookbook] -* [Platforms examples] -* [Example C++ toolchain] - -[Android Rules]: /docs/bazel-and-android -[Apple Rules]: https://github.com/bazelbuild/rules_apple -[Background]: #background -[Bazel platforms Cookbook]: https://docs.google.com/document/d/1UZaVcL08wePB41ATZHcxQV4Pu1YfA1RvvWm8FbZHuW8/ -[bazel-dev]: https://groups.google.com/forum/#!forum/bazel-dev -[bazel-discuss]: https://groups.google.com/forum/#!forum/bazel-discuss -[Common Platform Declarations]: https://github.com/bazelbuild/platforms -[constraint_setting Rule]: /reference/be/platforms-and-toolchains#constraint_setting -[constraint_value Rule]: /reference/be/platforms-and-toolchains#constraint_value -[Configurable Builds - Part 1]: https://blog.bazel.build/2019/02/11/configurable-builds-part-1.html -[Configuring C++ toolchains]: /tutorials/ccp-toolchain-config -[Defining Constraints and Platforms]: /extending/platforms#constraints-platforms -[Example C++ toolchain]: https://github.com/gregestren/snippets/tree/master/custom_cc_toolchain_with_platforms -[exec_compatible_with Attribute]: /reference/be/platforms-and-toolchains#toolchain.exec_compatible_with -[extra_toolchains Flag]: /reference/command-line-reference#flag--extra_toolchains -[Go Rules]: https://github.com/bazelbuild/rules_go -[Inspiration]: https://blog.bazel.build/2019/02/11/configurable-builds-part-1.html -[Migrating your rule set]: #migrating-your-rule-set -[Platforms]: /extending/platforms -[Platforms examples]: https://github.com/hlopko/bazel_platforms_examples -[platform mappings design]: https://docs.google.com/document/d/1Vg_tPgiZbSrvXcJ403vZVAGlsWhH9BUDrAxMOYnO0Ls/edit -[platform Rule]: /reference/be/platforms-and-toolchains#platform -[register_toolchains Function]: /rules/lib/globals/module#register_toolchains -[Rust rules]: https://github.com/bazelbuild/rules_rust -[select()]: /docs/configurable-attributes -[select() Platforms]: /docs/configurable-attributes#platforms -[Starlark provider]: /extending/rules#providers -[Starlark rule]: /extending/rules -[Starlark transitions]: /extending/config#user-defined-transitions -[target_compatible_with Attribute]: /reference/be/platforms-and-toolchains#toolchain.target_compatible_with -[Toolchains]: /extending/toolchains diff --git a/8.0.1/concepts/visibility.mdx b/8.0.1/concepts/visibility.mdx deleted file mode 100644 index 5b1bfd6..0000000 --- a/8.0.1/concepts/visibility.mdx +++ /dev/null @@ -1,591 +0,0 @@ ---- -title: 'Visibility' ---- - - - -This page covers Bazel's two visibility systems: -[target visibility](#target-visibility) and [load visibility](#load-visibility). - -Both types of visibility help other developers distinguish between your -library's public API and its implementation details, and help enforce structure -as your workspace grows. You can also use visibility when deprecating a public -API to allow current users while denying new ones. - -## Target visibility - -**Target visibility** controls who may depend on your target — that is, who may -use your target's label inside an attribute such as `deps`. A target will fail -to build during the [analysis](/reference/glossary#analysis-phase) phase if it -violates the visibility of one of its dependencies. - -Generally, a target `A` is visible to a target `B` if they are in the same -location, or if `A` grants visibility to `B`'s location. 
In the absence of -[symbolic macros](/extending/macros), the term "location" can be simplified -to just "package"; see [below](#symbolic-macros) for more on symbolic macros. - -Visibility is specified by listing allowed packages. Allowing a package does not -necessarily mean that its subpackages are also allowed. For more details on -packages and subpackages, see [Concepts and terminology](/concepts/build-ref). - -For prototyping, you can disable target visibility enforcement by setting the -flag `--check_visibility=false`. This shouldn't be done for production usage in -submitted code. - -The primary way to control visibility is with a rule's -[`visibility`](/reference/be/common-definitions#common.visibility) attribute. -The following subsections describe the attribute's format, how to apply it to -various kinds of targets, and the interaction between the visibility system and -symbolic macros. - -### Visibility specifications - -All rule targets have a `visibility` attribute that takes a list of labels. Each -label has one of the following forms. With the exception of the last form, these -are just syntactic placeholders that don't correspond to any actual target. - -* `"//visibility:public"`: Grants access to all packages. - -* `"//visibility:private"`: Does not grant any additional access; only targets - in this location's package can use this target. - -* `"//foo/bar:__pkg__"`: Grants access to `//foo/bar` (but not its - subpackages). - -* `"//foo/bar:__subpackages__"`: Grants access to `//foo/bar` and all of its - direct and indirect subpackages. - -* `"//some_pkg:my_package_group"`: Grants access to all of the packages that - are part of the given [`package_group`](/reference/be/functions#package_group). - - * Package groups use a - [different syntax](/reference/be/functions#package_group.packages) for - specifying packages. Within a package group, the forms - `"//foo/bar:__pkg__"` and `"//foo/bar:__subpackages__"` are respectively - replaced by `"//foo/bar"` and `"//foo/bar/..."`. Likewise, - `"//visibility:public"` and `"//visibility:private"` are just `"public"` - and `"private"`. - -For example, if `//some/package:mytarget` has its `visibility` set to -`[":__subpackages__", "//tests:__pkg__"]`, then it could be used by any target -that is part of the `//some/package/...` source tree, as well as targets -declared in `//tests/BUILD`, but not by targets defined in -`//tests/integration/BUILD`. - -**Best practice:** To make several targets visible to the same set -of packages, use a `package_group` instead of repeating the list in each -target's `visibility` attribute. This increases readability and prevents the -lists from getting out of sync. - -**Best practice:** When granting visibility to another team's project, prefer -`__subpackages__` over `__pkg__` to avoid needless visibility churn as that -project evolves and adds new subpackages. - -Note: The `visibility` attribute may not specify non-`package_group` targets. -Doing so triggers a "Label does not refer to a package group" or "Cycle in -dependency graph" error. - -### Rule target visibility - -A rule target's visibility is determined by taking its `visibility` attribute --- or a suitable default if not given -- and appending the location where the -target was declared.
For targets not declared in a symbolic macro, if the -package specifies a [`default_visibility`](/reference/be/functions#package.default_visibility), -this default is used; for all other packages and for targets declared in a -symbolic macro, the default is just `["//visibility:private"]`. - -```starlark -# //mypkg/BUILD - -package(default_visibility = ["//friend:__pkg__"]) - -cc_library( - name = "t1", - ... - # No visibility explicitly specified. - # Effective visibility is ["//friend:__pkg__", "//mypkg:__pkg__"]. - # If no default_visibility were given in package(...), the visibility would - # instead default to ["//visibility:private"], and the effective visibility - # would be ["//mypkg:__pkg__"]. -) - -cc_library( - name = "t2", - ... - visibility = [":clients"], - # Effective visibility is ["//mypkg:clients", "//mypkg:__pkg__"], which will - # expand to ["//another_friend:__subpackages__", "//mypkg:__pkg__"]. -) - -cc_library( - name = "t3", - ... - visibility = ["//visibility:private"], - # Effective visibility is ["//mypkg:__pkg__"] -) - -package_group( - name = "clients", - packages = ["//another_friend/..."], -) -``` - -**Best practice:** Avoid setting `default_visibility` to public. It may be -convenient for prototyping or in small codebases, but the risk of inadvertently -creating public targets increases as the codebase grows. It's better to be -explicit about which targets are part of a package's public interface. - -### Generated file target visibility - -A generated file target has the same visibility as the rule target that -generates it. - -```starlark -# //mypkg/BUILD - -java_binary( - name = "foo", - ... - visibility = ["//friend:__pkg__"], -) -``` - -```starlark -# //friend/BUILD - -some_rule( - name = "bar", - deps = [ - # Allowed directly by visibility of foo. - "//mypkg:foo", - # Also allowed. The java_binary's "_deploy.jar" implicit output file - # target has the same visibility as the rule target itself. - "//mypkg:foo_deploy.jar", - ] - ... -) -``` - -### Source file target visibility - -Source file targets can either be explicitly declared using -[`exports_files`](/reference/be/functions#exports_files), or implicitly created -by referring to their filename in a label attribute of a rule (outside of a -symbolic macro). As with rule targets, the location of the call to -`exports_files`, or the BUILD file that referred to the input file, is always -automatically appended to the file's visibility. - -Files declared by `exports_files` can have their visibility set by the -`visibility` parameter to that function. If this parameter is not given, the visibility is public. - -Note: `exports_files` may not be used to override the visibility of a generated -file. - -For files that do not appear in a call to `exports_files`, the visibility -depends on the value of the flag -[`--incompatible_no_implicit_file_export`](https://github.com/bazelbuild/bazel/issues/10225): - -* If the flag is true, the visibility is private. - -* Else, the legacy behavior applies: The visibility is the same as the - `BUILD` file's `default_visibility`, or private if a default visibility is - not specified. - -Avoid relying on the legacy behavior. Always write an `exports_files` -declaration whenever a source file target needs non-private visibility. - -**Best practice:** When possible, prefer to expose a rule target rather than a -source file. For example, instead of calling `exports_files` on a `.java` file, -wrap the file in a non-private `java_library` target. Generally, rule targets -should only directly reference source files that live in the same package.
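As a rough sketch of that best practice (the target and file names here are hypothetical, not from the original docs), rather than exporting the source file directly, expose a library target that owns it:

```starlark
# //mylib/BUILD

# Instead of exports_files(["Config.java"]), wrap the source in a rule
# target and grant visibility on the target.
java_library(
    name = "config",
    srcs = ["Config.java"],
    visibility = ["//some_consumer:__pkg__"],
)
```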
#### Example - -File `//frobber/data/BUILD`: - -```starlark -exports_files(["readme.txt"]) -``` - -File `//frobber/bin/BUILD`: - -```starlark -cc_binary( - name = "my-program", - data = ["//frobber/data:readme.txt"], -) -``` - -### Config setting visibility - -Historically, Bazel has not enforced visibility for -[`config_setting`](/reference/be/general#config_setting) targets that are -referenced in the keys of a [`select()`](/reference/be/functions#select). There -are two flags to remove this legacy behavior: - -* [`--incompatible_enforce_config_setting_visibility`](https://github.com/bazelbuild/bazel/issues/12932) - enables visibility checking for these targets. To assist with migration, it - also causes any `config_setting` that does not specify a `visibility` to be - considered public (regardless of package-level `default_visibility`). - -* [`--incompatible_config_setting_private_default_visibility`](https://github.com/bazelbuild/bazel/issues/12933) - causes `config_setting`s that do not specify a `visibility` to respect the - package's `default_visibility` and to fall back on private visibility, just - like any other rule target. It is a no-op if - `--incompatible_enforce_config_setting_visibility` is not set. - -Avoid relying on the legacy behavior. Any `config_setting` that is intended to -be used outside the current package should have an explicit `visibility`, if the -package does not already specify a suitable `default_visibility`. - -### Package group target visibility - -`package_group` targets do not have a `visibility` attribute. They are always -publicly visible. - -### Visibility of implicit dependencies - -Some rules have [implicit dependencies](/extending/rules#private_attributes_and_implicit_dependencies) — -dependencies that are not spelled out in a `BUILD` file but are inherent to -every instance of that rule. For example, a `cc_library` rule might create an -implicit dependency from each of its rule targets to an executable target -representing a C++ compiler. - -The visibility of such an implicit dependency is checked with respect to the -package containing the `.bzl` file in which the rule (or aspect) is defined. In -our example, the C++ compiler could be private so long as it lives in the same -package as the definition of the `cc_library` rule. As a fallback, if the -implicit dependency is not visible from the definition, it is checked with -respect to the `cc_library` target. - -If you want to restrict the usage of a rule to certain packages, use -[load visibility](#load-visibility) instead. - -### Visibility and symbolic macros - -This section describes how the visibility system interacts with -[symbolic macros](/extending/macros). - -#### Locations within symbolic macros - -A key detail of the visibility system is how we determine the location of a -declaration. For targets that are not declared in a symbolic macro, the location -is just the package where the target lives -- the package of the `BUILD` file. -But for targets created in a symbolic macro, the location is the package -containing the `.bzl` file where the macro's definition (the -`my_macro = macro(...)` statement) appears. When a target is created inside -multiple nested macros, it is always the innermost symbolic macro's definition -that is used. - -The same system is used to determine what location to check against a given -dependency's visibility.
If the consuming target was created inside a macro, we -look at the innermost macro's definition rather than the package the consuming -target lives in. - -This means that all macros whose code is defined in the same package are -automatically "friends" with one another. Any target directly created by a macro -defined in `//lib:defs.bzl` can be seen from any other macro defined in `//lib`, -regardless of what packages the macros are actually instantiated in. Likewise, -they can see, and can be seen by, targets declared directly in `//lib/BUILD` and -its legacy macros. Conversely, targets that live in the same package cannot -necessarily see one another if at least one of them is created by a symbolic -macro. - -Within a symbolic macro's implementation function, the `visibility` parameter -has the effective value of the macro's `visibility` attribute after appending -the location where the macro was called. The standard way for a macro to export -one of its targets to its caller is to forward this value along to the target's -declaration, as in `some_rule(..., visibility = visibility)`. Targets that omit -this attribute won't be visible to the caller of the macro unless the caller -happens to be in the same package as the macro definition. This behavior -composes, in the sense that a chain of nested calls to submacros may each pass -`visibility = visibility`, re-exporting the inner macro's exported targets to -the caller at each level, without exposing any of the macros' implementation -details. - -#### Delegating privileges to a submacro - -The visibility model has a special feature to allow a macro to delegate its -permissions to a submacro. This is important for factoring and composing macros. - -Suppose you have a macro `my_macro` that creates a dependency edge using a rule -`some_library` from another package: - -```starlark -# //macro/defs.bzl -load("//lib:defs.bzl", "some_library") - -def _impl(name, visibility, ...): - ... - native.genrule( - name = name + "_dependency" - ... - ) - some_library( - name = name + "_consumer", - deps = [name + "_dependency"], - ... - ) - -my_macro = macro(implementation = _impl, ...) -``` - -```starlark -# //pkg/BUILD - -load("//macro:defs.bzl", "my_macro") - -my_macro(name = "foo", ...) -``` - -The `//pkg:foo_dependency` target has no `visibility` specified, so it is only -visible within `//macro`, which works fine for the consuming target. Now, what -happens if the author of `//lib` refactors `some_library` to instead be -implemented using a macro? - -```starlark -# //lib:defs.bzl - -def _impl(name, visibility, deps, ...): - some_rule( - # Main target, exported. - name = name, - visibility = visibility, - deps = deps, - ...) - -some_library = macro(implementation = _impl, ...) -``` - -With this change, `//pkg:foo_consumer`'s location is now `//lib` rather than -`//macro`, so its usage of `//pkg:foo_dependency` violates the dependency's -visibility. The author of `my_macro` can't be expected to pass -`visibility = ["//lib"]` to the declaration of the dependency just to work -around this implementation detail. - -For this reason, when a dependency of a target is also an attribute value of the -macro that declared the target, we check the dependency's visibility against the -location of the macro instead of the location of the consuming target. 
- -In this example, to validate whether `//pkg:foo_consumer` can see -`//pkg:foo_dependency`, we see that `//pkg:foo_dependency` was also passed as an -input to the call to `some_library` inside of `my_macro`, and instead check the -dependency's visibility against the location of this call, `//macro`. - -This process can repeat recursively, as long as a target or macro declaration is -inside of another symbolic macro taking the dependency's label in one of its -label-typed attributes. - -Note: Visibility delegation does not work for labels that were not passed into -the macro, such as labels derived by string manipulation. - -## Load visibility - -**Load visibility** controls whether a `.bzl` file may be loaded from other -`BUILD` or `.bzl` files outside the current package. - -In the same way that target visibility protects source code that is encapsulated -by targets, load visibility protects build logic that is encapsulated by `.bzl` -files. For instance, a `BUILD` file author might wish to factor some repetitive -target declarations into a macro in a `.bzl` file. Without the protection of -load visibility, they might find their macro reused by other collaborators in -the same workspace, so that modifying the macro breaks other teams' builds. - -Note that a `.bzl` file may or may not have a corresponding source file target. -If it does, there is no guarantee that the load visibility and the target -visibility coincide. That is, the same `BUILD` file might be able to load the -`.bzl` file but not list it in the `srcs` of a [`filegroup`](/reference/be/general#filegroup), -or vice versa. This can sometimes cause problems for rules that wish to consume -`.bzl` files as source code, such as for documentation generation or testing. - -For prototyping, you may disable load visibility enforcement by setting -`--check_bzl_visibility=false`. As with `--check_visibility=false`, this should -not be done for submitted code. - -Load visibility is available as of Bazel 6.0. - -### Declaring load visibility - -To set the load visibility of a `.bzl` file, call the -[`visibility()`](/rules/lib/globals/bzl#visibility) function from within the file. -The argument to `visibility()` is a list of package specifications, just like -the [`packages`](/reference/be/functions#package_group.packages) attribute of -`package_group`. However, `visibility()` does not accept negative package -specifications. - -The call to `visibility()` must only occur once per file, at the top level (not -inside a function), and ideally immediately following the `load()` statements. - -Unlike target visibility, the default load visibility is always public. Files -that do not call `visibility()` are always loadable from anywhere in the -workspace. It is a good idea to add `visibility("private")` to the top of any -new `.bzl` file that is not specifically intended for use outside the package. - -### Example - -```starlark -# //mylib/internal_defs.bzl - -# Available to subpackages and to mylib's tests. -visibility(["//mylib/...", "//tests/mylib/..."]) - -def helper(...): - ... -``` - -```starlark -# //mylib/rules.bzl - -load(":internal_defs.bzl", "helper") -# Set visibility explicitly, even though public is the default. -# Note the [] can be omitted when there's only one entry. -visibility("public") - -myrule = rule( - ... -) -``` - -```starlark -# //someclient/BUILD - -load("//mylib:rules.bzl", "myrule") # ok -load("//mylib:internal_defs.bzl", "helper") # error - -... 
-``` - -### Load visibility practices - -This section describes tips for managing load visibility declarations. - -#### Factoring visibilities - -When multiple `.bzl` files should have the same visibility, it can be helpful to -factor their package specifications into a common list. For example: - -```starlark -# //mylib/internal_defs.bzl - -visibility("private") - -clients = [ - "//foo", - "//bar/baz/...", - ... -] -``` - -```starlark -# //mylib/feature_A.bzl - -load(":internal_defs.bzl", "clients") -visibility(clients) - -... -``` - -```starlark -# //mylib/feature_B.bzl - -load(":internal_defs.bzl", "clients") -visibility(clients) - -... -``` - -This helps prevent accidental skew between the various `.bzl` files' -visibilities. It is also more readable when the `clients` list is large. - -#### Composing visibilities - -Sometimes a `.bzl` file might need to be visible to an allowlist that is -composed of multiple smaller allowlists. This is analogous to how a -`package_group` can incorporate other `package_group`s via its -[`includes`](/reference/be/functions#package_group.includes) attribute. - -Suppose you are deprecating a widely used macro. You want it to be visible only -to existing users and to the packages owned by your own team. You might write: - -```starlark -# //mylib/macros.bzl - -load(":internal_defs.bzl", "our_packages") -load("//some_big_client:defs.bzl", "their_remaining_uses") - -# List concatenation. Duplicates are fine. -visibility(our_packages + their_remaining_uses) -``` - -#### Deduplicating with package groups - -Unlike target visibility, you cannot define a load visibility in terms of a -`package_group`. If you want to reuse the same allowlist for both target -visibility and load visibility, it's best to move the list of package -specifications into a `.bzl` file, where both kinds of declarations may refer to -it. Building off the example in [Factoring visibilities](#factoring-visibilities) -above, you might write: - -```starlark -# //mylib/BUILD - -load(":internal_defs.bzl", "clients") - -package_group( - name = "my_pkg_grp", - packages = clients, -) -``` - -This only works if the list does not contain any negative package -specifications. - -#### Protecting individual symbols - -Any Starlark symbol whose name begins with an underscore cannot be loaded from -another file. This makes it easy to create private symbols, but does not allow -you to share these symbols with a limited set of trusted files. On the other -hand, load visibility gives you control over what other packages may see your -`.bzl` file, but does not allow you to prevent any non-underscored symbol from -being loaded. - -Luckily, you can combine these two features to get fine-grained control. - -```starlark -# //mylib/internal_defs.bzl - -# Can't be public, because internal_helper shouldn't be exposed to the world. -visibility("private") - -# Can't be underscore-prefixed, because this is -# needed by other .bzl files in mylib. -def internal_helper(...): - ... - -def public_util(...): - ... -``` - -```starlark -# //mylib/defs.bzl - -load(":internal_defs.bzl", "internal_helper", _public_util="public_util") -visibility("public") - -# internal_helper, as a loaded symbol, is available for use in this file but -# can't be imported by clients who load this file. -... - -# Re-export public_util from this file by assigning it to a global variable. -# We needed to import it under a different name ("_public_util") in order for -# this assignment to be legal.
-public_util = _public_util -``` - -#### bzl-visibility Buildifier lint - -There is a [Buildifier lint](https://github.com/bazelbuild/buildtools/blob/master/WARNINGS.md#bzl-visibility) -that provides a warning if users load a file from a directory named `internal` -or `private`, when the user's file is not itself underneath the parent of that -directory. This lint predates the load visibility feature and is unnecessary in -workspaces where `.bzl` files declare visibilities. diff --git a/8.0.1/configure/attributes.mdx b/8.0.1/configure/attributes.mdx deleted file mode 100644 index 7bc3f41..0000000 --- a/8.0.1/configure/attributes.mdx +++ /dev/null @@ -1,1097 +0,0 @@ ---- -title: 'Configurable Build Attributes' ---- - - - -**_Configurable attributes_**, commonly known as [`select()`]( -/reference/be/functions#select), is a Bazel feature that lets users toggle the values -of build rule attributes at the command line. - -This can be used, for example, for a multiplatform library that automatically -chooses the appropriate implementation for the architecture, or for a -feature-configurable binary that can be customized at build time. - -## Example - -```python -# myapp/BUILD - -cc_binary( - name = "mybinary", - srcs = ["main.cc"], - deps = select({ - ":arm_build": [":arm_lib"], - ":x86_debug_build": [":x86_dev_lib"], - "//conditions:default": [":generic_lib"], - }), -) - -config_setting( - name = "arm_build", - values = {"cpu": "arm"}, -) - -config_setting( - name = "x86_debug_build", - values = { - "cpu": "x86", - "compilation_mode": "dbg", - }, -) -``` - -This declares a `cc_binary` that "chooses" its deps based on the flags at the -command line. Specifically, `deps` becomes:

| Command                                         | `deps =`           |
| ----------------------------------------------- | ------------------ |
| `bazel build //myapp:mybinary --cpu=arm`        | `[":arm_lib"]`     |
| `bazel build //myapp:mybinary -c dbg --cpu=x86` | `[":x86_dev_lib"]` |
| `bazel build //myapp:mybinary --cpu=ppc`        | `[":generic_lib"]` |
| `bazel build //myapp:mybinary -c dbg --cpu=ppc` | `[":generic_lib"]` |
- -`select()` serves as a placeholder for a value that will be chosen based on -*configuration conditions*, which are labels referencing [`config_setting`](/reference/be/general#config_setting) -targets. By using `select()` in a configurable attribute, the attribute -effectively adopts different values when different conditions hold. - -Matches must be unambiguous: if multiple conditions match then either -* They all resolve to the same value. For example, when running on linux x86, this is unambiguous - `{"@platforms//os:linux": "Hello", "@platforms//cpu:x86_64": "Hello"}` because both branches resolve to "hello". -* One's `values` is a strict superset of all others'. For example, `values = {"cpu": "x86", "compilation_mode": "dbg"}` - is an unambiguous specialization of `values = {"cpu": "x86"}`. - -The built-in condition [`//conditions:default`](#default-condition) automatically matches when -nothing else does. - -While this example uses `deps`, `select()` works just as well on `srcs`, -`resources`, `cmd`, and most other attributes. Only a small number of attributes -are *non-configurable*, and these are clearly annotated. For example, -`config_setting`'s own -[`values`](/reference/be/general#config_setting.values) attribute is non-configurable. - -## `select()` and dependencies - -Certain attributes change the build parameters for all transitive dependencies -under a target. For example, `genrule`'s `tools` changes `--cpu` to the CPU of -the machine running Bazel (which, thanks to cross-compilation, may be different -than the CPU the target is built for). This is known as a -[configuration transition](/reference/glossary#transition). - -Given - -```python -#myapp/BUILD - -config_setting( - name = "arm_cpu", - values = {"cpu": "arm"}, -) - -config_setting( - name = "x86_cpu", - values = {"cpu": "x86"}, -) - -genrule( - name = "my_genrule", - srcs = select({ - ":arm_cpu": ["g_arm.src"], - ":x86_cpu": ["g_x86.src"], - }), - tools = select({ - ":arm_cpu": [":tool1"], - ":x86_cpu": [":tool2"], - }), -) - -cc_binary( - name = "tool1", - srcs = select({ - ":arm_cpu": ["armtool.cc"], - ":x86_cpu": ["x86tool.cc"], - }), -) -``` - -running - -```sh -$ bazel build //myapp:my_genrule --cpu=arm -``` - -on an `x86` developer machine binds the build to `g_arm.src`, `tool1`, and -`x86tool.cc`. Both of the `select`s attached to `my_genrule` use `my_genrule`'s -build parameters, which include `--cpu=arm`. The `tools` attribute changes -`--cpu` to `x86` for `tool1` and its transitive dependencies. The `select` on -`tool1` uses `tool1`'s build parameters, which include `--cpu=x86`. - -## Configuration conditions - -Each key in a configurable attribute is a label reference to a -[`config_setting`](/reference/be/general#config_setting) or -[`constraint_value`](/reference/be/platforms-and-toolchains#constraint_value). - -`config_setting` is just a collection of -expected command line flag settings. By encapsulating these in a target, it's -easy to maintain "standard" conditions users can reference from multiple places. - -`constraint_value` provides support for [multi-platform behavior](#platforms). - -### Built-in flags - -Flags like `--cpu` are built into Bazel: the build tool natively understands -them for all builds in all projects. 
These are specified with -[`config_setting`](/reference/be/general#config_setting)'s -[`values`](/reference/be/general#config_setting.values) attribute: - -```python -config_setting( - name = "meaningful_condition_name", - values = { - "flag1": "value1", - "flag2": "value2", - ... - }, -) -``` - -`flagN` is a flag name (without `--`, so `"cpu"` instead of `"--cpu"`). `valueN` -is the expected value for that flag. `:meaningful_condition_name` matches if -*every* entry in `values` matches. Order is irrelevant. - -`valueN` is parsed as if it was set on the command line. This means: - -* `values = { "compilation_mode": "opt" }` matches `bazel build -c opt` -* `values = { "force_pic": "true" }` matches `bazel build --force_pic=1` -* `values = { "force_pic": "0" }` matches `bazel build --noforce_pic` - -`config_setting` only supports flags that affect target behavior. For example, -[`--show_progress`](/docs/user-manual#show-progress) isn't allowed because -it only affects how Bazel reports progress to the user. Targets can't use that -flag to construct their results. The exact set of supported flags isn't -documented. In practice, most flags that "make sense" work. - -### Custom flags - -You can model your own project-specific flags with -[Starlark build settings][BuildSettings]. Unlike built-in flags, these are -defined as build targets, so Bazel references them with target labels. - -These are triggered with [`config_setting`](/reference/be/general#config_setting)'s -[`flag_values`](/reference/be/general#config_setting.flag_values) -attribute: - -```python -config_setting( - name = "meaningful_condition_name", - flag_values = { - "//myflags:flag1": "value1", - "//myflags:flag2": "value2", - ... - }, -) -``` - -Behavior is the same as for [built-in flags](#built-in-flags). See [here](https://github.com/bazelbuild/examples/tree/HEAD/configurations/select_on_build_setting) -for a working example. - -[`--define`](/reference/command-line-reference#flag--define) -is an alternative legacy syntax for custom flags (for example -`--define foo=bar`). This can be expressed either in the -[values](/reference/be/general#config_setting.values) attribute -(`values = {"define": "foo=bar"}`) or the -[define_values](/reference/be/general#config_setting.define_values) attribute -(`define_values = {"foo": "bar"}`). `--define` is only supported for backwards -compatibility. Prefer Starlark build settings whenever possible. - -`values`, `flag_values`, and `define_values` evaluate independently. The -`config_setting` matches if all values across all of them match. - -## The default condition - -The built-in condition `//conditions:default` matches when no other condition -matches. - -Because of the "exactly one match" rule, a configurable attribute with no match -and no default condition emits a `"no matching conditions"` error. This can -protect against silent failures from unexpected settings: - -```python -# myapp/BUILD - -config_setting( - name = "x86_cpu", - values = {"cpu": "x86"}, -) - -cc_library( - name = "x86_only_lib", - srcs = select({ - ":x86_cpu": ["lib.cc"], - }), -) -``` - -```sh -$ bazel build //myapp:x86_only_lib --cpu=arm -ERROR: Configurable attribute "srcs" doesn't match this configuration (would -a default condition help?). -Conditions checked: - //myapp:x86_cpu -``` - -For even clearer errors, you can set custom messages with `select()`'s -[`no_match_error`](#custom-error-messages) attribute. 
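To make the earlier `x86_only_lib` example robust, a minimal sketch of one possible fix is to add a default branch (the fallback source `lib_generic.cc` is hypothetical):

```python
cc_library(
    name = "x86_only_lib",
    srcs = select({
        ":x86_cpu": ["lib.cc"],
        # Fall back to a portable implementation on all other CPUs.
        "//conditions:default": ["lib_generic.cc"],
    }),
)
```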
- -## Platforms - -While the ability to specify multiple flags on the command line provides -flexibility, it can also be burdensome to individually set each one every time -you want to build a target. - [Platforms](/extending/platforms) -let you consolidate these into simple bundles. - -```python -# myapp/BUILD - -sh_binary( - name = "my_rocks", - srcs = select({ - ":basalt": ["pyroxene.sh"], - ":marble": ["calcite.sh"], - "//conditions:default": ["feldspar.sh"], - }), -) - -config_setting( - name = "basalt", - constraint_values = [ - ":black", - ":igneous", - ], -) - -config_setting( - name = "marble", - constraint_values = [ - ":white", - ":metamorphic", - ], -) - -# constraint_setting acts as an enum type, and constraint_value as an enum value. -constraint_setting(name = "color") -constraint_value(name = "black", constraint_setting = "color") -constraint_value(name = "white", constraint_setting = "color") -constraint_setting(name = "texture") -constraint_value(name = "smooth", constraint_setting = "texture") -constraint_setting(name = "type") -constraint_value(name = "igneous", constraint_setting = "type") -constraint_value(name = "metamorphic", constraint_setting = "type") - -platform( - name = "basalt_platform", - constraint_values = [ - ":black", - ":igneous", - ], -) - -platform( - name = "marble_platform", - constraint_values = [ - ":white", - ":smooth", - ":metamorphic", - ], -) -``` - -The platform can be specified on the command line. It activates the -`config_setting`s that contain a subset of the platform's `constraint_values`, -allowing those `config_setting`s to match in `select()` expressions. - -For example, in order to set the `srcs` attribute of `my_rocks` to `calcite.sh`, -you can simply run - -```sh -bazel build //myapp:my_rocks --platforms=//myapp:marble_platform -``` - -Without platforms, this might look something like - -```sh -bazel build //myapp:my_rocks --define color=white --define texture=smooth --define type=metamorphic -``` - -`select()` can also directly read `constraint_value`s: - -```python -constraint_setting(name = "type") -constraint_value(name = "igneous", constraint_setting = "type") -constraint_value(name = "metamorphic", constraint_setting = "type") -sh_binary( - name = "my_rocks", - srcs = select({ - ":igneous": ["igneous.sh"], - ":metamorphic": ["metamorphic.sh"], - }), -) -``` - -This saves the need for boilerplate `config_setting`s when you only need to -check against single values. - -Platforms are still under development. See the -[documentation](/concepts/platforms) for details. - -## Combining `select()`s - -`select` can appear multiple times in the same attribute: - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"] + - select({ - ":armeabi_mode": ["armeabi_src.sh"], - ":x86_mode": ["x86_src.sh"], - }) + - select({ - ":opt_mode": ["opt_extras.sh"], - ":dbg_mode": ["dbg_extras.sh"], - }), -) -``` - -Note: Some restrictions apply on what can be combined in the `select`s values: - - Duplicate labels can appear in different paths of the same `select`. - - Duplicate labels can *not* appear within the same path of a `select`. - - Duplicate labels can *not* appear across multiple combined `select`s (no matter what path) - -`select` cannot appear inside another `select`. If you need to nest `selects` -and your attribute takes other targets as values, use an intermediate target: - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":armeabi_mode": [":armeabi_lib"], - ...
- }), -) - -sh_library( - name = "armeabi_lib", - srcs = select({ - ":opt_mode": ["armeabi_with_opt.sh"], - ... - }), -) -``` - -If you need a `select` to match when multiple conditions match, consider [AND -chaining](#and-chaining). - -## OR chaining - -Consider the following: - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1": [":standard_lib"], - ":config2": [":standard_lib"], - ":config3": [":standard_lib"], - ":config4": [":special_lib"], - }), -) -``` - -Most conditions evaluate to the same dep. But this syntax is hard to read and -maintain. It would be nice to not have to repeat `[":standard_lib"]` multiple -times. - -One option is to predefine the value as a BUILD variable: - -```python -STANDARD_DEP = [":standard_lib"] - -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1": STANDARD_DEP, - ":config2": STANDARD_DEP, - ":config3": STANDARD_DEP, - ":config4": [":special_lib"], - }), -) -``` - -This makes it easier to manage the dependency. But it still causes unnecessary -duplication. - -For more direct support, use one of the following: - -### `selects.with_or` - -The -[with_or](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectswith_or) -macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md) -module supports `OR`ing conditions directly inside a `select`: - -```python -load("@bazel_skylib//lib:selects.bzl", "selects") -``` - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = selects.with_or({ - (":config1", ":config2", ":config3"): [":standard_lib"], - ":config4": [":special_lib"], - }), -) -``` - -### `selects.config_setting_group` - - -The -[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group) -macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md) -module supports `OR`ing multiple `config_setting`s: - -```python -load("@bazel_skylib//lib:selects.bzl", "selects") -``` - - -```python -config_setting( - name = "config1", - values = {"cpu": "arm"}, -) -config_setting( - name = "config2", - values = {"compilation_mode": "dbg"}, -) -selects.config_setting_group( - name = "config1_or_2", - match_any = [":config1", ":config2"], -) -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1_or_2": [":standard_lib"], - "//conditions:default": [":other_lib"], - }), -) -``` - -Unlike `selects.with_or`, different targets can share `:config1_or_2` across -different attributes. - -It's an error for multiple conditions to match unless one is an unambiguous -"specialization" of the others or they all resolve to the same value. See [here](#configurable-build-example) for details. 
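As a hedged illustration of the "specialization" rule (the condition names here are hypothetical): if both of the following conditions match, the second wins rather than triggering an ambiguity error, because its `values` is a strict superset of the first's:

```python
config_setting(
    name = "x86_any",
    values = {"cpu": "x86"},
)

config_setting(
    name = "x86_dbg",
    # Strict superset of :x86_any's values, so this is an unambiguous
    # specialization: it wins whenever both conditions match.
    values = {
        "cpu": "x86",
        "compilation_mode": "dbg",
    },
)
```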
- -## AND chaining - -If you need a `select` branch to match when multiple conditions match, use the -[Skylib](https://github.com/bazelbuild/bazel-skylib) macro -[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group): - -```python -config_setting( - name = "config1", - values = {"cpu": "arm"}, -) -config_setting( - name = "config2", - values = {"compilation_mode": "dbg"}, -) -selects.config_setting_group( - name = "config1_and_2", - match_all = [":config1", ":config2"], -) -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1_and_2": [":standard_lib"], - "//conditions:default": [":other_lib"], - }), -) -``` - -Unlike OR chaining, existing `config_setting`s can't be directly `AND`ed -inside a `select`. You have to explicitly wrap them in a `config_setting_group`. - -## Custom error messages - -By default, when no condition matches, the target the `select()` is attached to -fails with the error: - -```sh -ERROR: Configurable attribute "deps" doesn't match this configuration (would -a default condition help?). -Conditions checked: - //tools/cc_target_os:darwin - //tools/cc_target_os:android -``` - -This can be customized with the [`no_match_error`](/reference/be/functions#select) -attribute: - -```python -cc_library( - name = "my_lib", - deps = select( - { - "//tools/cc_target_os:android": [":android_deps"], - "//tools/cc_target_os:windows": [":windows_deps"], - }, - no_match_error = "Please build with an Android or Windows toolchain", - ), -) -``` - -```sh -$ bazel build //myapp:my_lib -ERROR: Configurable attribute "deps" doesn't match this configuration: Please -build with an Android or Windows toolchain -``` - -## Rules compatibility - -Rule implementations receive the *resolved values* of configurable -attributes. For example, given: - -```python -# myapp/BUILD - -some_rule( - name = "my_target", - some_attr = select({ - ":foo_mode": [":foo"], - ":bar_mode": [":bar"], - }), -) -``` - -```sh -$ bazel build //myapp/my_target --define mode=foo -``` - -Rule implementation code sees `ctx.attr.some_attr` as `[":foo"]`. - -Macros can accept `select()` clauses and pass them through to native -rules. But *they cannot directly manipulate them*. For example, there's no way -for a macro to convert - -```python -select({"foo": "val"}, ...) -``` - -to - -```python -select({"foo": "val_with_suffix"}, ...) -``` - -This is for two reasons. - -First, macros that need to know which path a `select` will choose *cannot work* -because macros are evaluated in Bazel's [loading phase](/run/build#loading), -which occurs before flag values are known. -This is a core Bazel design restriction that's unlikely to change any time soon. - -Second, macros that just need to iterate over *all* `select` paths, while -technically feasible, lack a coherent UI. Further design is necessary to change -this. - -## Bazel query and cquery - -Bazel [`query`](/query/guide) operates over Bazel's -[loading phase](/reference/glossary#loading-phase). -This means it doesn't know what command line flags a target uses since those -flags aren't evaluated until later in the build (in the -[analysis phase](/reference/glossary#analysis-phase)). -So it can't determine which `select()` branches are chosen. - -Bazel [`cquery`](/query/cquery) operates after Bazel's analysis phase, so it has -all this information and can accurately resolve `select()`s. 
- -Consider: - -```python -load("@bazel_skylib//rules:common_settings.bzl", "string_flag") -``` -```python -# myapp/BUILD - -string_flag( - name = "dog_type", - build_setting_default = "cat" -) - -cc_library( - name = "my_lib", - deps = select({ - ":long": [":foo_dep"], - ":short": [":bar_dep"], - }), -) - -config_setting( - name = "long", - flag_values = {":dog_type": "dachshund"}, -) - -config_setting( - name = "short", - flag_values = {":dog_type": "pug"}, -) -``` - -`query` overapproximates `:my_lib`'s dependencies: - -```sh -$ bazel query 'deps(//myapp:my_lib)' -//myapp:my_lib -//myapp:foo_dep -//myapp:bar_dep -``` - -while `cquery` shows its exact dependencies: - -```sh -$ bazel cquery 'deps(//myapp:my_lib)' --//myapp:dog_type=pug -//myapp:my_lib -//myapp:bar_dep -``` - -## FAQ - -### Why doesn't select() work in macros? - -select() *does* work in rules! See [Rules compatibility](#rules-compatibility) for -details. - -The key issue this question usually means is that select() doesn't work in -*macros*. These are different than *rules*. See the -documentation on [rules](/extending/rules) and [macros](/extending/macros) -to understand the difference. -Here's an end-to-end example: - -Define a rule and macro: - -```python -# myapp/defs.bzl - -# Rule implementation: when an attribute is read, all select()s have already -# been resolved. So it looks like a plain old attribute just like any other. -def _impl(ctx): - name = ctx.attr.name - allcaps = ctx.attr.my_config_string.upper() # This works fine on all values. - print("My name is " + name + " with custom message: " + allcaps) - -# Rule declaration: -my_custom_bazel_rule = rule( - implementation = _impl, - attrs = {"my_config_string": attr.string()}, -) - -# Macro declaration: -def my_custom_bazel_macro(name, my_config_string): - allcaps = my_config_string.upper() # This line won't work with select(s). - print("My name is " + name + " with custom message: " + allcaps) -``` - -Instantiate the rule and macro: - -```python -# myapp/BUILD - -load("//myapp:defs.bzl", "my_custom_bazel_rule") -load("//myapp:defs.bzl", "my_custom_bazel_macro") - -my_custom_bazel_rule( - name = "happy_rule", - my_config_string = select({ - "//third_party/bazel_platforms/cpu:x86_32": "first string", - "//third_party/bazel_platforms/cpu:ppc": "second string", - }), -) - -my_custom_bazel_macro( - name = "happy_macro", - my_config_string = "fixed string", -) - -my_custom_bazel_macro( - name = "sad_macro", - my_config_string = select({ - "//third_party/bazel_platforms/cpu:x86_32": "first string", - "//third_party/bazel_platforms/cpu:ppc": "other string", - }), -) -``` - -Building fails because `sad_macro` can't process the `select()`: - -```sh -$ bazel build //myapp:all -ERROR: /myworkspace/myapp/BUILD:17:1: Traceback - (most recent call last): -File "/myworkspace/myapp/BUILD", line 17 -my_custom_bazel_macro(name = "sad_macro", my_config_stri..."})) -File "/myworkspace/myapp/defs.bzl", line 4, in - my_custom_bazel_macro -my_config_string.upper() -type 'select' has no method upper(). -ERROR: error loading package 'myapp': Package 'myapp' contains errors. -``` - -Building succeeds when you comment out `sad_macro`: - -```sh -# Comment out sad_macro so it doesn't mess up the build. -$ bazel build //myapp:all -DEBUG: /myworkspace/myapp/defs.bzl:5:3: My name is happy_macro with custom message: FIXED STRING. -DEBUG: /myworkspace/myapp/hi.bzl:15:3: My name is happy_rule with custom message: FIRST STRING. 
-``` - -This is impossible to change because *by definition* macros are evaluated before -Bazel reads the build's command line flags. That means there isn't enough -information to evaluate select()s. - -Macros can, however, pass `select()`s as opaque blobs to rules: - -```python -# myapp/defs.bzl - -def my_custom_bazel_macro(name, my_config_string): - print("Invoking macro " + name) - my_custom_bazel_rule( - name = name + "_as_target", - my_config_string = my_config_string, - ) -``` - -```sh -$ bazel build //myapp:sad_macro_less_sad -DEBUG: /myworkspace/myapp/defs.bzl:23:3: Invoking macro sad_macro_less_sad. -DEBUG: /myworkspace/myapp/defs.bzl:15:3: My name is sad_macro_less_sad with custom message: FIRST STRING. -``` - -### Why does select() always return true? - -Because *macros* (but not rules) by definition -[can't evaluate `select()`s](#faq-select-macro), any attempt to do so -usually produces an error: - -```sh -ERROR: /myworkspace/myapp/BUILD:17:1: Traceback - (most recent call last): -File "/myworkspace/myapp/BUILD", line 17 -my_custom_bazel_macro(name = "sad_macro", my_config_stri..."})) -File "/myworkspace/myapp/defs.bzl", line 4, in - my_custom_bazel_macro -my_config_string.upper() -type 'select' has no method upper(). -``` - -Booleans are a special case that fail silently, so you should be particularly -vigilant with them: - -```sh -$ cat myapp/defs.bzl -def my_boolean_macro(boolval): - print("TRUE" if boolval else "FALSE") - -$ cat myapp/BUILD -load("//myapp:defs.bzl", "my_boolean_macro") -my_boolean_macro( - boolval = select({ - "//third_party/bazel_platforms/cpu:x86_32": True, - "//third_party/bazel_platforms/cpu:ppc": False, - }), -) - -$ bazel build //myapp:all --cpu=x86 -DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE. -$ bazel build //myapp:all --cpu=ppc -DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE. -``` - -This happens because macros don't understand the contents of `select()`. -So what they're really evaluating is the `select()` object itself. According to -[Pythonic](https://docs.python.org/release/2.5.2/lib/truth.html) design -standards, all objects aside from a very small number of exceptions -automatically return true. - -### Can I read select() like a dict? - -Macros [can't](#faq-select-macro) evaluate `select()`s because macros evaluate before -Bazel knows what the build's command line parameters are. Can they at least read -the `select()`'s dictionary to, for example, add a suffix to each value? - -Conceptually this is possible, but it isn't yet a Bazel feature.
-What you *can* do today is prepare a straight dictionary, then feed it into a -`select()`: - -```sh -$ cat myapp/defs.bzl -def selecty_genrule(name, select_cmd): - for key in select_cmd.keys(): - select_cmd[key] += " WITH SUFFIX" - native.genrule( - name = name, - outs = [name + ".out"], - srcs = [], - cmd = "echo " + select(select_cmd + {"//conditions:default": "default"}) - + " > $@" - ) - -$ cat myapp/BUILD -selecty_genrule( - name = "selecty", - select_cmd = { - "//third_party/bazel_platforms/cpu:x86_32": "x86 mode", - }, -) - -$ bazel build //testapp:selecty --cpu=x86 && cat bazel-genfiles/testapp/selecty.out -x86 mode WITH SUFFIX -``` - -If you'd like to support both `select()` and native types, you can do this: - -```sh -$ cat myapp/defs.bzl -def selecty_genrule(name, select_cmd): - cmd_suffix = "" - if type(select_cmd) == "string": - cmd_suffix = select_cmd + " WITH SUFFIX" - elif type(select_cmd) == "dict": - for key in select_cmd.keys(): - select_cmd[key] += " WITH SUFFIX" - cmd_suffix = select(select_cmd + {"//conditions:default": "default"}) - - native.genrule( - name = name, - outs = [name + ".out"], - srcs = [], - cmd = "echo " + cmd_suffix + "> $@", - ) -``` - -### Why doesn't select() work with bind()? - -First of all, do not use `bind()`. It is deprecated in favor of `alias()`. - -The technical answer is that [`bind()`](/reference/be/workspace#bind) is a repo -rule, not a BUILD rule. - -Repo rules do not have a specific configuration, and aren't evaluated in -the same way as BUILD rules. Therefore, a `select()` in a `bind()` can't -actually evaluate to any specific branch. - -Instead, you should use [`alias()`](/reference/be/general#alias), with a `select()` in -the `actual` attribute, to perform this type of run-time determination. This -works correctly, since `alias()` is a BUILD rule, and is evaluated with a -specific configuration. - -```sh -$ cat WORKSPACE -workspace(name = "myapp") -bind(name = "openssl", actual = "//:ssl") -http_archive(name = "alternative", ...) -http_archive(name = "boringssl", ...) - -$ cat BUILD -config_setting( - name = "alt_ssl", - define_values = { - "ssl_library": "alternative", - }, -) - -alias( - name = "ssl", - actual = select({ - "//:alt_ssl": "@alternative//:ssl", - "//conditions:default": "@boringssl//:ssl", - }), -) -``` - -With this setup, you can pass `--define ssl_library=alternative`, and any target -that depends on either `//:ssl` or `//external:ssl` will see the alternative -located at `@alternative//:ssl`. - -But really, stop using `bind()`. - -### Why doesn't my select() choose what I expect? - -If `//myapp:foo` has a `select()` that doesn't choose the condition you expect, -use [cquery](/query/cquery) and `bazel config` to debug: - -If `//myapp:foo` is the top-level target you're building, run: - -```sh -$ bazel cquery //myapp:foo -//myapp:foo (12e23b9a2b534a) -``` - -If you're building some other target `//bar` that depends on -//myapp:foo somewhere in its subgraph, run: - -```sh -$ bazel cquery 'somepath(//bar, //myapp:foo)' -//bar:bar (3ag3193fee94a2) -//bar:intermediate_dep (12e23b9a2b534a) -//myapp:foo (12e23b9a2b534a) -``` - -The `(12e23b9a2b534a)` next to `//myapp:foo` is a *hash* of the -configuration that resolves `//myapp:foo`'s `select()`. You can inspect its -values with `bazel config`: - -```sh -$ bazel config 12e23b9a2b534a -BuildConfigurationValue 12e23b9a2b534a -Fragment com.google.devtools.build.lib.analysis.config.CoreOptions { - cpu: darwin - compilation_mode: fastbuild - ... 
-}
-Fragment com.google.devtools.build.lib.rules.cpp.CppOptions {
-  linkopt: [-Dfoo=bar]
-  ...
-}
-...
-```
-
-Then compare this output against the settings expected by each `config_setting`.
-
-`//myapp:foo` may exist in different configurations in the same build. See the
-[cquery docs](/query/cquery) for guidance on using `somepath` to get the right
-one.
-
-Caution: To prevent restarting the Bazel server, invoke `bazel config` with the
-same command line flags as the `bazel cquery`. The `config` command relies on
-the configuration nodes from the still-running server of the previous command.
-
-### Why doesn't `select()` work with platforms?
-
-Bazel doesn't support configurable attributes checking whether a given platform
-is the target platform because the semantics are unclear.
-
-For example:
-
-```py
-platform(
-    name = "x86_linux_platform",
-    constraint_values = [
-        "@platforms//cpu:x86",
-        "@platforms//os:linux",
-    ],
-)
-
-cc_library(
-    name = "lib",
-    srcs = [...],
-    linkopts = select({
-        ":x86_linux_platform": ["--enable_x86_optimizations"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-In this `BUILD` file, which `select()` should be used if the target platform
-has both the `@platforms//cpu:x86` and `@platforms//os:linux` constraints, but
-is **not** the `:x86_linux_platform` defined here? The author of the `BUILD`
-file and the user who defined the separate platform may have different ideas.
-
-#### What should I do instead?
-
-Instead, define a `config_setting` that matches **any** platform with
-these constraints:
-
-```py
-config_setting(
-    name = "is_x86_linux",
-    constraint_values = [
-        "@platforms//cpu:x86",
-        "@platforms//os:linux",
-    ],
-)
-
-cc_library(
-    name = "lib",
-    srcs = [...],
-    linkopts = select({
-        ":is_x86_linux": ["--enable_x86_optimizations"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-This approach gives the condition specific semantics, making it clearer to
-users which platforms meet the desired conditions.
-
-#### What if I really, really want to `select` on the platform?
-
-If your build requirements specifically require checking the platform, you
-can flip the value of the `--platforms` flag in a `config_setting`:
-
-```py
-config_setting(
-    name = "is_specific_x86_linux_platform",
-    values = {
-        "platforms": "//package:x86_linux_platform",
-    },
-)
-
-cc_library(
-    name = "lib",
-    srcs = [...],
-    linkopts = select({
-        ":is_specific_x86_linux_platform": ["--enable_x86_optimizations"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-The Bazel team doesn't endorse doing this; it overly constrains your build and
-confuses users when the expected condition does not match.
-
-[BuildSettings]: /extending/config#user-defined-build-settings
diff --git a/8.0.1/configure/best-practices.mdx b/8.0.1/configure/best-practices.mdx
deleted file mode 100644
index abef72e..0000000
--- a/8.0.1/configure/best-practices.mdx
+++ /dev/null
@@ -1,88 +0,0 @@
----
-title: 'Best Practices'
----
-
-
-This page assumes you are familiar with Bazel and provides guidelines and
-advice on structuring your projects to take full advantage of Bazel's features.
-
-The overall goals are:
-
-- To use fine-grained dependencies to allow parallelism and incrementality.
-- To keep dependencies well-encapsulated.
-- To make code well-structured and testable.
-- To create a build configuration that is easy to understand and maintain.
-
-These guidelines are not requirements: few projects will be able to adhere to
-all of them. As the man page for lint says, "A special reward will be presented
-to the first person to produce a real program that produces no errors with
-strict checking." However, incorporating as many of these principles as
-possible should make a project more readable, less error-prone, and faster to
-build.
-
-This page uses the requirement levels described in
-[this RFC](https://www.ietf.org/rfc/rfc2119.txt).
-
-## Running builds and tests
-
-A project should always be able to run `bazel build //...` and
-`bazel test //...` successfully on its stable branch. Targets that are
-necessary but do not build under certain circumstances (such as ones that
-require specific build flags, don't build on a certain platform, or require
-license agreements) should be tagged as specifically as possible (for example,
-"`requires-osx`"). This tagging allows targets to be filtered at a more
-fine-grained level than the "manual" tag and allows someone inspecting the
-`BUILD` file to understand what a target's restrictions are.
-
-## Third-party dependencies
-
-You may declare third-party dependencies in either of two ways:
-
-* Declare them as remote repositories in the `MODULE.bazel` file.
-* Put them in a directory called `third_party/` under your workspace directory.
-
-## Depending on binaries
-
-Everything should be built from source whenever possible. Generally this means
-that, instead of depending on a library `some-library.so`, you'd create a
-`BUILD` file and build `some-library.so` from its sources, then depend on that
-target.
-
-Always building from source ensures that a build is not using a library that
-was built with incompatible flags or a different architecture. There are also
-some features like coverage, static analysis, or dynamic analysis that only
-work on the source.
-
-## Versioning
-
-Prefer building all code from head whenever possible. When versions must be
-used, avoid including the version in the target name (for example, `//guava`,
-not `//guava-20.0`). This naming makes the library easier to update (only one
-target needs to be updated). It's also more resilient to diamond dependency
-issues: if one library depends on `guava-19.0` and one depends on `guava-20.0`,
-you could end up with a library that tries to depend on two different versions.
-If you created a misleading alias to point both targets to one `guava` library,
-then the `BUILD` files are misleading.
-
-## Using the `.bazelrc` file
-
-For project-specific options, use the configuration file
-`workspace/.bazelrc` (see [bazelrc format](/run/bazelrc)).
-
-If you want to support per-user options for your project that you **do not**
-want to check into source control, include the line:
-
-```
-try-import %workspace%/user.bazelrc
-```
-
-(or any other file name) in your `workspace/.bazelrc` and add `user.bazelrc`
-to your `.gitignore`.
-
-## Packages
-
-Every directory that contains buildable files should be a package. If a `BUILD`
-file refers to files in subdirectories (such as `srcs = ["a/b/C.java"]`), it's
-a sign that a `BUILD` file should be added to that subdirectory. The longer
-this structure exists, the more likely circular dependencies will be
-inadvertently created, a target's scope will creep, and an increasing number
-of reverse dependencies will have to be updated.
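-
-For illustration, here is a minimal sketch of this guideline (the package and
-file names are hypothetical). Instead of reaching into a subdirectory:
-
-```python
-# myapp/BUILD (discouraged: srcs cross a directory boundary)
-java_library(
-    name = "app",
-    srcs = ["a/b/C.java"],
-)
-```
-
-prefer giving the subdirectory its own package and depending on it:
-
-```python
-# myapp/a/b/BUILD
-java_library(
-    name = "b",
-    srcs = ["C.java"],
-    visibility = ["//myapp:__pkg__"],
-)
-
-# myapp/BUILD
-java_library(
-    name = "app",
-    deps = ["//myapp/a/b"],
-)
-```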
diff --git a/8.0.1/configure/coverage.mdx b/8.0.1/configure/coverage.mdx
deleted file mode 100644
index 9a50db0..0000000
--- a/8.0.1/configure/coverage.mdx
+++ /dev/null
@@ -1,130 +0,0 @@
----
-title: 'Code coverage with Bazel'
----
-
-
-Bazel features a `coverage` sub-command to produce code coverage
-reports on repositories that can be tested with `bazel coverage`. Due
-to the idiosyncrasies of the various language ecosystems, it is not
-always trivial to make this work for a given project.
-
-This page documents the general process for creating and viewing
-coverage reports, and also features some language-specific notes for
-languages whose configuration is well-known. It is best read by first
-reading [the general section](#creating-a-coverage-report), and then
-reading about the requirements for a specific language. Note also the
-[remote execution section](#remote-execution), which requires some
-additional considerations.
-
-While a lot of customization is possible, this document focuses on
-producing and consuming [`lcov`][lcov] reports, which is currently the
-most well-supported route.
-
-## Creating a coverage report
-
-### Preparation
-
-The basic workflow for creating coverage reports requires the
-following:
-
-- A basic repository with test targets
-- A toolchain with the language-specific code coverage tools installed
-- A correct "instrumentation" configuration
-
-The first two are language-specific and mostly straightforward;
-however, the last can be more difficult for complex projects.
-
-"Instrumentation" in this case refers to the coverage tools that are
-used for a specific target. Bazel allows turning this on for a
-specific subset of files using the
-[`--instrumentation_filter`](/reference/command-line-reference#flag--instrumentation_filter)
-flag, which specifies a filter for targets that are tested with the
-instrumentation enabled. To enable instrumentation for tests, the
-[`--instrument_test_targets`](/reference/command-line-reference#flag--instrument_test_targets)
-flag is required.
-
-By default, Bazel tries to match the target package(s), and prints the
-relevant filter as an `INFO` message.
-
-### Running coverage
-
-To produce a coverage report, use
-[`bazel coverage --combined_report=lcov [target]`](/reference/command-line-reference#coverage).
-This runs the tests for the target, generating coverage reports in the
-lcov format for each file.
-
-Once finished, Bazel runs an action that collects all the produced
-coverage files and merges them into one, which is then finally created
-under `$(bazel info output_path)/_coverage/_coverage_report.dat`.
-
-Coverage reports are also produced if tests fail, though note that this
-does not extend to the failed tests; only passing tests are reported.
-
-### Viewing coverage
-
-The coverage report is only output in the non-human-readable `lcov`
-format. From this, we can use the `genhtml` utility (part of [the lcov
-project][lcov]) to produce a report that can be viewed in a web
-browser:
-
-```console
-genhtml --branch-coverage --output genhtml "$(bazel info output_path)/_coverage/_coverage_report.dat"
-```
-
-Note that `genhtml` reads the source code as well, to annotate missing
-coverage in these files. For this to work, it is expected that
-`genhtml` is executed in the root of the Bazel project.
-
-To view the result, open the `index.html` file produced in the
-`genhtml` directory in any web browser.
-
-For further help and information about the `genhtml` tool, or the
-`lcov` coverage format, see [the lcov project][lcov].
-
-## Remote execution
-
-Running with remote test execution currently has a few caveats:
-
-- The report combination action cannot yet run remotely. This is
-  because Bazel does not consider the coverage output files as part of
-  its graph (see [this issue][remote_report_issue]), and therefore
-  cannot correctly treat them as inputs to the combination action. To
-  work around this, use `--strategy=CoverageReport=local`.
-  - Note: It may be necessary to specify something like
-    `--strategy=CoverageReport=local,remote` instead, if Bazel is set
-    up to try `local,remote`, due to how Bazel resolves strategies.
-- `--remote_download_minimal` and similar flags cannot be used either,
-  as a consequence of the former.
-- Bazel will currently fail to create coverage information if tests
-  have been cached previously. To work around this,
-  `--nocache_test_results` can be set specifically for coverage runs,
-  although this of course incurs a heavy cost in terms of test times.
-- `--experimental_split_coverage_postprocessing` and
-  `--experimental_fetch_all_coverage_outputs`
-
-  Usually coverage is run as part of the test action, so by default we
-  don't get all coverage back as outputs of the remote execution. These
-  flags override the default and obtain the coverage data. See
-  [this issue][split_coverage_issue] for more details.
-
-## Language-specific configuration
-
-### Java
-
-Java should work out-of-the-box with the default configuration. The
-[bazel toolchains][bazel_toolchains] contain everything necessary for
-remote execution, as well, including JUnit.
-
-### Python
-
-See the [`rules_python` coverage docs](https://github.com/bazelbuild/rules_python/blob/main/docs/sphinx/coverage.md)
-for additional steps needed to enable coverage support in Python.
-
-[lcov]: https://github.com/linux-test-project/lcov
-[bazel_toolchains]: https://github.com/bazelbuild/bazel-toolchains
-[remote_report_issue]: https://github.com/bazelbuild/bazel/issues/4685
-[split_coverage_issue]: https://github.com/bazelbuild/bazel/issues/4685
diff --git a/8.0.1/contribute/breaking-changes.mdx b/8.0.1/contribute/breaking-changes.mdx
deleted file mode 100644
index 5dda1b9..0000000
--- a/8.0.1/contribute/breaking-changes.mdx
+++ /dev/null
@@ -1,147 +0,0 @@
----
-title: 'Guide for rolling out breaking changes'
---
-
-
-It is inevitable that we will make breaking changes to Bazel. We will have to
-change our designs and fix the things that do not quite work. However, we need
-to make sure that the community and the Bazel ecosystem can follow along. To
-that end, the Bazel project has adopted a
-[backward compatibility policy](/release/backward-compatibility).
-This document describes the process for Bazel contributors to make a breaking
-change in Bazel to adhere to this policy.
-
-1. Follow the [design document policy](/contribute/design-documents).
-
-1. [File a GitHub issue.](#github-issue)
-
-1. [Implement the change.](#implementation)
-
-1. [Update labels.](#labels)
-
-1. [Update repositories.](#update-repos)
-
-1. [Flip the incompatible flag.](#flip-flag)
-
-## GitHub issue
-
-[File a GitHub issue](https://github.com/bazelbuild/bazel/issues)
-in the Bazel repository.
-[See example.](https://github.com/bazelbuild/bazel/issues/6611)
-
-We recommend that:
-
-* The title starts with the name of the flag (the flag name will start with
-  `incompatible_`).
-
-* You add the label
-  [`incompatible-change`](https://github.com/bazelbuild/bazel/labels/incompatible-change).
-
-* The description contains a description of the change and a link to relevant
-  design documents.
-
-* The description contains a migration recipe, to explain to users how they
-  should update their code. Ideally, when the change is mechanical, include a
-  link to a migration tool.
-
-* The description includes an example of the error message users will get if
-  they don't migrate. This will make the GitHub issue more discoverable from
-  search engines. Make sure that the error message is helpful and actionable.
-  When possible, the error message should include the name of the incompatible
-  flag.
-
-For the migration tool, consider contributing to
-[Buildifier](https://github.com/bazelbuild/buildtools/blob/master/buildifier/README.md).
-It is able to apply automated fixes to `BUILD`, `WORKSPACE`, and `.bzl` files.
-It may also report warnings.
-
-## Implementation
-
-Create a new flag in Bazel. The default value must be false. The help text
-should contain the URL of the GitHub issue. As the flag name starts with
-`incompatible_`, it needs metadata tags:
-
-```java
-    metadataTags = {
-      OptionMetadataTag.INCOMPATIBLE_CHANGE,
-    },
-```
-
-In the commit description, add a brief summary of the flag.
-Also add [`RELNOTES:`](release-notes.md) in the following form:
-`RELNOTES: --incompatible_name_of_flag has been added. See #xyz for details`
-
-The commit should also update the relevant documentation, so that there is no
-window of commits in which the code is inconsistent with the docs. Since our
-documentation is versioned, changes to the docs will not be inadvertently
-released prematurely.
-
-## Labels
-
-Once the commit is merged and the incompatible change is ready to be adopted,
-add the label
-[`migration-ready`](https://github.com/bazelbuild/bazel/labels/migration-ready)
-to the GitHub issue.
-
-If a problem is found with the flag and users are not expected to migrate yet,
-remove the `migration-ready` label.
-
-If you plan to flip the flag in the next major release, add the label
-`breaking-change-X.0` to the issue.
-
-## Updating repositories
-
-Bazel CI tests a list of important projects at
-[Bazel@HEAD + Downstream](https://buildkite.com/bazel/bazel-at-head-plus-downstream).
-Many of them are dependencies of other Bazel projects, therefore it's important
-to migrate them to unblock the migration for the broader community. To monitor
-the migration status of those projects, you can use the
-[`bazelisk-plus-incompatible-flags` pipeline](https://buildkite.com/bazel/bazelisk-plus-incompatible-flags).
-Check how this pipeline works
-[here](https://github.com/bazelbuild/continuous-integration/tree/master/buildkite#checking-incompatible-changes-status-for-downstream-projects).
-
-Our dev support team monitors the
-[`migration-ready`](https://github.com/bazelbuild/bazel/labels/migration-ready)
-label. Once you add this label to the GitHub issue, they will handle the
-following:
-
-1. Create a comment in the GitHub issue to track the list of failures and
-   downstream projects that need to be migrated
-   ([see example](https://github.com/bazelbuild/bazel/issues/17032#issuecomment-1353077469))
-
-1. File GitHub issues to notify the owners of every downstream project broken
-   by your incompatible change
-   ([see example](https://github.com/bazelbuild/intellij/issues/4208))
-
-1. Follow up to make sure all issues are addressed before the target release
-   date.
-
-Migrating projects in the downstream pipeline is NOT entirely the
-responsibility of the incompatible change author, but you can do the following
-to accelerate the migration and make life easier for both Bazel users and the
-Bazel Green Team.
-
-1. Send PRs to fix downstream projects.
-
-1. Reach out to the Bazel community for help on migration (for example, the
-   [Bazel Rules Authors SIG](https://bazel-contrib.github.io/SIG-rules-authors/)).
-
-## Flipping the flag
-
-Before flipping the default value of the flag to true, please make sure that:
-
-* Core repositories in the ecosystem are migrated.
-
-  On the [`bazelisk-plus-incompatible-flags` pipeline](https://buildkite.com/bazel/bazelisk-plus-incompatible-flags),
-  the flag should appear under `The following flags didn't break any passing Bazel team owned/co-owned projects`.
-
-* All issues in the checklist are marked as fixed/closed.
-
-* User concerns and questions have been resolved.
-
-When the flag is ready to flip in Bazel, but blocked on internal migration at
-Google, please consider setting the flag value to false in the internal
-`blazerc` file to unblock the flag flip. By doing this, we can ensure Bazel
-users depend on the new behavior by default as early as possible.
-
-When changing the flag default to true, please:
-
-* Use `RELNOTES[INC]` in the commit description, with the
-  following format:
-  `RELNOTES[INC]: --incompatible_name_of_flag is flipped to true. See #xyz for
-  details`
-  You can include additional information in the rest of the commit description.
-* Use `Fixes #xyz` in the description, so that the GitHub issue gets closed
-  when the commit is merged.
-* Review and update documentation if needed.
-* File a new issue `#abc` to track the removal of the flag.
-
-## Removing the flag
-
-After the flag is flipped at HEAD, it should be removed from Bazel eventually.
-When you plan to remove the incompatible flag:
-
-* Consider leaving more time for users to migrate if it's a major incompatible
-  change. Ideally, the flag should be available in at least one major release.
-* For the commit that removes the flag, use `Fixes #abc` in the description
-  so that the GitHub issue gets closed when the commit is merged.
diff --git a/8.0.1/contribute/codebase.mdx b/8.0.1/contribute/codebase.mdx
deleted file mode 100644
index 8a13611..0000000
--- a/8.0.1/contribute/codebase.mdx
+++ /dev/null
@@ -1,1670 +0,0 @@
----
-title: 'The Bazel codebase'
----
-
-
-This document is a description of the codebase and how Bazel is structured. It
-is intended for people who want to contribute to Bazel, not for end users.
-
-## Introduction
-
-The codebase of Bazel is large (~350 KLOC of production code and ~260 KLOC of
-test code) and no one is familiar with the whole landscape: everyone knows
-their particular valley very well, but few know what lies over the hills in
-every direction.
-
-In order for people midway upon the journey not to find themselves within a
-forest dark with the straightforward pathway being lost, this document tries to
-give an overview of the codebase so that it's easier to get started with
-working on it.
-
-The public version of the source code of Bazel lives on GitHub at
-[github.com/bazelbuild/bazel](http://github.com/bazelbuild/bazel). This is not
-the "source of truth"; it's derived from a Google-internal source tree that
-contains additional functionality that is not useful outside Google. The
-long-term goal is to make GitHub the source of truth.
-
-Contributions are accepted through the regular GitHub pull request mechanism,
-and manually imported by a Googler into the internal source tree, then
-re-exported back out to GitHub.
-
-## Client/server architecture
-
-The bulk of Bazel resides in a server process that stays in RAM between builds.
-This allows Bazel to maintain state between builds.
-
-This is why the Bazel command line has two kinds of options: startup and
-command. In a command line like this:
-
-```
-    bazel --host_jvm_args=-Xmx8G build -c opt //foo:bar
-```
-
-Some options (`--host_jvm_args=`) are before the name of the command to be run
-and some are after (`-c opt`); the former kind is called a "startup option" and
-affects the server process as a whole, whereas the latter kind, the "command
-option", only affects a single command.
-
-Each server instance has a single associated workspace (collection of source
-trees known as "repositories") and each workspace usually has a single active
-server instance. This can be circumvented by specifying a custom output base
-(see the "Directory layout" section for more information).
-
-Bazel is distributed as a single ELF executable that is also a valid .zip file.
-When you type `bazel`, the above ELF executable implemented in C++ (the
-"client") gets control. It sets up an appropriate server process using the
-following steps:
-
-1. Checks whether it has already extracted itself. If not, it does that. This
-   is where the implementation of the server comes from.
-2. Checks whether there is an active server instance that works: it is running,
-   it has the right startup options and uses the right workspace directory. It
-   finds the running server by looking at the directory `$OUTPUT_BASE/server`
-   where there is a lock file with the port the server is listening on.
-3. If needed, kills the old server process.
-4. If needed, starts up a new server process.
-
-After a suitable server process is ready, the command that needs to be run is
-communicated to it over a gRPC interface, then the output of Bazel is piped
-back to the terminal. Only one command can be running at the same time. This is
-implemented using an elaborate locking mechanism with parts in C++ and parts in
-Java. There is some infrastructure for running multiple commands in parallel,
-since the inability to run `bazel version` in parallel with another command
-is somewhat embarrassing. The main blocker is the life cycle of `BlazeModule`s
-and some state in `BlazeRuntime`.
-
-At the end of a command, the Bazel server transmits the exit code the client
-should return. An interesting wrinkle is the implementation of `bazel run`: the
-job of this command is to run something Bazel just built, but it can't do that
-from the server process because it doesn't have a terminal. So instead it tells
-the client what binary it should `exec()` and with what arguments.
-
-When one presses Ctrl-C, the client translates it to a Cancel call on the gRPC
-connection, which tries to terminate the command as soon as possible. After the
-third Ctrl-C, the client sends a SIGKILL to the server instead.
-
-The source code of the client is under `src/main/cpp` and the protocol used to
-communicate with the server is in `src/main/protobuf/command_server.proto`.
-
-The main entry point of the server is `BlazeRuntime.main()` and the gRPC calls
-from the client are handled by `GrpcServerImpl.run()`.
-
-## Directory layout
-
-Bazel creates a somewhat complicated set of directories during a build.
-A full description is available in
-[Output directory layout](/remote/output-directories).
-
-The "main repo" is the source tree Bazel is run in. It usually corresponds to
-something you checked out from source control. The root of this directory is
-known as the "workspace root".
-
-Bazel puts all of its data under the "output user root". This is usually
-`$HOME/.cache/bazel/_bazel_${USER}`, but can be overridden using the
-`--output_user_root` startup option.
-
-The "install base" is where Bazel is extracted to. This is done automatically
-and each Bazel version gets a subdirectory based on its checksum under the
-install base. It's at `$OUTPUT_USER_ROOT/install` by default and can be changed
-using the `--install_base` command line option.
-
-The "output base" is the place where the Bazel instance attached to a specific
-workspace writes to. Each output base has at most one Bazel server instance
-running at any time. It's usually at `$OUTPUT_USER_ROOT/<MD5 digest of the
-path to the workspace>`. It can be changed using the `--output_base` startup
-option, which is, among other things, useful for getting around the limitation
-that only one Bazel instance can be running in any workspace at any given time.
-
-The output directory contains, among other things:
-
-* The fetched external repositories at `$OUTPUT_BASE/external`.
-* The exec root, a directory that contains symlinks to all the source
-  code for the current build. It's located at `$OUTPUT_BASE/execroot`. During
-  the build, the working directory is `$EXECROOT/<name of the main
-  repository>`. We are planning to change this to `$EXECROOT`, although it's a
-  long term plan because it's a very incompatible change.
-* Files built during the build.
-
-## The process of executing a command
-
-Once the Bazel server gets control and is informed about a command it needs to
-execute, the following sequence of events happens:
-
-1. `BlazeCommandDispatcher` is informed about the new request. It decides
-   whether the command needs a workspace to run in (almost every command except
-   for ones that don't have anything to do with source code, such as version or
-   help) and whether another command is running.
-
-2. The right command is found. Each command must implement the interface
-   `BlazeCommand` and must have the `@Command` annotation (this is a bit of an
-   antipattern; it would be nice if all the metadata a command needs were
-   described by methods on `BlazeCommand`).
-
-3. The command line options are parsed. Each command has different command line
-   options, which are described in the `@Command` annotation.
-
-4. An event bus is created. The event bus is a stream for events that happen
-   during the build. Some of these are exported to outside of Bazel under the
-   aegis of the Build Event Protocol in order to tell the world how the build
-   goes.
-
-5. The command gets control. The most interesting commands are those that run a
-   build: build, test, run, coverage and so on: this functionality is
-   implemented by `BuildTool`.
-
-6. The set of target patterns on the command line is parsed and wildcards like
-   `//pkg:all` and `//pkg/...` are resolved. This is implemented in
-   `AnalysisPhaseRunner.evaluateTargetPatterns()` and reified in Skyframe as
-   `TargetPatternPhaseValue`.
-
-7. The loading/analysis phase is run to produce the action graph (a directed
-   acyclic graph of commands that need to be executed for the build).
-
-8. The execution phase is run. This means that every action required to build
-   the top-level targets that are requested is run.
-
-## Command line options
-
-The command line options for a Bazel invocation are described in an
-`OptionsParsingResult` object, which in turn contains a map from "option
-classes" to the values of the options. An "option class" is a subclass of
-`OptionsBase` and groups command line options together that are related to each
-other. For example:
-
-1. Options related to a programming language (`CppOptions` or `JavaOptions`).
-   These should be a subclass of `FragmentOptions` and are eventually wrapped
-   into a `BuildOptions` object.
-2. Options related to the way Bazel executes actions (`ExecutionOptions`).
-
-These options are designed to be consumed in the analysis phase (either
-through `RuleContext.getFragment()` in Java or `ctx.fragments` in Starlark).
-Some of them (for example, whether to do C++ include scanning or not) are read
-in the execution phase, but that always requires explicit plumbing since
-`BuildConfiguration` is not available then. For more information, see the
-section "Configurations".
-
-**WARNING:** We like to pretend that `OptionsBase` instances are immutable and
-use them that way (such as part of `SkyKey`s). This is not the case and
-modifying them is a really good way to break Bazel in subtle ways that are hard
-to debug. Unfortunately, making them actually immutable is a large endeavor.
-(Modifying a `FragmentOptions` immediately after construction before anyone
-else gets a chance to keep a reference to it and before `equals()` or
-`hashCode()` is called on it is okay.)
-
-Bazel learns about option classes in the following ways:
-
-1. Some are hard-wired into Bazel (`CommonCommandOptions`)
-2. From the `@Command` annotation on each Bazel command
-3. From `ConfiguredRuleClassProvider` (these are command line options related
-   to individual programming languages)
-4. Starlark rules can also define their own options (see
-   [here](/extending/config))
-
-Each option (excluding Starlark-defined options) is a member variable of a
-`FragmentOptions` subclass that has the `@Option` annotation, which specifies
-the name and the type of the command line option along with some help text.
-
-The Java type of the value of a command line option is usually something simple
-(a string, an integer, a Boolean, a label, etc.). However, we also support
-options of more complicated types; in this case, the job of converting from the
-command line string to the data type falls to an implementation of
-`com.google.devtools.common.options.Converter`.
-
-## The source tree, as seen by Bazel
-
-Bazel is in the business of building software, which happens by reading and
-interpreting the source code. The totality of the source code Bazel operates on
-is called "the workspace" and it is structured into repositories, packages and
-rules.
-
-### Repositories
-
-A "repository" is a source tree on which a developer works; it usually
-represents a single project. Bazel's ancestor, Blaze, operated on a monorepo,
-that is, a single source tree that contains all source code used to run the
-build. Bazel, in contrast, supports projects whose source code spans multiple
-repositories. The repository from which Bazel is invoked is called the "main
-repository", the others are called "external repositories".
-
-A repository is marked by a repo boundary file (`MODULE.bazel`, `REPO.bazel`,
-or in legacy contexts, `WORKSPACE` or `WORKSPACE.bazel`) in its root directory.
-The main repo is the source tree where you're invoking Bazel from.
-External repos are defined in various ways; see
-[external dependencies overview](/external/overview) for more information.
-
-Code of external repositories is symlinked or downloaded under
-`$OUTPUT_BASE/external`.
-
-When running the build, the whole source tree needs to be pieced together; this
-is done by `SymlinkForest`, which symlinks every package in the main repository
-to `$EXECROOT` and every external repository to either `$EXECROOT/external` or
-`$EXECROOT/..`.
-
-### Packages
-
-Every repository is composed of packages: collections of related files and a
-specification of their dependencies. These are specified by a file called
-`BUILD` or `BUILD.bazel`. If both exist, Bazel prefers `BUILD.bazel`; the
-reason why `BUILD` files are still accepted is that Bazel's ancestor, Blaze,
-used this file name. However, it turned out to be a commonly used path segment,
-especially on Windows, where file names are case-insensitive.
-
-Packages are independent of each other: changes to the `BUILD` file of a
-package cannot cause other packages to change. The addition or removal of
-`BUILD` files _can_ change other packages, since recursive globs stop at
-package boundaries and thus the presence of a `BUILD` file stops the recursion.
-
-The evaluation of a `BUILD` file is called "package loading". It's implemented
-in the class `PackageFactory`, works by calling the Starlark interpreter and
-requires knowledge of the set of available rule classes. The result of package
-loading is a `Package` object. It's mostly a map from a string (the name of a
-target) to the target itself.
-
-A large chunk of complexity during package loading is globbing: Bazel does not
-require every source file to be explicitly listed and instead can run globs
-(such as `glob(["**/*.java"])`). Unlike the shell, it supports recursive globs
-that descend into subdirectories (but not into subpackages). This requires
-access to the file system and since that can be slow, we implement all sorts
-of tricks to make it run in parallel and as efficiently as possible.
-
-Globbing is implemented in the following classes:
-
-* `LegacyGlobber`, a fast and blissfully Skyframe-unaware globber
-* `SkyframeHybridGlobber`, a version that uses Skyframe and falls back to
-  the legacy globber in order to avoid "Skyframe restarts" (described below)
-
-The `Package` class itself contains some members that are exclusively used to
-parse the "external" package (related to external dependencies) and which do
-not make sense for real packages. This is a design flaw because objects
-describing regular packages should not contain fields that describe something
-else. These include:
-
-* The repository mappings
-* The registered toolchains
-* The registered execution platforms
-
-Ideally, there would be more separation between parsing the "external" package
-from parsing regular packages so that `Package` does not need to cater for the
-needs of both. This is unfortunately difficult to do because the two are
-intertwined quite deeply.
-
-### Labels, Targets, and Rules
-
-Packages are composed of targets, which have the following types:
-
-1. **Files:** things that are either the input or the output of the build. In
-   Bazel parlance, we call them _artifacts_ (discussed elsewhere). Not all
-   files created during the build are targets; it's common for an output of
-   Bazel not to have an associated label.
-2. **Rules:** these describe steps to derive their outputs from their inputs.
-   They are generally associated with a programming language (such as
-   `cc_library`, `java_library` or `py_library`), but there are some
-   language-agnostic ones (such as `genrule` or `filegroup`).
-3. **Package groups:** discussed in the [Visibility](#visibility) section.
-
-The name of a target is called a _Label_. The syntax of labels is
-`@repo//pac/kage:name`, where `repo` is the name of the repository the Label
-is in, `pac/kage` is the directory its `BUILD` file is in and `name` is the
-path of the file (if the label refers to a source file) relative to the
-directory of the package. When referring to a target on the command line, some
-parts of the label can be omitted:
-
-1. If the repository is omitted, the label is taken to be in the main
-   repository.
-2. If the package part is omitted (such as `name` or `:name`), the label is
-   taken to be in the package of the current working directory (relative paths
-   containing uplevel references (..) are not allowed).
-
-A kind of rule (such as "C++ library") is called a "rule class". Rule classes
-may be implemented either in Starlark (the `rule()` function) or in Java
-(so-called "native rules", type `RuleClass`). In the long term, every
-language-specific rule will be implemented in Starlark, but some legacy rule
-families (such as Java or C++) are still in Java for the time being.
-
-Starlark rule classes need to be imported at the beginning of `BUILD` files
-using the `load()` statement, whereas Java rule classes are "innately" known by
-Bazel, by virtue of being registered with the `ConfiguredRuleClassProvider`.
-
-Rule classes contain information such as:
-
-1. Their attributes (such as `srcs`, `deps`): their types, default values,
-   constraints, etc.
-2. The configuration transitions and aspects attached to each attribute, if any
-3. The implementation of the rule
-4. The transitive info providers the rule "usually" creates
-
-**Terminology note:** In the codebase, we often use "Rule" to mean the target
-created by a rule class. But in Starlark and in user-facing documentation,
-"Rule" should be used exclusively to refer to the rule class itself; the target
-is just a "target". Also note that despite `RuleClass` having "class" in its
-name, there is no Java inheritance relationship between a rule class and
-targets of that type.
-
-## Skyframe
-
-The evaluation framework underlying Bazel is called Skyframe. Its model is that
-everything that needs to be built during a build is organized into a directed
-acyclic graph with edges pointing from each piece of data to its dependencies,
-that is, the other pieces of data that need to be known to construct it.
-
-The nodes in the graph are called `SkyValue`s and their names are called
-`SkyKey`s. Both are deeply immutable; only immutable objects should be
-reachable from them. This invariant almost always holds, and in case it doesn't
-(such as for the individual options classes `BuildOptions`, which is a member
-of `BuildConfigurationValue` and its `SkyKey`) we try really hard not to change
-them or to change them in only ways that are not observable from the outside.
-From this it follows that everything that is computed within Skyframe (such as
-configured targets) must also be immutable.
-
-The most convenient way to observe the Skyframe graph is to run
-`bazel dump --skyframe=deps`, which dumps the graph, one `SkyValue` per line.
-It's best to do it for tiny builds, since it can get pretty large.
-
-Skyframe lives in the `com.google.devtools.build.skyframe` package.
-The similarly-named package `com.google.devtools.build.lib.skyframe` contains
-the implementation of Bazel on top of Skyframe. More information about
-Skyframe is available [here](/reference/skyframe).
-
-To evaluate a given `SkyKey` into a `SkyValue`, Skyframe will invoke the
-`SkyFunction` corresponding to the type of the key. During the function's
-evaluation, it may request other dependencies from Skyframe by calling the
-various overloads of `SkyFunction.Environment.getValue()`. This has the
-side-effect of registering those dependencies into Skyframe's internal graph,
-so that Skyframe will know to re-evaluate the function when any of its
-dependencies change. In other words, Skyframe's caching and incremental
-computation work at the granularity of `SkyFunction`s and `SkyValue`s.
-
-Whenever a `SkyFunction` requests a dependency that is unavailable,
-`getValue()` will return null. The function should then yield control back to
-Skyframe by itself returning null. At some later point, Skyframe will evaluate
-the unavailable dependency, then restart the function from the beginning; only
-this time the `getValue()` call will succeed with a non-null result.
-
-A consequence of this is that any computation performed inside the
-`SkyFunction` prior to the restart must be repeated. But this does not include
-work done to evaluate dependency `SkyValue`s, which are cached. Therefore, we
-commonly work around this issue by:
-
-1. Declaring dependencies in batches (by using `getValuesAndExceptions()`) to
-   limit the number of restarts.
-2. Breaking up a `SkyValue` into separate pieces computed by different
-   `SkyFunction`s, so that they can be computed and cached independently. This
-   should be done strategically, since it has the potential to increase memory
-   usage.
-3. Storing state between restarts, either using
-   `SkyFunction.Environment.getState()`, or keeping an ad hoc static cache
-   "behind the back of Skyframe". With complex SkyFunctions, state management
-   between restarts can get tricky, so
-   [`StateMachine`s](/contribute/statemachine-guide) were introduced for a
-   structured approach to logical concurrency, including hooks to suspend and
-   resume hierarchical computations within a `SkyFunction`. Example:
-   [`DependencyResolver#computeDependencies`][statemachine_example]
-   uses a `StateMachine` with `getState()` to compute the potentially huge set
-   of direct dependencies of a configured target, which otherwise can result in
-   expensive restarts.
-
-[statemachine_example]: https://developers.google.com/devsite/reference/markdown/links#reference_links
-
-Fundamentally, Bazel needs these types of workarounds because hundreds of
-thousands of in-flight Skyframe nodes is common, and Java's support for
-lightweight threads [does not outperform][virtual_threads] the
-`StateMachine` implementation as of 2023.
-
-[virtual_threads]: /contribute/statemachine-guide#epilogue_eventually_removing_callbacks
-
-## Starlark
-
-Starlark is the domain-specific language people use to configure and extend
-Bazel. It's conceived as a restricted subset of Python that has far fewer
-types, more restrictions on control flow, and most importantly, strong
-immutability guarantees to enable concurrent reads. It is not Turing-complete,
-which discourages some (but not all) users from trying to accomplish general
-programming tasks within the language.
-
-Starlark is implemented in the `net.starlark.java` package.
-It also has an independent Go implementation
-[here](https://github.com/google/starlark-go).
-The Java implementation used in Bazel is currently an interpreter.
-
-Starlark is used in several contexts, including:
-
-1. **`BUILD` files.** This is where new build targets are defined. Starlark
-   code running in this context only has access to the contents of the `BUILD`
-   file itself and `.bzl` files loaded by it.
-2. **The `MODULE.bazel` file.** This is where external dependencies are
-   defined. Starlark code running in this context only has very limited access
-   to a few predefined directives.
-3. **`.bzl` files.** This is where new build rules, repo rules, and module
-   extensions are defined. Starlark code here can define new functions and
-   load from other `.bzl` files.
-
-The dialects available for `BUILD` and `.bzl` files are slightly different
-because they express different things. A list of differences is available
-[here](/rules/language#differences-between-build-and-bzl-files).
-
-More information about Starlark is available [here](/rules/language).
-
-## The loading/analysis phase
-
-The loading/analysis phase is where Bazel determines what actions are needed
-to build a particular rule. Its basic unit is a "configured target", which is,
-quite sensibly, a (target, configuration) pair.
-
-It's called the "loading/analysis phase" because it can be split into two
-distinct parts, which used to be serialized but can now overlap in time:
-
-1. Loading packages, that is, turning `BUILD` files into the `Package` objects
-   that represent them
-2. Analyzing configured targets, that is, running the implementation of the
-   rules to produce the action graph
-
-Each configured target in the transitive closure of the configured targets
-requested on the command line must be analyzed bottom-up; that is, leaf nodes
-first, then up to the ones on the command line. The inputs to the analysis of
-a single configured target are:
-
-1. **The configuration** ("how" to build that rule; for example, the target
-   platform but also things like command line options the user wants to be
-   passed to the C++ compiler).
-2. **The direct dependencies.** Their transitive info providers are available
-   to the rule being analyzed. They are called like that because they provide
-   a "roll-up" of the information in the transitive closure of the configured
-   target, such as all the .jar files on the classpath or all the .o files
-   that need to be linked into a C++ binary.
-3. **The target itself.** This is the result of loading the package the target
-   is in. For rules, this includes its attributes, which is usually what
-   matters.
-4. **The implementation of the configured target.** For rules, this can either
-   be in Starlark or in Java. All non-rule configured targets are implemented
-   in Java.
-
-The output of analyzing a configured target is:
-
-1. The transitive info providers that configured targets that depend on it can
-   access
-2. The artifacts it can create and the actions that produce them.
-
-The API offered to Java rules is `RuleContext`, which is the equivalent of the
-`ctx` argument of Starlark rules. Its API is more powerful, but at the same
-time, it's easier to do Bad Things™, for example to write code whose time or
-space complexity is quadratic (or worse), to make the Bazel server crash with
-a Java exception or to violate invariants (such as by inadvertently modifying
-an `Options` instance or by making a configured target mutable).
-
-The algorithm that determines the direct dependencies of a configured target
-lives in `DependencyResolver.dependentNodeMap()`.
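-
-To make the shape of these inputs and outputs concrete, here is a minimal
-sketch of a Starlark rule implementation. It is illustrative only: `MyInfo`
-and `my_library` are hypothetical names, while `provider()`, `rule()`,
-`ctx.actions` and `DefaultInfo` are the real Starlark APIs:
-
-```python
-# A hypothetical transitive info provider.
-MyInfo = provider(fields = ["transitive_sources"])
-
-def _my_library_impl(ctx):
-    # Input 3, the target itself: attributes are available on ctx.attr.
-    # Input 2, the direct dependencies: their providers are readable here.
-    deps = [dep[MyInfo].transitive_sources for dep in ctx.attr.deps]
-
-    # Output 2: an artifact and the action that produces it.
-    out = ctx.actions.declare_file(ctx.label.name + ".txt")
-    ctx.actions.write(out, "analyzed %s" % ctx.label)
-
-    # Output 1: transitive info providers for reverse dependencies,
-    # rolled up with this target's own sources.
-    return [
-        DefaultInfo(files = depset([out])),
-        MyInfo(transitive_sources = depset(ctx.files.srcs, transitive = deps)),
-    ]
-
-my_library = rule(
-    implementation = _my_library_impl,
-    attrs = {
-        "srcs": attr.label_list(allow_files = True),
-        "deps": attr.label_list(providers = [MyInfo]),
-    },
-)
-```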
-
-### Configurations
-
-Configurations are the "how" of building a target: for what platform, with
-what command line options, etc.
-
-The same target can be built for multiple configurations in the same build.
-This is useful, for example, when the same code is used for a tool that's run
-during the build and for the target code and we are cross-compiling, or when
-we are building a fat Android app (one that contains native code for multiple
-CPU architectures).
-
-Conceptually, the configuration is a `BuildOptions` instance. However, in
-practice, `BuildOptions` is wrapped by `BuildConfiguration`, which provides
-sundry additional pieces of functionality. It propagates from the top of the
-dependency graph to the bottom. If it changes, the build needs to be
-re-analyzed.
-
-This results in anomalies like having to re-analyze the whole build if, for
-example, the number of requested test runs changes, even though that only
-affects test targets (we have plans to "trim" configurations so that this is
-not the case, but it's not ready yet).
-
-When a rule implementation needs part of the configuration, it needs to
-declare it in its definition using
-`RuleClass.Builder.requiresConfigurationFragments()`. This is both to avoid
-mistakes (such as Python rules using the Java fragment) and to facilitate
-configuration trimming so that, for example, if Python options change, C++
-targets don't need to be re-analyzed.
-
-The configuration of a rule is not necessarily the same as that of its
-"parent" rule. The process of changing the configuration in a dependency edge
-is called a "configuration transition". It can happen in two places:
-
-1. On a dependency edge. These transitions are specified in
-   `Attribute.Builder.cfg()` and are functions from a `Rule` (where the
-   transition happens) and a `BuildOptions` (the original configuration) to
-   one or more `BuildOptions` (the output configuration).
-2. On any incoming edge to a configured target. These are specified in
-   `RuleClass.Builder.cfg()`.
-
-The relevant classes are `TransitionFactory` and `ConfigurationTransition`.
-
-Configuration transitions are used, for example:
-
-1. To declare that a particular dependency is used during the build and it
-   should thus be built in the execution architecture
-2. To declare that a particular dependency must be built for multiple
-   architectures (such as for native code in fat Android APKs)
-
-If a configuration transition results in multiple configurations, it's called
-a _split transition_.
-
-Configuration transitions can also be implemented in Starlark (documentation
-[here](/extending/config)).
-
-### Transitive info providers
-
-Transitive info providers are a way (and the _only_ way) for configured
-targets to learn things about other configured targets that they depend on,
-and the only way to tell things about themselves to other configured targets
-that depend on them. The reason why "transitive" is in their name is that this
-is usually some sort of roll-up of the transitive closure of a configured
-target.
-
-There is generally a 1:1 correspondence between Java transitive info providers
-and Starlark ones (the exception is `DefaultInfo` which is an amalgamation of
-`FileProvider`, `FilesToRunProvider` and `RunfilesProvider` because that API
-was deemed to be more Starlark-ish than a direct transliteration of the Java
-one). Their key is one of the following things:
-
-1. A Java Class object. This is only available for providers that are not
-   accessible from Starlark.
-   These providers are a subclass of `TransitiveInfoProvider`.
-2. A string. This is legacy and heavily discouraged since it's susceptible to
-   name clashes. Such transitive info providers are direct subclasses of
-   `build.lib.packages.Info`.
-3. A provider symbol. This can be created from Starlark using the `provider()`
-   function and is the recommended way to create new providers. The symbol is
-   represented by a `Provider.Key` instance in Java.
-
-New providers in Java should be implemented using `BuiltinProvider`.
-`NativeProvider` is deprecated (we haven't had time to remove it yet) and
-`TransitiveInfoProvider` subclasses cannot be accessed from Starlark.
-
-### Configured targets
-
-Configured targets are implemented as `RuleConfiguredTargetFactory`. There is
-a subclass for each rule class implemented in Java. Starlark configured
-targets are created through `StarlarkRuleConfiguredTargetUtil.buildRule()`.
-
-Configured target factories should use `RuleConfiguredTargetBuilder` to
-construct their return value. It consists of the following things:
-
-1. Their `filesToBuild`, the hazy concept of "the set of files this rule
-   represents." These are the files that get built when the configured target
-   is on the command line or in the `srcs` of a genrule.
-2. Their runfiles, regular and data.
-3. Their output groups. These are various "other sets of files" the rule can
-   build. They can be accessed using the `output_group` attribute of the
-   `filegroup` rule in `BUILD` files and using the `OutputGroupInfo` provider
-   in Java.
-
-### Runfiles
-
-Some binaries need data files to run. A prominent example is tests that need
-input files. This is represented in Bazel by the concept of "runfiles". A
-"runfiles tree" is a directory tree of the data files for a particular binary.
-It is created in the file system as a symlink tree with individual symlinks
-pointing to the files in the source or output trees.
-
-A set of runfiles is represented as a `Runfiles` instance. It is conceptually
-a map from the path of a file in the runfiles tree to the `Artifact` instance
-that represents it. It's a little more complicated than a single `Map` for two
-reasons:
-
-* Most of the time, the runfiles path of a file is the same as its execpath.
-  We use this to save some RAM.
-* There are various legacy kinds of entries in runfiles trees, which also need
-  to be represented.
-
-Runfiles are collected using `RunfilesProvider`: an instance of this class
-represents the runfiles a configured target (such as a library) and its
-transitive closure needs, and they are gathered like a nested set (in fact,
-they are implemented using nested sets under the covers): each target unions
-the runfiles of its dependencies, adds some of its own, then sends the
-resulting set upwards in the dependency graph. A `RunfilesProvider` instance
-contains two `Runfiles` instances, one for when the rule is depended on
-through the "data" attribute and one for every other kind of incoming
-dependency. This is because a target sometimes presents different runfiles
-when depended on through a data attribute than otherwise. This is undesired
-legacy behavior that we haven't gotten around to removing yet.
-
-Runfiles of binaries are represented as an instance of `RunfilesSupport`. This
-is different from `Runfiles` because `RunfilesSupport` has the capability of
-actually being built (unlike `Runfiles`, which is just a mapping).
-This necessitates the following additional components:
-
-* **The input runfiles manifest.** This is a serialized description of the
-  runfiles tree. It is used as a proxy for the contents of the runfiles tree
-  and Bazel assumes that the runfiles tree changes if and only if the contents
-  of the manifest change.
-* **The output runfiles manifest.** This is used by runtime libraries that
-  handle runfiles trees, notably on Windows, which sometimes doesn't support
-  symbolic links.
-* **The runfiles middleman.** In order for a runfiles tree to exist, one needs
-  to build the symlink tree and the artifact the symlinks point to. In order
-  to decrease the number of dependency edges, the runfiles middleman can be
-  used to represent all these.
-* **Command line arguments** for running the binary whose runfiles the
-  `RunfilesSupport` object represents.
-
-### Aspects
-
-Aspects are a way to "propagate computation down the dependency graph". They
-are described for users of Bazel [here](/extending/aspects). A good motivating
-example is protocol buffers: a `proto_library` rule should not know about any
-particular language, but building the implementation of a protocol buffer
-message (the "basic unit" of protocol buffers) in any programming language
-should be coupled to the `proto_library` rule so that if two targets in the
-same language depend on the same protocol buffer, it gets built only once.
-
-Just like configured targets, they are represented in Skyframe as a `SkyValue`
-and the way they are constructed is very similar to how configured targets are
-built: they have a factory class called `ConfiguredAspectFactory` that has
-access to a `RuleContext`, but unlike configured target factories, it also
-knows about the configured target it is attached to and its providers.
-
-The set of aspects propagated down the dependency graph is specified for each
-attribute using the `Attribute.Builder.aspects()` function. There are a few
-confusingly-named classes that participate in the process:
-
-1. `AspectClass` is the implementation of the aspect. It can be either in Java
-   (in which case it's a subclass) or in Starlark (in which case it's an
-   instance of `StarlarkAspectClass`). It's analogous to
-   `RuleConfiguredTargetFactory`.
-2. `AspectDefinition` is the definition of the aspect; it includes the
-   providers it requires, the providers it provides and contains a reference
-   to its implementation, such as the appropriate `AspectClass` instance. It's
-   analogous to `RuleClass`.
-3. `AspectParameters` is a way to parametrize an aspect that is propagated
-   down the dependency graph. It's currently a string-to-string map. A good
-   example of why it's useful is protocol buffers: if a language has multiple
-   APIs, the information as to which API the protocol buffers should be built
-   for should be propagated down the dependency graph.
-4. `Aspect` represents all the data that's needed to compute an aspect that
-   propagates down the dependency graph. It consists of the aspect class, its
-   definition and its parameters.
-5. `RuleAspect` is the function that determines which aspects a particular
-   rule should propagate. It's a `Rule` -> `Aspect` function.
-
-A somewhat unexpected complication is that aspects can attach to other
-aspects; for example, an aspect collecting the classpath for a Java IDE will
-probably want to know about all the .jar files on the classpath, but some of
-them are protocol buffers.
-In that case, the IDE aspect will want to attach to the (`proto_library` rule
-+ Java proto aspect) pair.
-
-The complexity of aspects on aspects is captured in the class
-`AspectCollection`.
-
-### Platforms and toolchains
-
-Bazel supports multi-platform builds, that is, builds where there may be
-multiple architectures where build actions run and multiple architectures for
-which code is built. These architectures are referred to as _platforms_ in
-Bazel parlance (full documentation [here](/extending/platforms)).
-
-A platform is described by a key-value mapping from _constraint settings_
-(such as the concept of "CPU architecture") to _constraint values_ (such as a
-particular CPU like `x86_64`). We have a "dictionary" of the most commonly
-used constraint settings and values in the `@platforms` repository.
-
-The concept of _toolchain_ comes from the fact that depending on what
-platforms the build is running on and what platforms are targeted, one may
-need to use different compilers; for example, a particular C++ toolchain may
-run on a specific OS and be able to target some other OSes. Bazel must
-determine the C++ compiler that is used based on the execution and target
-platforms set for the build (documentation for toolchains
-[here](/extending/toolchains)).
-
-To make this possible, toolchains are annotated with the set of execution and
-target platform constraints they support, and the definition of a toolchain is
-split into two parts:
-
-1. A `toolchain()` rule that describes the set of execution and target
-   constraints a toolchain supports and tells what kind (such as C++ or Java)
-   of toolchain it is (the latter is represented by the `toolchain_type()`
-   rule)
-2. A language-specific rule that describes the actual toolchain (such as
-   `cc_toolchain()`)
-
-This is done in this way because we need to know the constraints for every
-toolchain in order to do toolchain resolution, and language-specific
-`*_toolchain()` rules contain much more information than that, so they take
-more time to load.
-
-Execution platforms are specified in one of the following ways:
-
-1. In the MODULE.bazel file using the `register_execution_platforms()`
-   function
-2. On the command line using the `--extra_execution_platforms` command line
-   option
-
-The set of available execution platforms is computed in
-`RegisteredExecutionPlatformsFunction`.
-
-The target platform for a configured target is determined by
-`PlatformOptions.computeTargetPlatform()`. It's a list of platforms because we
-eventually want to support multiple target platforms, but it's not implemented
-yet.
-
-The set of toolchains to be used for a configured target is determined by
-`ToolchainResolutionFunction`. It is a function of:
-
-* The set of registered toolchains (in the MODULE.bazel file and the
-  configuration)
-* The desired execution and target platforms (in the configuration)
-* The set of toolchain types that are required by the configured target (in
-  `UnloadedToolchainContextKey`)
-* The set of execution platform constraints of the configured target (the
-  `exec_compatible_with` attribute) and the configuration
-  (`--experimental_add_exec_constraints_to_targets`), in
-  `UnloadedToolchainContextKey`
-
-Its result is an `UnloadedToolchainContext`, which is essentially a map from
-toolchain type (represented as a `ToolchainTypeInfo` instance) to the label of
-the selected toolchain. It's called "unloaded" because it does not contain the
-toolchains themselves, only their labels.
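-
-For reference, here is a minimal sketch of the two-part definition described
-above. `toolchain_type()` and `toolchain()` are the real rules; the
-`my_toolchain` rule, its `compiler_path` attribute and all labels are
-hypothetical:
-
-```python
-toolchain_type(name = "my_toolchain_type")
-
-# The language-specific part: carries the full toolchain payload and is
-# therefore more expensive to load.
-my_toolchain(
-    name = "my_linux_toolchain_impl",
-    compiler_path = "/usr/bin/my-compiler",
-)
-
-# The toolchain() part: only the type and the constraints, which is all
-# that toolchain resolution needs, so it's cheap to load.
-toolchain(
-    name = "my_linux_toolchain",
-    toolchain = ":my_linux_toolchain_impl",
-    toolchain_type = ":my_toolchain_type",
-    exec_compatible_with = ["@platforms//os:linux"],
-    target_compatible_with = ["@platforms//os:linux"],
-)
-```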
- -Then the toolchains are actually loaded using `ResolvedToolchainContext.load()` -and used by the implementation of the configured target that requested them. - -We also have a legacy system that relies on there being one single "host" -configuration and target configurations being represented by various -configuration flags, such as `--cpu` . We are gradually transitioning to the above -system. In order to handle cases where people rely on the legacy configuration -values, we have implemented -[platform mappings](https://docs.google.com/document/d/1Vg_tPgiZbSrvXcJ403vZVAGlsWhH9BUDrAxMOYnO0Ls) -to translate between the legacy flags and the new-style platform constraints. -Their code is in `PlatformMappingFunction` and uses a non-Starlark "little -language". - -### Constraints - -Sometimes one wants to designate a target as being compatible with only a few -platforms. Bazel has (unfortunately) multiple mechanisms to achieve this end: - -* Rule-specific constraints -* `environment_group()` / `environment()` -* Platform constraints - -Rule-specific constraints are mostly used within Google for Java rules; they are -on their way out and they are not available in Bazel, but the source code may -contain references to it. The attribute that governs this is called -`constraints=` . - -#### environment_group() and environment() - -These rules are a legacy mechanism and are not widely used. - -All build rules can declare which "environments" they can be built for, where an -"environment" is an instance of the `environment()` rule. - -There are various ways supported environments can be specified for a rule: - -1. Through the `restricted_to=` attribute. This is the most direct form of - specification; it declares the exact set of environments the rule supports. -2. Through the `compatible_with=` attribute. This declares environments a rule - supports in addition to "standard" environments that are supported by - default. -3. Through the package-level attributes `default_restricted_to=` and - `default_compatible_with=`. -4. Through default specifications in `environment_group()` rules. Every - environment belongs to a group of thematically related peers (such as "CPU - architectures", "JDK versions" or "mobile operating systems"). The - definition of an environment group includes which of these environments - should be supported by "default" if not otherwise specified by the - `restricted_to=` / `environment()` attributes. A rule with no such - attributes inherits all defaults. -5. Through a rule class default. This overrides global defaults for all - instances of the given rule class. This can be used, for example, to make - all `*_test` rules testable without each instance having to explicitly - declare this capability. - -`environment()` is implemented as a regular rule whereas `environment_group()` -is both a subclass of `Target` but not `Rule` (`EnvironmentGroup`) and a -function that is available by default from Starlark -(`StarlarkLibrary.environmentGroup()`) which eventually creates an eponymous -target. This is to avoid a cyclic dependency that would arise because each -environment needs to declare the environment group it belongs to and each -environment group needs to declare its default environments. - -A build can be restricted to a certain environment with the -`--target_environment` command line option. - -The implementation of the constraint check is in -`RuleContextConstraintSemantics` and `TopLevelConstraintSemantics`. 
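-
-Although this mechanism is legacy, a sketch may make it concrete. The target
-names and the `cc_library` user are hypothetical; `environment()` and
-`environment_group()` are the real rules:
-
-```
-environment(name = "x86_64")
-environment(name = "arm64")
-
-# A group of thematically related environments, with the ones that are
-# supported by default if a rule declares nothing.
-environment_group(
-    name = "cpus",
-    environments = [":x86_64", ":arm64"],
-    defaults = [":x86_64"],
-)
-
-# Declares support for exactly :arm64; targets depending on this library
-# must be at least as restricted.
-cc_library(
-    name = "arm_only_lib",
-    srcs = ["arm_only.cc"],
-    restricted_to = [":arm64"],
-)
-```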
- -#### Platform constraints - -The current "official" way to describe what platforms a target is compatible -with is by using the same constraints used to describe toolchains and platforms. -It was implemented in pull request -[#10945](https://github.com/bazelbuild/bazel/pull/10945). - -### Visibility - -If you work on a large codebase with a lot of developers (like at Google), you -want to take care to prevent everyone else from arbitrarily depending on your -code. Otherwise, as per [Hyrum's law](https://www.hyrumslaw.com/), -people _will_ come to rely on behaviors that you considered to be implementation -details. - -Bazel supports this by the mechanism called _visibility_: you can limit which -targets can depend on a particular target using the -[visibility](/reference/be/common-definitions#common-attributes) attribute. This -attribute is a little special because, although it holds a list of labels, these -labels may encode a pattern over package names rather than a pointer to any -particular target. (Yes, this is a design flaw.) - -This is implemented in the following places: - -* The `RuleVisibility` interface represents a visibility declaration. It can - be either a constant (fully public or fully private) or a list of labels. -* Labels can refer to either package groups (predefined list of packages), to - packages directly (`//pkg:__pkg__`) or subtrees of packages - (`//pkg:__subpackages__`). This is different from the command line syntax, - which uses `//pkg:*` or `//pkg/...`. -* Package groups are implemented as their own target (`PackageGroup`) and - configured target (`PackageGroupConfiguredTarget`). We could probably - replace these with simple rules if we wanted to. Their logic is implemented - with the help of: `PackageSpecification`, which corresponds to a - single pattern like `//pkg/...`; `PackageGroupContents`, which corresponds - to a single `package_group`'s `packages` attribute; and - `PackageSpecificationProvider`, which aggregates over a `package_group` and - its transitive `includes`. -* The conversion from visibility label lists to dependencies is done in - `DependencyResolver.visitTargetVisibility` and a few other miscellaneous - places. -* The actual check is done in - `CommonPrerequisiteValidator.validateDirectPrerequisiteVisibility()` - -### Nested sets - -Oftentimes, a configured target aggregates a set of files from its dependencies, -adds its own, and wraps the aggregate set into a transitive info provider so -that configured targets that depend on it can do the same. Examples: - -* The C++ header files used for a build -* The object files that represent the transitive closure of a `cc_library` -* The set of .jar files that need to be on the classpath for a Java rule to - compile or run -* The set of Python files in the transitive closure of a Python rule - -If we did this the naive way by using, for example, `List` or `Set`, we'd end up with -quadratic memory usage: if there is a chain of N rules and each rule adds a -file, we'd have 1+2+...+N collection members. - -In order to get around this problem, we came up with the concept of a -`NestedSet`. It's a data structure that is composed of other `NestedSet` -instances and some members of its own, thereby forming a directed acyclic graph -of sets. They are immutable and their members can be iterated over. We define -multiple iteration order (`NestedSet.Order`): preorder, postorder, topological -(a node always comes after its ancestors) and "don't care, but it should be the -same each time". 
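-
-As a sketch of this aggregation pattern in a rule implementation (the rule and
-attribute names are hypothetical):
-
-```
-def _my_library_impl(ctx):
-    # The dependencies' sets are referenced, not copied, so memory use
-    # stays linear in the number of targets instead of quadratic.
-    files = depset(
-        direct = ctx.files.srcs,
-        transitive = [dep[DefaultInfo].files for dep in ctx.attr.deps],
-    )
-    return [DefaultInfo(files = files)]
-
-my_library = rule(
-    implementation = _my_library_impl,
-    attrs = {
-        "srcs": attr.label_list(allow_files = True),
-        "deps": attr.label_list(),
-    },
-)
-```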
-
-The same data structure is called `depset` in Starlark.
-
-### Artifacts and Actions
-
-The actual build consists of a set of commands that need to be run to produce
-the output the user wants. The commands are represented as instances of the
-class `Action` and the files are represented as instances of the class
-`Artifact`. They are arranged in a bipartite, directed, acyclic graph called the
-"action graph".
-
-Artifacts come in two kinds: source artifacts (ones that are available
-before Bazel starts executing) and derived artifacts (ones that need to be
-built). Derived artifacts can themselves be of multiple kinds:
-
-1. **Regular artifacts.** These are checked for up-to-dateness by computing
-   their checksum, with mtime as a shortcut; we don't checksum the file if its
-   ctime hasn't changed.
-2. **Unresolved symlink artifacts.** These are checked for up-to-dateness by
-   calling readlink(). Unlike regular artifacts, these can be dangling
-   symlinks. Usually used in cases where one then packs up some files into an
-   archive of some sort.
-3. **Tree artifacts.** These are not single files, but directory trees. They
-   are checked for up-to-dateness by checking the set of files in them and
-   their contents. They are represented as a `TreeArtifact`.
-4. **Constant metadata artifacts.** Changes to these artifacts don't trigger a
-   rebuild. This is used exclusively for build stamp information: we don't want
-   to do a rebuild just because the current time changed.
-
-There is no fundamental reason why source artifacts cannot be tree artifacts or
-unresolved symlink artifacts, it's just that we haven't implemented it yet (we
-should, though -- referencing a source directory in a `BUILD` file is one of the
-few known long-standing incorrectness issues with Bazel; we have an
-implementation that kind of works which is enabled by the
-`BAZEL_TRACK_SOURCE_DIRECTORIES=1` JVM property).
-
-A notable kind of `Artifact` is middlemen. They are indicated by `Artifact`
-instances that are the outputs of `MiddlemanAction`. They are used for one
-special case:
-
-* Runfiles middlemen are used to ensure the presence of a runfiles tree so
-  that one does not separately need to depend on the output manifest and every
-  single artifact referenced by the runfiles tree.
-
-Actions are best understood as a command that needs to be run, the environment
-it needs and the set of outputs it produces. The following things are the main
-components of the description of an action:
-
-* The command line that needs to be run
-* The input artifacts it needs
-* The environment variables that need to be set
-* Annotations that describe the environment (such as platform) it needs to run in
-
-There are also a few other special cases, like writing a file whose content is
-known to Bazel. These are subclasses of `AbstractAction`. Most of the actions
-are a `SpawnAction` or a `StarlarkAction` (which are essentially the same and
-should arguably not be separate classes), although Java and C++ have their own
-action types (`JavaCompileAction`, `CppCompileAction` and `CppLinkAction`).
-
-We eventually want to move everything to `SpawnAction`; `JavaCompileAction` is
-pretty close, but C++ is a bit of a special case due to .d file parsing and
-include scanning.
-
-The action graph is mostly "embedded" into the Skyframe graph: conceptually, the
-execution of an action is represented as an invocation of
-`ActionExecutionFunction`.
-The mapping from an action graph dependency edge to a
-Skyframe dependency edge is described in
-`ActionExecutionFunction.getInputDeps()` and `Artifact.key()` and has a few
-optimizations in order to keep the number of Skyframe edges low:
-
-* Derived artifacts do not have their own `SkyValue`s. Instead,
-  `Artifact.getGeneratingActionKey()` is used to find the key of the action
-  that generates them.
-* Nested sets have their own Skyframe key.
-
-### Shared actions
-
-Some actions are generated by multiple configured targets; Starlark rules are
-more limited since they are only allowed to put their derived actions into a
-directory determined by their configuration and their package (but even so,
-rules in the same package can conflict), but rules implemented in Java can put
-derived artifacts anywhere.
-
-This is considered to be a misfeature, but getting rid of it is really hard
-because it produces significant savings in execution time when, for example, a
-source file needs to be processed somehow and that file is referenced by
-multiple rules (handwave-handwave). This comes at the cost of some RAM: each
-instance of a shared action needs to be stored in memory separately.
-
-If two actions generate the same output file, they must be exactly the same:
-have the same inputs, the same outputs and run the same command line. This
-equivalence relation is implemented in `Actions.canBeShared()` and it is
-verified between the analysis and execution phases by looking at every Action.
-This is implemented in `SkyframeActionExecutor.findAndStoreArtifactConflicts()`
-and is one of the few places in Bazel that requires a "global" view of the
-build.
-
-## The execution phase
-
-This is when Bazel actually starts running build actions, such as commands that
-produce outputs.
-
-The first thing Bazel does after the analysis phase is to determine what
-Artifacts need to be built. The logic for this is encoded in
-`TopLevelArtifactHelper`; roughly speaking, it's the `filesToBuild` of the
-configured targets on the command line and the contents of a special output
-group for the explicit purpose of expressing "if this target is on the command
-line, build these artifacts".
-
-The next step is creating the execution root. Since Bazel has the option to read
-source packages from different locations in the file system (`--package_path`),
-it needs to provide locally executed actions with a full source tree. This is
-handled by the class `SymlinkForest` and works by taking note of every target
-used in the analysis phase and building up a single directory tree that symlinks
-every package with a used target from its actual location. An alternative would
-be to pass the correct paths to commands (taking `--package_path` into account).
-This is undesirable because:
-
-* It changes action command lines when a package is moved from one package path
-  entry to another (which used to be a common occurrence)
-* It results in different command lines if an action is run remotely than if
-  it's run locally
-* It requires a command line transformation specific to the tool in use
-  (consider the difference between Java classpaths and C++ include paths)
-* Changing the command line of an action invalidates its action cache entry
-* `--package_path` is slowly and steadily being deprecated
-
-Then, Bazel starts traversing the action graph (the bipartite, directed graph
-composed of actions and their input and output artifacts) and running actions.
-The execution of each action is represented by an instance of the `SkyValue`
-class `ActionExecutionValue`.
-
-Since running an action is expensive, we have a few layers of caching that can
-be hit behind Skyframe:
-
-* `ActionExecutionFunction.stateMap` contains data to make Skyframe restarts
-  of `ActionExecutionFunction` cheap
-* The local action cache contains data about the state of the file system
-* Remote execution systems usually also contain their own cache
-
-### The local action cache
-
-This cache is another layer that sits behind Skyframe; even if an action is
-re-executed in Skyframe, it can still be a hit in the local action cache. It
-represents the state of the local file system and it's serialized to disk, which
-means that when one starts up a new Bazel server, one can get local action cache
-hits even though the Skyframe graph is empty.
-
-This cache is checked for hits using the method
-`ActionCacheChecker.getTokenIfNeedToExecute()`.
-
-Contrary to its name, it's a map from the path of a derived artifact to the
-action that emitted it. The action is described as:
-
-1. The set of its input and output files and their checksum
-2. Its "action key", which is usually the command line that was executed, but
-   in general, represents everything that's not captured by the checksum of the
-   input files (such as for `FileWriteAction`, it's the checksum of the data
-   that's written)
-
-There is also a highly experimental "top-down action cache" that is still under
-development, which uses transitive hashes to avoid going to the cache as many
-times.
-
-### Input discovery and input pruning
-
-Some actions are more complicated than just having a set of inputs. Changes to
-the set of inputs of an action come in two forms:
-
-* An action may discover new inputs before its execution or decide that some
-  of its inputs are not actually necessary. The canonical example is C++,
-  where it's better to make an educated guess about what header files a C++
-  file uses from its transitive closure so that we don't need to send every
-  file to remote executors; therefore, we have an option not to register every
-  header file as an "input", but scan the source file for transitively
-  included headers and only mark those header files as inputs that are
-  mentioned in `#include` statements (we overestimate so that we don't need to
-  implement a full C preprocessor). This option is currently hard-wired to
-  "false" in Bazel and is only used at Google.
-* An action may realize that some files were not used during its execution. In
-  C++, this is called ".d files": the compiler tells which header files were
-  used after the fact, and in order to avoid the embarrassment of having worse
-  incrementality than Make, Bazel makes use of this fact. This offers a better
-  estimate than the include scanner because it relies on the compiler.
-
-These are implemented using methods on Action:
-
-1. `Action.discoverInputs()` is called. It should return a nested set of
-   Artifacts that are determined to be required. These must be source artifacts
-   so that there are no dependency edges in the action graph that don't have an
-   equivalent in the configured target graph.
-2. The action is executed by calling `Action.execute()`.
-3. At the end of `Action.execute()`, the action can call
-   `Action.updateInputs()` to tell Bazel that not all of its inputs were
-   needed. This can result in incorrect incremental builds if a used input is
-   reported as unused.
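-
-Starlark's window into input pruning is the `unused_inputs_list=` parameter of
-`ctx.actions.run()`, described below. A hedged sketch; the tool, its flags and
-all names are hypothetical, while the `ctx.actions` API calls are real:
-
-```
-def _pruning_impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name + ".out")
-    unused = ctx.actions.declare_file(ctx.label.name + ".unused")
-    # The tool is expected to write the inputs it did not use, one path
-    # per line, into the file passed via --unused-out; Bazel then prunes
-    # them from the action's inputs for subsequent builds.
-    ctx.actions.run(
-        executable = ctx.executable._tool,
-        arguments = ["--out", out.path, "--unused-out", unused.path] +
-                    [f.path for f in ctx.files.srcs],
-        inputs = ctx.files.srcs,
-        outputs = [out, unused],
-        unused_inputs_list = unused,
-    )
-    return [DefaultInfo(files = depset([out]))]
-
-pruning_rule = rule(
-    implementation = _pruning_impl,
-    attrs = {
-        "srcs": attr.label_list(allow_files = True),
-        "_tool": attr.label(
-            default = "//tools:pruner",  # hypothetical
-            executable = True,
-            cfg = "exec",
-        ),
-    },
-)
-```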
- -When an action cache returns a hit on a fresh Action instance (such as created -after a server restart), Bazel calls `updateInputs()` itself so that the set of -inputs reflects the result of input discovery and pruning done before. - -Starlark actions can make use of the facility to declare some inputs as unused -using the `unused_inputs_list=` argument of -`ctx.actions.run()`. - -### Various ways to run actions: Strategies/ActionContexts - -Some actions can be run in different ways. For example, a command line can be -executed locally, locally but in various kinds of sandboxes, or remotely. The -concept that embodies this is called an `ActionContext` (or `Strategy`, since we -successfully went only halfway with a rename...) - -The life cycle of an action context is as follows: - -1. When the execution phase is started, `BlazeModule` instances are asked what - action contexts they have. This happens in the constructor of - `ExecutionTool`. Action context types are identified by a Java `Class` - instance that refers to a sub-interface of `ActionContext` and which - interface the action context must implement. -2. The appropriate action context is selected from the available ones and is - forwarded to `ActionExecutionContext` and `BlazeExecutor` . -3. Actions request contexts using `ActionExecutionContext.getContext()` and - `BlazeExecutor.getStrategy()` (there should really be only one way to do - it…) - -Strategies are free to call other strategies to do their jobs; this is used, for -example, in the dynamic strategy that starts actions both locally and remotely, -then uses whichever finishes first. - -One notable strategy is the one that implements persistent worker processes -(`WorkerSpawnStrategy`). The idea is that some tools have a long startup time -and should therefore be reused between actions instead of starting one anew for -every action (This does represent a potential correctness issue, since Bazel -relies on the promise of the worker process that it doesn't carry observable -state between individual requests) - -If the tool changes, the worker process needs to be restarted. Whether a worker -can be reused is determined by computing a checksum for the tool used using -`WorkerFilesHash`. It relies on knowing which inputs of the action represent -part of the tool and which represent inputs; this is determined by the creator -of the Action: `Spawn.getToolFiles()` and the runfiles of the `Spawn` are -counted as parts of the tool. - -More information about strategies (or action contexts!): - -* Information about various strategies for running actions is available - [here](https://jmmv.dev/2019/12/bazel-strategies.html). -* Information about the dynamic strategy, one where we run an action both - locally and remotely to see whichever finishes first is available - [here](https://jmmv.dev/series.html#Bazel%20dynamic%20execution). -* Information about the intricacies of executing actions locally is available - [here](https://jmmv.dev/2019/11/bazel-process-wrapper.html). - -### The local resource manager - -Bazel _can_ run many actions in parallel. The number of local actions that -_should_ be run in parallel differs from action to action: the more resources an -action requires, the less instances should be running at the same time to avoid -overloading the local machine. - -This is implemented in the class `ResourceManager`: each action has to be -annotated with an estimate of the local resources it requires in the form of a -`ResourceSet` instance (CPU and RAM). 
Then when action contexts do something that requires local resources, they call
-`ResourceManager.acquireResources()`
-and are blocked until the required resources are available.
-
-A more detailed description of local resource management is available
-[here](https://jmmv.dev/2019/12/bazel-local-resources.html).
-
-### The structure of the output directory
-
-Each action requires a separate place in the output directory where it places
-its outputs. The location of derived artifacts is usually as follows:
-
-```
-$EXECROOT/bazel-out/<configuration>/bin/<package>/<artifact name>
-```
-
-How is the name of the directory that is associated with a particular
-configuration determined? There are two conflicting desirable properties:
-
-1. If two configurations can occur in the same build, they should have
-   different directories so that both can have their own version of the same
-   action; otherwise, if the two configurations disagree about, say, the command
-   line of an action producing the same output file, Bazel doesn't know which
-   action to choose (an "action conflict")
-2. If two configurations represent "roughly" the same thing, they should have
-   the same name so that actions executed in one can be reused for the other if
-   the command lines match: for example, changes to the command line options to
-   the Java compiler should not result in C++ compile actions being re-run.
-
-So far, we have not come up with a principled way of solving this problem, which
-has similarities to the problem of configuration trimming. A longer discussion
-of options is available
-[here](https://docs.google.com/document/d/1fZI7wHoaS-vJvZy9SBxaHPitIzXE_nL9v4sS4mErrG4/edit).
-The main problematic areas are Starlark rules (whose authors usually aren't
-intimately familiar with Bazel) and aspects, which add another dimension to the
-space of things that can produce the "same" output file.
-
-The current approach is that the path segment for the configuration is
-`<CPU>-<compilation mode>` with various suffixes added so that configuration
-transitions implemented in Java don't result in action conflicts. In addition, a
-checksum of the set of Starlark configuration transitions is added so that users
-can't cause action conflicts. It is far from perfect. This is implemented in
-`OutputDirectories.buildMnemonic()` and relies on each configuration fragment
-adding its own part to the name of the output directory.
-
-## Tests
-
-Bazel has rich support for running tests. It supports:
-
-* Running tests remotely (if a remote execution backend is available)
-* Running tests multiple times in parallel (for deflaking or gathering timing
-  data)
-* Sharding tests (splitting test cases in the same test over multiple processes
-  for speed)
-* Re-running flaky tests
-* Grouping tests into test suites
-
-Tests are regular configured targets that have a `TestProvider`, which describes
-how the test should be run:
-
-* The artifacts whose building results in the test being run. This is a "cache
-  status" file that contains a serialized `TestResultData` message
-* The number of times the test should be run
-* The number of shards the test should be split into
-* Some parameters about how the test should be run (such as the test timeout)
-
-### Determining which tests to run
-
-Determining which tests are run is an elaborate process.
-
-First, during target pattern parsing, test suites are recursively expanded. The
-expansion is implemented in `TestsForTargetPatternFunction`. A somewhat
-surprising wrinkle is that if a test suite declares no tests, it refers to
-_every_ test in its package.
This is implemented in `Package.beforeBuild()` by -adding an implicit attribute called `$implicit_tests` to test suite rules. - -Then, tests are filtered for size, tags, timeout and language according to the -command line options. This is implemented in `TestFilter` and is called from -`TargetPatternPhaseFunction.determineTests()` during target parsing and the -result is put into `TargetPatternPhaseValue.getTestsToRunLabels()`. The reason -why rule attributes which can be filtered for are not configurable is that this -happens before the analysis phase, therefore, the configuration is not -available. - -This is then processed further in `BuildView.createResult()`: targets whose -analysis failed are filtered out and tests are split into exclusive and -non-exclusive tests. It's then put into `AnalysisResult`, which is how -`ExecutionTool` knows which tests to run. - -In order to lend some transparency to this elaborate process, the `tests()` -query operator (implemented in `TestsFunction`) is available to tell which tests -are run when a particular target is specified on the command line. It's -unfortunately a reimplementation, so it probably deviates from the above in -multiple subtle ways. - -### Running tests - -The way the tests are run is by requesting cache status artifacts. This then -results in the execution of a `TestRunnerAction`, which eventually calls the -`TestActionContext` chosen by the `--test_strategy` command line option that -runs the test in the requested way. - -Tests are run according to an elaborate protocol that uses environment variables -to tell tests what's expected from them. A detailed description of what Bazel -expects from tests and what tests can expect from Bazel is available -[here](/reference/test-encyclopedia). At the -simplest, an exit code of 0 means success, anything else means failure. - -In addition to the cache status file, each test process emits a number of other -files. They are put in the "test log directory" which is the subdirectory called -`testlogs` of the output directory of the target configuration: - -* `test.xml`, a JUnit-style XML file detailing the individual test cases in - the test shard -* `test.log`, the console output of the test. stdout and stderr are not - separated. -* `test.outputs`, the "undeclared outputs directory"; this is used by tests - that want to output files in addition to what they print to the terminal. - -There are two things that can happen during test execution that cannot during -building regular targets: exclusive test execution and output streaming. - -Some tests need to be executed in exclusive mode, for example not in parallel with -other tests. This can be elicited either by adding `tags=["exclusive"]` to the -test rule or running the test with `--test_strategy=exclusive` . Each exclusive -test is run by a separate Skyframe invocation requesting the execution of the -test after the "main" build. This is implemented in -`SkyframeExecutor.runExclusiveTest()`. - -Unlike regular actions, whose terminal output is dumped when the action -finishes, the user can request the output of tests to be streamed so that they -get informed about the progress of a long-running test. This is specified by the -`--test_output=streamed` command line option and implies exclusive test -execution so that outputs of different tests are not interspersed. 
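-
-As a concrete illustration of the exclusive mode described above (the target
-itself is hypothetical):
-
-```
-cc_test(
-    name = "database_test",
-    srcs = ["database_test.cc"],
-    # Grabs a fixed port, so it must never run concurrently with other tests.
-    tags = ["exclusive"],
-)
-```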
-
-Output streaming is implemented in the aptly-named `StreamedTestOutput` class
-and works by polling changes to the `test.log` file of the test in question and
-dumping new bytes to the terminal where Bazel runs.
-
-Results of the executed tests are available on the event bus by observing
-various events (such as `TestAttempt`, `TestResult` or `TestingCompleteEvent`).
-They are dumped to the Build Event Protocol and they are emitted to the console
-by `AggregatingTestListener`.
-
-### Coverage collection
-
-Coverage is reported by the tests in LCOV format in the files
-`bazel-testlogs/$PACKAGE/$TARGET/coverage.dat`.
-
-To collect coverage, each test execution is wrapped in a script called
-`collect_coverage.sh`.
-
-This script sets up the environment of the test to enable coverage collection
-and determines where the coverage files are written by the coverage runtime(s).
-It then runs the test. A test may itself run multiple subprocesses and consist
-of parts written in multiple different programming languages (with separate
-coverage collection runtimes). The wrapper script is responsible for converting
-the resulting files to LCOV format if necessary, and for merging them into a
-single file.
-
-The interposition of `collect_coverage.sh` is done by the test strategies and
-requires `collect_coverage.sh` to be on the inputs of the test. This is
-accomplished by the implicit attribute `:coverage_support`, which is resolved to
-the value of the configuration flag `--coverage_support` (see
-`TestConfiguration.TestOptions.coverageSupport`).
-
-Some languages do offline instrumentation, meaning that the coverage
-instrumentation is added at compile time (such as C++), and others do online
-instrumentation, meaning that coverage instrumentation is added at execution
-time.
-
-Another core concept is _baseline coverage_. This is the coverage of a library,
-binary, or test if no code in it was run. The problem it solves is that if you
-want to compute the test coverage for a binary, it is not enough to merge the
-coverage of all of the tests because there may be code in the binary that is not
-linked into any test. Therefore, we emit a coverage file for every binary which
-contains only the files we collect coverage for, with no covered lines. The
-baseline coverage file for a target is at
-`bazel-testlogs/$PACKAGE/$TARGET/baseline_coverage.dat`. It is also generated
-for binaries and libraries in addition to tests if you pass the
-`--nobuild_tests_only` flag to Bazel.
-
-Baseline coverage is currently broken.
-
-We track two groups of files for coverage collection for each rule: the set of
-instrumented files and the set of instrumentation metadata files.
-
-The set of instrumented files is just that, a set of files to instrument. For
-online coverage runtimes, this can be used at runtime to decide which files to
-instrument. It is also used to implement baseline coverage.
-
-The set of instrumentation metadata files is the set of extra files a test needs
-to generate the LCOV files Bazel requires from it. In practice, this consists of
-runtime-specific files; for example, gcc emits .gcno files during compilation.
-These are added to the set of inputs of test actions if coverage mode is
-enabled.
-
-Whether or not coverage is being collected is stored in the
-`BuildConfiguration`.
-This is handy because it is an easy way to change the test
-action and the action graph depending on this bit, but it also means that if
-this bit is flipped, all targets need to be re-analyzed (some languages, such as
-C++, require different compiler options to emit code that can collect coverage,
-which mitigates this issue somewhat, since then a re-analysis is needed anyway).
-
-The coverage support files are depended on through labels in an implicit
-dependency so that they can be overridden by the invocation policy, which allows
-them to differ between the different versions of Bazel. Ideally, these
-differences would be removed, and we would standardize on one of them.
-
-We also generate a "coverage report" which merges the coverage collected for
-every test in a Bazel invocation. This is handled by
-`CoverageReportActionFactory` and is called from `BuildView.createResult()`. It
-gets access to the tools it needs by looking at the `:coverage_report_generator`
-attribute of the first test that is executed.
-
-## The query engine
-
-Bazel has a
-[little language](/query/guide)
-used to ask it various things about various graphs. The following query kinds
-are provided:
-
-* `bazel query` is used to investigate the target graph
-* `bazel cquery` is used to investigate the configured target graph
-* `bazel aquery` is used to investigate the action graph
-
-Each of these is implemented by subclassing `AbstractBlazeQueryEnvironment`.
-Additional query functions can be added by subclassing `QueryFunction`.
-In order to allow streaming query results, instead of collecting them to some
-data structure, a `query2.engine.Callback` is passed to `QueryFunction`, which
-calls it for results it wants to return.
-
-The result of a query can be emitted in various ways: labels, labels and rule
-classes, XML, protobuf and so on. These are implemented as subclasses of
-`OutputFormatter`.
-
-A subtle requirement of some query output formats (proto, definitely) is that
-Bazel needs to emit _all_ the information that package loading provides so that
-one can diff the output and determine whether a particular target has changed.
-As a consequence, attribute values need to be serializable, which is why there
-are only so few attribute types, none of them with complex Starlark values. The
-usual workaround is to use a label, and attach the complex information to the
-rule with that label. It's not a very satisfying workaround and it would be very
-nice to lift this requirement.
-
-## The module system
-
-Bazel can be extended by adding modules to it. Each module must subclass
-`BlazeModule` (the name is a relic of the history of Bazel when it used to be
-called Blaze) and gets information about various events during the execution of
-a command.
-
-They are mostly used to implement various pieces of "non-core" functionality
-that only some versions of Bazel (such as the one we use at Google) need:
-
-* Interfaces to remote execution systems
-* New commands
-
-The set of extension points `BlazeModule` offers is somewhat haphazard. Don't
-use it as an example of good design principles.
-
-## The event bus
-
-The main way `BlazeModule`s communicate with the rest of Bazel is by an event bus
-(`EventBus`): a new instance is created for every build, various parts of Bazel
-can post events to it and modules can register listeners for the events they are
-interested in.
For example, the following things are represented as events: - -* The list of build targets to be built has been determined - (`TargetParsingCompleteEvent`) -* The top-level configurations have been determined - (`BuildConfigurationEvent`) -* A target was built, successfully or not (`TargetCompleteEvent`) -* A test was run (`TestAttempt`, `TestSummary`) - -Some of these events are represented outside of Bazel in the -[Build Event Protocol](/remote/bep) -(they are `BuildEvent`s). This allows not only `BlazeModule`s, but also things -outside the Bazel process to observe the build. They are accessible either as a -file that contains protocol messages or Bazel can connect to a server (called -the Build Event Service) to stream events. - -This is implemented in the `build.lib.buildeventservice` and -`build.lib.buildeventstream` Java packages. - -## External repositories - -Note: The information in this section is out of date, as code in this area has -undergone extensive change in the past couple of years. Please refer to -[external dependencies overview](/external/overview) for more up-to-date -information. - -Whereas Bazel was originally designed to be used in a monorepo (a single source -tree containing everything one needs to build), Bazel lives in a world where -this is not necessarily true. "External repositories" are an abstraction used to -bridge these two worlds: they represent code that is necessary for the build but -is not in the main source tree. - -### The WORKSPACE file - -The set of external repositories is determined by parsing the WORKSPACE file. -For example, a declaration like this: - -``` - local_repository(name="foo", path="/foo/bar") -``` - -Results in the repository called `@foo` being available. Where this gets -complicated is that one can define new repository rules in Starlark files, which -can then be used to load new Starlark code, which can be used to define new -repository rules and so on… - -To handle this case, the parsing of the WORKSPACE file (in -`WorkspaceFileFunction`) is split up into chunks delineated by `load()` -statements. The chunk index is indicated by `WorkspaceFileKey.getIndex()` and -computing `WorkspaceFileFunction` until index X means evaluating it until the -Xth `load()` statement. - -### Fetching repositories - -Before the code of the repository is available to Bazel, it needs to be -_fetched_. This results in Bazel creating a directory under -`$OUTPUT_BASE/external/`. - -Fetching the repository happens in the following steps: - -1. `PackageLookupFunction` realizes that it needs a repository and creates a - `RepositoryName` as a `SkyKey`, which invokes `RepositoryLoaderFunction` -2. `RepositoryLoaderFunction` forwards the request to - `RepositoryDelegatorFunction` for unclear reasons (the code says it's to - avoid re-downloading things in case of Skyframe restarts, but it's not a - very solid reasoning) -3. `RepositoryDelegatorFunction` finds out the repository rule it's asked to - fetch by iterating over the chunks of the WORKSPACE file until the requested - repository is found -4. The appropriate `RepositoryFunction` is found that implements the repository - fetching; it's either the Starlark implementation of the repository or a - hard-coded map for repositories that are implemented in Java. - -There are various layers of caching since fetching a repository can be very -expensive: - -1. There is a cache for downloaded files that is keyed by their checksum - (`RepositoryCache`). 
This requires the checksum to be available in the - WORKSPACE file, but that's good for hermeticity anyway. This is shared by - every Bazel server instance on the same workstation, regardless of which - workspace or output base they are running in. -2. A "marker file" is written for each repository under `$OUTPUT_BASE/external` - that contains a checksum of the rule that was used to fetch it. If the Bazel - server restarts but the checksum does not change, it's not re-fetched. This - is implemented in `RepositoryDelegatorFunction.DigestWriter` . -3. The `--distdir` command line option designates another cache that is used to - look up artifacts to be downloaded. This is useful in enterprise settings - where Bazel should not fetch random things from the Internet. This is - implemented by `DownloadManager` . - -Once a repository is downloaded, the artifacts in it are treated as source -artifacts. This poses a problem because Bazel usually checks for up-to-dateness -of source artifacts by calling stat() on them, and these artifacts are also -invalidated when the definition of the repository they are in changes. Thus, -`FileStateValue`s for an artifact in an external repository need to depend on -their external repository. This is handled by `ExternalFilesHelper`. - -### Repository mappings - -It can happen that multiple repositories want to depend on the same repository, -but in different versions (this is an instance of the "diamond dependency -problem"). For example, if two binaries in separate repositories in the build -want to depend on Guava, they will presumably both refer to Guava with labels -starting `@guava//` and expect that to mean different versions of it. - -Therefore, Bazel allows one to re-map external repository labels so that the -string `@guava//` can refer to one Guava repository (such as `@guava1//`) in the -repository of one binary and another Guava repository (such as `@guava2//`) the -repository of the other. - -Alternatively, this can also be used to **join** diamonds. If a repository -depends on `@guava1//`, and another depends on `@guava2//`, repository mapping -allows one to re-map both repositories to use a canonical `@guava//` repository. - -The mapping is specified in the WORKSPACE file as the `repo_mapping` attribute -of individual repository definitions. It then appears in Skyframe as a member of -`WorkspaceFileValue`, where it is plumbed to: - -* `Package.Builder.repositoryMapping` which is used to transform label-valued - attributes of rules in the package by - `RuleClass.populateRuleAttributeValues()` -* `Package.repositoryMapping` which is used in the analysis phase (for - resolving things like `$(location)` which are not parsed in the loading - phase) -* `BzlLoadFunction` for resolving labels in load() statements - -## JNI bits - -The server of Bazel is _mostly_ written in Java. The exception is the parts that -Java cannot do by itself or couldn't do by itself when we implemented it. This -is mostly limited to interaction with the file system, process control and -various other low-level things. 
- -The C++ code lives under src/main/native and the Java classes with native -methods are: - -* `NativePosixFiles` and `NativePosixFileSystem` -* `ProcessUtils` -* `WindowsFileOperations` and `WindowsFileProcesses` -* `com.google.devtools.build.lib.platform` - -## Console output - -Emitting console output seems like a simple thing, but the confluence of running -multiple processes (sometimes remotely), fine-grained caching, the desire to -have a nice and colorful terminal output and having a long-running server makes -it non-trivial. - -Right after the RPC call comes in from the client, two `RpcOutputStream` -instances are created (for stdout and stderr) that forward the data printed into -them to the client. These are then wrapped in an `OutErr` (an (stdout, stderr) -pair). Anything that needs to be printed on the console goes through these -streams. Then these streams are handed over to -`BlazeCommandDispatcher.execExclusively()`. - -Output is by default printed with ANSI escape sequences. When these are not -desired (`--color=no`), they are stripped by an `AnsiStrippingOutputStream`. In -addition, `System.out` and `System.err` are redirected to these output streams. -This is so that debugging information can be printed using -`System.err.println()` and still end up in the terminal output of the client -(which is different from that of the server). Care is taken that if a process -produces binary output (such as `bazel query --output=proto`), no munging of stdout -takes place. - -Short messages (errors, warnings and the like) are expressed through the -`EventHandler` interface. Notably, these are different from what one posts to -the `EventBus` (this is confusing). Each `Event` has an `EventKind` (error, -warning, info, and a few others) and they may have a `Location` (the place in -the source code that caused the event to happen). - -Some `EventHandler` implementations store the events they received. This is used -to replay information to the UI caused by various kinds of cached processing, -for example, the warnings emitted by a cached configured target. - -Some `EventHandler`s also allow posting events that eventually find their way to -the event bus (regular `Event`s do _not _appear there). These are -implementations of `ExtendedEventHandler` and their main use is to replay cached -`EventBus` events. These `EventBus` events all implement `Postable`, but not -everything that is posted to `EventBus` necessarily implements this interface; -only those that are cached by an `ExtendedEventHandler` (it would be nice and -most of the things do; it's not enforced, though) - -Terminal output is _mostly_ emitted through `UiEventHandler`, which is -responsible for all the fancy output formatting and progress reporting Bazel -does. It has two inputs: - -* The event bus -* The event stream piped into it through Reporter - -The only direct connection the command execution machinery (for example the rest of -Bazel) has to the RPC stream to the client is through `Reporter.getOutErr()`, -which allows direct access to these streams. It's only used when a command needs -to dump large amounts of possible binary data (such as `bazel query`). - -## Profiling Bazel - -Bazel is fast. Bazel is also slow, because builds tend to grow until just the -edge of what's bearable. For this reason, Bazel includes a profiler which can be -used to profile builds and Bazel itself. It's implemented in a class that's -aptly named `Profiler`. 
It's turned on by default, although it records only
-abridged data so that its overhead is tolerable; the
-`--record_full_profiler_data` command line option makes it record everything it
-can.
-
-It emits a profile in the Chrome profiler format; it's best viewed in Chrome.
-Its data model is that of task stacks: one can start tasks and end tasks and
-they are supposed to be neatly nested within each other. Each Java thread gets
-its own task stack. **TODO:** How does this work with actions and
-continuation-passing style?
-
-The profiler is started and stopped in `BlazeRuntime.initProfiler()` and
-`BlazeRuntime.afterCommand()` respectively and attempts to be live for as long
-as possible so that we can profile everything. To add something to the profile,
-call `Profiler.instance().profile()`. It returns a `Closeable`, whose closure
-represents the end of the task. It's best used with try-with-resources
-statements.
-
-We also do rudimentary memory profiling in `MemoryProfiler`. It's also always on
-and it mostly records maximum heap sizes and GC behavior.
-
-## Testing Bazel
-
-Bazel has two main kinds of tests: ones that observe Bazel as a "black box" and
-ones that only run the analysis phase. We call the former "integration tests"
-and the latter "unit tests", although they are more like integration tests that
-are, well, less integrated. We also have some actual unit tests, where they are
-necessary.
-
-Of integration tests, we have two kinds:
-
-1. Ones implemented using a very elaborate bash test framework under
-   `src/test/shell`
-2. Ones implemented in Java. These are implemented as subclasses of
-   `BuildIntegrationTestCase`
-
-`BuildIntegrationTestCase` is the preferred integration testing framework as it
-is well-equipped for most testing scenarios. As it is a Java framework, it
-provides debuggability and seamless integration with many common development
-tools. There are many examples of `BuildIntegrationTestCase` classes in the
-Bazel repository.
-
-Analysis tests are implemented as subclasses of `BuildViewTestCase`. There is a
-scratch file system you can use to write `BUILD` files, then various helper
-methods can request configured targets, change the configuration and assert
-various things about the result of the analysis.
diff --git a/8.0.1/contribute/design-documents.mdx b/8.0.1/contribute/design-documents.mdx
deleted file mode 100644
index 1fe70b9..0000000
--- a/8.0.1/contribute/design-documents.mdx
+++ /dev/null
@@ -1,254 +0,0 @@
----
-title: 'Design Documents'
----
-
-
-
-If you're planning to add, change, or remove a user-facing feature, or make a
-*significant architectural change* to Bazel, you **must** write a design
-document and have it reviewed before you can submit the change.
-
-Here are some examples of significant changes:
-
-* Addition or deletion of native build rules
-* Breaking changes to native rules
-* Changes to native build rule semantics that affect the behavior of more
-  than a single rule
-* Changes to Bazel's rule definition API
-* Changes to the APIs that Bazel uses to connect to other systems
-* Changes to the Starlark language, semantics, or APIs
-* Changes that could have a pervasive effect on Bazel performance or memory
-  usage (for better or for worse)
-* Changes to widely used internal APIs
-* Changes to flags and the command-line interface.
-
-## Reasons for design reviews
-
-When you write a design document, you can coordinate with other Bazel developers
-and seek guidance from Bazel's core team.
-For example, when a proposal adds,
-removes, or modifies any function or object available in BUILD, MODULE.bazel, or
-bzl files, add the [Starlark team](maintainers-guide.md) as reviewers.
-Design documents are reviewed before submission because:
-
-* Bazel is a very complex system; seemingly innocuous local changes can have
-  significant global consequences.
-* The team gets many feature requests from users; such requests need to be
-  evaluated not only for technical feasibility but also for importance relative
-  to other feature requests.
-* Bazel features are frequently implemented by people outside the core team;
-  such contributors have widely varying levels of Bazel expertise.
-* The Bazel team itself has varying levels of expertise; no single team member
-  has a complete understanding of every corner of Bazel.
-* Changes to Bazel must account for backward compatibility and avoid breaking
-  changes.
-
-Bazel's design review policy helps to maximize the likelihood that:
-
-* all feature requests get a baseline level of scrutiny.
-* the right people will weigh in on designs before we've invested in an
-  implementation that may not work.
-
-To help you get started, take a look at the design documents in the
-[Bazel Proposals Repository](https://github.com/bazelbuild/proposals).
-Designs are works in progress, so implementation details can change over time
-and with feedback. The published design documents capture the initial design,
-and *not* the ongoing changes as designs are implemented. Always go to the
-documentation for descriptions of current Bazel functionality.
-
-## Contributor Workflow
-
-As a contributor, you can write a design document, send pull requests and
-request reviewers for your proposal.
-
-### Write the design document
-
-All design documents must have a header that includes:
-
-* author
-* date of last major change
-* list of reviewers, including one (and only one)
-  [lead reviewer](#lead-reviewer)
-* current status (_draft_, _in review_, _approved_, _rejected_,
-  _being implemented_, _implemented_)
-* link to discussion thread (_to be added after the announcement_)
-
-The document can be written either [as a world-readable Google Doc](#gdocs)
-or [using Markdown](#markdown). Read below for a
-[Markdown / Google Docs comparison](#markdown-versus-gdocs).
-
-Proposals that have a user-visible impact must have a section documenting the
-impact on backward compatibility (and a rollout plan if needed).
-
-### Create a Pull Request
-
-Share your design doc by creating a pull request (PR) to add the document to
-[the design index](https://github.com/bazelbuild/proposals). Add
-your markdown file or a document link to your PR.
-
-When possible, [choose a lead reviewer](#lead-reviewer)
-and cc other reviewers. If you don't choose a lead reviewer, a Bazel
-maintainer will assign one to your PR.
-
-After you create your PR, reviewers can make preliminary comments during the
-code review. For example, the lead reviewer can suggest extra reviewers, or
-point out missing information. The lead reviewer approves the PR when they
-believe the review process can start. This doesn't mean the proposal is perfect
-or will be approved; it means that the proposal contains enough information to
-start the discussion.
-
-### Announce the new proposal
-
-Send an announcement to
-[bazel-dev](https://groups.google.com/forum/#!forum/bazel-dev) when
-the PR is submitted.
- -You may copy other groups (for example, -[bazel-discuss](https://groups.google.com/forum/#!forum/bazel-discuss), -to get feedback from Bazel end-users). - -### Iterate with reviewers - -Anyone interested can comment on your proposal. Try to answer questions, -clarify the proposal, and address concerns. - -Discussion should happen on the announcement thread. If the proposal is in a -Google Doc, comments may be used instead (Note that anonymous comments are -allowed). - -### Update the status - -Create a new PR to update the status of the proposal, when iteration is -complete. Send the PR to the same lead reviewer and cc the other reviewers. - -To officially accept the proposal, the lead reviewer approves the PR after -ensuring that the other reviewers agree with the decision. - -There must be at least 1 week between the first announcement and the approval of -a proposal. This ensures that users had enough time to read the document and -share their concerns. - -Implementation can begin before the proposal is accepted, for example as a -proof-of-concept or an experimentation. However, you cannot submit the change -before the review is complete. - -### Choosing a lead reviewer - -A lead reviewer should be a domain expert who is: - -* Knowledgeable of the relevant subsystems -* Objective and capable of providing constructive feedback -* Available for the entire review period to lead the process - -Consider checking the contacts for various [team -labels](/contribute/maintainers-guide#team-labels). - -## Markdown vs Google Docs - -Decide what works best for you, since both are accepted. - -Benefits of using Google Docs: - -* Effective for brainstorming, since it is easy to get started with. -* Collaborative editing. -* Quick iteration. -* Easy way to suggest edits. - -Benefits of using Markdown files: - -* Clean URLs for linking. -* Explicit record of revisions. -* No forgetting to set up access rights before publicizing a link. -* Easily searchable with search engines. -* Future-proof: Plain text is not at the mercy of any specific tool - and doesn't require an Internet connection. -* It is possible to update them even if the author is not around anymore. -* They can be processed automatically (update/detect dead links, fetch - list of authors, etc.). - -You can choose to first iterate on a Google Doc, and then convert it to -Markdown for posterity. - -### Using Google Docs - -For consistency, use the [Bazel design doc template]( -https://docs.google.com/document/d/1cE5zrjrR40RXNg64XtRFewSv6FrLV6slGkkqxBumS1w/edit). -It includes the necessary header and creates visual -consistency with other Bazel related documents. To do that, click on **File** > -**Make a copy** or click this link to [make a copy of the design doc -template](https://docs.google.com/document/d/1cE5zrjrR40RXNg64XtRFewSv6FrLV6slGkkqxBumS1w/copy). - -To make your document readable to the world, click on -**Share** > **Advanced** > **Change…**, and -choose "On - Anyone with the link". If you allow comments on the document, -anyone can comment anonymously, even without a Google account. - -### Using Markdown - -Documents are stored on GitHub and use the -[GitHub flavor of Markdown](https://guides.github.com/features/mastering-markdown/) -([Specification](https://github.github.com/gfm/)). - -Create a PR to update an existing document. Significant changes should be -reviewed by the document reviewers. Trivial changes (such as typos, formatting) -can be approved by anyone. 
- -## Reviewer workflow - -A reviewer comments, reviews and approves design documents. - -### General reviewer responsibilities - -You're responsible for reviewing design documents, asking for additional -information if needed, and approving a design that passes the review process. - -#### When you receive a new proposal - -1. Take a quick look at the document. -1. Comment if critical information is missing, or if the design doesn't fit - with the goals of the project. -1. Suggest additional reviewers. -1. Approve the PR when it is ready for review. - -#### During the review process - -1. Engage in a dialogue with the design author about issues that are problematic - or require clarification. -1. If appropriate, invite comments from non-reviewers who should be aware of - the design. -1. Decide which comments must be addressed by the author as a prerequisite to - approval. -1. Write "LGTM" (_Looks Good To Me_) in the discussion thread when you are - happy with the current state of the proposal. - -Follow this process for all design review requests. Do not approve designs -affecting Bazel if they are not in the -[design index](https://github.com/bazelbuild/proposals). - -### Lead reviewer responsibilities - -You're responsible for making the go / no-go decision on implementation -of a pending design. If you're not able to do this, you should identify a -suitable delegate (reassign the PR to the delegate), or reassign the bug to a -Bazel manager for further disposition. - -#### During the review process - -1. Ensure that the comment and design iteration process moves forward - constructively. -1. Prior to approval, ensure that concerns from other reviewers have been - resolved. - -#### After approval by all reviewers - -1. Make sure there has been at least 1 week since the announcement on the - mailing list. -1. Make sure the PR updates the status. -1. Approve the PR sent by the proposal author. - -#### Rejecting designs - -1. Make sure the PR author sends a PR; or send them a PR. -1. The PR updates the status of the document. -1. Add a comment to the document explaining why the design can't be approved in - its current state, and outlining next steps, if any (such as "revisit invalid - assumptions and resubmit"). diff --git a/8.0.1/contribute/docs-style-guide.mdx b/8.0.1/contribute/docs-style-guide.mdx deleted file mode 100644 index f50c9eb..0000000 --- a/8.0.1/contribute/docs-style-guide.mdx +++ /dev/null @@ -1,217 +0,0 @@ ---- -title: 'Bazel docs style guide' ---- - - - -Thank you for contributing to Bazel's documentation. This serves as a quick -documentation style guide to get you started. For any style questions not -answered by this guide, follow the -[Google developer documentation style guide](https://developers.google.com/style). - -## Defining principles - -Bazel docs should uphold these principles: - -- **Concise.** Use as few words as possible. -- **Clear.** Use plain language. Write without jargon for a fifth-grade - reading level. -- **Consistent.** Use the same words or phrases for repeated concepts - throughout the docs. -- **Correct.** Write in a way where the content stays correct for as long as - possible by avoiding time-based information and promises for the future. - -## Writing - -This section contains basic writing tips. - -### Headings - -- Page-level headings start at H2. (H1 headings are used as page titles.) -- Make headers as short as is sensible. This way, they fit in the TOC - without wrapping. 
-
-  - Yes: Permissions
-  - No: A brief note on permissions
-
-- Use sentence case for headings.
-
-  - Yes: Set up your workspace
-  - No: Set Up Your Workspace
-
-- Try to make headings task-based or actionable. If headings are conceptual,
-  they may be organized around understanding, but write to what the user does.
-
-  - Yes: Preserving graph order
-  - No: On the preservation of graph order
-
-### Names
-
-- Capitalize proper nouns, such as Bazel and Starlark.
-
-  - Yes: At the end of the build, Bazel prints the requested targets.
-  - No: At the end of the build, bazel prints the requested targets.
-
-- Keep it consistent. Don't introduce new names for existing concepts. Where
-  applicable, use the term defined in the
-  [Glossary](/reference/glossary).
-
-  - For example, if you're writing about issuing commands on a
-    terminal, don't use both terminal and command line on the page.
-
-### Page scope
-
-- Each page should have one purpose and that should be defined at the
-  beginning. This helps readers find what they need more quickly.
-
-  - Yes: This page covers how to install Bazel on Windows.
-  - No: (No introductory sentence.)
-
-- At the end of the page, tell the reader what to do next. For pages where
-  there is no clear action, you can include links to similar concepts,
-  examples, or other avenues for exploration.
-
-### Subject
-
-In Bazel documentation, the audience should primarily be users—the people using
-Bazel to build their software.
-
-- Address your reader as "you". (If for some reason you can't use "you",
-  use gender-neutral language, such as *they*.)
-  - Yes: To build Java code using Bazel,
-    you must install a JDK.
-  - **MAYBE:** For users to build Java code with Bazel, they must install a JDK.
-  - No: For a user to build Java code with
-    Bazel, he or she must install a JDK.
-
-- If your audience is NOT general Bazel users, define the audience at the
-  beginning of the page or in the section. Other audiences can include
-  maintainers, contributors, migrators, or other roles.
-- Avoid "we". In user docs, there is no author; just tell people what's
-  possible.
-  - Yes: As Bazel evolves, you should update your code base to maintain
-    compatibility.
-  - No: Bazel is evolving, and we will make changes to Bazel that at
-    times will be incompatible and require some changes from Bazel users.
-
-### Temporal
-
-Where possible, avoid terms that orient things in time, such as referencing
-specific dates (Q2 2022) or saying "now", "currently", or "soon." These go
-stale quickly and could be incorrect if it's a future projection. Instead,
-specify a version level, such as "Bazel X.x and higher supports
-\<feature\>", or link to a GitHub issue.
-
-- Yes: Bazel 0.10.0 or later supports
-  remote caching.
-- No: Bazel will soon support remote
-  caching, likely in October 2017.
-
-### Tense
-
-- Use present tense. Avoid past or future tense unless absolutely necessary
-  for clarity.
-  - Yes: Bazel issues an error when it
-    finds dependencies that don't conform to this rule.
-  - No: If Bazel finds a dependency that
-    does not conform to this rule, Bazel will issue an error.
-
-- Where possible, use active voice (where a subject acts upon an object) not
-  passive voice (where an object is acted upon by a subject). Generally,
-  active voice makes sentences clearer because it shows who is responsible. If
-  using active voice detracts from clarity, use passive voice.
-  - Yes: Bazel initiates X and uses the
-    output to build Y.
-  - No: X is initiated by Bazel and then
-    afterward Y will be built with the output.
-
-### Tone
-
-Write in a business-friendly tone.
-
-- Avoid colloquial language. It's harder to translate phrases that are
-  specific to English.
-  - Yes: Good rulesets
-  - No: So what is a good ruleset?
-
-- Avoid overly formal language. Write as though you're explaining the
-  concept to someone who is curious about tech, but doesn't know the details.
-
-## Formatting
-
-### File type
-
-For readability, wrap lines at 80 characters. Long links or code snippets
-may be longer, but should start on a new line.
-
-Note: Where possible, use Markdown instead of HTML in your files. Follow the
-[GitHub Markdown Syntax Guide](https://guides.github.com/features/mastering-markdown/#syntax)
-for recommended Markdown style.
-
-### Links
-
-- Use descriptive link text instead of "here" or "below". This practice
-  makes it easier to scan a doc and is better for screen readers.
-  - Yes: For more details, see [Installing Bazel].
-  - No: For more details, see [here].
-
-- End the sentence with the link, if possible.
-  - Yes: For more details, see [link].
-  - No: See [link] for more information.
-
-### Lists
-
-- Use an ordered list to describe how to accomplish a task with steps.
-- Use an unordered list to list things that aren't task based. (There should
-  still be an order of sorts, such as alphabetical, importance, etc.)
-- Write with parallel structure. For example:
-  1. Make all the list items sentences.
-  1. Start with verbs that are the same tense.
-  1. Use an ordered list if there are steps to follow.
-
-### Placeholders
-
-- Use angle brackets to denote a variable that users should change.
-  In Markdown, escape the angle brackets with a backslash: `\<example\>`.
-  - Yes: `bazel help <command>`: Prints
-    help and options for `<command>`
-  - No: bazel help _command_: Prints help
-    and options for "command"
-
-- Especially for complicated code samples, use placeholders that make sense
-  in context.
-
-### Table of contents
-
-Use the auto-generated TOC supported by the site. Don't add a manual TOC.
-
-## Code
-
-Code samples are developers' best friends. You probably know how to write these
-already, but here are a few tips.
-
-If you're referencing a small snippet of code, you can embed it in a sentence.
-If you want the reader to use the code, such as copying a command, use a code
-block.
-
-### Code blocks
-
-- Keep it short. Eliminate all redundant or unnecessary text from a code
-  sample.
-- In Markdown, specify the type of code block by adding the sample's language.
-
-```
-```shell
-...
-```
-
-- Separate commands and output into different code blocks.
-
-### Inline code formatting
-
-- Use code style for filenames, directories, paths, and small bits of code.
-- Use inline code styling instead of _italics_, "quotes," or **bolding**.
-  - Yes: `bazel help <command>`: Prints
-    help and options for `<command>`
-  - No: bazel help _command_: Prints help
-    and options for "command"
diff --git a/8.0.1/contribute/docs.mdx b/8.0.1/contribute/docs.mdx
deleted file mode 100644
index cc240cc..0000000
--- a/8.0.1/contribute/docs.mdx
+++ /dev/null
@@ -1,43 +0,0 @@
----
-title: 'Contribute to Bazel documentation'
----
-
-
-Thank you for contributing to Bazel's documentation! There are a few ways to
-help create better docs for our community.
-
-## Documentation types
-
-This site includes a few types of content.
-
- - *Narrative documentation*, which is written by technical writers and
-   engineers. Most of this site is narrative documentation that covers
-   conceptual and task-based guides.
- - *Reference documentation*, which is documentation generated from code
-   comments. You can't make changes to the reference doc pages directly, but
-   instead need to change their source.
-
-## Documentation infrastructure
-
-Bazel documentation is served from Google and the source files are mirrored in
-Bazel's GitHub repository. You can make changes to the source files in GitHub.
-If approved, you can merge the changes and a Bazel maintainer will update the
-website source to publish your updates.
-
-
-## Small changes
-
-You can approach small changes, such as fixing errors or typos, in a couple of
-ways.
-
- - **Pull request**. You can create a pull request in GitHub with the
-   [web-based editor](https://docs.github.com/repositories/working-with-files/managing-files/editing-files) or on a branch.
- - **Bug**. You can file a bug with details and suggested changes and the Bazel
-   documentation owners will make the update.
-
-## Large changes
-
-If you want to make substantial changes to existing documentation or propose
-new documentation, you can either create a pull request or start with a Google
-doc and contact the Bazel Owners to collaborate.
diff --git a/8.0.1/contribute/index.mdx b/8.0.1/contribute/index.mdx
deleted file mode 100644
index ee66772..0000000
--- a/8.0.1/contribute/index.mdx
+++ /dev/null
@@ -1,82 +0,0 @@
----
-title: 'Contributing to Bazel'
----
-
-
-There are many ways to help the Bazel project and ecosystem.
-
-## Provide feedback
-
-As you use Bazel, you may find things that can be improved.
-You can help by [reporting issues](http://github.com/bazelbuild/bazel/issues)
-when:
-
- - Bazel crashes or you encounter a bug that can [only be resolved using `bazel
-   clean`](/run/build#correct-incremental-rebuilds).
- - The documentation is incomplete or unclear. You can also report issues
-   from the page you are viewing by using the "Create issue"
-   link at the top right corner of the page.
- - An error message could be improved.
-
-## Participate in the community
-
-You can engage with the Bazel community by:
-
- - Answering questions [on Stack Overflow](
-   https://stackoverflow.com/questions/tagged/bazel).
- - Helping other users [on Slack](https://slack.bazel.build).
- - Improving documentation or [contributing examples](
-   https://github.com/bazelbuild/examples).
- - Sharing your experience or your tips, for example, on a blog or social media.
-
-## Contribute code
-
-Bazel is a large project and making a change to the Bazel source code
-can be difficult.
-
-You can contribute to the Bazel ecosystem by:
-
- - Helping rules maintainers by contributing pull requests.
- - Creating new rules and open-sourcing them.
- - Contributing to Bazel-related tools, for example, migration tools.
- - Improving Bazel integration with other IDEs and tools.
-
-Before making a change, [create a GitHub
-issue](http://github.com/bazelbuild/bazel/issues)
-or email [bazel-discuss@](mailto:bazel-discuss@googlegroups.com).
-
-The most helpful contributions fix bugs or add features (as opposed
-to stylistic, refactoring, or "cleanup" changes). Your change should
-include tests and documentation, keeping in mind backward-compatibility,
-portability, and the impact on memory usage and performance.
-
-To learn about how to submit a change, see the
-[patch acceptance process](/contribute/patch-acceptance).
-
-## Bazel's code description
-
-Bazel has a large codebase with code in multiple locations. See the [codebase guide](/contribute/codebase) for more details.
-
-Bazel is organized as follows:
-
-* Client code is in `src/main/cpp` and provides the command-line interface.
-* Protocol buffers are in `src/main/protobuf`.
-* Server code is in `src/main/java` and `src/test/java`.
-  * Core code, which is mostly composed of [SkyFrame](/reference/skyframe)
-    and some utilities.
-  * Built-in rules are in `com.google.devtools.build.lib.rules` and in
-    `com.google.devtools.build.lib.bazel.rules`. You might want to read about
-    the [Challenges of Writing Rules](/rules/challenges) first.
-* Java native interfaces are in `src/main/native`.
-* Various tooling for language support is described in the list in the
-  [compiling Bazel](/install/compile-source) section.
-
-
-### Searching Bazel's source code
-
-To quickly search through Bazel's source code, use
-[Bazel Code Search](https://source.bazel.build/). You can navigate Bazel's
-repositories, branches, and files. You can also view history, diffs, and blame
-information. To learn more, see the
-[Bazel Code Search User Guide](/contribute/search).
diff --git a/8.0.1/contribute/maintainers-guide.mdx b/8.0.1/contribute/maintainers-guide.mdx
deleted file mode 100644
index d5edf45..0000000
--- a/8.0.1/contribute/maintainers-guide.mdx
+++ /dev/null
@@ -1,213 +0,0 @@
----
-title: 'Guide for Bazel Maintainers'
----
-
-
-This is a guide for the maintainers of the Bazel open source project.
-
-If you are looking to contribute to Bazel, please read [Contributing to
-Bazel](/contribute) instead.
-
-The objectives of this page are to:
-
-1. Serve as the maintainers' source of truth for the project's contribution
-   process.
-1. Set expectations between the community contributors and the project
-   maintainers.
-
-Bazel's [core group of contributors](/contribute/policy) has dedicated
-subteams to manage aspects of the open source project. These are:
-
-* **Release Process**: Manage Bazel's release process.
-* **Green Team**: Grow a healthy ecosystem of rules and tools.
-* **Developer Experience Gardeners**: Encourage external contributions, review
-  issues and pull requests, and make our development workflow more open.
-
-## Releases
-
-* [Release Playbook](https://github.com/bazelbuild/continuous-integration/blob/master/docs/release-playbook.md)
-* [Testing local changes with downstream projects](https://github.com/bazelbuild/continuous-integration/blob/master/docs/downstream-testing.md)
-
-## Continuous Integration
-
-Read the Green team's guide to Bazel's CI infrastructure on the
-[bazelbuild/continuous-integration](https://github.com/bazelbuild/continuous-integration/blob/master/buildkite/README.md)
-repository.
-
-## Lifecycle of an Issue
-
-1. A user creates an issue by choosing one of the
-   [issue templates](https://github.com/bazelbuild/bazel/issues/new/choose)
-   and it enters the pool of [unreviewed open
-   issues](https://github.com/bazelbuild/bazel/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+-label%3Auntriaged+-label%3Ap2+-label%3Ap1+-label%3Ap3+-label%3Ap4+-label%3Ateam-Starlark+-label%3Ateam-Rules-CPP+-label%3Ateam-Rules-Java+-label%3Ateam-XProduct+-label%3Ateam-Android+-label%3Ateam-Apple+-label%3Ateam-Configurability++-label%3Ateam-Performance+-label%3Ateam-Rules-Server+-label%3Ateam-Core+-label%3Ateam-Rules-Python+-label%3Ateam-Remote-Exec+-label%3Ateam-Local-Exec+-label%3Ateam-Bazel).
-1. A member on the Developer Experience (DevEx) subteam rotation reviews the
-   issue.
-   1. If the issue is **not a bug** or a **feature request**, the DevEx member
-      will usually close the issue and redirect the user to
-      [StackOverflow](https://stackoverflow.com/questions/tagged/bazel) and
-      [bazel-discuss](https://groups.google.com/forum/#!forum/bazel-discuss) for
-      higher visibility on the question.
-   1. If the issue belongs in one of the rules repositories owned by the
-      community, like [rules_apple](https://github.com/bazelbuild/rules_apple),
-      the DevEx member will [transfer this issue](https://docs.github.com/en/free-pro-team@latest/github/managing-your-work-on-github/transferring-an-issue-to-another-repository)
-      to the correct repository.
-   1. If the issue is vague or has missing information, the DevEx member will
-      assign the issue back to the user to request more information before
-      continuing. This usually occurs when the user does not choose the right
-      [issue template](https://github.com/bazelbuild/bazel/issues/new/choose)
-      or provides incomplete information.
-1. After reviewing the issue, the DevEx member decides if the issue requires
-   immediate attention. If it does, they will assign the **P0**
-   [priority](#priority) label and an owner from the list of team leads.
-1. The DevEx member assigns the `untriaged` label and exactly one [team
-   label](#team-labels) for routing.
-1. The DevEx member also assigns exactly one `type:` label, such as `type: bug`
-   or `type: feature request`, according to the type of the issue.
-1. For platform-specific issues, the DevEx member assigns one `platform:` label,
-   such as `platform:apple` for Mac-specific issues.
-1. If the issue is low priority and can be worked on by a new community
-   contributor, the DevEx member assigns the `good first issue` label.
-At this stage, the issue enters the pool of [untriaged open
-issues](https://github.com/bazelbuild/bazel/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged).
-
-Each Bazel subteam will triage all issues under labels they own, preferably on a
-weekly basis. The subteam will review and evaluate the issue and provide a
-resolution, if possible. If you are an owner of a team label, see [this section
-](#label-own) for more information.
-
-When an issue is resolved, it can be closed.
-
-## Lifecycle of a Pull Request
-
-1. A user creates a pull request.
-1. If you are a member of a Bazel team and are sending a PR against your own
-   area, you are responsible for assigning your team label and finding the best
-   reviewer.
-1. Otherwise, during daily triage, a DevEx member assigns one
-   [team label](#team-labels) and the team's technical lead (TL) for routing.
-   1. The TL may optionally assign someone else to review the PR.
-1. The assigned reviewer reviews the PR and works with the author until it is
-   approved or dropped.
-1. If approved, the reviewer **imports** the PR's commit(s) into Google's
-   internal version control system for further tests. As Bazel is the same build
-   system used internally at Google, we need to test all PR commits against the
-   internal test suite. This is the reason why we do not merge PRs directly.
-1. If the imported commit passes all internal tests, the commit will be squashed
-   and exported back out to GitHub.
-1. When the commit merges into master, GitHub automatically closes the PR.
-
-
-## My team owns a label. What should I do?
-
-Subteams need to triage all issues in the [labels they own](#team-labels),
-preferably on a weekly basis.
-
-### Issues
-
-1. Filter the list of issues by your team label **and** the `untriaged` label.
-1. Review the issue.
-1. Identify a [priority level](#priority) and assign the label.
-   1. The issue may have already been prioritized by the DevEx subteam if it's a
-      P0. Re-prioritize if needed.
-   1. Each issue needs to have exactly one [priority label](#priority). If an
-      issue is either P0 or P1, we assume that it is actively being worked on.
-1. Remove the `untriaged` label.
-
-Note that you need to be in the [bazelbuild
-organization](https://github.com/bazelbuild) to be able to add or remove labels.
-
-### Pull Requests
-
-1. Filter the list of pull requests by your team label.
-1. Review open pull requests.
-   1. **Optional**: If you are assigned for the review but are not the right fit
-      for it, re-assign an appropriate reviewer to perform the code review.
-1. Work with the pull request creator to complete a code review.
-1. Approve the PR.
-1. Ensure that all tests pass.
-1. Import the patch to the internal version control system and run the internal
-   presubmits.
-1. Submit the internal patch. If the patch submits and exports successfully, the
-   PR will be closed automatically by GitHub.
-
-## Priority
-
-The following definitions for priority will be used by the maintainers to triage
-issues.
-
-* [**P0**](https://github.com/bazelbuild/bazel/labels/P0) - Major broken
-  functionality that causes a Bazel release (minus release candidates) to be
-  unusable, or a downed service that severely impacts development of the Bazel
-  project. This includes regressions introduced in a new release that blocks a
-  significant number of users, or an incompatible breaking change that was not
-  compliant to the [Breaking
-  Change](https://docs.google.com/document/d/1q5GGRxKrF_mnwtaPKI487P8OdDRh2nN7jX6U-FXnHL0/edit?pli=1#heading=h.ceof6vpkb3ik)
-  policy. No practical workaround exists.
-* [**P1**](https://github.com/bazelbuild/bazel/labels/P1) - Critical defect or
-  feature which should be addressed in the next release, or a serious issue that
-  impacts many users (including the development of the Bazel project), but a
-  practical workaround exists. Typically does not require immediate action. In
-  high demand and planned in the current quarter's roadmap.
-* [**P2**](https://github.com/bazelbuild/bazel/labels/P2) - Defect or feature
-  that should be addressed but that we are not currently working on. Moderate
-  live issue in a released Bazel version that is inconvenient for users and
-  needs to be addressed in a future release, or for which an easy workaround
-  exists.
-* [**P3**](https://github.com/bazelbuild/bazel/labels/P3) - Desirable minor bug
-  fix or enhancement with small impact. Not prioritized into Bazel roadmaps or
-  any imminent release; however, community contributions are encouraged.
-* [**P4**](https://github.com/bazelbuild/bazel/labels/P4) - Low priority defect
-  or feature request that is unlikely to get closed. Can also be kept open for a
-  potential re-prioritization if more users are impacted.
-* [**ice-box**](https://github.com/bazelbuild/bazel/issues?q=label%3Aice-box+is%3Aclosed)
-  - Issues that we currently don't have time to deal with, nor the time to
-  accept contributions for. We will close these issues to indicate that
-  nobody is working on them, but will continue to monitor their validity over
-  time and revive them if enough people are impacted and if we happen to have
-  resources to deal with them. As always, feel free to comment or add reactions
-  to these issues even when closed.
-
-## Team labels
-
-* [`team-Android`](https://github.com/bazelbuild/bazel/labels/team-Android): Issues for Android team
-  * Contact: [ahumesky](https://github.com/ahumesky)
-* [`team-Bazel`](https://github.com/bazelbuild/bazel/labels/team-Bazel): General Bazel product/strategy issues
-  * Contact: [meisterT](https://github.com/meisterT)
-* [`team-CLI`](https://github.com/bazelbuild/bazel/labels/team-CLI): Console UI
-  * Contact: [meisterT](https://github.com/meisterT)
-* [`team-Configurability`](https://github.com/bazelbuild/bazel/labels/team-Configurability): Issues for Configurability team. Includes: Core build configuration and transition system. Does *not* include: Changes to new or existing flags
-  * Contact: [gregestren](https://github.com/gregestren)
-* [`team-Core`](https://github.com/bazelbuild/bazel/labels/team-Core): Skyframe, bazel query, BEP, options parsing, bazelrc
-  * Contact: [haxorz](https://github.com/haxorz)
-* [`team-Documentation`](https://github.com/bazelbuild/bazel/labels/team-Documentation): Issues for Documentation team
-* [`team-ExternalDeps`](https://github.com/bazelbuild/bazel/labels/team-ExternalDeps): External dependency handling, Bzlmod, remote repositories, WORKSPACE file
-  * Contact: [meteorcloudy](https://github.com/meteorcloudy)
-* [`team-Loading-API`](https://github.com/bazelbuild/bazel/labels/team-Loading-API): BUILD file and macro processing: labels, package(), visibility, glob
-  * Contact: [brandjon](https://github.com/brandjon)
-* [`team-Local-Exec`](https://github.com/bazelbuild/bazel/labels/team-Local-Exec): Issues for Execution (Local) team
-  * Contact: [meisterT](https://github.com/meisterT)
-* [`team-OSS`](https://github.com/bazelbuild/bazel/labels/team-OSS): Issues for Bazel OSS team: installation, release process, Bazel packaging, website, docs infrastructure
-  * Contact: [meteorcloudy](https://github.com/meteorcloudy)
-* [`team-Performance`](https://github.com/bazelbuild/bazel/labels/team-Performance): Issues for Bazel Performance team
-  * Contact: [meisterT](https://github.com/meisterT)
-* [`team-Remote-Exec`](https://github.com/bazelbuild/bazel/labels/team-Remote-Exec): Issues for Execution (Remote) team
-  * Contact: [coeuvre](https://github.com/coeuvre)
-* [`team-Rules-API`](https://github.com/bazelbuild/bazel/labels/team-Rules-API): API for writing rules/aspects: providers, runfiles, actions, artifacts
-  * Contact: [comius](https://github.com/comius)
-* [`team-Rules-CPP`](https://github.com/bazelbuild/bazel/labels/team-Rules-CPP) / [`team-Rules-ObjC`](https://github.com/bazelbuild/bazel/labels/team-Rules-ObjC): Issues for C++/Objective-C rules, including native Apple rule logic
-  * Contact: [buildbreaker2021](https://github.com/buildbreaker2021)
-* [`team-Rules-Java`](https://github.com/bazelbuild/bazel/labels/team-Rules-Java): Issues for Java rules
-  * Contact: [hvadehra](https://github.com/hvadehra)
-* [`team-Rules-Python`](https://github.com/bazelbuild/bazel/labels/team-Rules-Python): Issues for the native Python rules
-  * Contact: [rickeylev](https://github.com/rickeylev)
-* [`team-Rules-Server`](https://github.com/bazelbuild/bazel/labels/team-Rules-Server): Issues for server-side rules included with Bazel
-  * Contact: [comius](https://github.com/comius)
-* [`team-Starlark-Integration`](https://github.com/bazelbuild/bazel/labels/team-Starlark-Integration): Non-API Bazel + Starlark integration. Includes: how Bazel triggers the Starlark interpreter, Stardoc, builtins injection, character encoding.
-  Does *not* include: BUILD or .bzl language issues.
-  * Contact: [brandjon](https://github.com/brandjon)
-* [`team-Starlark-Interpreter`](https://github.com/bazelbuild/bazel/labels/team-Starlark-Interpreter): Issues for the Starlark interpreter (anything in [java.net.starlark](https://github.com/bazelbuild/bazel/tree/master/src/main/java/net/starlark/java)). BUILD and .bzl API issues (which represent Bazel's *integration* with Starlark) go in `team-Build-Language`.
-  * Contact: [brandjon](https://github.com/brandjon)
-
-For new issues, we deprecated the `category: *` labels in favor of the team
-labels.
-
-See the full list of labels [here](https://github.com/bazelbuild/bazel/labels).
diff --git a/8.0.1/contribute/naming.mdx b/8.0.1/contribute/naming.mdx
deleted file mode 100644
index 144b08a..0000000
--- a/8.0.1/contribute/naming.mdx
+++ /dev/null
@@ -1,73 +0,0 @@
----
-title: 'Naming a Bazel related project'
----
-
-
-First, thank you for contributing to the Bazel ecosystem! Please reach out to
-the Bazel community on the
-[bazel-discuss mailing list](https://groups.google.com/forum/#!forum/bazel-discuss
-) to share your project and its suggested name.
-
-If you are building a Bazel related tool or sharing your Starlark rules,
-we recommend following these guidelines for the name of your project:
-
-## Naming Starlark rules
-
-See [Deploying new Starlark rules](/rules/deploying)
-in the docs.
-
-## Naming other Bazel related tools
-
-This section applies if you are building a tool to enrich the Bazel ecosystem.
-For example, a new IDE plugin or a new build system migrator.
-
-Picking a good name for your tool can be hard. If we're not careful and use too
-many codenames, the Bazel ecosystem could become very difficult to understand
-for newcomers.
-
-Follow these guidelines for naming Bazel tools:
-
-1. Prefer **not introducing a new brand name**: "*Bazel*" is already a new brand
-for our users; we should avoid confusing them with too many new names.
-
-2. Prefer **using a name that includes "Bazel"**: This helps to express that it
-is a Bazel related tool, and it also helps people find it with a search engine.
-
-3. Prefer **using names that are descriptive about what the tool is doing**:
-Ideally, the name should not need a subtitle for users to have a first good
-guess at what the tool does. Using English words separated by spaces is a good
-way to achieve this.
-
-4. **It is not a requirement to use a floral or food theme**: Bazel evokes
-[basil](https://en.wikipedia.org/wiki/Basil), the plant. You do not need to
-look for a name that is a plant, food or that relates to "basil."
-
-5. **If your tool relates to another third party brand, use it only as a
-descriptor**: For example, use "Bazel migrator for CMake" instead of
-"CMake Bazel migrator".
-
-These guidelines also apply to the GitHub repository URL. Reading the repository
-URL should help people understand what the tool does. Of course, the repository
-name can be shorter, and it must use lowercase letters and dashes instead of
-spaces.
-
-
-Examples of good names:
-
-* *Bazel for Eclipse*: Users will understand that if they want to use Bazel
-  with Eclipse, this is where they should be looking. It uses a third party brand
-  as a descriptor.
-* *Bazel buildfarm*: A "buildfarm" is a
-  [compile farm](https://en.wikipedia.org/wiki/Compile_farm). Users
-  will understand that this project relates to building on servers.
-
-Examples of names to avoid:
-
-* *Ocimum*: The [scientific name of basil](https://en.wikipedia.org/wiki/Ocimum)
-  does not relate enough to the Bazel project.
-* *Bazelizer*: The tool behind this name could do a lot of things; the name is
-  not descriptive enough.
-
-Note that these recommendations are aligned with the
-[guidelines](https://opensource.google.com/docs/releasing/preparing/#name)
-Google uses when open sourcing a project.
diff --git a/8.0.1/contribute/patch-acceptance.mdx b/8.0.1/contribute/patch-acceptance.mdx
deleted file mode 100644
index 87376af..0000000
--- a/8.0.1/contribute/patch-acceptance.mdx
+++ /dev/null
@@ -1,52 +0,0 @@
----
-title: 'Patch Acceptance Process'
----
-
-
-This page outlines how contributors can propose and make changes to the Bazel
-code base.
-
-1. Read the [Bazel Contribution policy](/contribute/policy).
-1. Create a [GitHub issue](https://github.com/bazelbuild/bazel/) to
-   discuss your plan and design. Pull requests that change or add behavior
-   need a corresponding issue for tracking.
-1. If you're proposing significant changes, write a
-   [design document](/contribute/design-documents).
-1. Ensure you've signed a [Contributor License
-   Agreement](https://cla.developers.google.com).
-1. Prepare a git commit that implements the feature. Don't forget to add tests
-   and update the documentation. If your change has user-visible effects, please
-   [add release notes](/contribute/release-notes). If it is an incompatible change,
-   read the [guide for rolling out breaking changes](/contribute/breaking-changes).
-1. Create a pull request on
-   [GitHub](https://github.com/bazelbuild/bazel/pulls). If you're new to GitHub,
-   read [about pull
-   requests](https://help.github.com/articles/about-pull-requests/). Note that
-   we restrict permissions to create branches on the main Bazel repository, so
-   you will need to push your commit to [your own fork of the
-   repository](https://help.github.com/articles/working-with-forks/).
-1. A Bazel maintainer should assign you a reviewer within two business days
-   (excluding holidays in the USA and Germany). If you aren't assigned a
-   reviewer in that time, you can request one by emailing
-   [bazel-discuss@googlegroups.com](mailto:bazel-discuss@googlegroups.com).
-1. Work with the reviewer to complete a code review. For each change, create a
-   new commit and push it to make changes to your pull request. If the review
-   takes too long (for instance, if the reviewer is unresponsive), send an email to
-   [bazel-discuss@googlegroups.com](mailto:bazel-discuss@googlegroups.com).
-1. After your review is complete, a Bazel maintainer applies your patch to
-   Google's internal version control system.
-
-   This triggers internal presubmit checks
-   that may suggest more changes. If you haven't expressed a preference, the
-   maintainer submitting your change adds "trivial" changes (such as
-   [linting](https://en.wikipedia.org/wiki/Lint_(software))) that don't affect
-   design. If deeper changes are required or you'd prefer to apply
-   changes directly, you and the reviewer should communicate preferences
-   clearly in review comments.
-
-   After internal submission, the patch is exported as a Git commit,
-   at which point the GitHub pull request is closed. All final changes
-   are attributed to you.
diff --git a/8.0.1/contribute/policy.mdx b/8.0.1/contribute/policy.mdx
deleted file mode 100644
index 1bf0029..0000000
--- a/8.0.1/contribute/policy.mdx
+++ /dev/null
@@ -1,78 +0,0 @@
----
-translation: human
-page_type: lcat
-title: 'Contribution policy'
----
-
-
-This page covers Bazel's governance model and contribution policy.
-
-## Governance model
-
-The [Bazel project](https://github.com/bazelbuild) is led and managed by Google
-and has a large community of contributors outside of Google. Some Bazel
-components (such as specific rules repositories under the
-[bazelbuild](https://github.com/bazelbuild) organization) are led,
-maintained, and managed by members of the community. The Google Bazel team
-reviews suggestions to add community-owned repositories (such as rules) to the
-[bazelbuild](https://github.com/bazelbuild) GitHub organization.
-
-### Contributor roles
-
-Here is an outline of the roles in the Bazel project and their
-responsibilities:
-
-* **Owners**: The Google Bazel team. Owners are responsible for:
-  * Strategy, maintenance, and leadership of the Bazel project.
-  * Building and maintaining Bazel's core functionality.
-  * Appointing Maintainers and approving new repositories.
-* **Maintainers**: The Google Bazel team and designated GitHub users.
-  Maintainers are responsible for:
-  * Building and maintaining the primary functionality of their repository.
-  * Reviewing and approving contributions to areas of the Bazel code base.
-  * Supporting users and contributors with timely and transparent issue
-    management, PR review, and documentation.
-  * Releasing, testing, and collaborating with Bazel Owners.
-* **Contributors**: All users who contribute code or documentation to the
-  Bazel project.
-  * Creating well-written PRs to contribute to Bazel's codebase and
-    documentation.
-  * Using standard channels, such as GitHub Issues, to propose changes and
-    report issues.
-
-### Becoming a Maintainer
-
-Bazel Owners may appoint Maintainers to lead well-defined areas of code, such as
-rule sets. Contributors with a record of consistent, responsible past
-contributions who plan to make major contributions in the future may be
-considered for a Maintainer role.
-
-## Contribution policy
-
-The Bazel project accepts contributions from external contributors. Here are the
-contribution policies for Google-managed and community-managed areas of code.
-
-* **Licensing**. All Maintainers and Contributors must sign
-  [Google's Contributor License Agreement](https://cla.developers.google.com/clas).
-* **Contributions**. Owners and Maintainers should make every effort to accept
-  worthwhile contributions. All contributions must be:
-  * Well written and well tested.
-  * Discussed and approved by the Maintainers of the relevant area of code.
-    Discussions and approvals happen on GitHub Issues and in GitHub PRs.
-    Larger contributions require a
-    [design review](/contribute/design-documents).
-  * Added to Bazel's Continuous Integration system if not already present.
-  * Supportable and aligned with Bazel product direction.
-* **Code review**. All changes in all `bazelbuild` repositories require
-  review:
-  * All PRs must be approved by an Owner or Maintainer.
-  * Only Owners and Maintainers can merge PRs.
-* **Compatibility**. Owners may need to reject or request modifications to PRs
-  in the unlikely event that the change requires substantial modifications to
-  internal Google systems.
-* **Documentation**. Where relevant, feature contributions should include
-  documentation updates.
-
-For more details on contributing to Bazel, see our
-[contribution guidelines](/contribute/).
diff --git a/8.0.1/contribute/release-notes.mdx b/8.0.1/contribute/release-notes.mdx
deleted file mode 100644
index 83e1d75..0000000
--- a/8.0.1/contribute/release-notes.mdx
+++ /dev/null
@@ -1,78 +0,0 @@
----
-title: 'Writing release notes'
----
-
-
-This document is targeted at Bazel contributors.
-
-Commit descriptions in Bazel include a `RELNOTES:` tag followed by a release
-note. This is used by the Bazel team to track changes in each release and write
-the release announcement.
-
-## Overview
-
-* Is your change a bugfix? In that case, you don't need a release note. Please
-  include a reference to the GitHub issue.
-
-* If the change adds / removes / changes Bazel in a user-visible way, then it
-  may be advantageous to mention it.
-
-If the change is significant, follow the [design document
-policy](/contribute/design-documents) first.
-
-## Guidelines
-
-The release notes will be read by our users, so they should be short (ideally
-one sentence), avoid jargon (Bazel-internal terminology), and focus on what the
-change is about.
-
-* Include a link to the relevant documentation. Almost any release note should
-  contain a link. If the description mentions a flag, a feature, or a command
-  name, users will probably want to know more about it.
-
-* Use backquotes around code, symbols, flags, or any word containing an
-  underscore.
-
-* Do not just copy and paste bug descriptions. They are often cryptic and only
-  make sense to us, leaving the user scratching their head. Release notes are
-  meant to explain what has changed and why in user-understandable language.
-
-* Always use present tense and the format "Bazel now supports Y" or "X now does
-  Z." We don't want our release notes to sound like bug entries. All release
-  note entries should be informative and use a consistent style and language.
-
-* If something has been deprecated or removed, use "X has been deprecated" or "X
-  has been removed." Not "is removed" or "was removed."
-
-* If Bazel now does something differently, use "X now $newBehavior instead of
-  $oldBehavior" in present tense. This lets the user know in detail what to
-  expect when they use the new release.
-
-* If Bazel now supports or no longer supports something, use "Bazel now supports
-  / no longer supports X".
-
-* Explain why something has been removed / deprecated / changed. One sentence is
-  enough, but we want the user to be able to evaluate the impact on their builds.
-
-* Do NOT make any promises about future functionality. Avoid "this flag will be
-  removed" or "this will be changed." It introduces uncertainty. The first thing
-  the user will wonder is "when?" and we don't want them to start worrying about
-  their current builds breaking at some unknown time.
-
-## Process
-
-As part of the [release
-process](https://github.com/bazelbuild/continuous-integration/blob/master/docs/release-playbook.md),
-we collect the `RELNOTES` tags of every commit. We copy everything in a [Google
-Doc](https://docs.google.com/document/d/1wDvulLlj4NAlPZamdlEVFORks3YXJonCjyuQMUQEmB0/edit)
-where we review, edit, and organize the notes.
-
-The release manager sends an email to the
-[bazel-dev](https://groups.google.com/forum/#!forum/bazel-dev) mailing list.
-Bazel contributors are invited to contribute to the document and make sure
-their changes are correctly reflected in the announcement.
-
-Later, the announcement will be submitted to the [Bazel
-blog](https://blog.bazel.build/), using the [bazel-blog
-repository](https://github.com/bazelbuild/bazel-blog/tree/master/_posts).
diff --git a/8.0.1/contribute/statemachine-guide.mdx b/8.0.1/contribute/statemachine-guide.mdx
deleted file mode 100644
index e98a96e..0000000
--- a/8.0.1/contribute/statemachine-guide.mdx
+++ /dev/null
@@ -1,1236 +0,0 @@
----
-title: 'A Guide to Skyframe `StateMachine`s'
----
-
-
-## Overview
-
-A Skyframe `StateMachine` is a *deconstructed* function-object that resides on
-the heap. It supports flexible evaluation without redundancy[^1] when
-required values are not immediately available but computed asynchronously. The
-`StateMachine` cannot tie up a thread resource while waiting, but instead has to
-be suspended and resumed. The deconstruction thus exposes explicit re-entry
-points so that prior computations can be skipped.
-
-`StateMachine`s can be used to express sequences, branching, structured logical
-concurrency and are tailored specifically for Skyframe interaction.
-`StateMachine`s can be composed into larger `StateMachine`s and share
-sub-`StateMachine`s. Concurrency is always hierarchical by construction and
-purely logical. Every concurrent subtask runs in the single shared parent
-SkyFunction thread.
-
-## Introduction
-
-This section briefly motivates and introduces `StateMachine`s, found in the
-[`java.com.google.devtools.build.skyframe.state`](https://github.com/bazelbuild/bazel/tree/master/src/main/java/com/google/devtools/build/skyframe/state)
-package.
-
-### A brief introduction to Skyframe restarts
-
-Skyframe is a framework that performs parallel evaluation of dependency graphs.
-Each node in the graph corresponds to the evaluation of a SkyFunction with a
-SkyKey specifying its parameters and a SkyValue specifying its result. The
-computational model is such that a SkyFunction may look up SkyValues by SkyKey,
-triggering recursive, parallel evaluation of additional SkyFunctions. Instead of
-blocking, which would tie up a thread, when a requested SkyValue is not yet
-ready because some subgraph of computation is incomplete, the requesting
-SkyFunction observes a `null` `getValue` response and should return `null`
-instead of a SkyValue, signaling that it is incomplete due to missing inputs.
-Skyframe *restarts* the SkyFunctions when all previously requested SkyValues
-become available.
-
-Before the introduction of `SkyKeyComputeState`, the traditional way of handling
-a restart was to fully rerun the computation. Although this has quadratic
-complexity, functions written this way eventually complete because on each
-rerun, fewer lookups return `null`. With `SkyKeyComputeState` it is possible to
-associate hand-specified checkpoint data with a SkyFunction, saving significant
-recomputation.
-
-`StateMachine`s are objects that live inside `SkyKeyComputeState` and eliminate
-virtually all recomputation when a SkyFunction restarts (assuming that
-`SkyKeyComputeState` does not fall out of cache) by exposing suspend and resume
-execution hooks.
-
-### Stateful computations inside `SkyKeyComputeState`
-
-From an object-oriented design standpoint, it makes sense to consider storing
-computational objects inside `SkyKeyComputeState` instead of pure data values.
-In *Java*, the bare minimum description of a behavior-carrying object is a
-*functional interface* and it turns out to be sufficient. A `StateMachine` has
-the following, curiously recursive, definition[^2].
-
-```
-@FunctionalInterface
-public interface StateMachine {
-  StateMachine step(Tasks tasks) throws InterruptedException;
-}
-```
-
-The `Tasks` interface is analogous to `SkyFunction.Environment` but it is
-designed for asynchrony and adds support for logically concurrent subtasks[^3].
-
-The return value of `step` is another `StateMachine`, allowing the specification
-of a sequence of steps, inductively. `step` returns `DONE` when the
-`StateMachine` is done. For example:
-
-```
-class HelloWorld implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    System.out.println("hello");
-    return this::step2;  // The next step is HelloWorld.step2.
-  }
-
-  private StateMachine step2(Tasks tasks) {
-    System.out.println("world");
-    // DONE is a special value defined in the `StateMachine` interface signaling
-    // that the computation is done.
-    return DONE;
-  }
-}
-```
-
-describes a `StateMachine` with the following output.
-
-```
-hello
-world
-```
-
-Note that the method reference `this::step2` is also a `StateMachine` due to
-`step2` satisfying `StateMachine`'s functional interface definition. Method
-references are the most common way to specify the next state in a
-`StateMachine`.
-
-![Suspending and resuming](/contribute/images/suspend-resume.svg)
-
-Intuitively, breaking a computation down into `StateMachine` steps, instead of a
-monolithic function, provides the hooks needed to *suspend* and *resume* a
-computation. When `StateMachine.step` returns, there is an explicit *suspension*
-point. The continuation specified by the returned `StateMachine` value is an
-explicit *resume* point. Recomputation can thus be avoided because the
-computation can be picked up exactly where it left off.
-
-### Callbacks, continuations and asynchronous computation
-
-In technical terms, a `StateMachine` serves as a *continuation*, determining the
-subsequent computation to be executed. Instead of blocking, a `StateMachine` can
-voluntarily *suspend* by returning from the `step` function, which transfers
-control back to a [`Driver`](#drivers-and-bridging) instance. The `Driver` can
-then switch to a ready `StateMachine` or relinquish control back to Skyframe.
-
-Traditionally, *callbacks* and *continuations* are conflated into one concept.
-However, `StateMachine`s maintain a distinction between the two.
-
-* *Callback* - describes where to store the result of an asynchronous
-  computation.
-* *Continuation* - specifies the next execution state.
-
-Callbacks are required when invoking an asynchronous operation, which means that
-the actual operation doesn't occur immediately upon calling the method, as in
-the case of a SkyValue lookup. Callbacks should be kept as simple as possible.
-
-Caution: A common pitfall of callbacks is that the asynchronous computation must
-ensure the callback is called by the end of every reachable path. It's possible
-to overlook some branches and the compiler doesn't give warnings about this.
-
-*Continuations* are the `StateMachine` return values of `StateMachine`s and
-encapsulate the complex execution that follows once all asynchronous
-computations resolve. This structured approach helps to keep the complexity of
-callbacks manageable.
-
-## Tasks
-
-The `Tasks` interface provides `StateMachine`s with an API to look up SkyValues
-by SkyKey and to schedule concurrent subtasks.
-
-```
-interface Tasks {
-  void enqueue(StateMachine subtask);
-
-  void lookUp(SkyKey key, Consumer<SkyValue> sink);
-
-  <E extends Exception>
-  void lookUp(SkyKey key, Class<E> exceptionClass, ValueOrExceptionSink<E> sink);
-
-  // lookUp overloads for 2 and 3 exception types exist, but are elided here.
-}
-```
-
-Tip: When any state uses the `Tasks` interface to perform lookups or create
-subtasks, those lookups and subtasks will complete before the next state begins.
-
-Tip: (Corollary) If subtasks are complex `StateMachine`s or recursively create
-subtasks, they all *transitively* complete before the next state begins.
-
-### SkyValue lookups
-
-`StateMachine`s use `Tasks.lookUp` overloads to look up SkyValues. They are
-analogous to `SkyFunction.Environment.getValue` and
-`SkyFunction.Environment.getValueOrThrow` and have similar exception handling
-semantics. The implementation does not immediately perform the lookup, but
-instead batches[^4] as many lookups as possible before doing so. The value
-might not be immediately available, for example, requiring a Skyframe restart,
-so the caller specifies what to do with the resulting value using a callback.
-
-The `StateMachine` processor ([`Driver`s and bridging to
-SkyFrame](#drivers-and-bridging)) guarantees that the value is available before
-the next state begins. An example follows.
-
-```
-class DoesLookup implements StateMachine, Consumer<SkyValue> {
-  private Value value;
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    tasks.lookUp(new Key(), (Consumer<SkyValue>) this);
-    return this::processValue;
-  }
-
-  // The `lookUp` call in `step` causes this to be called before `processValue`.
-  @Override  // Implementation of Consumer<SkyValue>.
-  public void accept(SkyValue value) {
-    this.value = (Value) value;
-  }
-
-  private StateMachine processValue(Tasks tasks) {
-    System.out.println(value);  // Prints the string representation of `value`.
-    return DONE;
-  }
-}
-```
-
-In the above example, the first step does a lookup for `new Key()`, passing
-`this` as the consumer. That is possible because `DoesLookup` implements
-`Consumer<SkyValue>`.
-
-Tip: When passing `this` as a value sink, it's helpful to readers to upcast it
-to the receiver type to narrow down the purpose of passing `this`. The example
-passes `(Consumer<SkyValue>) this`.
-
-By contract, before the next state `DoesLookup.processValue` begins, all the
-lookups of `DoesLookup.step` are complete. Therefore `value` is available when
-it is accessed in `processValue`.
-
-### Subtasks
-
-`Tasks.enqueue` requests the execution of logically concurrent subtasks.
-Subtasks are also `StateMachine`s and can do anything regular `StateMachine`s
-can do, including recursively creating more subtasks or looking up SkyValues.
-Much like `lookUp`, the state machine driver ensures that all subtasks are
-complete before proceeding to the next step. An example follows.
-
-```
-class Subtasks implements StateMachine {
-  private int i = 0;
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    tasks.enqueue(new Subtask1());
-    tasks.enqueue(new Subtask2());
-    // The next step is Subtasks.processResults. It won't be called until both
-    // Subtask1 and Subtask2 are complete.
-    return this::processResults;
-  }
-
-  private StateMachine processResults(Tasks tasks) {
-    System.out.println(i);  // Prints "3".
-    return DONE;  // Subtasks is done.
-  }
-
-  private class Subtask1 implements StateMachine {
-    @Override
-    public StateMachine step(Tasks tasks) {
-      i += 1;
-      return DONE;  // Subtask1 is done.
-    }
-  }
-
-  private class Subtask2 implements StateMachine {
-    @Override
-    public StateMachine step(Tasks tasks) {
-      i += 2;
-      return DONE;  // Subtask2 is done.
-    }
-  }
-}
-```
-
-Though `Subtask1` and `Subtask2` are logically concurrent, everything runs in a
-single thread so the "concurrent" update of `i` does not need any
-synchronization.
-
-### Structured concurrency
-
-Because every `lookUp` and `enqueue` must resolve before advancing to the next
-state, concurrency is naturally limited to tree structures. It's possible to
-create hierarchical[^5] concurrency as shown in the following example.
-
-![Structured Concurrency](/contribute/images/structured-concurrency.svg)
-
-It's hard to tell from the *UML* that the concurrency structure forms a tree.
-There's an [alternate view](#concurrency-tree-diagram) that better shows the
-tree structure.
-
-![Unstructured Concurrency](/contribute/images/unstructured-concurrency.svg)
-
-Structured concurrency is much easier to reason about.
-
-## Composition and control flow patterns
-
-This section presents examples for how multiple `StateMachine`s can be composed
-and solutions to certain control flow problems.
-
-### Sequential states
-
-This is the most common and straightforward control flow pattern. An example of
-this is shown in [Stateful computations inside
-`SkyKeyComputeState`](#stateful-computations).
-
-### Branching
-
-Branching states in `StateMachine`s can be achieved by returning different
-values using regular *Java* control flow, as shown in the following example.
-
-```
-class Branch implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    // Returns different state machines, depending on condition.
-    if (shouldUseA()) {
-      return this::performA;
-    }
-    return this::performB;
-  }
-  …
-}
-```
-
-It's very common for certain branches to return `DONE` for early completion.
-
-### Advanced sequential composition
-
-Since the `StateMachine` control structure is memoryless, sharing `StateMachine`
-definitions as subtasks can sometimes be awkward. Let *M1* and
-*M2* be `StateMachine` instances that share a `StateMachine`, *S*,
-with *M1* and *M2* being the sequences *<A, S, B>* and
-*<X, S, Y>* respectively. The problem is that *S* doesn't know whether to
-continue to *B* or *Y* after it completes and `StateMachine`s don't quite keep a
-call stack. This section reviews some techniques for achieving this.
-
-#### `StateMachine` as terminal sequence element
-
-This doesn't solve the initial problem posed. It only demonstrates sequential
-composition when the shared `StateMachine` is terminal in the sequence.
-
-```
-// S is the shared state machine.
-class S implements StateMachine { … }
-
-class M1 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performA();
-    return new S();
-  }
-}
-
-class M2 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performX();
-    return new S();
-  }
-}
-```
-
-This works even if *S* is itself a complex state machine.
-
-#### Subtask for sequential composition
-
-Since enqueued subtasks are guaranteed to complete before the next state, it's
-sometimes possible to slightly abuse[^6] the subtask mechanism.
-
-```
-class M1 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performA();
-    // S starts after `step` returns and by contract must complete before `doB`
-    // begins. It is effectively sequential, inducing the sequence < A, S, B >.
-    tasks.enqueue(new S());
-    return this::doB;
-  }
-
-  private StateMachine doB(Tasks tasks) {
-    performB();
-    return DONE;
-  }
-}
-
-class M2 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performX();
-    // Similarly, this induces the sequence < X, S, Y>.
-    tasks.enqueue(new S());
-    return this::doY;
-  }
-
-  private StateMachine doY(Tasks tasks) {
-    performY();
-    return DONE;
-  }
-}
-```
-
-#### `runAfter` injection
-
-Sometimes, abusing `Tasks.enqueue` is impossible because there are other
-parallel subtasks or `Tasks.lookUp` calls that must be completed before *S*
-executes. In this case, injecting a `runAfter` parameter into *S* can be used to
-inform *S* of what to do next.
-
-```
-class S implements StateMachine {
-  // Specifies what to run after S completes.
-  private final StateMachine runAfter;
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    … // Performs some computations.
-    return this::processResults;
-  }
-
-  @Nullable
-  private StateMachine processResults(Tasks tasks) {
-    … // Does some additional processing.
-
-    // Executes the state machine defined by `runAfter` after S completes.
-    return runAfter;
-  }
-}
-
-class M1 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performA();
-    // Passes `this::doB` as the `runAfter` parameter of S, resulting in the
-    // sequence < A, S, B >.
-    return new S(/* runAfter= */ this::doB);
-  }
-
-  private StateMachine doB(Tasks tasks) {
-    performB();
-    return DONE;
-  }
-}
-
-class M2 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performX();
-    // Passes `this::doY` as the `runAfter` parameter of S, resulting in the
-    // sequence < X, S, Y >.
-    return new S(/* runAfter= */ this::doY);
-  }
-
-  private StateMachine doY(Tasks tasks) {
-    performY();
-    return DONE;
-  }
-}
-```
-
-This approach is cleaner than abusing subtasks. However, applying this too
-liberally, for example, by nesting multiple `StateMachine`s with `runAfter`, is
-the road to [Callback Hell](#callback-hell). It's better to break up sequential
-`runAfter`s with ordinary sequential states instead.
-
-```
-  return new S(/* runAfter= */ new T(/* runAfter= */ this::nextStep))
-```
-
-can be replaced with the following.
-
-```
-  private StateMachine step1(Tasks tasks) {
-    doStep1();
-    return new S(/* runAfter= */ this::intermediateStep);
-  }
-
-  private StateMachine intermediateStep(Tasks tasks) {
-    return new T(/* runAfter= */ this::nextStep);
-  }
-```
-
-Note: It's possible to pass `DONE` as the `runAfter` parameter when there's
-nothing to run afterwards.
-
-Tip: When using `runAfter`, always annotate the parameter with `/* runAfter= */`
-to let the reader know the meaning at the callsite.
-
-#### *Forbidden* alternative: `runAfterUnlessError`
-
-In an earlier draft, we had considered a `runAfterUnlessError` that would abort
-early on errors. This was motivated by the fact that errors often end up getting
-checked twice, once by the `StateMachine` that has a `runAfter` reference and
-once by the `runAfter` machine itself.
-
-After some deliberation, we decided that uniformity of the code is more
-important than deduplicating the error checking. It would be confusing if the
-`runAfter` mechanism did not work in a consistent manner with the
-`tasks.enqueue` mechanism, which always requires error checking.
-
-Warning: When using `runAfter`, the machine that has the injected `runAfter`
-should invoke it unconditionally at completion, even on error, for consistency.
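-
-To make the warning concrete, the following sketch (not from the real code
-base; `performWork`, `hasError`, `getError`, and the `ResultSink` callback are
-hypothetical stand-ins) shows one way a machine can honor this convention:
-report the error eagerly through a callback, but return `runAfter` on every
-path.
-
-```
-class S implements StateMachine {
-  // Hypothetical callback used to report an error to the caller.
-  interface ResultSink {
-    void acceptError(MyException error);
-  }
-
-  private final StateMachine runAfter;
-  private final ResultSink sink;
-
-  S(StateMachine runAfter, ResultSink sink) {
-    this.runAfter = runAfter;
-    this.sink = sink;
-  }
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performWork();  // Hypothetical computation that may record an error.
-    if (hasError()) {
-      sink.acceptError(getError());  // Reports the error eagerly...
-    }
-    // ...but still returns `runAfter` unconditionally, mirroring
-    // `tasks.enqueue`, which always requires error checking in the machine
-    // that follows.
-    return runAfter;
-  }
-}
-```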
-
-### Direct delegation
-
-Each time there is a formal state transition, the main `Driver` loop advances.
-As per contract, advancing states means that all previously enqueued SkyValue
-lookups and subtasks resolve before the next state executes. Sometimes the logic
-of a delegate `StateMachine` makes a phase advance unnecessary or
-counterproductive. For example, if the first `step` of the delegate performs
-SkyKey lookups that could be parallelized with lookups of the delegating state
-then a phase advance would make them sequential. It could make more sense to
-perform direct delegation, as shown in the example below.
-
-```
-class Parent implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    tasks.lookUp(new Key1(), this);
-    // Directly delegates to `Delegate`.
-    //
-    // The (valid) alternative:
-    //   return new Delegate(this::afterDelegation);
-    // would cause `Delegate.step` to execute after `step` completes which would
-    // cause lookups of `Key1` and `Key2` to be sequential instead of parallel.
-    return new Delegate(this::afterDelegation).step(tasks);
-  }
-
-  private StateMachine afterDelegation(Tasks tasks) {
-    …
-  }
-}
-
-class Delegate implements StateMachine {
-  private final StateMachine runAfter;
-
-  Delegate(StateMachine runAfter) {
-    this.runAfter = runAfter;
-  }
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    tasks.lookUp(new Key2(), this);
-    return …;
-  }
-
-  // Rest of implementation.
-  …
-
-  private StateMachine complete(Tasks tasks) {
-    …
-    return runAfter;
-  }
-}
-```
-
-## Data flow
-
-The focus of the previous discussion has been on managing control flow. This
-section describes the propagation of data values.
-
-### Implementing `Tasks.lookUp` callbacks
-
-There's an example of implementing a `Tasks.lookUp` callback in [SkyValue
-lookups](#skyvalue-lookups). This section provides rationale and suggests
-approaches for handling multiple SkyValues.
-
-#### `Tasks.lookUp` callbacks
-
-The `Tasks.lookUp` method takes a callback, `sink`, as a parameter.
-
-```
-  void lookUp(SkyKey key, Consumer<SkyValue> sink);
-```
-
-The idiomatic approach would be to use a *Java* lambda to implement this:
-
-```
-  tasks.lookUp(key, value -> myValue = (MyValueClass) value);
-```
-
-with `myValue` being a member variable of the `StateMachine` instance doing the
-lookup. However, the lambda requires an extra memory allocation compared to
-implementing the `Consumer<SkyValue>` interface in the `StateMachine`
-implementation. The lambda is still useful when there are multiple lookups that
-would be ambiguous.
-
-Note: Bikeshed warning. There is a noticeable difference of approximately 1%
-end-to-end CPU usage when implementing callbacks systematically in
-`StateMachine` implementations compared to using lambdas, which makes this
-recommendation debatable. To avoid unnecessary debates, it is advised to leave
-the decision up to the individual implementing the solution.
-
-There are also error handling overloads of `Tasks.lookUp` that are analogous to
-`SkyFunction.Environment.getValueOrThrow`.
-
-```
-  <E extends Exception>
-  void lookUp(
-      SkyKey key, Class<E> exceptionClass, ValueOrExceptionSink<E> sink);
-
-  interface ValueOrExceptionSink<E extends Exception> {
-    void acceptValueOrException(@Nullable SkyValue value, @Nullable E exception);
-  }
-```
-
-An example implementation is shown below.
-
-```
-class PerformLookupWithError implements StateMachine, ValueOrExceptionSink<MyException> {
-  private MyValue value;
-  private MyException error;
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    tasks.lookUp(new MyKey(), MyException.class, (ValueOrExceptionSink<MyException>) this);
-    return this::processResult;
-  }
-
-  @Override
-  public void acceptValueOrException(@Nullable SkyValue value, @Nullable MyException exception) {
-    if (value != null) {
-      this.value = (MyValue) value;
-      return;
-    }
-    if (exception != null) {
-      this.error = exception;
-      return;
-    }
-    throw new IllegalArgumentException("Both parameters were unexpectedly null.");
-  }
-
-  private StateMachine processResult(Tasks tasks) {
-    if (error != null) {
-      // Handles the error.
-      …
-      return DONE;
-    }
-    // Processes `value`, which is non-null.
-    …
-  }
-}
-```
-
-As with lookups without error handling, having the `StateMachine` class directly
-implement the callback saves a memory allocation for the lambda.
-
-[Error handling](#error-handling) provides a bit more detail, but essentially,
-there's not much difference between the propagation of errors and normal values.
-
-#### Consuming multiple SkyValues
-
-Multiple SkyValue lookups are often required. An approach that works much of the
-time is to switch on the type of SkyValue. The following is an example that has
-been simplified from prototype production code.
-
-```
-  @Nullable
-  private StateMachine fetchConfigurationAndPackage(Tasks tasks) {
-    var configurationKey = configuredTarget.getConfigurationKey();
-    if (configurationKey != null) {
-      tasks.lookUp(configurationKey, (Consumer<SkyValue>) this);
-    }
-
-    var packageId = configuredTarget.getLabel().getPackageIdentifier();
-    tasks.lookUp(PackageValue.key(packageId), (Consumer<SkyValue>) this);
-
-    return this::constructResult;
-  }
-
-  @Override  // Implementation of `Consumer<SkyValue>`.
-  public void accept(SkyValue value) {
-    if (value instanceof BuildConfigurationValue) {
-      this.configurationValue = (BuildConfigurationValue) value;
-      return;
-    }
-    if (value instanceof PackageValue) {
-      this.pkg = ((PackageValue) value).getPackage();
-      return;
-    }
-    throw new IllegalArgumentException("unexpected value: " + value);
-  }
-```
-
-The `Consumer<SkyValue>` callback implementation can be shared unambiguously
-because the value types are different. When that’s not the case, falling back to
-lambda-based implementations or full inner-class instances that implement the
-appropriate callbacks is viable.
-
-### Propagating values between `StateMachine`s
-
-So far, this document has only explained how to arrange work in a subtask, but
-subtasks also need to report values back to the caller. Since subtasks are
-logically asynchronous, their results are communicated back to the caller using
-a *callback*. To make this work, the subtask defines a sink interface that is
-injected via its constructor.
-
-```
-class BarProducer implements StateMachine {
-  // Callers of BarProducer implement the following interface to accept its
-  // results. Exactly one of the two methods will be called by the time
-  // BarProducer completes.
-  interface ResultSink {
-    void acceptBarValue(Bar value);
-    void acceptBarError(BarException exception);
-  }
-
-  private final ResultSink sink;
-
-  BarProducer(ResultSink sink) {
-    this.sink = sink;
-  }
-
-  … // StateMachine steps that end with this::complete.
-
-  private StateMachine complete(Tasks tasks) {
-    if (hasError()) {
-      sink.acceptBarError(getError());
-      return DONE;
-    }
-    sink.acceptBarValue(getValue());
-    return DONE;
-  }
-}
-```
-
-Tip: It would be tempting to use the more concise signature `void accept(Bar
-value)` rather than the stuttery `void acceptBarValue(Bar value)` above.
-However, `Consumer<Bar>` is a common overload of `void accept(Bar value)`,
-so doing this often leads to violations of the [Overloads: never
-split](https://google.github.io/styleguide/javaguide.html#s3.4.2-ordering-class-contents)
-style-guide rule.
-
-Tip: Using a custom `ResultSink` type instead of a generic one from
-`java.util.function` makes it easy to find implementations in the code base,
-improving readability.
-
-A caller `StateMachine` would then look like the following.
-
-```
-class Caller implements StateMachine, BarProducer.ResultSink {
-  interface ResultSink {
-    void acceptCallerValue(Bar value);
-    void acceptCallerError(BarException error);
-  }
-
-  private final ResultSink sink;
-
-  private Bar value;
-
-  Caller(ResultSink sink) {
-    this.sink = sink;
-  }
-
-  @Override
-  @Nullable
-  public StateMachine step(Tasks tasks) {
-    tasks.enqueue(new BarProducer((BarProducer.ResultSink) this));
-    return this::processResult;
-  }
-
-  @Override
-  public void acceptBarValue(Bar value) {
-    this.value = value;
-  }
-
-  @Override
-  public void acceptBarError(BarException error) {
-    sink.acceptCallerError(error);
-  }
-
-  private StateMachine processResult(Tasks tasks) {
-    // Since all enqueued subtasks resolve before `processResult` starts, one of
-    // the `BarProducer.ResultSink` callbacks must have been called by this point.
-    if (value == null) {
-      return DONE;  // There was a previously reported error.
-    }
-    var finalResult = computeResult(value);
-    sink.acceptCallerValue(finalResult);
-    return DONE;
-  }
-}
-```
-
-The preceding example demonstrates a few things. `Caller` has to propagate its
-results back and defines its own `Caller.ResultSink`. `Caller` implements the
-`BarProducer.ResultSink` callbacks. Upon resumption, `processResult` checks if
-`value` is null to determine if an error occurred. This is a common behavior
-pattern after accepting output from either a subtask or SkyValue lookup.
-
-Note that the implementation of `acceptBarError` eagerly forwards the result to
-the `Caller.ResultSink`, as required by [Error bubbling](#error-bubbling).
-
-Alternatives for top-level `StateMachine`s are described in [`Driver`s and
-bridging to SkyFunctions](#drivers-and-bridging).
-
-### Error handling
-
-There are a couple of examples of error handling already in [`Tasks.lookUp`
-callbacks](#tasks-lookup-callbacks) and [Propagating values between
-`StateMachine`s](#propagating-values). Exceptions, other than
-`InterruptedException`, are not thrown, but instead passed around through
-callbacks as values. Such callbacks often have exclusive-or semantics, with
-exactly one of a value or error being passed.
-
-The next section describes a subtle, but important interaction with Skyframe
-error handling.
-
-#### Error bubbling (--nokeep\_going)
-
-Warning: Errors need to be eagerly propagated all the way back to the
-SkyFunction for error bubbling to function correctly.
-
-During error bubbling, a SkyFunction may be restarted even if not all requested
-SkyValues are available. In such cases, the subsequent state will never be
-reached due to the `Tasks` API contract. However, the `StateMachine` should
-still propagate the exception.
-
-Since propagation must occur regardless of whether the next state is reached,
-the error handling callback must perform this task. For an inner `StateMachine`,
-this is achieved by invoking the parent callback.
-
-At the top-level `StateMachine`, which interfaces with the SkyFunction, this can
-be done by calling the `setException` method of `ValueOrExceptionProducer`.
-`ValueOrExceptionProducer.tryProduceValue` will then throw the exception, even
-if there are missing SkyValues.
-
-If a `Driver` is being utilized directly, it is essential to check for
-propagated errors from the SkyFunction, even if the machine has not finished
-processing.
-
-### Event Handling
-
-For SkyFunctions that need to emit events, a `StoredEventHandler` is injected
-into `SkyKeyComputeState` and further injected into `StateMachine`s that require
-them. Historically, the `StoredEventHandler` was needed because Skyframe dropped
-certain events unless they were replayed, but this was subsequently fixed.
-`StoredEventHandler` injection is preserved because it simplifies the
-implementation of events emitted from error handling callbacks.
-
-## `Driver`s and bridging to SkyFunctions
-
-A `Driver` is responsible for managing the execution of `StateMachine`s,
-beginning with a specified root `StateMachine`. As `StateMachine`s can
-recursively enqueue subtask `StateMachine`s, a single `Driver` can manage
-numerous subtasks. These subtasks create a tree structure, a result of
-[Structured concurrency](#structured-concurrency). The `Driver` batches SkyValue
-lookups across subtasks for improved efficiency.
-
-There are a number of classes built around the `Driver`, with the following API.
-
-```
-public final class Driver {
-  public Driver(StateMachine root);
-  public boolean drive(SkyFunction.Environment env) throws InterruptedException;
-}
-```
-
-`Driver` takes a single root `StateMachine` as a parameter. Calling
-`Driver.drive` executes the `StateMachine` as far as it can go without a
-Skyframe restart. It returns true when the `StateMachine` completes and false
-otherwise, indicating that not all values were available.
-
-`Driver` maintains the concurrent state of the `StateMachine` and it is well
-suited for embedding in `SkyKeyComputeState`.
-
-### Directly instantiating `Driver`
-
-`StateMachine` implementations conventionally communicate their results via
-callbacks. It's possible to directly instantiate a `Driver` as shown in the
-following example.
-
-The `Driver` is embedded in the `SkyKeyComputeState` implementation along with
-an implementation of the corresponding `ResultSink` to be defined a bit further
-down. At the top level, the `State` object is an appropriate receiver for the
-result of the computation as it is guaranteed to outlive `Driver`.
-
-```
-class State implements SkyKeyComputeState, ResultProducer.ResultSink {
-  // The `Driver` instance, containing the full tree of all `StateMachine`
-  // states. Responsible for calling `StateMachine.step` implementations when
-  // asynchronous values are available and performing batched Skyframe lookups.
-  //
-  // Non-null while `result` is being computed.
-  private Driver resultProducer;
-
-  // Variable for storing the result of the `StateMachine`.
-  //
-  // Will be non-null after the computation completes.
-  private ResultType result;
-
-  // Implements `ResultProducer.ResultSink`.
-  //
-  // `ResultProducer` propagates its final value through a callback that is
-  // implemented here.
-  @Override
-  public void acceptResult(ResultType result) {
-    this.result = result;
-  }
-}
-```
-
-The code below sketches the `ResultProducer`.
-
-```
-class ResultProducer implements StateMachine {
-  interface ResultSink {
-    void acceptResult(ResultType value);
-  }
-
-  private final Parameters parameters;
-  private final ResultSink sink;
-
-  … // Other internal state.
-
-  ResultProducer(Parameters parameters, ResultSink sink) {
-    this.parameters = parameters;
-    this.sink = sink;
-  }
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    …  // Implementation.
-    return this::complete;
-  }
-
-  private StateMachine complete(Tasks tasks) {
-    sink.acceptResult(getResult());
-    return DONE;
-  }
-}
-```
-
-Then the code for lazily computing the result could look like the following.
-
-```
-@Nullable
-private Result computeResult(State state, SkyFunction.Environment env)
-    throws InterruptedException {
-  if (state.result != null) {
-    return state.result;
-  }
-  if (state.resultProducer == null) {
-    state.resultProducer = new Driver(new ResultProducer(
-        new Parameters(), (ResultProducer.ResultSink) state));
-  }
-  if (state.resultProducer.drive(env)) {
-    // Clears the `Driver` instance as it is no longer needed.
-    state.resultProducer = null;
-  }
-  return state.result;
-}
-```
-
-### Embedding `Driver`
-
-If the `StateMachine` produces a value and raises no exceptions, embedding
-`Driver` is another possible implementation, as shown in the following example.
-
-```
-class ResultProducer implements StateMachine {
-  private final Parameters parameters;
-  private final Driver driver;
-
-  private ResultType result;
-
-  ResultProducer(Parameters parameters) {
-    this.parameters = parameters;
-    this.driver = new Driver(this);
-  }
-
-  @Nullable  // Null when a Skyframe restart is needed.
-  public ResultType tryProduceValue(SkyFunction.Environment env)
-      throws InterruptedException {
-    if (!driver.drive(env)) {
-      return null;
-    }
-    return result;
-  }
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    …  // Implementation.
-  }
-}
-```
-
-The SkyFunction may have code that looks like the following (where `State` is
-the function-specific type of `SkyKeyComputeState`).
-
-```
-@Nullable  // Null when a Skyframe restart is needed.
-Result computeResult(SkyFunction.Environment env, State state)
-    throws InterruptedException {
-  if (state.result != null) {
-    return state.result;
-  }
-  if (state.resultProducer == null) {
-    state.resultProducer = new ResultProducer(new Parameters());
-  }
-  var result = state.resultProducer.tryProduceValue(env);
-  if (result == null) {
-    return null;
-  }
-  state.resultProducer = null;
-  return state.result = result;
-}
-```
-
-Embedding `Driver` in the `StateMachine` implementation is a better fit for
-Skyframe's synchronous coding style.
-
-### StateMachines that may produce exceptions
-
-Otherwise, there are `SkyKeyComputeState`-embeddable `ValueOrExceptionProducer`
-and `ValueOrException2Producer` classes that have synchronous APIs to match
-synchronous SkyFunction code.
-
-The `ValueOrExceptionProducer` abstract class includes the following methods.
-
-```
-public abstract class ValueOrExceptionProducer<V, E extends Exception>
-    implements StateMachine {
-  @Nullable
-  public final V tryProduceValue(Environment env)
-      throws InterruptedException, E {
-    …  // Implementation.
-  }
-
-  protected final void setValue(V value) { … // Implementation. }
-
-  protected final void setException(E exception) { … // Implementation. }
-}
-```
-
-It includes an embedded `Driver` instance and closely resembles the
-`ResultProducer` class in [Embedding driver](#embedding-driver) and interfaces
-with the SkyFunction in a similar manner. Instead of defining a `ResultSink`,
-implementations call `setValue` or `setException` when either of those occur.
-When both occur, the exception takes priority. The `tryProduceValue` method
-bridges the asynchronous callback code to synchronous code and throws an
-exception when one is set.
-
-As previously noted, during error bubbling, it's possible for an error to occur
-even if the machine is not yet done because not all inputs are available. To
-accommodate this, `tryProduceValue` throws any set exceptions, even before the
-machine is done.
-
-## Epilogue: Eventually removing callbacks
-
-`StateMachine`s are a highly efficient but boilerplate-intensive way to perform
-asynchronous computation. Continuations (particularly in the form of `Runnable`s
-passed to `ListenableFuture`) are widespread in certain parts of *Bazel* code,
-but aren't prevalent in analysis SkyFunctions. Analysis is mostly CPU-bound and
-there are no efficient asynchronous APIs for disk I/O. Eventually, it would be
-good to optimize away callbacks as they have a learning curve and impede
-readability.
-
-One of the most promising alternatives is *Java* virtual threads. Instead of
-having to write callbacks, everything is replaced with synchronous, blocking
-calls. This is possible because tying up a virtual thread resource, unlike a
-platform thread, is supposed to be cheap. However, even with virtual threads,
-replacing simple synchronous operations with thread creation and synchronization
-primitives is too expensive. We performed a migration from `StateMachine`s to
-*Java* virtual threads and the migrated code was orders of magnitude slower,
-leading to almost a 3x increase in end-to-end analysis latency. Since virtual
-threads are still a preview feature, it's possible that this migration can be
-performed at a later date when performance improves.
-
-Another approach to consider is waiting for *Loom* coroutines, if they ever
-become available. The advantage here is that it might be possible to reduce
-synchronization overhead by using cooperative multitasking.
-
-If all else fails, low-level bytecode rewriting could also be a viable
-alternative. With enough optimization, it might be possible to achieve
-performance that approaches hand-written callback code.
-
-## Appendix
-
-### Callback Hell
-
-Callback hell is an infamous problem in asynchronous code that uses callbacks.
-It stems from the fact that the continuation for a subsequent step is nested
-within the previous step. If there are many steps, this nesting can be extremely
-deep. If coupled with control flow, the code becomes unmanageable.
-
-```
-class CallbackHell implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    doA();
-    return t -> {
-      doB();
-      return t1 -> {
-        doC();
-        return DONE;
-      };
-    };
-  }
-}
-```
-
-One of the advantages of nested implementations is that the stack frame of the
-outer step can be preserved. In *Java*, captured lambda variables must be
-effectively final, so using such variables can be cumbersome. Deep nesting is
-avoided by returning method references as continuations instead of lambdas, as
-shown below.
-
-```
-class CallbackHellAvoided implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    doA();
-    return this::step2;
-  }
-
-  private StateMachine step2(Tasks tasks) {
-    doB();
-    return this::step3;
-  }
-
-  private StateMachine step3(Tasks tasks) {
-    doC();
-    return DONE;
-  }
-}
-```
-
-Callback hell may also occur if the [`runAfter` injection](#runafter-injection)
-pattern is used too densely, but this can be avoided by interspersing injections
-with sequential steps.
-
-#### Example: Chained SkyValue lookups
-
-It is often the case that the application logic requires dependent chains of
-SkyValue lookups, for example, if a second SkyKey depends on the first SkyValue.
-Thinking about this naively, this would result in a complex, deeply nested
-callback structure.
-
-```
-private ValueType1 value1;
-private ValueType2 value2;
-
-private StateMachine step1(...) {
-  tasks.lookUp(key1, (Consumer<SkyValue>) this);  // key1 has type KeyType1.
-  return this::step2;
-}
-
-@Override
-public void accept(SkyValue value) {
-  this.value1 = (ValueType1) value;
-}
-
-private StateMachine step2(...) {
-  KeyType2 key2 = computeKey(value1);
-  tasks.lookUp(key2, this::acceptValueType2);
-  return this::step3;
-}
-
-private void acceptValueType2(SkyValue value) {
-  this.value2 = (ValueType2) value;
-}
-```
-
-However, since continuations are specified as method references, the code looks
-procedural across state transitions: `step2` follows `step1`. Note that here, a
-method reference is used to assign `value2`. This makes the ordering of the code
-match the ordering of the computation from top-to-bottom.
-
-### Miscellaneous Tips
-
-#### Readability: Execution Ordering
-
-To improve readability, strive to keep the `StateMachine.step` implementations
-in execution order and callback implementations immediately following where they
-are passed in the code. This isn't always possible where the control flow
-branches. Additional comments might be helpful in such cases.
-
-In [Example: Chained SkyValue lookups](#chained-skyvalue-lookups), an
-intermediate method reference is created to achieve this. This trades a small
-amount of performance for readability, which is likely worthwhile here.
-
-#### Generational Hypothesis
-
-Medium-lived *Java* objects break the generational hypothesis of the *Java*
-garbage collector, which is designed to handle objects that live for a very
-short time or objects that live forever. By definition, objects in
-`SkyKeyComputeState` violate this hypothesis. Such objects, containing the
-constructed tree of all still-running `StateMachine`s, rooted at `Driver`, have
-an intermediate lifespan as they suspend, waiting for asynchronous computations
-to complete.
-
-It seems less bad in JDK19, but when using `StateMachine`s, it's sometimes
-possible to observe an increase in GC time, even with dramatic decreases in
-actual garbage generated. Since `StateMachine`s have an intermediate lifespan,
-they could be promoted to old gen, causing it to fill up more quickly, thus
-necessitating more expensive major or full GCs to clean up.
-
-The initial precaution is to minimize the use of `StateMachine` variables, but
-it is not always feasible, for example, if a value is needed across multiple
-states. Where it is possible, local stack `step` variables are young generation
-variables and efficiently GC'd, as the sketch below illustrates.
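-
-In this illustrative sketch (`LargeIntermediate` and the helper methods are
-hypothetical), the step-local form is preferable whenever the value does not
-need to survive past the current state.
-
-```
-class Example implements StateMachine {
-  // Member variable: lives as long as the machine and may be promoted to old
-  // gen while the machine is suspended on asynchronous work.
-  private LargeIntermediate member;
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    // Step-local variable: unreachable as soon as `step` returns, so it stays
-    // in the young generation and is cheaply collected.
-    LargeIntermediate local = computeIntermediate();
-    emitSummary(local.summarize());
-    return DONE;
-  }
-}
-```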
-
-For `StateMachine` variables, breaking things down into subtasks and following
-the recommended pattern for [Propagating values between
-`StateMachine`s](#propagating-values) is also helpful. Observe that when
-following the pattern, only child `StateMachine`s have references to parent
-`StateMachine`s and not vice versa. This means that as children complete and
-update the parents using result callbacks, the children naturally fall out of
-scope and become eligible for GC.
-
-Finally, in some cases, a `StateMachine` variable is needed in earlier states
-but not in later states. It can be beneficial to null out references of large
-objects once it is known that they are no longer needed.
-
-#### Naming states
-
-When naming a method, it's usually possible to name a method for the behavior
-that happens within that method. It's less clear how to do this in
-`StateMachine`s because there is no stack. For example, suppose method `foo`
-calls a sub-method `bar`. In a `StateMachine`, this could be translated into the
-state sequence `foo`, followed by `bar`. `foo` no longer includes the behavior
-`bar`. As a result, method names for states tend to be narrower in scope,
-potentially reflecting local behavior.
-
-### Concurrency tree diagram
-
-The following is an alternative view of the diagram in [Structured
-concurrency](#structured-concurrency) that better depicts the tree structure.
-The blocks form a small tree.
-
-![Structured Concurrency 3D](/contribute/images/structured-concurrency-3d.svg)
-
-[^1]: In contrast to Skyframe's convention of restarting from the beginning when
-    values are not available.
-[^2]: Note that `step` is permitted to throw `InterruptedException`, but the
-    examples omit this. There are a few low-level methods in *Bazel* code that
-    throw this exception and it propagates up to the `Driver`, to be described
-    later, that runs the `StateMachine`. It's fine to not declare it to be
-    thrown when unneeded.
-[^3]: Concurrent subtasks were motivated by the `ConfiguredTargetFunction` which
-    performs *independent* work for each dependency. Instead of manipulating
-    complex data structures that process all the dependencies at once,
-    introducing inefficiencies, each dependency has its own independent
-    `StateMachine`.
-[^4]: Multiple `tasks.lookUp` calls within a single step are batched together.
-    Additional batching can be created by lookups occurring within concurrent
-    subtasks.
-[^5]: This is conceptually similar to Java’s structured concurrency
-    [jeps/428](https://openjdk.org/jeps/428).
-[^6]: Doing this is similar to spawning a thread and joining it to achieve
-    sequential composition.
diff --git a/8.0.1/contribute/windows-chocolatey-maintenance.mdx b/8.0.1/contribute/windows-chocolatey-maintenance.mdx
deleted file mode 100644
index c6aee8f..0000000
--- a/8.0.1/contribute/windows-chocolatey-maintenance.mdx
+++ /dev/null
@@ -1,72 +0,0 @@
----
-title: 'Maintaining Bazel Chocolatey package on Windows'
----
-
-
-
-Note: The Chocolatey package is experimental; please provide feedback
-(`@petemounce` in the issue tracker).
-
-## Prerequisites
-
-You need:
-
-* [chocolatey package manager](https://chocolatey.org) installed
-* (to publish) a chocolatey API key granting you permission to publish the
-  `bazel` package
-  * [@petemounce](https://github.com/petemounce) currently
    maintains this unofficial package.
-* (to publish) to have set up that API key for the chocolatey source locally
-  via `choco apikey -k <your key> -s https://chocolatey.org/`
-
-## Build
-
-Compile bazel with msys2 shell and `compile.sh`.
-
-```powershell
-pushd scripts/packages/chocolatey
-  ./build.ps1 -version 0.3.2 -mode local
-popd
-```
-
-Should result in `scripts/packages/chocolatey/bazel.<version>.nupkg` being
-created.
-
-The `build.ps1` script supports `mode` values `local`, `rc` and `release`.
-
-## Test
-
-0. Build the package (with `-mode local`)
-
-   * run a webserver (`python -m SimpleHTTPServer` in
-     `scripts/packages/chocolatey` is convenient and starts one on
-     `http://localhost:8000`)
-
-0. Test the install
-
-   The `test.ps1` should install the package cleanly (and error if it did not
-   install cleanly), then tell you what to do next.
-
-0. Test the uninstall
-
-   ```sh
-   choco uninstall bazel
-   # should remove bazel from the system
-   ```
-
-Chocolatey's moderation process automates checks here as well.
-
-## Release
-
-Modify `tools/parameters.json` for the new release's URI and checksum once the
-release has been published to GitHub releases.
-
-```powershell
-./build.ps1 -version <version> -isRelease
-./test.ps1 -version <version>
-# if the test.ps1 passes
-choco push bazel.x.y.z.nupkg --source https://chocolatey.org/
-```
-
-Chocolatey.org will then run automated checks and respond to the push via email
-to the maintainers.
diff --git a/8.0.1/contribute/windows-scoop-maintenance.mdx b/8.0.1/contribute/windows-scoop-maintenance.mdx
deleted file mode 100644
index 58e2a6c..0000000
--- a/8.0.1/contribute/windows-scoop-maintenance.mdx
+++ /dev/null
@@ -1,55 +0,0 @@
----
-title: 'Maintaining Bazel Scoop package on Windows'
----
-
-
-
-Note: The Scoop package is experimental. To provide feedback, go to
-`@excitoon` in the issue tracker.
-
-## Prerequisites
-
-You need:
-
-* [Scoop package manager](https://scoop.sh/) installed
-* GitHub account in order to publish and create pull requests to
-  [scoopinstaller/scoop-main](https://github.com/scoopinstaller/scoop-main)
-  * [@excitoon](https://github.com/excitoon) currently maintains this
-    unofficial package. Feel free to ask questions by
-    [e-mail](mailto:vladimir.chebotarev@gmail.com) or
-    [Telegram](http://telegram.me/excitoon).
-
-## Release process
-
-Scoop packages are very easy to maintain. Once you have the URL of released
-Bazel, you need to make appropriate changes in
-[this file](https://github.com/scoopinstaller/scoop-main/blob/master/bucket/bazel.json):
-
-- update version
-- update dependencies if needed
-- update URL
-- update hash (`sha256` by default)
-
-In your filesystem, `bazel.json` is located in the directory
-`%UserProfile%/scoop/buckets/main/bucket` by default. This directory belongs to
-your clone of the Git repository
-[scoopinstaller/scoop-main](https://github.com/scoopinstaller/scoop-main).
-
-Test the result:
-
-```
-scoop uninstall bazel
-scoop install bazel
-bazel version
-bazel something_else
-```
-
-The first time, make a fork of
-[scoopinstaller/scoop-main](https://github.com/scoopinstaller/scoop-main) and
-specify it as your own remote for `%UserProfile%/scoop/buckets/main`:
-
-```
-git remote add mine FORK_URL
-```
-
-Push your changes to your fork and create a pull request.
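-
-For orientation, the fields you touch in `bucket/bazel.json` typically look
-like the sketch below. The values are illustrative placeholders, not a real
-release; check the current manifest for the exact set of fields.
-
-```json
-{
-    "version": "x.y.z",
-    "url": "https://github.com/bazelbuild/bazel/releases/download/x.y.z/bazel-x.y.z-windows-x86_64.exe",
-    "hash": "<sha256 of the release binary>",
-    "bin": "bazel.exe"
-}
-```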
diff --git a/8.0.1/docs/android-build-performance.mdx b/8.0.1/docs/android-build-performance.mdx
deleted file mode 100644
index 0d5edc7..0000000
--- a/8.0.1/docs/android-build-performance.mdx
+++ /dev/null
@@ -1,97 +0,0 @@
----
-title: 'Android Build Performance'
----
-
-
-
-This page contains information on optimizing build performance for Android
-apps specifically. For general build performance optimization with Bazel, see
-[Optimizing Performance](/rules/performance).
-
-## Recommended flags
-
-The flags are in the
-[`bazelrc` configuration syntax](/run/bazelrc#bazelrc-syntax-semantics), so
-they can be pasted directly into a `bazelrc` file and invoked with
-`--config=<config_name>` on the command line.
-
-**Profiling performance**
-
-Bazel writes a JSON trace profile by default to a file called
-`command.profile.gz` in Bazel's output base.
-See the [JSON Profile documentation](/rules/performance#performance-profiling) for
-how to read and interact with the profile.
-
-**Persistent workers for Android build actions**
-
-A subset of Android build actions has support for
-[persistent workers](https://blog.bazel.build/2015/12/10/java-workers.html).
-
-These actions' mnemonics are:
-
-* DexBuilder
-* Javac
-* Desugar
-* AaptPackage
-* AndroidResourceParser
-* AndroidResourceValidator
-* AndroidResourceCompiler
-* RClassGenerator
-* AndroidResourceLink
-* AndroidAapt2
-* AndroidAssetMerger
-* AndroidResourceMerger
-* AndroidCompiledResourceMerger
-
-Enabling workers can result in better build performance by saving on JVM
-startup costs from invoking each of these tools, but at the cost of increased
-memory usage on the system by persisting them.
-
-To enable workers for these actions, apply these flags with
-`--config=android_workers` on the command line:
-
-```
-build:android_workers --strategy=DexBuilder=worker
-build:android_workers --strategy=Javac=worker
-build:android_workers --strategy=Desugar=worker
-
-# A wrapper flag for these resource processing actions:
-# - AndroidResourceParser
-# - AndroidResourceValidator
-# - AndroidResourceCompiler
-# - RClassGenerator
-# - AndroidResourceLink
-# - AndroidAapt2
-# - AndroidAssetMerger
-# - AndroidResourceMerger
-# - AndroidCompiledResourceMerger
-build:android_workers --persistent_android_resource_processor
-```
-
-The default number of persistent workers created per action is `4`. We have
-[measured improved build performance](https://github.com/bazelbuild/bazel/issues/8586#issuecomment-500070549)
-by capping the number of instances for each action to `1` or `2`, although this
-may vary depending on the system Bazel is running on, and the project being
-built.
-
-To cap the number of instances for an action, apply these flags:
-
-```
-build:android_workers --worker_max_instances=DexBuilder=2
-build:android_workers --worker_max_instances=Javac=2
-build:android_workers --worker_max_instances=Desugar=2
-build:android_workers --worker_max_instances=AaptPackage=2
-# .. and so on for each action you're interested in.
-```
-
-**Using AAPT2**
-
-[`aapt2`](https://developer.android.com/studio/command-line/aapt2) has improved
-performance over `aapt` and also creates smaller APKs. To use `aapt2`, use the
-`--android_aapt=aapt2` flag or set `aapt2` on the `aapt_version` attribute of
-`android_binary` and `android_local_test`.
-
-**SSD optimizations**
-
-The `--experimental_multi_threaded_digest` flag is useful for optimizing digest
-computation on SSDs.
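-
-For example, if your builds run on SSD-backed storage, you might add the
-following to the same `bazelrc` (a sketch; benchmark on your own hardware
-before adopting it):
-
-```
-build:ssd_optimizations --experimental_multi_threaded_digest
-```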
diff --git a/8.0.1/docs/android-instrumentation-test.mdx b/8.0.1/docs/android-instrumentation-test.mdx deleted file mode 100644 index bf0ff76..0000000 --- a/8.0.1/docs/android-instrumentation-test.mdx +++ /dev/null @@ -1,579 +0,0 @@ ---- -title: 'Android Instrumentation Tests' ---- - - - -_If you're new to Bazel, start with the [Building Android with -Bazel](/start/android-app ) tutorial._ - -![Running Android instrumentation tests in parallel](/docs/images/android_test.gif "Android instrumentation test") - -**Figure 1.** Running parallel Android instrumentation tests. - -[`android_instrumentation_test`](/reference/be/android#android_instrumentation_test) -allows developers to test their apps on Android emulators and devices. -It utilizes real Android framework APIs and the Android Test Library. - -For hermeticity and reproducibility, Bazel creates and launches Android -emulators in a sandbox, ensuring that tests always run from a clean state. Each -test gets an isolated emulator instance, allowing tests to run in parallel -without passing states between them. - -For more information on Android instrumentation tests, check out the [Android -developer -documentation](https://developer.android.com/training/testing/unit-testing/instrumented-unit-tests.html). - -Please file issues in the [GitHub issue tracker](https://github.com/bazelbuild/bazel/issues). - -## How it works - -When you run `bazel test` on an `android_instrumentation_test` target for the -first time, Bazel performs the following steps: - -1. Builds the test APK, APK under test, and their transitive dependencies -2. Creates, boots, and caches clean emulator states -3. Starts the emulator -4. Installs the APKs -5. Runs tests utilizing the [Android Test Orchestrator](https://developer.android.com/training/testing/junit-runner.html#using-android-test-orchestrator) -6. Shuts down the emulator -7. Reports the results - -In subsequent test runs, Bazel boots the emulator from the clean, cached state -created in step 2, so there are no leftover states from previous runs. Caching -emulator state also speeds up test runs. - -## Prerequisites - -Ensure your environment satisfies the following prerequisites: - -- **Linux**. Tested on Ubuntu 16.04, and 18.04. - -- **Bazel 0.12.0** or later. Verify the version by running `bazel info release`. - -```posix-terminal -bazel info release -``` -This results in output similar to the following: - -```none {:.devsite-disable-click-to-copy} -release 4.1.0 -``` - -- **KVM**. Bazel requires emulators to have [hardware - acceleration](https://developer.android.com/studio/run/emulator-acceleration.html#accel-check) - with KVM on Linux. You can follow these - [installation instructions](https://help.ubuntu.com/community/KVM/Installation) - for Ubuntu. - -To verify that KVM has the correct configuration, run: - -```posix-terminal -apt-get install cpu-checker && kvm-ok -``` - -If it prints the following message, you have the correct configuration: - -```none {:.devsite-disable-click-to-copy} -INFO: /dev/kvm exists -KVM acceleration can be used -``` - -- **Xvfb**. To run headless tests (for example, on CI servers), Bazel requires - the [X virtual framebuffer](https://www.x.org/archive/X11R7.6/doc/man/man1/Xvfb.1.xhtml). 
-
-To install it, run:
-
-```posix-terminal
-apt-get install xvfb
-```
-Verify that `Xvfb` is installed correctly and is installed at `/usr/bin/Xvfb`
-by running:
-
-```posix-terminal
-which Xvfb
-```
-The output is the following:
-
-```{:.devsite-disable-click-to-copy}
-/usr/bin/Xvfb
-```
-
-- **32-bit Libraries**. Some of the binaries used by the test infrastructure are
-  32-bit, so on 64-bit machines, ensure that 32-bit binaries can be run. For
-  Ubuntu, install these 32-bit libraries:
-
-```posix-terminal
-sudo apt-get install libc6:i386 libncurses5:i386 libstdc++6:i386 lib32z1 libbz2-1.0:i386
-```
-
-## Getting started
-
-Here is a typical target dependency graph of an `android_instrumentation_test`:
-
-![The target dependency graph of an Android instrumentation test](/docs/images/android_instrumentation_test.png "Target dependency graph")
-
-**Figure 2.** Target dependency graph of an `android_instrumentation_test`.
-
-### BUILD file
-
-The graph translates into a `BUILD` file like this:
-
-```python
-android_instrumentation_test(
-    name = "my_test",
-    test_app = ":my_test_app",
-    target_device = "@android_test_support//tools/android/emulated_devices/generic_phone:android_23_x86",
-)
-
-# Test app and library
-android_binary(
-    name = "my_test_app",
-    instruments = ":my_app",
-    manifest = "AndroidTestManifest.xml",
-    deps = [":my_test_lib"],
-    # ...
-)
-
-android_library(
-    name = "my_test_lib",
-    srcs = glob(["javatest/**/*.java"]),
-    deps = [
-        ":my_app_lib",
-        "@maven//:androidx_test_core",
-        "@maven//:androidx_test_runner",
-        "@maven//:androidx_test_espresso_espresso_core",
-    ],
-    # ...
-)
-
-# Target app and library under test
-android_binary(
-    name = "my_app",
-    manifest = "AndroidManifest.xml",
-    deps = [":my_app_lib"],
-    # ...
-)
-
-android_library(
-    name = "my_app_lib",
-    srcs = glob(["java/**/*.java"]),
-    deps = [
-        "@maven//:androidx_appcompat_appcompat",
-        "@maven//:androidx_annotation_annotation",
-    ],
-    # ...
-)
-```
-
-The main attributes of the rule `android_instrumentation_test` are:
-
-- `test_app`: An `android_binary` target. This target contains test code and
-  dependencies like Espresso and UIAutomator. The selected `android_binary`
-  target is required to specify an `instruments` attribute pointing to another
-  `android_binary`, which is the app under test.
-
-- `target_device`: An `android_device` target. This target describes the
-  specifications of the Android emulator which Bazel uses to create, launch and
-  run the tests. See the [section on choosing an Android
-  device](#android-device-target) for more information.
-
-The test app's `AndroidManifest.xml` must include [an `<instrumentation>`
-tag](https://developer.android.com/studio/test/#configure_instrumentation_manifest_settings).
-This tag must specify the attributes for the **package of the target app** and
-the **fully qualified class name of the instrumentation test runner**,
-`androidx.test.runner.AndroidJUnitRunner`.
-
-Here is an example `AndroidTestManifest.xml` for the test app:
-
-```xml
-<?xml version="1.0" encoding="UTF-8"?>
-<manifest xmlns:android="http://schemas.android.com/apk/res/android"
-          package="com.example.android.app.test"
-          android:versionCode="1"
-          android:versionName="1.0">
-
-    <instrumentation
-        android:name="androidx.test.runner.AndroidJUnitRunner"
-        android:targetPackage="com.example.android.app" />
-
-    <uses-sdk
-        android:minSdkVersion="16"
-        android:targetSdkVersion="27" />
-
-    <application>
-        <uses-library android:name="android.test.runner" />
-    </application>
-
-</manifest>
-```
-
-### WORKSPACE dependencies
-
-In order to use this rule, your project needs to depend on these external
-repositories:
-
-- `@androidsdk`: The Android SDK. Download this through Android Studio.
-
-- `@android_test_support`: Hosts the test runner, emulator launcher, and
-  `android_device` targets. You can find the [latest release
-  here](https://github.com/android/android-test/releases).
-
-Enable these dependencies by adding the following lines to your `WORKSPACE`
-file:
-
-```python
-# Android SDK
-android_sdk_repository(
-    name = "androidsdk",
-    path = "/path/to/sdk",  # or set ANDROID_HOME
-)
-
-# Android Test Support
-ATS_COMMIT = "$COMMIT_HASH"
-http_archive(
-    name = "android_test_support",
-    strip_prefix = "android-test-%s" % ATS_COMMIT,
-    urls = ["https://github.com/android/android-test/archive/%s.tar.gz" % ATS_COMMIT],
-)
-load("@android_test_support//:repo.bzl", "android_test_repositories")
-android_test_repositories()
-```
-
-## Maven dependencies
-
-For managing dependencies on Maven artifacts from repositories, such as [Google
-Maven](https://maven.google.com) or [Maven Central](https://central.maven.org),
-you should use a Maven resolver, such as
-[`rules_jvm_external`](https://github.com/bazelbuild/rules_jvm_external).
-
-The rest of this page shows how to use `rules_jvm_external` to
-resolve and fetch dependencies from Maven repositories.
-
-## Choosing an android_device target
-
-`android_instrumentation_test.target_device` specifies which Android device to
-run the tests on. These `android_device` targets are defined in
-[`@android_test_support`](https://github.com/google/android-testing-support-library/tree/master/tools/android/emulated_devices).
-
-For example, you can query for the sources for a particular target by running:
-
-```posix-terminal
-bazel query --output=build @android_test_support//tools/android/emulated_devices/generic_phone:android_23_x86
-```
-Which results in output that looks similar to:
-
-```python
-# .../external/android_test_support/tools/android/emulated_devices/generic_phone/BUILD:43:1
-android_device(
-    name = "android_23_x86",
-    visibility = ["//visibility:public"],
-    tags = ["requires-kvm"],
-    generator_name = "generic_phone",
-    generator_function = "make_device",
-    generator_location = "tools/android/emulated_devices/generic_phone/BUILD:43",
-    vertical_resolution = 800,
-    horizontal_resolution = 480,
-    ram = 2048,
-    screen_density = 240,
-    cache = 32,
-    vm_heap = 256,
-    system_image = "@android_test_support//tools/android/emulated_devices/generic_phone:android_23_x86_images",
-    default_properties = "@android_test_support//tools/android/emulated_devices/generic_phone:_android_23_x86_props",
-)
-```
-
-The device target names use this template:
-
-```
-@android_test_support//tools/android/emulated_devices/<device_type>:<system>_<api_level>_x86_qemu2
-```
-
-In order to launch an `android_device`, the `system_image` for the selected API
-level is required. To download the system image, use Android SDK's
-`tools/bin/sdkmanager`. For example, to download the system image for
-`generic_phone:android_23_x86`, run `$sdk/tools/bin/sdkmanager
-"system-images;android-23;default;x86"`.
-
-To see the full list of supported `android_device` targets in
-`@android_test_support`, run the following command:
-
-```posix-terminal
-bazel query 'filter("x86_qemu2$", kind(android_device, @android_test_support//tools/android/emulated_devices/...:*))'
-```
-
-Bazel currently supports x86-based emulators only. For better performance, use
-`QEMU2` `android_device` targets instead of `QEMU` ones.
-
-## Running tests
-
-To run tests, add these lines to your project's
-`<project root>/.bazelrc` file.
-
-```
-# Configurations for testing with Bazel
-# Select a configuration by running
-# `bazel test //my:target --config={headless, gui, local_device}`
-
-# Headless instrumentation tests (No GUI)
-test:headless --test_arg=--enable_display=false
-
-# Graphical instrumentation tests. Ensure that $DISPLAY is set.
-test:gui --test_env=DISPLAY
-test:gui --test_arg=--enable_display=true
-
-# Testing with a local emulator or device. Ensure that `adb devices` lists the
-# device.
-# Run tests serially.
-test:local_device --test_strategy=exclusive
-# Use the local device broker type, as opposed to WRAPPED_EMULATOR.
-test:local_device --test_arg=--device_broker_type=LOCAL_ADB_SERVER
-# Uncomment and set $device_id if there is more than one connected device.
-# test:local_device --test_arg=--device_serial_number=$device_id
-```
-
-Then, use one of the configurations to run tests:
-
-- `bazel test //my/test:target --config=gui`
-- `bazel test //my/test:target --config=headless`
-- `bazel test //my/test:target --config=local_device`
-
-Use __only one configuration__ or tests will fail.
-
-### Headless testing
-
-With `Xvfb`, it is possible to test with emulators without the graphical
-interface, also known as headless testing. To disable the graphical interface
-when running tests, pass the test argument `--enable_display=false` to Bazel:
-
-```posix-terminal
-bazel test //my/test:target --test_arg=--enable_display=false
-```
-
-### GUI testing
-
-If the `$DISPLAY` environment variable is set, it's possible to enable the
-graphical interface of the emulator while the test is running. To do this, pass
-these test arguments to Bazel:
-
-```posix-terminal
-bazel test //my/test:target --test_arg=--enable_display=true --test_env=DISPLAY
-```
-
-### Testing with a local emulator or device
-
-Bazel also supports testing directly on a locally launched emulator or connected
-device. Pass the flags
-`--test_strategy=exclusive` and
-`--test_arg=--device_broker_type=LOCAL_ADB_SERVER` to enable local testing mode.
-If there is more than one connected device, pass the flag
-`--test_arg=--device_serial_number=$device_id` where `$device_id` is the id of
-the device/emulator listed in `adb devices`.
-
-## Sample projects
-
-If you are looking for canonical project samples, see the [Android testing
-samples](https://github.com/googlesamples/android-testing#experimental-bazel-support)
-for projects using Espresso and UIAutomator.
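-
-For example, after cloning that repository, a headless run of its Espresso
-sample might look like this (the target name is assumed from the repository
-layout referenced in the Tips section below):
-
-```posix-terminal
-bazel test //ui/espresso/BasicSample:BasicSampleInstrumentationTest --config=headless
-```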
-
-## Espresso setup
-
-If you write UI tests with [Espresso](https://developer.android.com/training/testing/espresso/)
-(`androidx.test.espresso`), you can use the following snippets to set up your
-Bazel workspace with the list of commonly used Espresso artifacts and their
-dependencies:
-
-```
-androidx.test.espresso:espresso-core
-androidx.test:rules
-androidx.test:runner
-javax.inject:javax.inject
-org.hamcrest:java-hamcrest
-junit:junit
-```
-
-One way to organize these dependencies is to create a `//:test_deps` shared
-library in your `<project root>/BUILD.bazel` file:
-
-```python
-java_library(
-    name = "test_deps",
-    visibility = ["//visibility:public"],
-    exports = [
-        "@maven//:androidx_test_espresso_espresso_core",
-        "@maven//:androidx_test_rules",
-        "@maven//:androidx_test_runner",
-        "@maven//:javax_inject_javax_inject",
-        "@maven//:org_hamcrest_java_hamcrest",
-        "@maven//:junit_junit",
-    ],
-)
-```
-
-Then, add the required dependencies in `<project root>/WORKSPACE`:
-
-```python
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-
-RULES_JVM_EXTERNAL_TAG = "2.8"
-RULES_JVM_EXTERNAL_SHA = "79c9850690d7614ecdb72d68394f994fef7534b292c4867ce5e7dec0aa7bdfad"
-
-http_archive(
-    name = "rules_jvm_external",
-    strip_prefix = "rules_jvm_external-%s" % RULES_JVM_EXTERNAL_TAG,
-    sha256 = RULES_JVM_EXTERNAL_SHA,
-    url = "https://github.com/bazelbuild/rules_jvm_external/archive/%s.zip" % RULES_JVM_EXTERNAL_TAG,
-)
-
-load("@rules_jvm_external//:defs.bzl", "maven_install")
-
-maven_install(
-    artifacts = [
-        "junit:junit:4.12",
-        "javax.inject:javax.inject:1",
-        "org.hamcrest:java-hamcrest:2.0.0.0",
-        "androidx.test.espresso:espresso-core:3.1.1",
-        "androidx.test:rules:aar:1.1.1",
-        "androidx.test:runner:aar:1.1.1",
-    ],
-    repositories = [
-        "https://maven.google.com",
-        "https://repo1.maven.org/maven2",
-    ],
-)
-```
-
-Finally, in your test `android_binary` target, add the `//:test_deps`
-dependency:
-
-```python
-android_binary(
-    name = "my_test_app",
-    instruments = "//path/to:app",
-    deps = [
-        "//:test_deps",
-        # ...
-    ],
-    # ...
-)
-```
-
-## Tips
-
-### Reading test logs
-
-Use `--test_output=errors` to print logs for failing tests, or
-`--test_output=all` to print all test output. If you're looking for an
-individual test log, go to
-`$PROJECT_ROOT/bazel-testlogs/path/to/InstrumentationTestTargetName`.
-
-For example, the test logs for the `BasicSample` canonical project are in
-`bazel-testlogs/ui/espresso/BasicSample/BasicSampleInstrumentationTest`. To
-view them, run:
-
-```posix-terminal
-tree bazel-testlogs/ui/espresso/BasicSample/BasicSampleInstrumentationTest
-```
-This results in the following output:
-
-```none
-.
-├── adb.409923.log
-├── broker_logs
-│   ├── aapt_binary.10.ok.txt
-│   ├── aapt_binary.11.ok.txt
-│   ├── adb.12.ok.txt
-│   ├── adb.13.ok.txt
-│   ├── adb.14.ok.txt
-│   ├── adb.15.fail.txt
-│   ├── adb.16.ok.txt
-│   ├── adb.17.fail.txt
-│   ├── adb.18.ok.txt
-│   ├── adb.19.fail.txt
-│   ├── adb.20.ok.txt
-│   ├── adb.21.ok.txt
-│   ├── adb.22.ok.txt
-│   ├── adb.23.ok.txt
-│   ├── adb.24.fail.txt
-│   ├── adb.25.ok.txt
-│   ├── adb.26.fail.txt
-│   ├── adb.27.ok.txt
-│   ├── adb.28.fail.txt
-│   ├── adb.29.ok.txt
-│   ├── adb.2.ok.txt
-│   ├── adb.30.ok.txt
-│   ├── adb.3.ok.txt
-│   ├── adb.4.ok.txt
-│   ├── adb.5.ok.txt
-│   ├── adb.6.ok.txt
-│   ├── adb.7.ok.txt
-│   ├── adb.8.ok.txt
-│   ├── adb.9.ok.txt
-│   ├── android_23_x86.1.ok.txt
-│   └── exec-1
-│       ├── adb-2.txt
-│       ├── emulator-2.txt
-│       └── mksdcard-1.txt
-├── device_logcat
-│   └── logcat1635880625641751077.txt
-├── emulator_itCqtc.log
-├── outputs.zip
-├── pipe.log.txt
-├── telnet_pipe.log.txt
-└── tmpuRh4cy
-    ├── watchdog.err
-    └── watchdog.out
-
-4 directories, 41 files
-```
-
-### Reading emulator logs
-
-The emulator logs for `android_device` targets are stored in the `/tmp/`
-directory with the name `emulator_xxxxx.log`, where `xxxxx` is a
-randomly-generated sequence of characters.
-
-Use this command to find the latest emulator log:
-
-```posix-terminal
-ls -1t /tmp/emulator_*.log | head -n 1
-```
-
-### Testing against multiple API levels
-
-If you would like to test against multiple API levels, you can use a list
-comprehension to create test targets for each API level. For example:
-
-```python
-API_LEVELS = [
-    "19",
-    "20",
-    "21",
-    "22",
-]
-
-[android_instrumentation_test(
-    name = "my_test_%s" % API_LEVEL,
-    test_app = ":my_test_app",
-    target_device = "@android_test_support//tools/android/emulated_devices/generic_phone:android_%s_x86_qemu2" % API_LEVEL,
-) for API_LEVEL in API_LEVELS]
-```
-
-## Known issues
-
-- [Forked adb server processes are not terminated after
-  tests](https://github.com/bazelbuild/bazel/issues/4853)
-- While APK building works on all platforms (Linux, macOS, Windows), testing
-  only works on Linux.
-- Even with `--config=local_device`, users still need to specify
-  `android_instrumentation_test.target_device`.
-- If using a local device or emulator, Bazel does not uninstall the APKs after
-  the test. Clean the packages by running this command:
-
-```posix-terminal
-adb shell pm list packages com.example.android.testing | cut -d ':' -f 2 | tr -d '\r' | xargs -L1 -t adb uninstall
-```
diff --git a/8.0.1/docs/android-ndk.mdx b/8.0.1/docs/android-ndk.mdx
deleted file mode 100644
index b10a566..0000000
--- a/8.0.1/docs/android-ndk.mdx
+++ /dev/null
@@ -1,292 +0,0 @@
----
-title: 'Using the Android Native Development Kit with Bazel'
----
-
-
-
-_If you're new to Bazel, please start with the [Building Android with
-Bazel](/start/android-app) tutorial._
-
-## Overview
-
-Bazel can run in many different build configurations, including several that use
-the Android Native Development Kit (NDK) toolchain. This means that normal
-`cc_library` and `cc_binary` rules can be compiled for Android directly within
-Bazel. Bazel accomplishes this by using the `android_ndk_repository` repository
-rule.
-
-## Prerequisites
-
-Please ensure that you have installed the Android SDK and NDK.
-
-To set up the SDK and NDK, add the following snippet to your `WORKSPACE`:
-
-```python
-android_sdk_repository(
-    name = "androidsdk",  # Required. Name *must* be "androidsdk".
-    path = "/path/to/sdk",  # Optional. Can be omitted if `ANDROID_HOME` environment variable is set.
-)
-
-android_ndk_repository(
-    name = "androidndk",  # Required. Name *must* be "androidndk".
-    path = "/path/to/ndk",  # Optional. Can be omitted if `ANDROID_NDK_HOME` environment variable is set.
-)
-```
-
-For more information about the `android_ndk_repository` rule, see the [Build
-Encyclopedia entry](/reference/be/android#android_ndk_repository).
-
-If you're using a recent version of the Android NDK (r22 and beyond), use the
-Starlark implementation of `android_ndk_repository`.
-Follow the instructions in
-[its README](https://github.com/bazelbuild/rules_android_ndk).
-
-## Quick start
-
-To build C++ for Android, simply add `cc_library` dependencies to your
-`android_binary` or `android_library` rules.
-
-For example, given the following `BUILD` file for an Android app:
-
-```python
-# In <project>/app/src/main/BUILD.bazel
-
-cc_library(
-    name = "jni_lib",
-    srcs = ["cpp/native-lib.cpp"],
-)
-
-android_library(
-    name = "lib",
-    srcs = ["java/com/example/android/bazel/MainActivity.java"],
-    resource_files = glob(["res/**/*"]),
-    custom_package = "com.example.android.bazel",
-    manifest = "LibraryManifest.xml",
-    deps = [":jni_lib"],
-)
-
-android_binary(
-    name = "app",
-    deps = [":lib"],
-    manifest = "AndroidManifest.xml",
-)
-```
-
-This `BUILD` file results in the following target graph:
-
-![Example results](/docs/images/android_ndk.png "Build graph results")
-
-**Figure 1.** Build graph of Android project with cc_library dependencies.
-
-To build the app, simply run:
-
-```posix-terminal
-bazel build //app/src/main:app
-```
-
-The `bazel build` command compiles the Java files, Android resource files, and
-`cc_library` rules, and packages everything into an APK:
-
-```posix-terminal
-$ zipinfo -1 bazel-bin/app/src/main/app.apk
-nativedeps
-lib/armeabi-v7a/libapp.so
-classes.dex
-AndroidManifest.xml
-...
-res/...
-...
-META-INF/CERT.SF
-META-INF/CERT.RSA
-META-INF/MANIFEST.MF
-```
-
-Bazel compiles all of the cc_libraries into a single shared object (`.so`) file,
-targeted for the `armeabi-v7a` ABI by default. To change this or build for
-multiple ABIs at the same time, see the section on [configuring the target
-ABI](#configuring-target-abi).
-
-## Example setup
-
-This example is available in the [Bazel examples
-repository](https://github.com/bazelbuild/examples/tree/master/android/ndk).
-
-In the `BUILD.bazel` file, three targets are defined with the `android_binary`,
-`android_library`, and `cc_library` rules.
-
-The `android_binary` top-level target builds the APK.
-
-The `cc_library` target contains a single C++ source file with a JNI function
-implementation:
-
-```c++
-#include <jni.h>
-#include <string>
-
-extern "C"
-JNIEXPORT jstring
-
-JNICALL
-Java_com_example_android_bazel_MainActivity_stringFromJNI(
-        JNIEnv *env,
-        jobject /* this */) {
-    std::string hello = "Hello from C++";
-    return env->NewStringUTF(hello.c_str());
-}
-```
-
-The `android_library` target specifies the Java sources, resource files, and the
-dependency on a `cc_library` target. For this example, `MainActivity.java` loads
-the shared object file `libapp.so`, and defines the method signature for the JNI
-function:
-
-```java
-public class MainActivity extends AppCompatActivity {
-
-    static {
-        System.loadLibrary("app");
-    }
-
-    @Override
-    protected void onCreate(Bundle savedInstanceState) {
-        // ...
-    }
-
-    public native String stringFromJNI();
-
-}
-```
-
-Note: The name of the native library is derived from the name of the top
-level `android_binary` target. In this example, it is `app`.
-
-## Configuring the target ABI
-
-To configure the target ABI, use the `--android_platforms` flag as follows:
-
-```posix-terminal
-bazel build //:app --android_platforms=<comma-separated list of platforms>
-```
-
-Just like the `--platforms` flag, the values passed to `--android_platforms` are
-the labels of [`platform`](https://bazel.build/reference/be/platforms-and-toolchains#platform)
-targets, using standard constraint values to describe your device.
-
-For example, for an Android device with a 64-bit ARM processor, you'd define
-your platform like this:
-
-```py
-platform(
-    name = "android_arm64",
-    constraint_values = [
-        "@platforms//os:android",
-        "@platforms//cpu:arm64",
-    ],
-)
-```
-
-Every Android `platform` should use the [`@platforms//os:android`](https://github.com/bazelbuild/platforms/blob/33a3b209f94856193266871b1545054afb90bb28/os/BUILD#L36)
-OS constraint. To migrate the CPU constraint, check this chart:
-
-CPU Value     | Platform
-------------- | ------------------------------------------
-`armeabi-v7a` | `@platforms//cpu:armv7`
-`arm64-v8a`   | `@platforms//cpu:arm64`
-`x86`         | `@platforms//cpu:x86_32`
-`x86_64`      | `@platforms//cpu:x86_64`
-
-And, of course, for a multi-architecture APK, you pass multiple labels, for
-example: `--android_platforms=//:arm64,//:x86_64` (assuming you defined those in
-your top-level `BUILD.bazel` file).
-
-Bazel is unable to select a default Android platform, so one must be defined and
-specified with `--android_platforms`.
-
-Depending on the NDK revision and Android API level, the following ABIs are
-available:
-
-| NDK revision | ABIs                                                        |
-|--------------|-------------------------------------------------------------|
-| 16 and lower | armeabi, armeabi-v7a, arm64-v8a, mips, mips64, x86, x86\_64 |
-| 17 and above | armeabi-v7a, arm64-v8a, x86, x86\_64                        |
-
-See [the NDK docs](https://developer.android.com/ndk/guides/abis.html)
-for more information on these ABIs.
-
-Multi-ABI Fat APKs are not recommended for release builds since they increase
-the size of the APK, but can be useful for development and QA builds.
-
-## Selecting a C++ standard
-
-Use the following flags to build according to a C++ standard:
-
-| C++ Standard | Flag                    |
-|--------------|-------------------------|
-| C++98        | Default, no flag needed |
-| C++11        | `--cxxopt=-std=c++11`   |
-| C++14        | `--cxxopt=-std=c++14`   |
-| C++17        | `--cxxopt=-std=c++17`   |
-
-For example:
-
-```posix-terminal
-bazel build //:app --cxxopt=-std=c++11
-```
-
-Read more about passing compiler and linker flags with `--cxxopt`, `--copt`, and
-`--linkopt` in the [User Manual](/docs/user-manual#cxxopt).
-
-Compiler and linker flags can also be specified as attributes in `cc_library`
-using `copts` and `linkopts`. For example:
-
-```python
-cc_library(
-    name = "jni_lib",
-    srcs = ["cpp/native-lib.cpp"],
-    copts = ["-std=c++11"],
-    linkopts = ["-ldl"],  # link against libdl
-)
-```
-
-## Building a `cc_library` for Android without using `android_binary`
-
-To build a standalone `cc_binary` or `cc_library` for Android without using an
-`android_binary`, use the `--platforms` flag.
-
-For example, assuming you have defined Android platforms in
-`my/platforms/BUILD`:
-
-```posix-terminal
-bazel build //my/cc/jni:target \
-    --platforms=//my/platforms:x86_64
-```
-
-With this approach, the entire build tree is affected.
-
-Note: All of the targets on the command line must be compatible with
-building for Android when specifying these flags, which may make it difficult to
-use [Bazel wild-cards](/run/build#specifying-build-targets) like
-`/...` and `:all`.
-
-These flags can be put into a `bazelrc` config (one for each ABI), in
-`<project>/.bazelrc`:
-
-```
-common:android_x86 --platforms=//my/platforms:x86
-
-common:android_armeabi-v7a --platforms=//my/platforms:armeabi-v7a
-
-# In general
-common:android_<abi> --platforms=//my/platforms:<abi>
-```
-
-Then, to build a `cc_library` for `x86` for example, run:
-
-```posix-terminal
-bazel build //my/cc/jni:target --config=android_x86
-```
-
-In general, use this method for low-level targets (like `cc_library`) or when
-you know exactly what you're building; rely on the automatic configuration
-transitions from `android_binary` for high-level targets where you're expecting
-to build a lot of targets you don't control.
diff --git a/8.0.1/docs/bazel-and-android.mdx b/8.0.1/docs/bazel-and-android.mdx
deleted file mode 100644
index bf3625c..0000000
--- a/8.0.1/docs/bazel-and-android.mdx
+++ /dev/null
@@ -1,45 +0,0 @@
----
-title: 'Android and Bazel'
----
-
-
-
-This page contains resources that help you use Bazel with Android projects. It
-links to a tutorial, build rules, and other information specific to building
-Android projects with Bazel.
-
-## Getting started
-
-The following resources will help you work with Bazel on Android projects:
-
-* [Tutorial: Building an Android app](/start/android-app). This
-  tutorial is a good place to start learning about Bazel commands and concepts,
-  and how to build Android apps with Bazel.
-* [Codelab: Building Android Apps with Bazel](https://developer.android.com/codelabs/bazel-android-intro#0).
-  This codelab explains how to build Android apps with Bazel.
-
-## Features
-
-Bazel has Android rules for building and testing Android apps, integrating with
-the SDK/NDK, and creating emulator images. There are also Bazel plugins for
-Android Studio and IntelliJ.
-
-* [Android rules](/reference/be/android). The Build Encyclopedia describes the rules
-  for building and testing Android apps with Bazel.
-* [Integration with Android Studio](/install/ide). Bazel is compatible with
-  Android Studio using the [Android Studio with Bazel](https://ij.bazel.build/)
-  plugin.
-* [`mobile-install` for Android](/docs/mobile-install). Bazel's `mobile-install`
-  feature provides automated build-and-deploy functionality for building and
-  testing Android apps directly on Android devices and emulators.
-* [Android instrumentation testing](/docs/android-instrumentation-test) on
-  emulators and devices.
-* [Android NDK integration](/docs/android-ndk). Bazel supports compiling to
-  native code through direct NDK integration and the C++ rules.
-* [Android build performance](/docs/android-build-performance). This page
-  provides information on optimizing build performance for Android apps.
-
-## Further reading
-
-* Integrating with dependencies from Google Maven and Maven Central with [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external).
-* Learn [How Android Builds Work in Bazel](https://blog.bazel.build/2018/02/14/how-android-builds-work-in-bazel.html).
diff --git a/8.0.1/docs/bazel-and-apple.mdx b/8.0.1/docs/bazel-and-apple.mdx deleted file mode 100644 index 6e4a06f..0000000 --- a/8.0.1/docs/bazel-and-apple.mdx +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: 'Apple Apps and Bazel' ---- - - - -This page contains resources that help you use Bazel to build macOS and iOS -projects. It links to a tutorial, build rules, and other information specific to -using Bazel to build and test for those platforms. - -## Working with Bazel - -The following resources will help you work with Bazel on macOS and iOS projects: - -* [Tutorial: Building an iOS app](/start/ios-app) -* [Objective-C build rules](/reference/be/objective-c) -* [General Apple rules](https://github.com/bazelbuild/rules_apple) -* [Integration with Xcode](/install/ide) - -## Migrating to Bazel - -If you currently build your macOS and iOS projects with Xcode, follow the steps -in the migration guide to start building them with Bazel: - -* [Migrating from Xcode to Bazel](/migrate/xcode) - -## Apple apps and new rules - -**Note**: Creating new rules is for advanced build and test scenarios. -You do not need it when getting started with Bazel. - -The following modules, configuration fragments, and providers will help you -[extend Bazel's capabilities](/extending/concepts) -when building your macOS and iOS projects: - -* Modules: - - * [`apple_bitcode_mode`](/rules/lib/builtins/apple_bitcode_mode) - * [`apple_common`](/rules/lib/toplevel/apple_common) - * [`apple_platform`](/rules/lib/builtins/apple_platform) - * [`apple_platform_type`](/rules/lib/builtins/apple_platform_type) - * [`apple_toolchain`](/rules/lib/builtins/apple_toolchain) - -* Configuration fragments: - - * [`apple`](/rules/lib/fragments/apple) - -* Providers: - - * [`ObjcProvider`](/rules/lib/providers/ObjcProvider) - * [`XcodeVersionConfig`](/rules/lib/providers/XcodeVersionConfig) - -## Xcode selection - -If your build requires Xcode, Bazel will select an appropriate version based on -the `--xcode_config` and `--xcode_version` flags. The `--xcode_config` consumes -the set of available Xcode versions and sets a default version if -`--xcode_version` is not passed. This default is overridden by the -`--xcode_version` flag, as long as it is set to an Xcode version that is -represented in the `--xcode_config` target. - -If you do not pass `--xcode_config`, Bazel will use the autogenerated -[`XcodeVersionConfig`](/rules/lib/providers/XcodeVersionConfig) that represents the -Xcode versions available on your host machine. The default version is -the newest available Xcode version. This is appropriate for local execution. - -If you are performing remote builds, you should set `--xcode_config` to an -[`xcode_config`](/reference/be/objective-c#xcode_config) -target whose `versions` attribute is a list of remotely available -[`xcode_version`](/reference/be/objective-c#xcode_version) -targets, and whose `default` attribute is one of these -[`xcode_versions`](/reference/be/objective-c#xcode_version). - -If you are using dynamic execution, you should set `--xcode_config` to an -[`xcode_config`](/reference/be/objective-c#xcode_config) -target whose `remote_versions` attribute is an -[`available_xcodes`](/reference/be/workspace#available_xcodes) -target containing the remotely available Xcode versions, and whose -`local_versions` attribute is an -[`available_xcodes`](/reference/be/workspace#available_xcodes) -target containing the locally available Xcode versions. 
For `local_versions`,
-you probably want to use the autogenerated
-`@local_config_xcode//:host_available_xcodes`. The default Xcode version is the
-newest mutually available version, if there is one, otherwise the default of the
-`local_versions` target. If you prefer to use the `local_versions` default
-as the default, you can pass `--experimental_prefer_mutual_default=false`.
diff --git a/8.0.1/docs/bazel-and-cpp.mdx b/8.0.1/docs/bazel-and-cpp.mdx
deleted file mode 100644
index 9ade384..0000000
--- a/8.0.1/docs/bazel-and-cpp.mdx
+++ /dev/null
@@ -1,102 +0,0 @@
----
-title: 'C++ and Bazel'
----
-
-
-
-This page contains resources that help you use Bazel with C++ projects. It links
-to a tutorial, build rules, and other information specific to building C++
-projects with Bazel.
-
-## Working with Bazel
-
-The following resources will help you work with Bazel on C++ projects:
-
-* [Tutorial: Building a C++ project](/start/cpp)
-* [C++ common use cases](/tutorials/cpp-use-cases)
-* [C/C++ rules](/reference/be/c-cpp)
-* Essential Libraries
-  - [Abseil](https://abseil.io/docs/cpp/quickstart)
-  - [Boost](https://github.com/nelhage/rules_boost)
-  - [HTTPS Requests: CPR and libcurl](https://github.com/hedronvision/bazel-make-cc-https-easy)
-* [C++ toolchain configuration](/docs/cc-toolchain-config-reference)
-* [Tutorial: Configuring C++ toolchains](/tutorials/ccp-toolchain-config)
-* [Integrating with C++ rules](/configure/integrate-cpp)
-
-## Best practices
-
-In addition to [general Bazel best practices](/configure/best-practices), below are
-best practices specific to C++ projects.
-
-### BUILD files
-
-Follow the guidelines below when creating your BUILD files:
-
-* Each `BUILD` file should contain one [`cc_library`](/reference/be/c-cpp#cc_library)
-  rule target per compilation unit in the directory.
-
-* You should granularize your C++ libraries as much as
-  possible to maximize incrementality and parallelize the build.
-
-* If there is a single source file in `srcs`, name the library the same as
-  that C++ file's name. This library should contain C++ file(s), any matching
-  header file(s), and the library's direct dependencies. For example:
-
-  ```python
-  cc_library(
-      name = "mylib",
-      srcs = ["mylib.cc"],
-      hdrs = ["mylib.h"],
-      deps = [":lower-level-lib"]
-  )
-  ```
-
-* Use one `cc_test` rule target per `cc_library` target in the file. Name the
-  target `[library-name]_test` and the source file `[library-name]_test.cc`.
-  For example, a test target for the `mylib` library target shown above would
-  look like this:
-
-  ```python
-  cc_test(
-      name = "mylib_test",
-      srcs = ["mylib_test.cc"],
-      deps = [":mylib"]
-  )
-  ```
-
-### Include paths
-
-Follow these guidelines for include paths:
-
-* Make all include paths relative to the workspace directory.
-
-* Use quoted includes (`#include "foo/bar/baz.h"`) for non-system headers, not
-  angle-brackets (`#include <foo/bar/baz.h>`).
-
-* Avoid using UNIX directory shortcuts, such as `.` (current directory) or `..`
-  (parent directory).
-
-* For legacy or `third_party` code that requires includes pointing outside the
-  project repository, such as external repository includes requiring a prefix,
-  use the [`include_prefix`](/reference/be/c-cpp#cc_library.include_prefix) and
-  [`strip_include_prefix`](/reference/be/c-cpp#cc_library.strip_include_prefix)
-  arguments on the `cc_library` rule target.
-
-### Toolchain features
-
-The following optional [features](/docs/cc-toolchain-config-reference#features)
-can improve the hygiene of a C++ project.
They can be enabled using the -`--features` command-line flag or the `features` attribute of -[`repo`](/external/overview#repo.bazel), -[`package`](/reference/be/functions#package) or `cc_*` rules: - -* The `parse_headers` feature makes it so that the C++ compiler is used to parse - (but not compile) all header files in the built targets and their dependencies - when using the - [`--process_headers_in_dependencies`](/reference/command-line-reference#flag--process_headers_in_dependencies) - flag. This can help catch issues in header-only libraries and ensure that - headers are self-contained and independent of the order in which they are - included. -* The `layering_check` feature enforces that targets only include headers - provided by their direct dependencies. The default toolchain supports this - feature on Linux with `clang` as the compiler. diff --git a/8.0.1/docs/bazel-and-java.mdx b/8.0.1/docs/bazel-and-java.mdx deleted file mode 100644 index e9476aa..0000000 --- a/8.0.1/docs/bazel-and-java.mdx +++ /dev/null @@ -1,343 +0,0 @@ ---- -title: 'Java and Bazel' ---- - - - -This page contains resources that help you use Bazel with Java projects. It -links to a tutorial, build rules, and other information specific to building -Java projects with Bazel. - -## Working with Bazel - -The following resources will help you work with Bazel on Java projects: - -* [Tutorial: Building a Java Project](/start/java) -* [Java rules](/reference/be/java) - -## Migrating to Bazel - -If you currently build your Java projects with Maven, follow the steps in the -migration guide to start building your Maven projects with Bazel: - -* [Migrating from Maven to Bazel](/migrate/maven) - -## Java versions - -There are two relevant versions of Java that are set with configuration flags: - -* the version of the source files in the repository -* the version of the Java runtime that is used to execute the code and to test - it - -### Configuring the version of the source code in your repository - -Without an additional configuration, Bazel assumes all Java source files in the -repository are written in a single Java version. To specify the version of the -sources in the repository add `build --java_language_version={ver}` to -`.bazelrc` file, where `{ver}` is for example `11`. Bazel repository owners -should set this flag so that Bazel and its users can reference the source code's -Java version number. For more details, see -[Java language version flag](/docs/user-manual#java-language-version). - -### Configuring the JVM used to execute and test the code - -Bazel uses one JDK for compilation and another JVM to execute and test the code. - -By default Bazel compiles the code using a JDK it downloads and it executes and -tests the code with the JVM installed on the local machine. Bazel searches for -the JVM using `JAVA_HOME` or path. - -The resulting binaries are compatible with locally installed JVM in system -libraries, which means the resulting binaries depend on what is installed on the -machine. - -To configure the JVM used for execution and testing use `--java_runtime_version` -flag. The default value is `local_jdk`. - -### Hermetic testing and compilation - -To create a hermetic compile, you can use command line flag -`--java_runtime_version=remotejdk_11`. The code is compiled for, executed, and -tested on the JVM downloaded from a remote repository. For more details, see -[Java runtime version flag](/docs/user-manual#java_runtime_version). 
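-
-For example, a minimal `.bazelrc` sketch that pins both the language level and
-the runtime to a hermetic, remotely downloaded JDK (assuming Java 11 is the
-version you want) could look like:
-
-```python
-# Compile sources as Java 11 and execute/test on a downloaded JDK 11.
-build --java_language_version=11
-build --java_runtime_version=remotejdk_11
-```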
-
-### Configuring compilation and execution of build tools in Java
-
-There is a second pair of JDK and JVM used to build and execute tools, which are
-used in the build process, but are not in the build results. That JDK and JVM
-are controlled using `--tool_java_language_version` and
-`--tool_java_runtime_version`. Default values are `11` and `remotejdk_11`,
-respectively.
-
-#### Compiling using a locally installed JDK
-
-By default, Bazel compiles using a remote JDK, because it overrides the JDK's
-internals. Compilation toolchains that use a locally installed JDK are
-configured, but not used.
-
-To compile using the locally installed JDK (that is, to use the compilation
-toolchains for the local JDK), pass the additional flag
-`--extra_toolchains=@local_jdk//:all`. Note that this may not work with JDKs
-from arbitrary vendors.
-
-For more details, see
-[configuring Java toolchains](#config-java-toolchains).
-
-## Best practices
-
-In addition to [general Bazel best practices](/configure/best-practices), below are
-best practices specific to Java projects.
-
-### Directory structure
-
-Prefer Maven's standard directory layout (sources under `src/main/java`, tests
-under `src/test/java`).
-
-### BUILD files
-
-Follow these guidelines when creating your `BUILD` files:
-
-* Use one `BUILD` file per directory containing Java sources, because this
-  improves build performance.
-
-* Every `BUILD` file should contain one `java_library` rule that looks like
-  this:
-
-  ```python
-  java_library(
-      name = "directory-name",
-      srcs = glob(["*.java"]),
-      deps = [...],
-  )
-  ```
-
-* The name of the library should be the name of the directory containing the
-  `BUILD` file. This makes the label of the library shorter, that is, use
-  `"//package"` instead of `"//package:package"`.
-
-* The sources should be a non-recursive [`glob`](/reference/be/functions#glob) of
-  all Java files in the directory.
-
-* Tests should be in a matching directory under `src/test` and depend on this
-  library.
-
-## Creating new rules for advanced Java builds
-
-**Note**: Creating new rules is for advanced build and test scenarios. You do
-not need it when getting started with Bazel.
-
-The following modules, configuration fragments, and providers will help you
-[extend Bazel's capabilities](/extending/concepts) when building your Java
-projects:
-
-* Main Java module: [`java_common`](/rules/lib/toplevel/java_common)
-* Main Java provider: [`JavaInfo`](/rules/lib/providers/JavaInfo)
-* Configuration fragment: [`java`](/rules/lib/fragments/java)
-* Other modules:
-
-  * [`java_annotation_processing`](/rules/lib/builtins/java_annotation_processing)
-  * [`java_compilation_info`](/rules/lib/providers/java_compilation_info)
-  * [`java_output_jars`](/rules/lib/providers/java_output_jars)
-  * [`JavaRuntimeInfo`](/rules/lib/providers/JavaRuntimeInfo)
-  * [`JavaToolchainInfo`](/rules/lib/providers/JavaToolchainInfo)
-
-## Configuring the Java toolchains
-
-Bazel uses two types of Java toolchains:
-
-- execution, used to execute and test Java binaries, controlled with the
-  `--java_runtime_version` flag
-- compilation, used to compile Java sources, controlled with the
-  `--java_language_version` flag
-
-### Configuring additional execution toolchains
-
-An execution toolchain is the JVM, either local or from a repository, with some
-additional information about its version, operating system, and CPU
-architecture.
-
-Java execution toolchains may be added using the `local_java_repository` or
-`remote_java_repository` repo rules in a module extension.
Adding the rule makes -the JVM available using a flag. When multiple definitions for the same operating -system and CPU architecture are given, the first one is used. - -Example configuration of local JVM: - -```python -load("@rules_java//toolchains:local_java_repository.bzl", "local_java_repository") - -local_java_repository( - name = "additionaljdk", # Can be used with --java_runtime_version=additionaljdk, --java_runtime_version=11 or --java_runtime_version=additionaljdk_11 - version = 11, # Optional, if not set it is autodetected - java_home = "/usr/lib/jdk-15/", # Path to directory containing bin/java -) -``` - -Example configuration of remote JVM: - -```python -load("@rules_java//toolchains:remote_java_repository.bzl", "remote_java_repository") - -remote_java_repository( - name = "openjdk_canary_linux_arm", - prefix = "openjdk_canary", # Can be used with --java_runtime_version=openjdk_canary_11 - version = "11", # or --java_runtime_version=11 - target_compatible_with = [ # Specifies constraints this JVM is compatible with - "@platforms//cpu:arm", - "@platforms//os:linux", - ], - urls = ..., # Other parameters are from http_repository rule. - sha256 = ..., - strip_prefix = ... -) -``` - -### Configuring additional compilation toolchains - -Compilation toolchain is composed of JDK and multiple tools that Bazel uses -during the compilation and that provides additional features, such as: Error -Prone, strict Java dependencies, header compilation, Android desugaring, -coverage instrumentation, and genclass handling for IDEs. - -JavaBuilder is a Bazel-bundled tool that executes compilation, and provides the -aforementioned features. Actual compilation is executed using the internal -compiler by the JDK. The JDK used for compilation is specified by `java_runtime` -attribute of the toolchain. - -Bazel overrides some JDK internals. In case of JDK version > 9, -`java.compiler` and `jdk.compiler` modules are patched using JDK's flag -`--patch_module`. In case of JDK version 8, the Java compiler is patched using -`-Xbootclasspath` flag. - -VanillaJavaBuilder is a second implementation of JavaBuilder, -which does not modify JDK's internal compiler and does not have any of the -additional features. VanillaJavaBuilder is not used by any of the built-in -toolchains. - -In addition to JavaBuilder, Bazel uses several other tools during compilation. - -The `ijar` tool processes `jar` files to remove everything except call -signatures. Resulting jars are called header jars. They are used to improve the -compilation incrementality by only recompiling downstream dependents when the -body of a function changes. - -The `singlejar` tool packs together multiple `jar` files into a single one. - -The `genclass` tool post-processes the output of a Java compilation, and produces -a `jar` containing only the class files for sources that were generated by -annotation processors. - -The `JacocoRunner` tool runs Jacoco over instrumented files and outputs results in -LCOV format. - -The `TestRunner` tool executes JUnit 4 tests in a controlled environment. - -You can reconfigure the compilation by adding `default_java_toolchain` macro to -a `BUILD` file and registering it either by adding `register_toolchains` rule to -the `MODULE.bazel` file or by using -[`--extra_toolchains`](/docs/user-manual#extra-toolchains) flag. - -The toolchain is only used when the `source_version` attribute matches the -value specified by `--java_language_version` flag. 
-
-Example toolchain configuration:
-
-```python
-load(
-    "@rules_java//toolchains:default_java_toolchain.bzl",
-    "default_java_toolchain", "DEFAULT_TOOLCHAIN_CONFIGURATION", "BASE_JDK9_JVM_OPTS", "DEFAULT_JAVACOPTS"
-)
-
-default_java_toolchain(
-    name = "repository_default_toolchain",
-    configuration = DEFAULT_TOOLCHAIN_CONFIGURATION,  # One of the predefined configurations
-    # Other parameters are from the java_toolchain rule:
-    java_runtime = "@rules_java//toolchains:remote_jdk11",  # JDK used for compilation and for running the toolchain's tools
-    jvm_opts = BASE_JDK9_JVM_OPTS + ["--enable_preview"],  # Additional JDK options
-    javacopts = DEFAULT_JAVACOPTS + ["--enable_preview"],  # Additional javac options
-    source_version = "9",
-)
-```
-
-which can be used via `--extra_toolchains=//:repository_default_toolchain_definition`
-or by adding `register_toolchains("//:repository_default_toolchain_definition")`
-to the workspace.
-
-Predefined configurations:
-
-- `DEFAULT_TOOLCHAIN_CONFIGURATION`: all features, supports JDK versions >= 9
-- `VANILLA_TOOLCHAIN_CONFIGURATION`: no additional features, supports JDKs of
-  arbitrary vendors.
-- `PREBUILT_TOOLCHAIN_CONFIGURATION`: same as default, but only use prebuilt
-  tools (`ijar`, `singlejar`)
-- `NONPREBUILT_TOOLCHAIN_CONFIGURATION`: same as default, but all tools are
-  built from sources (this may be useful on operating systems with a different
-  libc)
-
-#### Configuring JVM and Java compiler flags
-
-You may configure JVM and javac flags either with flags or with
-`default_java_toolchain` attributes.
-
-The relevant flags are `--jvmopt`, `--host_jvmopt`, `--javacopt`, and
-`--host_javacopt`.
-
-The relevant `default_java_toolchain` attributes are `javacopts`, `jvm_opts`,
-`javabuilder_jvm_opts`, and `turbine_jvm_opts`.
-
-#### Package-specific Java compiler flags configuration
-
-You can configure different Java compiler flags for specific source
-files using the `package_configuration` attribute of `default_java_toolchain`.
-Please refer to the example below.
-
-```python
-load("@rules_java//toolchains:default_java_toolchain.bzl", "default_java_toolchain")
-
-# This is a convenience macro that inherits values from Bazel's default java_toolchain
-default_java_toolchain(
-    name = "toolchain",
-    package_configuration = [
-        ":error_prone",
-    ],
-    visibility = ["//visibility:public"],
-)
-
-# This associates a set of javac flags with a set of packages
-java_package_configuration(
-    name = "error_prone",
-    javacopts = [
-        "-Xep:MissingOverride:ERROR",
-    ],
-    packages = ["error_prone_packages"],
-)
-
-# This is a regular package_group, which is used to specify a set of packages to apply flags to
-package_group(
-    name = "error_prone_packages",
-    packages = [
-        "//foo/...",
-        "-//foo/bar/...",  # this is an exclusion
-    ],
-)
-```
-
-#### Multiple versions of Java source code in a single repository
-
-Bazel only supports compiling a single version of Java sources in a build.
-This means that when building a Java test or an application, all
-dependencies are built against the same Java version.
-
-However, separate builds may be executed using different flags.
-
-To make the task of using different flags easier, sets of flags for a specific
-version may be grouped with `.bazelrc` configs:
-
-```python
-build:java8 --java_language_version=8
-build:java8 --java_runtime_version=local_jdk_8
-build:java11 --java_language_version=11
-build:java11 --java_runtime_version=remotejdk_11
-```
-
-These configs can be used with the `--config` flag, for example
-`bazel test --config=java11 //:java11_test`.
diff --git a/8.0.1/docs/bazel-and-javascript.mdx b/8.0.1/docs/bazel-and-javascript.mdx
deleted file mode 100644
index 63d8018..0000000
--- a/8.0.1/docs/bazel-and-javascript.mdx
+++ /dev/null
@@ -1,24 +0,0 @@
----
-title: 'JavaScript and Bazel'
----
-
-
-
-This page contains resources that help you use Bazel with JavaScript projects.
-It links to build rules and other information specific to building JavaScript
-with Bazel.
-
-The following resources will help you work with Bazel on JavaScript projects:
-
-* [NodeJS toolchain](https://github.com/bazelbuild/rules_nodejs)
-* [rules_js](https://github.com/aspect-build/rules_js) - Bazel rules for building JavaScript programs
-* [rules_esbuild](https://github.com/aspect-build/rules_esbuild) - Bazel rules for the [esbuild](https://esbuild.github.io) JS bundler
-* [rules_terser](https://github.com/aspect-build/rules_terser) - Bazel rules for [Terser](https://terser.org), a JavaScript minifier
-* [rules_swc](https://github.com/aspect-build/rules_swc) - Bazel rules for [swc](https://swc.rs)
-* [rules_ts](https://github.com/aspect-build/rules_ts) - Bazel rules for [TypeScript](http://typescriptlang.org)
-* [rules_webpack](https://github.com/aspect-build/rules_webpack) - Bazel rules for [Webpack](https://webpack.js.org)
-* [rules_rollup](https://github.com/aspect-build/rules_rollup) - Bazel rules for [Rollup](https://rollupjs.org), a JavaScript bundler
-* [rules_jest](https://github.com/aspect-build/rules_jest) - Bazel rules to run tests using [Jest](https://jestjs.io)
-* [rules_jasmine](https://github.com/aspect-build/rules_jasmine) - Bazel rules to run tests using [Jasmine](https://jasmine.github.io/)
-* [rules_cypress](https://github.com/aspect-build/rules_cypress) - Bazel rules to run tests using [Cypress](https://cypress.io)
-* [rules_deno](https://github.com/aspect-build/rules_deno) - Bazel rules for [Deno](http://deno.land)
diff --git a/8.0.1/docs/configurable-attributes.mdx b/8.0.1/docs/configurable-attributes.mdx
deleted file mode 100644
index 3515852..0000000
--- a/8.0.1/docs/configurable-attributes.mdx
+++ /dev/null
@@ -1,1099 +0,0 @@
----
-title: 'Configurable Build Attributes'
----
-
-
-
-**_Configurable attributes_**, commonly known as [`select()`](
-/reference/be/functions#select), is a Bazel feature that lets users toggle the values
-of build rule attributes at the command line.
-
-This can be used, for example, for a multiplatform library that automatically
-chooses the appropriate implementation for the architecture, or for a
-feature-configurable binary that can be customized at build time.
-
-## Example
-
-```python
-# myapp/BUILD
-
-cc_binary(
-    name = "mybinary",
-    srcs = ["main.cc"],
-    deps = select({
-        ":arm_build": [":arm_lib"],
-        ":x86_debug_build": [":x86_dev_lib"],
-        "//conditions:default": [":generic_lib"],
-    }),
-)
-
-config_setting(
-    name = "arm_build",
-    values = {"cpu": "arm"},
-)
-
-config_setting(
-    name = "x86_debug_build",
-    values = {
-        "cpu": "x86",
-        "compilation_mode": "dbg",
-    },
-)
-```
-
-This declares a `cc_binary` that "chooses" its deps based on the flags at the
-command line. Specifically, `deps` becomes:
-
-Command                                          | deps =
------------------------------------------------- | -----------------
-`bazel build //myapp:mybinary --cpu=arm`         | `[":arm_lib"]`
-`bazel build //myapp:mybinary -c dbg --cpu=x86`  | `[":x86_dev_lib"]`
-`bazel build //myapp:mybinary --cpu=ppc`         | `[":generic_lib"]`
-`bazel build //myapp:mybinary -c dbg --cpu=ppc`  | `[":generic_lib"]`
- -`select()` serves as a placeholder for a value that will be chosen based on -*configuration conditions*, which are labels referencing [`config_setting`](/reference/be/general#config_setting) -targets. By using `select()` in a configurable attribute, the attribute -effectively adopts different values when different conditions hold. - -Matches must be unambiguous: if multiple conditions match then either -* They all resolve to the same value. For example, when running on linux x86, this is unambiguous - `{"@platforms//os:linux": "Hello", "@platforms//cpu:x86_64": "Hello"}` because both branches resolve to "hello". -* One's `values` is a strict superset of all others'. For example, `values = {"cpu": "x86", "compilation_mode": "dbg"}` - is an unambiguous specialization of `values = {"cpu": "x86"}`. - -The built-in condition [`//conditions:default`](#default-condition) automatically matches when -nothing else does. - -While this example uses `deps`, `select()` works just as well on `srcs`, -`resources`, `cmd`, and most other attributes. Only a small number of attributes -are *non-configurable*, and these are clearly annotated. For example, -`config_setting`'s own -[`values`](/reference/be/general#config_setting.values) attribute is non-configurable. - -## `select()` and dependencies - -Certain attributes change the build parameters for all transitive dependencies -under a target. For example, `genrule`'s `tools` changes `--cpu` to the CPU of -the machine running Bazel (which, thanks to cross-compilation, may be different -than the CPU the target is built for). This is known as a -[configuration transition](/reference/glossary#transition). - -Given - -```python -#myapp/BUILD - -config_setting( - name = "arm_cpu", - values = {"cpu": "arm"}, -) - -config_setting( - name = "x86_cpu", - values = {"cpu": "x86"}, -) - -genrule( - name = "my_genrule", - srcs = select({ - ":arm_cpu": ["g_arm.src"], - ":x86_cpu": ["g_x86.src"], - }), - tools = select({ - ":arm_cpu": [":tool1"], - ":x86_cpu": [":tool2"], - }), -) - -cc_binary( - name = "tool1", - srcs = select({ - ":arm_cpu": ["armtool.cc"], - ":x86_cpu": ["x86tool.cc"], - }), -) -``` - -running - -```sh -$ bazel build //myapp:my_genrule --cpu=arm -``` - -on an `x86` developer machine binds the build to `g_arm.src`, `tool1`, and -`x86tool.cc`. Both of the `select`s attached to `my_genrule` use `my_genrule`'s -build parameters, which include `--cpu=arm`. The `tools` attribute changes -`--cpu` to `x86` for `tool1` and its transitive dependencies. The `select` on -`tool1` uses `tool1`'s build parameters, which include `--cpu=x86`. - -## Configuration conditions - -Each key in a configurable attribute is a label reference to a -[`config_setting`](/reference/be/general#config_setting) or -[`constraint_value`](/reference/be/platforms-and-toolchains#constraint_value). - -`config_setting` is just a collection of -expected command line flag settings. By encapsulating these in a target, it's -easy to maintain "standard" conditions users can reference from multiple places. - -`constraint_value` provides support for [multi-platform behavior](#platforms). - -### Built-in flags - -Flags like `--cpu` are built into Bazel: the build tool natively understands -them for all builds in all projects. 
These are specified with -[`config_setting`](/reference/be/general#config_setting)'s -[`values`](/reference/be/general#config_setting.values) attribute: - -```python -config_setting( - name = "meaningful_condition_name", - values = { - "flag1": "value1", - "flag2": "value2", - ... - }, -) -``` - -`flagN` is a flag name (without `--`, so `"cpu"` instead of `"--cpu"`). `valueN` -is the expected value for that flag. `:meaningful_condition_name` matches if -*every* entry in `values` matches. Order is irrelevant. - -`valueN` is parsed as if it was set on the command line. This means: - -* `values = { "compilation_mode": "opt" }` matches `bazel build -c opt` -* `values = { "force_pic": "true" }` matches `bazel build --force_pic=1` -* `values = { "force_pic": "0" }` matches `bazel build --noforce_pic` - -`config_setting` only supports flags that affect target behavior. For example, -[`--show_progress`](/docs/user-manual#show-progress) isn't allowed because -it only affects how Bazel reports progress to the user. Targets can't use that -flag to construct their results. The exact set of supported flags isn't -documented. In practice, most flags that "make sense" work. - -### Custom flags - -You can model your own project-specific flags with -[Starlark build settings][BuildSettings]. Unlike built-in flags, these are -defined as build targets, so Bazel references them with target labels. - -These are triggered with [`config_setting`](/reference/be/general#config_setting)'s -[`flag_values`](/reference/be/general#config_setting.flag_values) -attribute: - -```python -config_setting( - name = "meaningful_condition_name", - flag_values = { - "//myflags:flag1": "value1", - "//myflags:flag2": "value2", - ... - }, -) -``` - -Behavior is the same as for [built-in flags](#built-in-flags). See [here](https://github.com/bazelbuild/examples/tree/HEAD/configurations/select_on_build_setting) -for a working example. - -[`--define`](/reference/command-line-reference#flag--define) -is an alternative legacy syntax for custom flags (for example -`--define foo=bar`). This can be expressed either in the -[values](/reference/be/general#config_setting.values) attribute -(`values = {"define": "foo=bar"}`) or the -[define_values](/reference/be/general#config_setting.define_values) attribute -(`define_values = {"foo": "bar"}`). `--define` is only supported for backwards -compatibility. Prefer Starlark build settings whenever possible. - -`values`, `flag_values`, and `define_values` evaluate independently. The -`config_setting` matches if all values across all of them match. - -## The default condition - -The built-in condition `//conditions:default` matches when no other condition -matches. - -Because of the "exactly one match" rule, a configurable attribute with no match -and no default condition emits a `"no matching conditions"` error. This can -protect against silent failures from unexpected settings: - -```python -# myapp/BUILD - -config_setting( - name = "x86_cpu", - values = {"cpu": "x86"}, -) - -cc_library( - name = "x86_only_lib", - srcs = select({ - ":x86_cpu": ["lib.cc"], - }), -) -``` - -```sh -$ bazel build //myapp:x86_only_lib --cpu=arm -ERROR: Configurable attribute "srcs" doesn't match this configuration (would -a default condition help?). -Conditions checked: - //myapp:x86_cpu -``` - -For even clearer errors, you can set custom messages with `select()`'s -[`no_match_error`](#custom-error-messages) attribute. 
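-
-As a sketch of the usual fix, you can add a default branch so every other
-configuration still builds (here `lib_generic.cc` is a hypothetical portable
-fallback, not a file from the example above):
-
-```python
-cc_library(
-    name = "x86_only_lib",
-    srcs = select({
-        ":x86_cpu": ["lib.cc"],
-        # Fall back to a portable implementation on all other CPUs.
-        "//conditions:default": ["lib_generic.cc"],
-    }),
-)
-```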
-
-## Platforms
-
-While the ability to specify multiple flags on the command line provides
-flexibility, it can also be burdensome to individually set each one every time
-you want to build a target. [Platforms](/extending/platforms)
-let you consolidate these into simple bundles.
-
-```python
-# myapp/BUILD
-
-sh_binary(
-    name = "my_rocks",
-    srcs = select({
-        ":basalt": ["pyroxene.sh"],
-        ":marble": ["calcite.sh"],
-        "//conditions:default": ["feldspar.sh"],
-    }),
-)
-
-config_setting(
-    name = "basalt",
-    constraint_values = [
-        ":black",
-        ":igneous",
-    ],
-)
-
-config_setting(
-    name = "marble",
-    constraint_values = [
-        ":white",
-        ":metamorphic",
-    ],
-)
-
-# constraint_setting acts as an enum type, and constraint_value as an enum value.
-constraint_setting(name = "color")
-constraint_value(name = "black", constraint_setting = "color")
-constraint_value(name = "white", constraint_setting = "color")
-constraint_setting(name = "texture")
-constraint_value(name = "smooth", constraint_setting = "texture")
-constraint_setting(name = "type")
-constraint_value(name = "igneous", constraint_setting = "type")
-constraint_value(name = "metamorphic", constraint_setting = "type")
-
-platform(
-    name = "basalt_platform",
-    constraint_values = [
-        ":black",
-        ":igneous",
-    ],
-)
-
-platform(
-    name = "marble_platform",
-    constraint_values = [
-        ":white",
-        ":smooth",
-        ":metamorphic",
-    ],
-)
-```
-
-The platform can be specified on the command line. It activates the
-`config_setting`s that contain a subset of the platform's `constraint_values`,
-allowing those `config_setting`s to match in `select()` expressions.
-
-For example, in order to set the `srcs` attribute of `my_rocks` to `calcite.sh`,
-you can simply run
-
-```sh
-bazel build //myapp:my_rocks --platforms=//myapp:marble_platform
-```
-
-Without platforms, this might look something like
-
-```sh
-bazel build //myapp:my_rocks --define color=white --define texture=smooth --define type=metamorphic
-```
-
-`select()` can also directly read `constraint_value`s:
-
-```python
-constraint_setting(name = "type")
-constraint_value(name = "igneous", constraint_setting = "type")
-constraint_value(name = "metamorphic", constraint_setting = "type")
-sh_binary(
-    name = "my_rocks",
-    srcs = select({
-        ":igneous": ["igneous.sh"],
-        ":metamorphic": ["metamorphic.sh"],
-    }),
-)
-```
-
-This saves the need for boilerplate `config_setting`s when you only need to
-check against single values.
-
-Platforms are still under development. See the
-[documentation](/concepts/platforms) for details.
-
-## Combining `select()`s
-
-`select` can appear multiple times in the same attribute:
-
-```python
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"] +
-           select({
-               ":armeabi_mode": ["armeabi_src.sh"],
-               ":x86_mode": ["x86_src.sh"],
-           }) +
-           select({
-               ":opt_mode": ["opt_extras.sh"],
-               ":dbg_mode": ["dbg_extras.sh"],
-           }),
-)
-```
-
-Note: Some restrictions apply on what can be combined in the `select`s values:
-
-- Duplicate labels can appear in different paths of the same `select`.
-- Duplicate labels can *not* appear within the same path of a `select`.
-- Duplicate labels can *not* appear across multiple combined `select`s (no
-  matter what path).
-
-`select` cannot appear inside another `select`. If you need to nest `selects`
-and your attribute takes other targets as values, use an intermediate target:
-
-```python
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"],
-    deps = select({
-        ":armeabi_mode": [":armeabi_lib"],
-        ...
- }), -) - -sh_library( - name = "armeabi_lib", - srcs = select({ - ":opt_mode": ["armeabi_with_opt.sh"], - ... - }), -) -``` - -If you need a `select` to match when multiple conditions match, consider [AND -chaining](#and-chaining). - -## OR chaining - -Consider the following: - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1": [":standard_lib"], - ":config2": [":standard_lib"], - ":config3": [":standard_lib"], - ":config4": [":special_lib"], - }), -) -``` - -Most conditions evaluate to the same dep. But this syntax is hard to read and -maintain. It would be nice to not have to repeat `[":standard_lib"]` multiple -times. - -One option is to predefine the value as a BUILD variable: - -```python -STANDARD_DEP = [":standard_lib"] - -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1": STANDARD_DEP, - ":config2": STANDARD_DEP, - ":config3": STANDARD_DEP, - ":config4": [":special_lib"], - }), -) -``` - -This makes it easier to manage the dependency. But it still causes unnecessary -duplication. - -For more direct support, use one of the following: - -### `selects.with_or` - -The -[with_or](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectswith_or) -macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md) -module supports `OR`ing conditions directly inside a `select`: - -```python -load("@bazel_skylib//lib:selects.bzl", "selects") -``` - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = selects.with_or({ - (":config1", ":config2", ":config3"): [":standard_lib"], - ":config4": [":special_lib"], - }), -) -``` - -### `selects.config_setting_group` - - -The -[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group) -macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md) -module supports `OR`ing multiple `config_setting`s: - -```python -load("@bazel_skylib//lib:selects.bzl", "selects") -``` - - -```python -config_setting( - name = "config1", - values = {"cpu": "arm"}, -) -config_setting( - name = "config2", - values = {"compilation_mode": "dbg"}, -) -selects.config_setting_group( - name = "config1_or_2", - match_any = [":config1", ":config2"], -) -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1_or_2": [":standard_lib"], - "//conditions:default": [":other_lib"], - }), -) -``` - -Unlike `selects.with_or`, different targets can share `:config1_or_2` across -different attributes. - -It's an error for multiple conditions to match unless one is an unambiguous -"specialization" of the others or they all resolve to the same value. See [here](#configurable-build-example) for details. 
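-
-To illustrate the sharing point above, a second target can reuse the same
-group in a different attribute (a minimal sketch; `my_lib` and the `-DSPECIAL`
-flag are illustrative):
-
-```python
-cc_library(
-    name = "my_lib",
-    srcs = ["my_lib.cc"],
-    copts = select({
-        ":config1_or_2": ["-DSPECIAL"],
-        "//conditions:default": [],
-    }),
-)
-```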
- -## AND chaining - -If you need a `select` branch to match when multiple conditions match, use the -[Skylib](https://github.com/bazelbuild/bazel-skylib) macro -[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group): - -```python -config_setting( - name = "config1", - values = {"cpu": "arm"}, -) -config_setting( - name = "config2", - values = {"compilation_mode": "dbg"}, -) -selects.config_setting_group( - name = "config1_and_2", - match_all = [":config1", ":config2"], -) -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1_and_2": [":standard_lib"], - "//conditions:default": [":other_lib"], - }), -) -``` - -Unlike OR chaining, existing `config_setting`s can't be directly `AND`ed -inside a `select`. You have to explicitly wrap them in a `config_setting_group`. - -## Custom error messages - -By default, when no condition matches, the target the `select()` is attached to -fails with the error: - -```sh -ERROR: Configurable attribute "deps" doesn't match this configuration (would -a default condition help?). -Conditions checked: - //tools/cc_target_os:darwin - //tools/cc_target_os:android -``` - -This can be customized with the [`no_match_error`](/reference/be/functions#select) -attribute: - -```python -cc_library( - name = "my_lib", - deps = select( - { - "//tools/cc_target_os:android": [":android_deps"], - "//tools/cc_target_os:windows": [":windows_deps"], - }, - no_match_error = "Please build with an Android or Windows toolchain", - ), -) -``` - -```sh -$ bazel build //myapp:my_lib -ERROR: Configurable attribute "deps" doesn't match this configuration: Please -build with an Android or Windows toolchain -``` - -## Rules compatibility - -Rule implementations receive the *resolved values* of configurable -attributes. For example, given: - -```python -# myapp/BUILD - -some_rule( - name = "my_target", - some_attr = select({ - ":foo_mode": [":foo"], - ":bar_mode": [":bar"], - }), -) -``` - -```sh -$ bazel build //myapp/my_target --define mode=foo -``` - -Rule implementation code sees `ctx.attr.some_attr` as `[":foo"]`. - -Macros can accept `select()` clauses and pass them through to native -rules. But *they cannot directly manipulate them*. For example, there's no way -for a macro to convert - -```python -select({"foo": "val"}, ...) -``` - -to - -```python -select({"foo": "val_with_suffix"}, ...) -``` - -This is for two reasons. - -First, macros that need to know which path a `select` will choose *cannot work* -because macros are evaluated in Bazel's [loading phase](/run/build#loading), -which occurs before flag values are known. -This is a core Bazel design restriction that's unlikely to change any time soon. - -Second, macros that just need to iterate over *all* `select` paths, while -technically feasible, lack a coherent UI. Further design is necessary to change -this. - -## Bazel query and cquery - -Bazel [`query`](/query/guide) operates over Bazel's -[loading phase](/reference/glossary#loading-phase). -This means it doesn't know what command line flags a target uses since those -flags aren't evaluated until later in the build (in the -[analysis phase](/reference/glossary#analysis-phase)). -So it can't determine which `select()` branches are chosen. - -Bazel [`cquery`](/query/cquery) operates after Bazel's analysis phase, so it has -all this information and can accurately resolve `select()`s. 
- -Consider: - -```python -load("@bazel_skylib//rules:common_settings.bzl", "string_flag") -``` -```python -# myapp/BUILD - -string_flag( - name = "dog_type", - build_setting_default = "cat" -) - -cc_library( - name = "my_lib", - deps = select({ - ":long": [":foo_dep"], - ":short": [":bar_dep"], - }), -) - -config_setting( - name = "long", - flag_values = {":dog_type": "dachshund"}, -) - -config_setting( - name = "short", - flag_values = {":dog_type": "pug"}, -) -``` - -`query` overapproximates `:my_lib`'s dependencies: - -```sh -$ bazel query 'deps(//myapp:my_lib)' -//myapp:my_lib -//myapp:foo_dep -//myapp:bar_dep -``` - -while `cquery` shows its exact dependencies: - -```sh -$ bazel cquery 'deps(//myapp:my_lib)' --//myapp:dog_type=pug -//myapp:my_lib -//myapp:bar_dep -``` - -## FAQ - -### Why doesn't select() work in macros? - -select() *does* work in rules! See [Rules compatibility](#rules-compatibility) for -details. - -The key issue this question usually means is that select() doesn't work in -*macros*. These are different than *rules*. See the -documentation on [rules](/extending/rules) and [macros](/extending/macros) -to understand the difference. -Here's an end-to-end example: - -Define a rule and macro: - -```python -# myapp/defs.bzl - -# Rule implementation: when an attribute is read, all select()s have already -# been resolved. So it looks like a plain old attribute just like any other. -def _impl(ctx): - name = ctx.attr.name - allcaps = ctx.attr.my_config_string.upper() # This works fine on all values. - print("My name is " + name + " with custom message: " + allcaps) - -# Rule declaration: -my_custom_bazel_rule = rule( - implementation = _impl, - attrs = {"my_config_string": attr.string()}, -) - -# Macro declaration: -def my_custom_bazel_macro(name, my_config_string): - allcaps = my_config_string.upper() # This line won't work with select(s). - print("My name is " + name + " with custom message: " + allcaps) -``` - -Instantiate the rule and macro: - -```python -# myapp/BUILD - -load("//myapp:defs.bzl", "my_custom_bazel_rule") -load("//myapp:defs.bzl", "my_custom_bazel_macro") - -my_custom_bazel_rule( - name = "happy_rule", - my_config_string = select({ - "//third_party/bazel_platforms/cpu:x86_32": "first string", - "//third_party/bazel_platforms/cpu:ppc": "second string", - }), -) - -my_custom_bazel_macro( - name = "happy_macro", - my_config_string = "fixed string", -) - -my_custom_bazel_macro( - name = "sad_macro", - my_config_string = select({ - "//third_party/bazel_platforms/cpu:x86_32": "first string", - "//third_party/bazel_platforms/cpu:ppc": "other string", - }), -) -``` - -Building fails because `sad_macro` can't process the `select()`: - -```sh -$ bazel build //myapp:all -ERROR: /myworkspace/myapp/BUILD:17:1: Traceback - (most recent call last): -File "/myworkspace/myapp/BUILD", line 17 -my_custom_bazel_macro(name = "sad_macro", my_config_stri..."})) -File "/myworkspace/myapp/defs.bzl", line 4, in - my_custom_bazel_macro -my_config_string.upper() -type 'select' has no method upper(). -ERROR: error loading package 'myapp': Package 'myapp' contains errors. -``` - -Building succeeds when you comment out `sad_macro`: - -```sh -# Comment out sad_macro so it doesn't mess up the build. -$ bazel build //myapp:all -DEBUG: /myworkspace/myapp/defs.bzl:5:3: My name is happy_macro with custom message: FIXED STRING. -DEBUG: /myworkspace/myapp/hi.bzl:15:3: My name is happy_rule with custom message: FIRST STRING. 
-```
-
-This is impossible to change because *by definition* macros are evaluated before
-Bazel reads the build's command line flags. That means there isn't enough
-information to evaluate select()s.
-
-Macros can, however, pass `select()`s as opaque blobs to rules:
-
-```python
-# myapp/defs.bzl
-
-def my_custom_bazel_macro(name, my_config_string):
-    print("Invoking macro " + name)
-    my_custom_bazel_rule(
-        name = name + "_as_target",
-        my_config_string = my_config_string,
-    )
-```
-
-```sh
-$ bazel build //myapp:sad_macro_less_sad
-DEBUG: /myworkspace/myapp/defs.bzl:23:3: Invoking macro sad_macro_less_sad.
-DEBUG: /myworkspace/myapp/defs.bzl:15:3: My name is sad_macro_less_sad with custom message: FIRST STRING.
-```
-
-### Why does select() always return true?
-
-Because *macros* (but not rules) by definition
-[can't evaluate `select()`s](#faq-select-macro), any attempt to do so
-usually produces an error:
-
-```sh
-ERROR: /myworkspace/myapp/BUILD:17:1: Traceback
- (most recent call last):
-File "/myworkspace/myapp/BUILD", line 17
-my_custom_bazel_macro(name = "sad_macro", my_config_stri..."}))
-File "/myworkspace/myapp/defs.bzl", line 4, in
- my_custom_bazel_macro
-my_config_string.upper()
-type 'select' has no method upper().
-```
-
-Booleans are a special case that fail silently, so you should be particularly
-vigilant with them:
-
-```sh
-$ cat myapp/defs.bzl
-def my_boolean_macro(boolval):
-  print("TRUE" if boolval else "FALSE")
-
-$ cat myapp/BUILD
-load("//myapp:defs.bzl", "my_boolean_macro")
-my_boolean_macro(
-    boolval = select({
-        "//third_party/bazel_platforms/cpu:x86_32": True,
-        "//third_party/bazel_platforms/cpu:ppc": False,
-    }),
-)
-
-$ bazel build //myapp:all --cpu=x86
-DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE.
-$ bazel build //myapp:all --cpu=ppc
-DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE.
-```
-
-This happens because macros don't understand the contents of `select()`.
-So what they're really evaluating is the `select()` object itself. According to
-[Pythonic](https://docs.python.org/release/2.5.2/lib/truth.html) design
-standards, all objects aside from a very small number of exceptions
-automatically return true.
-
-### Can I read select() like a dict?
-
-Macros [can't](#faq-select-macro) evaluate select(s) because macros evaluate before
-Bazel knows what the build's command line parameters are. Can they at least read
-the `select()`'s dictionary to, for example, add a suffix to each value?
-
-Conceptually this is possible, but [it isn't yet a Bazel feature](https://github.com/bazelbuild/bazel/issues/8419).
-What you *can* do today is prepare a straight dictionary, then feed it into a -`select()`: - -```sh -$ cat myapp/defs.bzl -def selecty_genrule(name, select_cmd): - for key in select_cmd.keys(): - select_cmd[key] += " WITH SUFFIX" - native.genrule( - name = name, - outs = [name + ".out"], - srcs = [], - cmd = "echo " + select(select_cmd + {"//conditions:default": "default"}) - + " > $@" - ) - -$ cat myapp/BUILD -selecty_genrule( - name = "selecty", - select_cmd = { - "//third_party/bazel_platforms/cpu:x86_32": "x86 mode", - }, -) - -$ bazel build //testapp:selecty --cpu=x86 && cat bazel-genfiles/testapp/selecty.out -x86 mode WITH SUFFIX -``` - -If you'd like to support both `select()` and native types, you can do this: - -```sh -$ cat myapp/defs.bzl -def selecty_genrule(name, select_cmd): - cmd_suffix = "" - if type(select_cmd) == "string": - cmd_suffix = select_cmd + " WITH SUFFIX" - elif type(select_cmd) == "dict": - for key in select_cmd.keys(): - select_cmd[key] += " WITH SUFFIX" - cmd_suffix = select(select_cmd + {"//conditions:default": "default"}) - - native.genrule( - name = name, - outs = [name + ".out"], - srcs = [], - cmd = "echo " + cmd_suffix + "> $@", - ) -``` - -### Why doesn't select() work with bind()? - -First of all, do not use `bind()`. It is deprecated in favor of `alias()`. - -The technical answer is that [`bind()`](/reference/be/workspace#bind) is a repo -rule, not a BUILD rule. - -Repo rules do not have a specific configuration, and aren't evaluated in -the same way as BUILD rules. Therefore, a `select()` in a `bind()` can't -actually evaluate to any specific branch. - -Instead, you should use [`alias()`](/reference/be/general#alias), with a `select()` in -the `actual` attribute, to perform this type of run-time determination. This -works correctly, since `alias()` is a BUILD rule, and is evaluated with a -specific configuration. - -You can even have a `bind()` target point to an `alias()`, if needed. - -```sh -$ cat WORKSPACE -workspace(name = "myapp") -bind(name = "openssl", actual = "//:ssl") -http_archive(name = "alternative", ...) -http_archive(name = "boringssl", ...) - -$ cat BUILD -config_setting( - name = "alt_ssl", - define_values = { - "ssl_library": "alternative", - }, -) - -alias( - name = "ssl", - actual = select({ - "//:alt_ssl": "@alternative//:ssl", - "//conditions:default": "@boringssl//:ssl", - }), -) -``` - -With this setup, you can pass `--define ssl_library=alternative`, and any target -that depends on either `//:ssl` or `//external:ssl` will see the alternative -located at `@alternative//:ssl`. - -But really, stop using `bind()`. - -### Why doesn't my select() choose what I expect? - -If `//myapp:foo` has a `select()` that doesn't choose the condition you expect, -use [cquery](/query/cquery) and `bazel config` to debug: - -If `//myapp:foo` is the top-level target you're building, run: - -```sh -$ bazel cquery //myapp:foo -//myapp:foo (12e23b9a2b534a) -``` - -If you're building some other target `//bar` that depends on -//myapp:foo somewhere in its subgraph, run: - -```sh -$ bazel cquery 'somepath(//bar, //myapp:foo)' -//bar:bar (3ag3193fee94a2) -//bar:intermediate_dep (12e23b9a2b534a) -//myapp:foo (12e23b9a2b534a) -``` - -The `(12e23b9a2b534a)` next to `//myapp:foo` is a *hash* of the -configuration that resolves `//myapp:foo`'s `select()`. 
You can inspect its -values with `bazel config`: - -```sh -$ bazel config 12e23b9a2b534a -BuildConfigurationValue 12e23b9a2b534a -Fragment com.google.devtools.build.lib.analysis.config.CoreOptions { - cpu: darwin - compilation_mode: fastbuild - ... -} -Fragment com.google.devtools.build.lib.rules.cpp.CppOptions { - linkopt: [-Dfoo=bar] - ... -} -... -``` - -Then compare this output against the settings expected by each `config_setting`. - -`//myapp:foo` may exist in different configurations in the same build. See the -[cquery docs](/query/cquery) for guidance on using `somepath` to get the right -one. - -Caution: To prevent restarting the Bazel server, invoke `bazel config` with the -same command line flags as the `bazel cquery`. The `config` command relies on -the configuration nodes from the still-running server of the previous command. - -### Why doesn't `select()` work with platforms? - -Bazel doesn't support configurable attributes checking whether a given platform -is the target platform because the semantics are unclear. - -For example: - -```py -platform( - name = "x86_linux_platform", - constraint_values = [ - "@platforms//cpu:x86", - "@platforms//os:linux", - ], -) - -cc_library( - name = "lib", - srcs = [...], - linkopts = select({ - ":x86_linux_platform": ["--enable_x86_optimizations"], - "//conditions:default": [], - }), -) -``` - -In this `BUILD` file, which `select()` should be used if the target platform has both the -`@platforms//cpu:x86` and `@platforms//os:linux` constraints, but is **not** the -`:x86_linux_platform` defined here? The author of the `BUILD` file and the user -who defined the separate platform may have different ideas. - -#### What should I do instead? - -Instead, define a `config_setting` that matches **any** platform with -these constraints: - -```py -config_setting( - name = "is_x86_linux", - constraint_values = [ - "@platforms//cpu:x86", - "@platforms//os:linux", - ], -) - -cc_library( - name = "lib", - srcs = [...], - linkopts = select({ - ":is_x86_linux": ["--enable_x86_optimizations"], - "//conditions:default": [], - }), -) -``` - -This process defines specific semantics, making it clearer to users what -platforms meet the desired conditions. - -#### What if I really, really want to `select` on the platform? - -If your build requirements specifically require checking the platform, you -can flip the value of the `--platforms` flag in a `config_setting`: - -```py -config_setting( - name = "is_specific_x86_linux_platform", - values = { - "platforms": ["//package:x86_linux_platform"], - }, -) - -cc_library( - name = "lib", - srcs = [...], - linkopts = select({ - ":is_specific_x86_linux_platform": ["--enable_x86_optimizations"], - "//conditions:default": [], - }), -) -``` - -The Bazel team doesn't endorse doing this; it overly constrains your build and -confuses users when the expected condition does not match. - -[BuildSettings]: /extending/config#user-defined-build-settings diff --git a/8.0.1/docs/sandboxing.mdx b/8.0.1/docs/sandboxing.mdx deleted file mode 100644 index 6869795..0000000 --- a/8.0.1/docs/sandboxing.mdx +++ /dev/null @@ -1,174 +0,0 @@ ---- -title: 'Sandboxing' ---- - - - -This article covers sandboxing in Bazel and debugging your sandboxing -environment. - -*Sandboxing* is a permission restricting strategy that isolates processes from -each other or from resources in a system. For Bazel, this means restricting file -system access. 
- -Bazel's file system sandbox runs processes in a working directory that only -contains known inputs, such that compilers and other tools don't see source -files they should not access, unless they know the absolute paths to them. - -Sandboxing doesn't hide the host environment in any way. Processes can freely -access all files on the file system. However, on platforms that support user -namespaces, processes can't modify any files outside their working directory. -This ensures that the build graph doesn't have hidden dependencies that could -affect the reproducibility of the build. - -More specifically, Bazel constructs an `execroot/` directory for each action, -which acts as the action's work directory at execution time. `execroot/` -contains all input files to the action and serves as the container for any -generated outputs. Bazel then uses an operating-system-provided technique, -containers on Linux and `sandbox-exec` on macOS, to constrain the action within -`execroot/`. - -## Reasons for sandboxing - -- Without action sandboxing, Bazel doesn't know if a tool uses undeclared - input files (files that are not explicitly listed in the dependencies of an - action). When one of the undeclared input files changes, Bazel still - believes that the build is up-to-date and won’t rebuild the action. This can - result in an incorrect incremental build. - -- Incorrect reuse of cache entries creates problems during remote caching. A - bad cache entry in a shared cache affects every developer on the project, - and wiping the entire remote cache is not a feasible solution. - -- Sandboxing mimics the behavior of remote execution — if a build works well - with sandboxing, it will likely also work with remote execution. By making - remote execution upload all necessary files (including local tools), you can - significantly reduce maintenance costs for compile clusters compared to - having to install the tools on every machine in the cluster every time you - want to try out a new compiler or make a change to an existing tool. - -## What sandbox strategy to use - -You can choose which kind of sandboxing to use, if any, with the -[strategy flags](user-manual.html#strategy-options). Using the `sandboxed` -strategy makes Bazel pick one of the sandbox implementations listed below, -preferring an OS-specific sandbox to the less hermetic generic one. -[Persistent workers](/remote/persistent) run in a generic sandbox if you pass -the `--worker_sandboxing` flag. - -The `local` (a.k.a. `standalone`) strategy does not do any kind of sandboxing. -It simply executes the action's command line with the working directory set to -the execroot of your workspace. - -`processwrapper-sandbox` is a sandboxing strategy that does not require any -"advanced" features - it should work on any POSIX system out of the box. It -builds a sandbox directory consisting of symlinks that point to the original -source files, executes the action's command line with the working directory set -to this directory instead of the execroot, then moves the known output artifacts -out of the sandbox into the execroot and deletes the sandbox. This prevents the -action from accidentally using any input files that are not declared and from -littering the execroot with unknown output files. - -`linux-sandbox` goes one step further and builds on top of the -`processwrapper-sandbox`. Similar to what Docker does under the hood, it uses -Linux Namespaces (User, Mount, PID, Network and IPC namespaces) to isolate the -action from the host. 
That is, it makes the entire filesystem read-only except -for the sandbox directory, so the action cannot accidentally modify anything on -the host filesystem. This prevents situations like a buggy test accidentally rm --rf'ing your $HOME directory. Optionally, you can also prevent the action from -accessing the network. `linux-sandbox` uses PID namespaces to prevent the action -from seeing any other processes and to reliably kill all processes (even daemons -spawned by the action) at the end. - -`darwin-sandbox` is similar, but for macOS. It uses Apple's `sandbox-exec` tool -to achieve roughly the same as the Linux sandbox. - -Both the `linux-sandbox` and the `darwin-sandbox` do not work in a "nested" -scenario due to restrictions in the mechanisms provided by the operating -systems. Because Docker also uses Linux namespaces for its container magic, you -cannot easily run `linux-sandbox` inside a Docker container, unless you use -`docker run --privileged`. On macOS, you cannot run `sandbox-exec` inside a -process that's already being sandboxed. Thus, in these cases, Bazel -automatically falls back to using `processwrapper-sandbox`. - -If you would rather get a build error — such as to not accidentally build with a -less strict execution strategy — explicitly modify the list of execution -strategies that Bazel tries to use (for example, `bazel build ---spawn_strategy=worker,linux-sandbox`). - -Dynamic execution usually requires sandboxing for local execution. To opt out, -pass the `--experimental_local_lockfree_output` flag. Dynamic execution silently -sandboxes [persistent workers](/remote/persistent). - -## Downsides to sandboxing - -- Sandboxing incurs extra setup and teardown cost. How big this cost is - depends on many factors, including the shape of the build and the - performance of the host OS. For Linux, sandboxed builds are rarely more than - a few percent slower. Setting `--reuse_sandbox_directories` can - mitigate the setup and teardown cost. - -- Sandboxing effectively disables any cache the tool may have. You can - mitigate this by using [persistent workers](/remote/persistent), at - the cost of weaker sandbox guarantees. - -- [Multiplex workers](/remote/multiplex) require explicit worker support - to be sandboxed. Workers that do not support multiplex sandboxing run as - singleplex workers under dynamic execution, which can cost extra memory. - -## Debugging - -Follow the strategies below to debug issues with sandboxing. - -### Deactivated namespaces - -On some platforms, such as -[Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/) -cluster nodes or Debian, user namespaces are deactivated by default due to -security concerns. If the `/proc/sys/kernel/unprivileged_userns_clone` file -exists and contains a 0, you can activate user namespaces by running: - -```posix-terminal - sudo sysctl kernel.unprivileged_userns_clone=1 -``` - -### Rule execution failures - -The sandbox may fail to execute rules because of the system setup. If you see a -message like `namespace-sandbox.c:633: execvp(argv[0], argv): No such file or -directory`, try to deactivate the sandbox with `--strategy=Genrule=local` for -genrules, and `--spawn_strategy=local` for other rules. - -### Detailed debugging for build failures - -If your build failed, use `--verbose_failures` and `--sandbox_debug` to make -Bazel show the exact command it ran when your build failed, including the part -that sets up the sandbox. 
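-
-For instance, a debugging invocation might look like the following (the target
-label is a placeholder; the two flags are the ones described above):
-
-```posix-terminal
-bazel build //path/to/your/project:all --verbose_failures --sandbox_debug
-```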
-
-Example error message:
-
-```
-ERROR: path/to/your/project/BUILD:1:1: compilation of rule
-'//path/to/your/project:all' failed:
-
-Sandboxed execution failed, which may be legitimate (such as a compiler error),
-or due to missing dependencies. To enter the sandbox environment for easier
-debugging, run the following command in parentheses. On command failure, a bash
-shell running inside the sandbox will then automatically be spawned
-
-namespace-sandbox failed: error executing command
-  (cd /some/path && \
-  exec env - \
-    LANG=en_US \
-    PATH=/some/path/bin:/bin:/usr/bin \
-    PYTHONPATH=/usr/local/some/path \
-  /some/path/namespace-sandbox @/sandbox/root/path/this-sandbox-name.params --
-  /some/path/to/your/some-compiler --some-params some-target)
-```
-
-You can now inspect the generated sandbox directory to see which files Bazel
-created, and run the command again to see how it behaves.
-
-Note that Bazel does not delete the sandbox directory when you use
-`--sandbox_debug`. Unless you are actively debugging, you should disable
-`--sandbox_debug` because it fills up your disk over time.
diff --git a/8.0.1/extending/aspects.mdx b/8.0.1/extending/aspects.mdx
deleted file mode 100644
index 4e25125..0000000
--- a/8.0.1/extending/aspects.mdx
+++ /dev/null
@@ -1,412 +0,0 @@
----
-title: 'Aspects'
----
-
-
-This page explains the basics and benefits of using
-[aspects](/rules/lib/globals/bzl#aspect) and provides simple and advanced
-examples.
-
-Aspects allow augmenting build dependency graphs with additional information
-and actions. Some typical scenarios when aspects can be useful:
-
-* IDEs that integrate Bazel can use aspects to collect information about the
-  project.
-* Code generation tools can leverage aspects to execute on their inputs in a
-  *target-agnostic* manner. As an example, `BUILD` files can specify a hierarchy
-  of [protobuf](https://developers.google.com/protocol-buffers/) library
-  definitions, and language-specific rules can use aspects to attach
-  actions generating protobuf support code for a particular language.
-
-## Aspect basics
-
-`BUILD` files provide a description of a project’s source code: what source
-files are part of the project, what artifacts (_targets_) should be built from
-those files, what the dependencies between those files are, etc. Bazel uses
-this information to perform a build, that is, it figures out the set of actions
-needed to produce the artifacts (such as running a compiler or linker) and
-executes those actions. Bazel accomplishes this by constructing a _dependency
-graph_ between targets and visiting this graph to collect those actions.
-
-Consider the following `BUILD` file:
-
-```python
-java_library(name = 'W', ...)
-java_library(name = 'Y', deps = [':W'], ...)
-java_library(name = 'Z', deps = [':W'], ...)
-java_library(name = 'Q', ...)
-java_library(name = 'T', deps = [':Q'], ...)
-java_library(name = 'X', deps = [':Y',':Z'], runtime_deps = [':T'], ...)
-```
-
-This `BUILD` file defines a dependency graph shown in the following figure:
-
-![Build graph](/rules/build-graph.png "Build graph")
-
-**Figure 1.** `BUILD` file dependency graph.
-
-Bazel analyzes this dependency graph by calling an implementation function of
-the corresponding [rule](/extending/rules) (in this case "java_library") for every
-target in the above example.
Rule implementation functions generate actions that -build artifacts, such as `.jar` files, and pass information, such as locations -and names of those artifacts, to the reverse dependencies of those targets in -[providers](/extending/rules#providers). - -Aspects are similar to rules in that they have an implementation function that -generates actions and returns providers. However, their power comes from -the way the dependency graph is built for them. An aspect has an implementation -and a list of all attributes it propagates along. Consider an aspect A that -propagates along attributes named "deps". This aspect can be applied to -a target X, yielding an aspect application node A(X). During its application, -aspect A is applied recursively to all targets that X refers to in its "deps" -attribute (all attributes in A's propagation list). - -Thus a single act of applying aspect A to a target X yields a "shadow graph" of -the original dependency graph of targets shown in the following figure: - -![Build Graph with Aspect](/rules/build-graph-aspects.png "Build graph with aspects") - -**Figure 2.** Build graph with aspects. - -The only edges that are shadowed are the edges along the attributes in -the propagation set, thus the `runtime_deps` edge is not shadowed in this -example. An aspect implementation function is then invoked on all nodes in -the shadow graph similar to how rule implementations are invoked on the nodes -of the original graph. - -## Simple example - -This example demonstrates how to recursively print the source files for a -rule and all of its dependencies that have a `deps` attribute. It shows -an aspect implementation, an aspect definition, and how to invoke the aspect -from the Bazel command line. - -```python -def _print_aspect_impl(target, ctx): - # Make sure the rule has a srcs attribute. - if hasattr(ctx.rule.attr, 'srcs'): - # Iterate through the files that make up the sources and - # print their paths. - for src in ctx.rule.attr.srcs: - for f in src.files.to_list(): - print(f.path) - return [] - -print_aspect = aspect( - implementation = _print_aspect_impl, - attr_aspects = ['deps'], -) -``` - -Let's break the example up into its parts and examine each one individually. - -### Aspect definition - -```python -print_aspect = aspect( - implementation = _print_aspect_impl, - attr_aspects = ['deps'], -) -``` -Aspect definitions are similar to rule definitions, and defined using -the [`aspect`](/rules/lib/globals/bzl#aspect) function. - -Just like a rule, an aspect has an implementation function which in this case is -``_print_aspect_impl``. - -``attr_aspects`` is a list of rule attributes along which the aspect propagates. -In this case, the aspect will propagate along the ``deps`` attribute of the -rules that it is applied to. - -Another common argument for `attr_aspects` is `['*']` which would propagate the -aspect to all attributes of a rule. - -### Aspect implementation - -```python -def _print_aspect_impl(target, ctx): - # Make sure the rule has a srcs attribute. - if hasattr(ctx.rule.attr, 'srcs'): - # Iterate through the files that make up the sources and - # print their paths. - for src in ctx.rule.attr.srcs: - for f in src.files.to_list(): - print(f.path) - return [] -``` - -Aspect implementation functions are similar to the rule implementation -functions. 
They return [providers](/extending/rules#providers), can generate
-[actions](/extending/rules#actions), and take two arguments:
-
-* `target`: the [target](/rules/lib/builtins/Target) the aspect is being applied to.
-* `ctx`: the [`ctx`](/rules/lib/builtins/ctx) object that can be used to access attributes
-  and generate outputs and actions.
-
-The implementation function can access the attributes of the target rule via
-[`ctx.rule.attr`](/rules/lib/builtins/ctx#rule). It can examine providers that are
-provided by the target to which it is applied (via the `target` argument).
-
-Aspects are required to return a list of providers. In this example, the aspect
-does not provide anything, so it returns an empty list.
-
-### Invoking the aspect using the command line
-
-The simplest way to apply an aspect is from the command line using the
-[`--aspects`](/reference/command-line-reference#flag--aspects)
-argument. Assuming the aspect above is defined in a file named `print.bzl`,
-this:
-
-```bash
-bazel build //MyExample:example --aspects print.bzl%print_aspect
-```
-
-would apply the `print_aspect` to the target `example` and all of the
-target rules that are accessible recursively via the `deps` attribute.
-
-The `--aspects` flag takes one argument, which is a specification of the aspect
-in the format `<extension file label>%<aspect top-level name>`.
-
-## Advanced example
-
-The following example demonstrates using an aspect from a target rule
-that counts files in targets, potentially filtering them by extension.
-It shows how to use a provider to return values, how to use parameters to pass
-an argument into an aspect implementation, and how to invoke an aspect from a rule.
-
-Note: Aspects added in rules' attributes are called *rule-propagated aspects* as
-opposed to *command-line aspects* that are specified using the ``--aspects``
-flag.
-
-`file_count.bzl` file:
-
-```python
-FileCountInfo = provider(
-    fields = {
-        'count' : 'number of files'
-    }
-)
-
-def _file_count_aspect_impl(target, ctx):
-    count = 0
-    # Make sure the rule has a srcs attribute.
-    if hasattr(ctx.rule.attr, 'srcs'):
-        # Iterate through the sources counting files
-        for src in ctx.rule.attr.srcs:
-            for f in src.files.to_list():
-                if ctx.attr.extension == '*' or ctx.attr.extension == f.extension:
-                    count = count + 1
-    # Get the counts from our dependencies.
-    for dep in ctx.rule.attr.deps:
-        count = count + dep[FileCountInfo].count
-    return [FileCountInfo(count = count)]
-
-file_count_aspect = aspect(
-    implementation = _file_count_aspect_impl,
-    attr_aspects = ['deps'],
-    attrs = {
-        'extension' : attr.string(values = ['*', 'h', 'cc']),
-    }
-)
-
-def _file_count_rule_impl(ctx):
-    for dep in ctx.attr.deps:
-        print(dep[FileCountInfo].count)
-
-file_count_rule = rule(
-    implementation = _file_count_rule_impl,
-    attrs = {
-        'deps' : attr.label_list(aspects = [file_count_aspect]),
-        'extension' : attr.string(default = '*'),
-    },
-)
-```
-
-`BUILD.bazel` file:
-
-```python
-load('//:file_count.bzl', 'file_count_rule')
-
-cc_library(
-    name = 'lib',
-    srcs = [
-        'lib.h',
-        'lib.cc',
-    ],
-)
-
-cc_binary(
-    name = 'app',
-    srcs = [
-        'app.h',
-        'app.cc',
-        'main.cc',
-    ],
-    deps = ['lib'],
-)
-
-file_count_rule(
-    name = 'file_count',
-    deps = ['app'],
-    extension = 'h',
-)
-```
-
-### Aspect definition
-
-```python
-file_count_aspect = aspect(
-    implementation = _file_count_aspect_impl,
-    attr_aspects = ['deps'],
-    attrs = {
-        'extension' : attr.string(values = ['*', 'h', 'cc']),
-    }
-)
-```
-
-This example shows how the aspect propagates through the ``deps`` attribute.
-
-``attrs`` defines a set of attributes for an aspect. Public aspect attributes
-define parameters and can only be of types ``bool``, ``int``, or ``string``.
-For rule-propagated aspects, ``int`` and ``string`` parameters must have
-``values`` specified on them. This example has a parameter called ``extension``
-that is allowed to have '``*``', '``h``', or '``cc``' as a value.
-
-For rule-propagated aspects, parameter values are taken from the rule requesting
-the aspect, using the attribute of the rule that has the same name and type
-(see the definition of ``file_count_rule``).
-
-For command-line aspects, the parameter values can be passed using the
-[``--aspects_parameters``](/reference/command-line-reference#flag--aspects_parameters)
-flag. The ``values`` restriction on ``int`` and ``string`` parameters may be
-omitted.
-
-Aspects are also allowed to have private attributes of types ``label`` or
-``label_list``. Private label attributes can be used to specify dependencies on
-tools or libraries that are needed for actions generated by aspects. There is no
-private attribute defined in this example, but the following code snippet
-demonstrates how you could pass in a tool to an aspect:
-
-```python
-...
-    attrs = {
-        '_protoc' : attr.label(
-            default = Label('//tools:protoc'),
-            executable = True,
-            cfg = "exec"
-        )
-    }
-...
-```
-
-### Aspect implementation
-
-```python
-FileCountInfo = provider(
-    fields = {
-        'count' : 'number of files'
-    }
-)
-
-def _file_count_aspect_impl(target, ctx):
-    count = 0
-    # Make sure the rule has a srcs attribute.
-    if hasattr(ctx.rule.attr, 'srcs'):
-        # Iterate through the sources counting files
-        for src in ctx.rule.attr.srcs:
-            for f in src.files.to_list():
-                if ctx.attr.extension == '*' or ctx.attr.extension == f.extension:
-                    count = count + 1
-    # Get the counts from our dependencies.
-    for dep in ctx.rule.attr.deps:
-        count = count + dep[FileCountInfo].count
-    return [FileCountInfo(count = count)]
-```
-
-Just like a rule implementation function, an aspect implementation function
-returns a list of providers that are accessible to the targets that depend on
-it.
-
-In this example, ``FileCountInfo`` is defined as a provider that has one
-field ``count``. It is best practice to explicitly define the fields of a
-provider using the ``fields`` attribute.
-
-The set of providers for an aspect application A(X) is the union of providers
-that come from the implementation of a rule for target X and from the
-implementation of aspect A. The providers that a rule implementation propagates
-are created and frozen before aspects are applied and cannot be modified from an
-aspect. It is an error if a target and an aspect that is applied to it each
-provide a provider with the same type, with the exceptions of
-[`OutputGroupInfo`](/rules/lib/providers/OutputGroupInfo)
-(which is merged, so long as the
-rule and aspect specify different output groups) and
-[`InstrumentedFilesInfo`](/rules/lib/providers/InstrumentedFilesInfo)
-(which is taken from the aspect). This means that aspect implementations may
-never return [`DefaultInfo`](/rules/lib/providers/DefaultInfo).
-
-The parameters and private attributes are passed in the attributes of the
-``ctx``. This example references the ``extension`` parameter and determines
-what files to count.
-
-Inside the implementation function, the values of attributes along which
-the aspect is propagated (from the `attr_aspects` list) are replaced with
-the results of applying the aspect to them.
-For example, if target X has Y and Z in its deps, `ctx.rule.attr.deps` for
-A(X) will be [A(Y), A(Z)].
-In this example, ``ctx.rule.attr.deps`` are Target objects that are the
-results of applying the aspect to the 'deps' of the original target to which
-the aspect has been applied.
-
-In the example, the aspect accesses the ``FileCountInfo`` provider from the
-target's dependencies to accumulate the total transitive number of files.
-
-### Invoking the aspect from a rule
-
-```python
-def _file_count_rule_impl(ctx):
-    for dep in ctx.attr.deps:
-        print(dep[FileCountInfo].count)
-
-file_count_rule = rule(
-    implementation = _file_count_rule_impl,
-    attrs = {
-        'deps' : attr.label_list(aspects = [file_count_aspect]),
-        'extension' : attr.string(default = '*'),
-    },
-)
-```
-
-The rule implementation demonstrates how to access the ``FileCountInfo``
-via the ``ctx.attr.deps``.
-
-The rule definition demonstrates how to define a parameter (``extension``)
-and give it a default value (``*``). Note that having a default value that
-was not one of '``cc``', '``h``', or '``*``' would be an error due to the
-restrictions placed on the parameter in the aspect definition.
-
-### Invoking an aspect through a target rule
-
-```python
-load('//:file_count.bzl', 'file_count_rule')
-
-cc_binary(
-    name = 'app',
-...
-)
-
-file_count_rule(
-    name = 'file_count',
-    deps = ['app'],
-    extension = 'h',
-)
-```
-
-This demonstrates how to pass the ``extension`` parameter into the aspect
-via the rule. Since the ``extension`` parameter has a default value in the
-rule implementation, ``extension`` would be considered an optional parameter.
-
-When the ``file_count`` target is built, the aspect will be evaluated for it
-and for all of the targets accessible recursively via ``deps``.
-
-## References
-
-* [`aspect` API reference](/rules/lib/globals/bzl#aspect)
diff --git a/8.0.1/extending/auto-exec-groups.mdx b/8.0.1/extending/auto-exec-groups.mdx
deleted file mode 100644
index abba3d5..0000000
--- a/8.0.1/extending/auto-exec-groups.mdx
+++ /dev/null
@@ -1,181 +0,0 @@
----
-title: 'Automatic Execution Groups (AEGs)'
----
-
-
-Automatic execution groups select an [execution platform][exec_platform]
-for each toolchain type. In other words, one target can have multiple
-execution platforms without defining execution groups.
-
-## Quick summary
-
-Automatic execution groups are closely connected to toolchains. If you are using
-toolchains, you need to set them on the affected actions (actions which use an
-executable or a tool from a toolchain) by adding the `toolchain` parameter. For
-example:
-
-```python
-ctx.actions.run(
-    ...,
-    executable = ctx.toolchains['@bazel_tools//tools/jdk:toolchain_type'].tool,
-    ...,
-    toolchain = '@bazel_tools//tools/jdk:toolchain_type',
-)
-```
-If the action does not use a tool or executable from a toolchain, and Bazel
-doesn't detect that ([the error](#first-error-message) is raised), you can set
-`toolchain = None`.
-
-If you need to use multiple toolchains on a single execution platform (an action
-uses an executable or tools from two or more toolchains), you need to manually
-define [exec_groups][exec_groups] (see the
-[When should I use a custom exec_group?][multiple_toolchains_exec_groups]
-section).
-
-## History
-
-Before AEGs, the execution platform was selected at the rule level. For example:
-
-```python
-my_rule = rule(
-    _impl,
-    toolchains = ['//tools:toolchain_type_1', '//tools:toolchain_type_2'],
-)
-```
-
-Rule `my_rule` registers two toolchain types.
-This means that the [Toolchain
-Resolution](https://bazel.build/extending/toolchains#toolchain-resolution) was
-used to find an execution platform which supports both toolchain types. The
-selected execution platform was used for each registered action inside the
-rule, unless specified differently with [exec_groups][exec_groups].
-In other words, all actions inside the rule used to have a single execution
-platform even if they used tools from different toolchains (the execution
-platform was selected once per target). This resulted in failures when there
-was no execution platform supporting all toolchains.
-
-## Current state
-
-With AEGs, the execution platform is selected for each toolchain type. The
-implementation function of the earlier example, `my_rule`, would look like:
-
-```python
-def _impl(ctx):
-    ctx.actions.run(
-        mnemonic = "FirstAction",
-        executable = ctx.toolchains['//tools:toolchain_type_1'].tool,
-        toolchain = '//tools:toolchain_type_1',
-    )
-
-    ctx.actions.run(
-        mnemonic = "SecondAction",
-        executable = ctx.toolchains['//tools:toolchain_type_2'].tool,
-        toolchain = '//tools:toolchain_type_2',
-    )
-```
-
-This rule creates two actions: the `FirstAction`, which uses an executable from
-`//tools:toolchain_type_1`, and the `SecondAction`, which uses an executable
-from `//tools:toolchain_type_2`. Before AEGs, both of these actions would be
-executed on a single execution platform which supports both toolchain types.
-With AEGs, by adding the `toolchain` parameter inside the actions, each action
-executes on the execution platform that provides the toolchain. The actions may
-be executed on different execution platforms.
-
-The same applies to [ctx.actions.run_shell][run_shell], where the `toolchain`
-parameter should be added when `tools` come from a toolchain.
-
-## Difference between custom exec groups and automatic exec groups
-
-As the name suggests, AEGs are exec groups created automatically for each
-toolchain type registered on a rule. There is no need to manually specify them,
-unlike the "classic" exec groups.
-
-### When should I use a custom exec_group?
-
-Custom exec_groups are needed only in the case where multiple toolchains need
-to execute on a single execution platform. In all other cases there's no need
-to define custom exec_groups. For example:
-
-```python
-def _impl(ctx):
-    ctx.actions.run(
-        ...,
-        executable = ctx.toolchains['//tools:toolchain_type_1'].tool,
-        tools = [ctx.toolchains['//tools:toolchain_type_2'].tool],
-        exec_group = 'two_toolchains',
-    )
-```
-
-```python
-my_rule = rule(
-    _impl,
-    exec_groups = {
-        "two_toolchains": exec_group(
-            toolchains = ['//tools:toolchain_type_1', '//tools:toolchain_type_2'],
-        ),
-    }
-)
-```
-
-## Migration of AEGs
-
-Internally in google3, Blaze is already using AEGs.
-Externally for Bazel, the migration is in progress. Some rules are already using
-this feature (e.g. Java and C++ rules).
-
-### Which Bazel versions support this migration?
-
-AEGs are fully supported from Bazel 7.
-
-### How to enable AEGs?
-
-Set `--incompatible_auto_exec_groups` to true. More information about the flag
-is available on [the GitHub issue][github_flag].
-
-### How to enable AEGs inside a particular rule?
-
-Set the `_use_auto_exec_groups` attribute on a rule.
-
-```python
-my_rule = rule(
-    _impl,
-    attrs = {
-        "_use_auto_exec_groups": attr.bool(default = True),
-    }
-)
-```
-This enables AEGs only in `my_rule`, and its actions start using the new logic
-when selecting the execution platform. This attribute overrides the
-incompatible flag.
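-
-For completeness, here is a minimal sketch (not taken from the original page)
-of a rule that opts in via the attribute and tags its action with the
-`toolchain` parameter; the `//tools:toolchain_type_1` label and the `tool`
-provider field are assumptions carried over from the examples above:
-
-```python
-def _impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name + ".out")
-
-    # With AEGs enabled via _use_auto_exec_groups, this action runs on the
-    # execution platform resolved for //tools:toolchain_type_1, independently
-    # of any other toolchain types registered on the rule.
-    ctx.actions.run(
-        mnemonic = "FirstAction",
-        executable = ctx.toolchains['//tools:toolchain_type_1'].tool,
-        toolchain = '//tools:toolchain_type_1',
-        outputs = [out],
-    )
-    return [DefaultInfo(files = depset([out]))]
-
-my_rule = rule(
-    _impl,
-    attrs = {
-        "_use_auto_exec_groups": attr.bool(default = True),
-    },
-    toolchains = ['//tools:toolchain_type_1'],
-)
-```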
-
-### How to disable AEGs in case of an error?
-
-Set `--incompatible_auto_exec_groups` to false to completely disable AEGs in
-your project ([flag's GitHub issue][github_flag]), or disable a particular rule
-by setting the `_use_auto_exec_groups` attribute to `False`
-([more details about the attribute](#how-enable-particular-rule)).
-
-### Error messages while migrating to AEGs
-
-#### Couldn't identify if tools are from implicit dependencies or a toolchain. Please set the toolchain parameter. If you're not using a toolchain, set it to 'None'.
-  * In this case, you get the stack of calls that led to the error, so you can
-    clearly see which exact action needs the toolchain parameter. Check which
-    toolchain is used for the action and set it with the toolchain param. If no
-    toolchain is used inside the action for tools or executable, set it to
-    `None`.
-
-#### Action declared for non-existent toolchain '[toolchain_type]'.
-  * This means that you've set the toolchain parameter on the action but didn't
-register it on the rule. Register the toolchain on the rule, or set
-`toolchain = None` inside the action.
-
-## Additional material
-
-For more information, check the design document:
-[Automatic exec groups for toolchains][aegs_design_doc].
-
-[exec_platform]: https://bazel.build/extending/platforms#:~:text=Execution%20%2D%20a%20platform%20on%20which%20build%20tools%20execute%20build%20actions%20to%20produce%20intermediate%20and%20final%20outputs.
-[exec_groups]: https://bazel.build/extending/exec-groups
-[github_flag]: https://github.com/bazelbuild/bazel/issues/17134
-[aegs_design_doc]: https://docs.google.com/document/d/1-rbP_hmKs9D639YWw5F_JyxPxL2bi6dSmmvj_WXak9M/edit#heading=h.5mcn15i0e1ch
-[run_shell]: https://bazel.build/rules/lib/builtins/actions#run_shell
-[multiple_toolchains_exec_groups]: /extending/auto-exec-groups#when-should-use-exec-groups
diff --git a/8.0.1/extending/concepts.mdx b/8.0.1/extending/concepts.mdx
deleted file mode 100644
index eb1d6b8..0000000
--- a/8.0.1/extending/concepts.mdx
+++ /dev/null
@@ -1,111 +0,0 @@
----
-title: 'Extension Overview'
----
-
-
-
-
-This page describes how to extend the BUILD language using macros
-and rules.
-
-Bazel extensions are files ending in `.bzl`. Use a
-[load statement](/concepts/build-files#load) to import a symbol from an extension.
-
-Before learning the more advanced concepts, first:
-
-* Read about the [Starlark language](/rules/language), used in both the
-  `BUILD` and `.bzl` files.
-
-* Learn how you can [share variables](/build/share-variables)
-  between two `BUILD` files.
-
-## Macros and rules
-
-A macro is a function that instantiates rules. Macros come in two flavors:
-[symbolic macros](/extending/macros) (new in Bazel 8) and [legacy
-macros](/extending/legacy-macros). The two flavors of macros are defined
-differently, but behave almost the same from the point of view of a user. A
-macro is useful when a `BUILD` file is getting too repetitive or too complex, as
-it lets you reuse some code. The function is evaluated as soon as the `BUILD`
-file is read. After the evaluation of the `BUILD` file, Bazel has little
-information about macros. If your macro generates a `genrule`, Bazel will
-behave *almost* as if you declared that `genrule` in the `BUILD` file. (The one
-exception is that targets declared in a symbolic macro have [special visibility
-semantics](/extending/macros#visibility): a symbolic macro can hide its internal
-targets from the rest of the package.)
-
-A [rule](/extending/rules) is more powerful than a macro.
It can access Bazel -internals and have full control over what is going on. It may for example pass -information to other rules. - -If you want to reuse simple logic, start with a macro; we recommend a symbolic -macro, unless you need to support older Bazel versions. If a macro becomes -complex, it is often a good idea to make it a rule. Support for a new language -is typically done with a rule. Rules are for advanced users, and most users will -never have to write one; they will only load and call existing rules. - -## Evaluation model - -A build consists of three phases. - -* **Loading phase**. First, load and evaluate all extensions and all `BUILD` - files that are needed for the build. The execution of the `BUILD` files simply - instantiates rules (each time a rule is called, it gets added to a graph). - This is where macros are evaluated. - -* **Analysis phase**. The code of the rules is executed (their `implementation` - function), and actions are instantiated. An action describes how to generate - a set of outputs from a set of inputs, such as "run gcc on hello.c and get - hello.o". You must list explicitly which files will be generated before - executing the actual commands. In other words, the analysis phase takes - the graph generated by the loading phase and generates an action graph. - -* **Execution phase**. Actions are executed, when at least one of their outputs is - required. If a file is missing or if a command fails to generate one output, - the build fails. Tests are also run during this phase. - -Bazel uses parallelism to read, parse and evaluate the `.bzl` files and `BUILD` -files. A file is read at most once per build and the result of the evaluation is -cached and reused. A file is evaluated only once all its dependencies (`load()` -statements) have been resolved. By design, loading a `.bzl` file has no visible -side-effect, it only defines values and functions. - -Bazel tries to be clever: it uses dependency analysis to know which files must -be loaded, which rules must be analyzed, and which actions must be executed. For -example, if a rule generates actions that you don't need for the current build, -they will not be executed. - -## Creating extensions - -* [Create your first macro](/rules/macro-tutorial) in order to reuse some code. - Then [learn more about macros](/extending/macros) and [using them to create - "custom verbs"](/rules/verbs-tutorial). - -* [Follow the rules tutorial](/rules/rules-tutorial) to get started with rules. - Next, you can read more about the [rules concepts](/extending/rules). - -The two links below will be very useful when writing your own extensions. Keep -them within reach: - -* The [API reference](/rules/lib) - -* [Examples](https://github.com/bazelbuild/examples/tree/master/rules) - -## Going further - -In addition to [macros](/extending/macros) and [rules](/extending/rules), you -may want to write [aspects](/extending/aspects) and [repository -rules](/extending/repo). - -* Use [Buildifier](https://github.com/bazelbuild/buildtools) - consistently to format and lint your code. - -* Follow the [`.bzl` style guide](/rules/bzl-style). - -* [Test](/rules/testing) your code. - -* [Generate documentation](https://skydoc.bazel.build/) to help your users. - -* [Optimize the performance](/rules/performance) of your code. - -* [Deploy](/rules/deploying) your extensions to other people. 
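-
-To tie the loading-phase behavior described above to something concrete, here
-is a minimal, hypothetical legacy macro (the file name, macro name, and
-`genrule` parameters are illustrative, not taken from this page): it is an
-ordinary Starlark function that instantiates a `genrule` when the `BUILD` file
-is evaluated.
-
-```python
-# my_macros.bzl (hypothetical file)
-
-def copied_file(name, src):
-    """A legacy macro: runs during BUILD file evaluation and simply
-    instantiates a genrule target under the given name."""
-    native.genrule(
-        name = name,
-        srcs = [src],
-        outs = [name + ".copy"],
-        cmd = "cp $< $@",
-    )
-```
-
-A `BUILD` file would `load("//:my_macros.bzl", "copied_file")` and call
-`copied_file(name = "hello", src = "hello.txt")`; after evaluation, Bazel sees
-only the resulting `genrule`, as described above.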
diff --git a/8.0.1/extending/depsets.mdx b/8.0.1/extending/depsets.mdx deleted file mode 100644 index 2aa8a1f..0000000 --- a/8.0.1/extending/depsets.mdx +++ /dev/null @@ -1,346 +0,0 @@ ---- -title: 'Depsets' ---- - - - -[Depsets](/rules/lib/builtins/depset) are a specialized data structure for efficiently -collecting data across a target’s transitive dependencies. They are an essential -element of rule processing. - -The defining feature of depset is its time- and space-efficient union operation. -The depset constructor accepts a list of elements ("direct") and a list of other -depsets ("transitive"), and returns a depset representing a set containing all the -direct elements and the union of all the transitive sets. Conceptually, the -constructor creates a new graph node that has the direct and transitive nodes -as its successors. Depsets have a well-defined ordering semantics, based on -traversal of this graph. - -Example uses of depsets include: - -* Storing the paths of all object files for a program’s libraries, which can - then be passed to a linker action through a provider. - -* For an interpreted language, storing the transitive source files that are - included in an executable's runfiles. - -## Description and operations - -Conceptually, a depset is a directed acyclic graph (DAG) that typically looks -similar to the target graph. It is constructed from the leaves up to the root. -Each target in a dependency chain can add its own contents on top of the -previous without having to read or copy them. - -Each node in the DAG holds a list of direct elements and a list of child nodes. -The contents of the depset are the transitive elements, such as the direct elements -of all the nodes. A new depset can be created using the -[depset](/rules/lib/globals/bzl#depset) constructor: it accepts a list of direct -elements and another list of child nodes. - -```python -s = depset(["a", "b", "c"]) -t = depset(["d", "e"], transitive = [s]) - -print(s) # depset(["a", "b", "c"]) -print(t) # depset(["d", "e", "a", "b", "c"]) -``` - -To retrieve the contents of a depset, use the -[to_list()](/rules/lib/builtins/depset#to_list) method. It returns a list of all transitive -elements, not including duplicates. There is no way to directly inspect the -precise structure of the DAG, although this structure does affect the order in -which the elements are returned. - -```python -s = depset(["a", "b", "c"]) - -print("c" in s.to_list()) # True -print(s.to_list() == ["a", "b", "c"]) # True -``` - -The allowed items in a depset are restricted, just as the allowed keys in -dictionaries are restricted. In particular, depset contents may not be mutable. - -Depsets use reference equality: a depset is equal to itself, but unequal to any -other depset, even if they have the same contents and same internal structure. - -```python -s = depset(["a", "b", "c"]) -t = s -print(s == t) # True - -t = depset(["a", "b", "c"]) -print(s == t) # False - -d = {} -d[s] = None -d[t] = None -print(len(d)) # 2 -``` - -To compare depsets by their contents, convert them to sorted lists. - -```python -s = depset(["a", "b", "c"]) -t = depset(["c", "b", "a"]) -print(sorted(s.to_list()) == sorted(t.to_list())) # True -``` - -There is no ability to remove elements from a depset. If this is needed, you -must read out the entire contents of the depset, filter the elements you want to -remove, and reconstruct a new depset. This is not particularly efficient. 
- -```python -s = depset(["a", "b", "c"]) -t = depset(["b", "c"]) - -# Compute set difference s - t. Precompute t.to_list() so it's not done -# in a loop, and convert it to a dictionary for fast membership tests. -t_items = {e: None for e in t.to_list()} -diff_items = [x for x in s.to_list() if x not in t_items] -# Convert back to depset if it's still going to be used for union operations. -s = depset(diff_items) -print(s) # depset(["a"]) -``` - -### Order - -The `to_list` operation performs a traversal over the DAG. The kind of traversal -depends on the *order* that was specified at the time the depset was -constructed. It is useful for Bazel to support multiple orders because sometimes -tools care about the order of their inputs. For example, a linker action may -need to ensure that if `B` depends on `A`, then `A.o` comes before `B.o` on the -linker’s command line. Other tools might have the opposite requirement. - -Three traversal orders are supported: `postorder`, `preorder`, and -`topological`. The first two work exactly like [tree -traversals](https://en.wikipedia.org/wiki/Tree_traversal#Depth-first_search) -except that they operate on DAGs and skip already visited nodes. The third order -works as a topological sort from root to leaves, essentially the same as -preorder except that shared children are listed only after all of their parents. -Preorder and postorder operate as left-to-right traversals, but note that within -each node direct elements have no order relative to children. For topological -order, there is no left-to-right guarantee, and even the -all-parents-before-child guarantee does not apply in the case that there are -duplicate elements in different nodes of the DAG. - -```python -# This demonstrates different traversal orders. - -def create(order): - cd = depset(["c", "d"], order = order) - gh = depset(["g", "h"], order = order) - return depset(["a", "b", "e", "f"], transitive = [cd, gh], order = order) - -print(create("postorder").to_list()) # ["c", "d", "g", "h", "a", "b", "e", "f"] -print(create("preorder").to_list()) # ["a", "b", "e", "f", "c", "d", "g", "h"] -``` - -```python -# This demonstrates different orders on a diamond graph. - -def create(order): - a = depset(["a"], order=order) - b = depset(["b"], transitive = [a], order = order) - c = depset(["c"], transitive = [a], order = order) - d = depset(["d"], transitive = [b, c], order = order) - return d - -print(create("postorder").to_list()) # ["a", "b", "c", "d"] -print(create("preorder").to_list()) # ["d", "b", "a", "c"] -print(create("topological").to_list()) # ["d", "b", "c", "a"] -``` - -Due to how traversals are implemented, the order must be specified at the time -the depset is created with the constructor’s `order` keyword argument. If this -argument is omitted, the depset has the special `default` order, in which case -there are no guarantees about the order of any of its elements (except that it -is deterministic). - -## Full example - -This example is available at -[https://github.com/bazelbuild/examples/tree/main/rules/depsets](https://github.com/bazelbuild/examples/tree/main/rules/depsets). - -Suppose there is a hypothetical interpreted language Foo. In order to build -each `foo_binary` you need to know all the `*.foo` files that it directly or -indirectly depends on. - -```python -# //depsets:BUILD - -load(":foo.bzl", "foo_library", "foo_binary") - -# Our hypothetical Foo compiler. 
-py_binary(
-    name = "foocc",
-    srcs = ["foocc.py"],
-)
-
-foo_library(
-    name = "a",
-    srcs = ["a.foo", "a_impl.foo"],
-)
-
-foo_library(
-    name = "b",
-    srcs = ["b.foo", "b_impl.foo"],
-    deps = [":a"],
-)
-
-foo_library(
-    name = "c",
-    srcs = ["c.foo", "c_impl.foo"],
-    deps = [":a"],
-)
-
-foo_binary(
-    name = "d",
-    srcs = ["d.foo"],
-    deps = [":b", ":c"],
-)
-```
-
-```python
-# //depsets:foocc.py
-
-# "Foo compiler" that just concatenates its inputs to form its output.
-import sys
-
-if __name__ == "__main__":
-    # Need at least the output path in argv[1].
-    assert len(sys.argv) >= 2
-    output = open(sys.argv[1], "wt")
-    for path in sys.argv[2:]:
-        input = open(path, "rt")
-        output.write(input.read())
-```
-
-Here, the transitive sources of the binary `d` are all of the `*.foo` files in
-the `srcs` fields of `a`, `b`, `c`, and `d`. In order for the `foo_binary`
-target to know about any file besides `d.foo`, the `foo_library` targets need to
-pass them along in a provider. Each library receives the providers from its own
-dependencies, adds its own immediate sources, and passes on a new provider with
-the augmented contents. The `foo_binary` rule does the same, except that instead
-of returning a provider, it uses the complete list of sources to construct a
-command line for an action.
-
-Here’s a complete implementation of the `foo_library` and `foo_binary` rules.
-
-```python
-# //depsets/foo.bzl
-
-# A provider with one field, transitive_sources.
-FooFiles = provider(fields = ["transitive_sources"])
-
-def get_transitive_srcs(srcs, deps):
-    """Obtain the source files for a target and its transitive dependencies.
-
-    Args:
-      srcs: a list of source files
-      deps: a list of targets that are direct dependencies
-    Returns:
-      a collection of the transitive sources
-    """
-    return depset(
-        srcs,
-        transitive = [dep[FooFiles].transitive_sources for dep in deps])
-
-def _foo_library_impl(ctx):
-    trans_srcs = get_transitive_srcs(ctx.files.srcs, ctx.attr.deps)
-    return [FooFiles(transitive_sources=trans_srcs)]
-
-foo_library = rule(
-    implementation = _foo_library_impl,
-    attrs = {
-        "srcs": attr.label_list(allow_files=True),
-        "deps": attr.label_list(),
-    },
-)
-
-def _foo_binary_impl(ctx):
-    foocc = ctx.executable._foocc
-    out = ctx.outputs.out
-    trans_srcs = get_transitive_srcs(ctx.files.srcs, ctx.attr.deps)
-    srcs_list = trans_srcs.to_list()
-    ctx.actions.run(executable = foocc,
-                    arguments = [out.path] + [src.path for src in srcs_list],
-                    inputs = srcs_list + [foocc],
-                    outputs = [out])
-
-foo_binary = rule(
-    implementation = _foo_binary_impl,
-    attrs = {
-        "srcs": attr.label_list(allow_files=True),
-        "deps": attr.label_list(),
-        "_foocc": attr.label(default=Label("//depsets:foocc"),
-                             allow_files=True, executable=True, cfg="exec")
-    },
-    outputs = {"out": "%{name}.out"},
-)
-```
-
-You can test this by copying these files into a fresh package, renaming the
-labels appropriately, creating the source `*.foo` files with dummy content, and
-building the `d` target.
-
-
-## Performance
-
-To see the motivation for using depsets, consider what would happen if
-`get_transitive_srcs()` collected its sources in a list.
-
-```python
-def get_transitive_srcs(srcs, deps):
-    trans_srcs = []
-    for dep in deps:
-        trans_srcs += dep[FooFiles].transitive_sources
-    trans_srcs += srcs
-    return trans_srcs
-```
-
-This does not take into account duplicates, so the source files for `a`
-will appear twice on the command line and twice in the contents of the output
-file.
-
-An alternative is using a general set, which can be simulated by a
-dictionary where the keys are the elements and all the keys map to `True`.
-
-```python
-def get_transitive_srcs(srcs, deps):
-    trans_srcs = {}
-    for dep in deps:
-        for file in dep[FooFiles].transitive_sources:
-            trans_srcs[file] = True
-    for file in srcs:
-        trans_srcs[file] = True
-    return trans_srcs
-```
-
-This gets rid of the duplicates, but it makes the order of the command line
-arguments (and therefore the contents of the files) unspecified, although still
-deterministic.
-
-Moreover, both approaches are asymptotically worse than the depset-based
-approach. Consider the case where there is a long chain of dependencies on
-Foo libraries. Processing every rule requires copying all of the transitive
-sources that came before it into a new data structure. This means that the
-time and space cost for analyzing an individual library or binary target
-is proportional to its own height in the chain. For a chain of length n,
-foolib_1 ← foolib_2 ← … ← foolib_n, the overall cost is effectively O(n^2).
-
-Generally speaking, depsets should be used whenever you are accumulating
-information through your transitive dependencies. This helps ensure that
-your build scales well as your target graph grows deeper.
-
-Finally, it’s important to not retrieve the contents of the depset
-unnecessarily in rule implementations. One call to `to_list()`
-at the end in a binary rule is fine, since the overall cost is just O(n). It’s
-when many non-terminal targets try to call `to_list()` that quadratic behavior
-occurs.
-
-For more information about using depsets efficiently, see the [performance](/rules/performance) page.
-
-## API Reference
-
-See the [depset API reference](/rules/lib/builtins/depset) for more details.
-
diff --git a/8.0.1/extending/exec-groups.mdx b/8.0.1/extending/exec-groups.mdx
deleted file mode 100644
index ba145e5..0000000
--- a/8.0.1/extending/exec-groups.mdx
+++ /dev/null
@@ -1,167 +0,0 @@
----
-title: 'Execution Groups'
----
-
-
-
-Execution groups allow for multiple execution platforms within a single target.
-Each execution group has its own [toolchain](/extending/toolchains) dependencies and
-performs its own [toolchain resolution](/extending/toolchains#toolchain-resolution).
-
-## Background
-
-Execution groups allow the rule author to define sets of actions, each with a
-potentially different execution platform. Multiple execution platforms can allow
-actions to execute differently, for example compiling an iOS app on a remote
-(Linux) worker and then linking/code signing on a local Mac worker.
-
-Being able to define groups of actions also helps alleviate the usage of action
-mnemonics as a proxy for specifying actions. Mnemonics are not guaranteed to be
-unique and can only reference a single action. This is especially helpful in
-allocating extra resources to specific memory- and processing-intensive actions
-like linking in C++ builds without over-allocating to less demanding tasks.
-
-## Defining execution groups
-
-During rule definition, rule authors can
-[declare](/rules/lib/globals/bzl#exec_group)
-a set of execution groups. On each execution group, the rule author can specify
-everything needed to select an execution platform for that execution group,
-namely any constraints via `exec_compatible_with` and toolchain types via
-`toolchains`.
-
-```python
-# foo.bzl
-my_rule = rule(
-    _impl,
-    exec_groups = {
-        "link": exec_group(
-            exec_compatible_with = ["@platforms//os:linux"],
-            toolchains = ["//foo:toolchain_type"],
-        ),
-        "test": exec_group(
-            toolchains = ["//foo_tools:toolchain_type"],
-        ),
-    },
-    attrs = {
-        "_compiler": attr.label(cfg = config.exec("link")),
-    },
-)
-```
-
-In the code snippet above, you can see that tool dependencies can also specify
-a transition for an exec group using the
-[`cfg`](/rules/lib/toplevel/attr#label)
-attribute param and the
-[`config`](/rules/lib/toplevel/config)
-module. The module exposes an `exec` function that takes a single string
-parameter: the name of the exec group for which the dependency should be
-built.
-
-As on native rules, the `test` execution group is present by default on Starlark
-test rules.
-
-## Accessing execution groups
-
-In the rule implementation, you can declare that actions should be run on the
-execution platform of an execution group. You can do this by using the `exec_group`
-param of action-generating methods, specifically
-[`ctx.actions.run`](/rules/lib/builtins/actions#run) and
-[`ctx.actions.run_shell`](/rules/lib/builtins/actions#run_shell).
-
-```python
-# foo.bzl
-def _impl(ctx):
-    ctx.actions.run(
-        inputs = [ctx.executable._some_tool, ctx.files.srcs[0]],
-        exec_group = "compile",
-        # ...
-    )
-```
-
-Rule authors will also be able to access the [resolved toolchains](/extending/toolchains#toolchain-resolution)
-of execution groups, similarly to how you
-can access the resolved toolchain of a target:
-
-```python
-# foo.bzl
-def _impl(ctx):
-    foo_info = ctx.exec_groups["link"].toolchains["//foo:toolchain_type"].fooinfo
-    ctx.actions.run(
-        inputs = [foo_info, ctx.files.srcs[0]],
-        exec_group = "link",
-        # ...
-    )
-```
-
-Note: If an action uses a toolchain from an execution group, but doesn't specify
-that execution group in the action declaration, that may potentially cause
-issues. A mismatch like this may not immediately cause failures, but is a latent
-problem.
-
-## Using execution groups to set execution properties
-
-Execution groups are integrated with the
-[`exec_properties`](/reference/be/common-definitions#common-attributes)
-attribute that exists on every rule and allows the target writer to specify a
-string dict of properties that is then passed to the execution machinery. For
-example, if you wanted to set some property, say memory, for the target and give
-certain actions a higher memory allocation, you would write an `exec_properties`
-entry with an execution-group-augmented key, such as:
-
-```python
-# BUILD
-my_rule(
-    name = 'my_target',
-    exec_properties = {
-        'mem': '12g',
-        'link.mem': '16g',
-    },
-    …
-)
-```
-
-All actions with `exec_group = "link"` would see the exec properties
-dictionary as `{"mem": "16g"}`. As you see here, execution-group-level
-settings override target-level settings.
-
-### Execution groups for native rules
-
-The following execution groups are available for actions defined by native rules:
-
-* `test`: Test runner actions.
-* `cpp_link`: C++ linking actions.
-
-### Execution groups and platform execution properties
-
-It is possible to define `exec_properties` for arbitrary execution groups on
-platform targets (unlike `exec_properties` set directly on a target, where
-properties for unknown execution groups are rejected). Targets then inherit the
-execution platform's `exec_properties` that affect the default execution group
-and any other relevant execution groups.
- -For example, suppose running a C++ test requires some resource to be available, -but it isn't required for compiling and linking; this can be modelled as -follows: - -```python -constraint_setting(name = "resource") -constraint_value(name = "has_resource", constraint_setting = ":resource") - -platform( - name = "platform_with_resource", - constraint_values = [":has_resource"], - exec_properties = { - "test.resource": "...", - }, -) - -cc_test( - name = "my_test", - srcs = ["my_test.cc"], - exec_compatible_with = [":has_resource"], -) -``` - -`exec_properties` defined directly on targets take precedence over those that -are inherited from the execution platform. diff --git a/8.0.1/extending/platforms.mdx b/8.0.1/extending/platforms.mdx deleted file mode 100644 index 94e6290..0000000 --- a/8.0.1/extending/platforms.mdx +++ /dev/null @@ -1,273 +0,0 @@ ---- -title: 'Platforms' ---- - - - -Bazel can build and test code on a variety of hardware, operating systems, and -system configurations, using many different versions of build tools such as -linkers and compilers. To help manage this complexity, Bazel has a concept of -*constraints* and *platforms*. A constraint is a dimension in which build or -production environments may differ, such as CPU architecture, the presence or -absence of a GPU, or the version of a system-installed compiler. A platform is a -named collection of choices for these constraints, representing the particular -resources that are available in some environment. - -Modeling the environment as a platform helps Bazel to automatically select the -appropriate -[toolchains](/extending/toolchains) -for build actions. Platforms can also be used in combination with the -[config_setting](/reference/be/general#config_setting) -rule to write [configurable attributes](/docs/configurable-attributes). - -Bazel recognizes three roles that a platform may serve: - -* **Host** - the platform on which Bazel itself runs. -* **Execution** - a platform on which build tools execute build actions to - produce intermediate and final outputs. -* **Target** - a platform on which a final output resides and executes. - -Bazel supports the following build scenarios regarding platforms: - -* **Single-platform builds** (default) - host, execution, and target platforms - are the same. For example, building a Linux executable on Ubuntu running on - an Intel x64 CPU. - -* **Cross-compilation builds** - host and execution platforms are the same, but - the target platform is different. For example, building an iOS app on macOS - running on a MacBook Pro. - -* **Multi-platform builds** - host, execution, and target platforms are all - different. - -Tip: for detailed instructions on migrating your project to platforms, see -[Migrating to Platforms](/concepts/platforms). - -## Defining constraints and platforms - -The space of possible choices for platforms is defined by using the -[`constraint_setting`][constraint_setting] and -[`constraint_value`][constraint_value] rules within `BUILD` files. -`constraint_setting` creates a new dimension, while -`constraint_value` creates a new value for a given dimension; together they -effectively define an enum and its possible values. For example, the following -snippet of a `BUILD` file introduces a constraint for the system's glibc version -with two possible values. 
-
-[constraint_setting]: /reference/be/platforms-and-toolchains#constraint_setting
-[constraint_value]: /reference/be/platforms-and-toolchains#constraint_value
-
-```python
-constraint_setting(name = "glibc_version")
-
-constraint_value(
-    name = "glibc_2_25",
-    constraint_setting = ":glibc_version",
-)
-
-constraint_value(
-    name = "glibc_2_26",
-    constraint_setting = ":glibc_version",
-)
-```
-
-Constraints and their values may be defined across different packages in the
-workspace. They are referenced by label and subject to the usual visibility
-controls. If visibility allows, you can extend an existing constraint setting by
-defining your own value for it.
-
-The [`platform`](/reference/be/platforms-and-toolchains#platform) rule introduces a new platform with
-certain choices of constraint values. The
-following creates a platform named `linux_x86`, and says that it describes any
-environment that runs a Linux operating system on an x86_64 architecture with a
-glibc version of 2.25. (See below for more on Bazel's built-in constraints.)
-
-```python
-platform(
-    name = "linux_x86",
-    constraint_values = [
-        "@platforms//os:linux",
-        "@platforms//cpu:x86_64",
-        ":glibc_2_25",
-    ],
-)
-```
-
-Note: It is an error for a platform to specify more than one value of the
-same constraint setting, such as `@platforms//cpu:x86_64` and
-`@platforms//cpu:arm` for `@platforms//cpu:cpu`.
-
-## Generally useful constraints and platforms
-
-To keep the ecosystem consistent, the Bazel team maintains a repository with
-constraint definitions for the most popular CPU architectures and operating
-systems. These are all located in
-[https://github.com/bazelbuild/platforms](https://github.com/bazelbuild/platforms).
-
-Bazel ships with the following special platform definition:
-`@platforms//host` (aliased as `@bazel_tools//tools:host_platform`). This is
-the autodetected host platform value; it represents the platform of the system
-Bazel is running on.
-
-## Specifying a platform for a build
-
-You can specify the host and target platforms for a build using the following
-command-line flags:
-
-* `--host_platform` - defaults to `@bazel_tools//tools:host_platform`
-  * This target is aliased to `@platforms//host`, which is backed by a repo
-    rule that detects the host OS and CPU and writes the platform target.
-  * There's also `@platforms//host:constraints.bzl`, which exposes
-    a list called `HOST_CONSTRAINTS`, which can be used in other BUILD and
-    Starlark files.
-* `--platforms` - defaults to the host platform
-  * This means that when no other flags are set,
-    `@platforms//host` is the target platform.
-  * If `--host_platform` is set and not `--platforms`, the value of
-    `--host_platform` is both the host and target platform.
-
-## Skipping incompatible targets
-
-When building for a specific target platform, it is often desirable to skip
-targets that will never work on that platform. For example, your Windows device
-driver is likely going to generate lots of compiler errors when building on a
-Linux machine with `//...`. Use the
-[`target_compatible_with`](/reference/be/common-definitions#common.target_compatible_with)
-attribute to tell Bazel what target platform constraints your code has.
-
-The simplest use of this attribute restricts a target to a single platform.
-The target will not be built for any platform that doesn't satisfy all of the
-constraints. The following example restricts `win_driver_lib.cc` to 64-bit
-Windows.
- -```python -cc_library( - name = "win_driver_lib", - srcs = ["win_driver_lib.cc"], - target_compatible_with = [ - "@platforms//cpu:x86_64", - "@platforms//os:windows", - ], -) -``` - -`:win_driver_lib` is *only* compatible for building with 64-bit Windows and -incompatible with all else. Incompatibility is transitive. Any targets -that transitively depend on an incompatible target are themselves considered -incompatible. - -### When are targets skipped? - -Targets are skipped when they are considered incompatible and included in the -build as part of a target pattern expansion. For example, the following two -invocations skip any incompatible targets found in a target pattern expansion. - -```console -$ bazel build --platforms=//:myplatform //... -``` - -```console -$ bazel build --platforms=//:myplatform //:all -``` - -Incompatible tests in a [`test_suite`](/reference/be/general#test_suite) are -similarly skipped if the `test_suite` is specified on the command line with -[`--expand_test_suites`](/reference/command-line-reference#flag--expand_test_suites). -In other words, `test_suite` targets on the command line behave like `:all` and -`...`. Using `--noexpand_test_suites` prevents expansion and causes -`test_suite` targets with incompatible tests to also be incompatible. - -Explicitly specifying an incompatible target on the command line results in an -error message and a failed build. - -```console -$ bazel build --platforms=//:myplatform //:target_incompatible_with_myplatform -... -ERROR: Target //:target_incompatible_with_myplatform is incompatible and cannot be built, but was explicitly requested. -... -FAILED: Build did NOT complete successfully -``` - -Incompatible explicit targets are silently skipped if -`--skip_incompatible_explicit_targets` is enabled. - -### More expressive constraints - -For more flexibility in expressing constraints, use the -`@platforms//:incompatible` -[`constraint_value`](/reference/be/platforms-and-toolchains#constraint_value) -that no platform satisfies. - -Use [`select()`](/reference/be/functions#select) in combination with -`@platforms//:incompatible` to express more complicated restrictions. For -example, use it to implement basic OR logic. The following marks a library -compatible with macOS and Linux, but no other platforms. - -Note: An empty constraints list is equivalent to "compatible with everything". - -```python -cc_library( - name = "unixish_lib", - srcs = ["unixish_lib.cc"], - target_compatible_with = select({ - "@platforms//os:osx": [], - "@platforms//os:linux": [], - "//conditions:default": ["@platforms//:incompatible"], - }), -) -``` - -The above can be interpreted as follows: - -1. When targeting macOS, the target has no constraints. -2. When targeting Linux, the target has no constraints. -3. Otherwise, the target has the `@platforms//:incompatible` constraint. Because - `@platforms//:incompatible` is not part of any platform, the target is - deemed incompatible. - -To make your constraints more readable, use -[skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects.with_or()`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectswith_or). - -You can express inverse compatibility in a similar way. The following example -describes a library that is compatible with everything _except_ for ARM. 
-
-```python
-cc_library(
-    name = "non_arm_lib",
-    srcs = ["non_arm_lib.cc"],
-    target_compatible_with = select({
-        "@platforms//cpu:arm": ["@platforms//:incompatible"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-### Detecting incompatible targets using `bazel cquery`
-
-You can use the
-[`IncompatiblePlatformProvider`](/rules/lib/providers/IncompatiblePlatformProvider)
-in `bazel cquery`'s [Starlark output
-format](/query/cquery#output-format-definition) to distinguish
-incompatible targets from compatible ones.
-
-This can be used to filter out incompatible targets. The example below will
-only print the labels for targets that are compatible. Incompatible targets are
-not printed.
-
-```console
-$ cat example.cquery
-
-def format(target):
-    if "IncompatiblePlatformProvider" not in providers(target):
-        return target.label
-    return ""
-
-
-$ bazel cquery //... --output=starlark --starlark:file=example.cquery
-```
-
-### Known Issues
-
-Incompatible targets [ignore visibility
-restrictions](https://github.com/bazelbuild/bazel/issues/16044).
diff --git a/8.0.1/extending/repo.mdx b/8.0.1/extending/repo.mdx
deleted file mode 100644
index b878f03..0000000
--- a/8.0.1/extending/repo.mdx
+++ /dev/null
@@ -1,161 +0,0 @@
----
-title: 'Repository Rules'
----
-
-
-
-This page covers how to define repository rules and provides examples.
-
-An [external repository](/external/overview#repository) is a directory tree,
-containing source files usable in a Bazel build, which is generated on demand by
-running its corresponding **repo rule**. Repos can be defined in a multitude of
-ways, but ultimately, each repo is defined by invoking a repo rule, just as
-build targets are defined by invoking build rules. They can be used to depend on
-third-party libraries (such as Maven packaged libraries) but also to generate
-`BUILD` files specific to the host Bazel is running on.
-
-## Repository rule definition
-
-In a `.bzl` file, use the
-[repository_rule](/rules/lib/globals/bzl#repository_rule) function to define a
-new repo rule and store it in a global variable. After a repo rule is defined,
-it can be invoked as a function to define repos. This invocation is usually
-performed from inside a [module extension](/external/extension) implementation
-function.
-
-The two major components of a repo rule definition are its attribute schema and
-implementation function. The attribute schema determines the names and types of
-attributes passed to a repo rule invocation, and the implementation function is
-run when the repo needs to be fetched.
-
-## Attributes
-
-Attributes are arguments passed to the repo rule invocation. The schema of
-attributes accepted by a repo rule is specified using the `attrs` argument when
-the repo rule is defined with a call to `repository_rule`. An example defining
-`url` and `sha256` attributes as strings:
-
-```python
-http_archive = repository_rule(
-    implementation=_impl,
-    attrs={
-        "url": attr.string(mandatory=True),
-        "sha256": attr.string(mandatory=True),
-    }
-)
-```
-
-To access an attribute within the implementation function, use
-`repository_ctx.attr.<attribute_name>`:
-
-```python
-def _impl(repository_ctx):
-    url = repository_ctx.attr.url
-    checksum = repository_ctx.attr.sha256
-```
-
-All `repository_rule`s have the implicitly defined attribute `name`.
This is a string attribute that behaves somewhat magically: when specified as
-an input to a repo rule invocation, it takes an apparent repo name; but when
-read from the repo rule's implementation function using
-`repository_ctx.attr.name`, it returns the canonical repo name.
-
-## Implementation function
-
-Every repo rule requires an `implementation` function. It contains the actual
-logic of the rule and is executed strictly in the loading phase.
-
-The function has exactly one input parameter, `repository_ctx`. The function
-returns either `None` to signify that the rule is reproducible given the
-specified parameters, or a dict with a set of parameters for that rule that
-would turn that rule into a reproducible one generating the same repo. For
-example, for a rule tracking a git repository that would mean returning a
-specific commit identifier instead of a floating branch that was originally
-specified.
-
-The input parameter `repository_ctx` can be used to access attribute values and
-to call non-hermetic functions (finding a binary, executing a binary, creating
-a file in the repository, or downloading a file from the Internet). See [the API
-docs](/rules/lib/builtins/repository_ctx) for more context. Example:
-
-```python
-def _impl(repository_ctx):
-    repository_ctx.symlink(repository_ctx.attr.path, "")
-
-local_repository = repository_rule(
-    implementation=_impl,
-    ...)
-```
-
-## When is the implementation function executed?
-
-The implementation function of a repo rule is executed when Bazel needs a target
-from that repository, for example when another target (in another repo) depends
-on it or if it is mentioned on the command line. The implementation function is
-then expected to create the repo in the file system. This is called "fetching"
-the repo.
-
-In contrast to regular targets, repos are not necessarily re-fetched when
-something changes that would cause the repo to be different. This is because
-there are things that Bazel either cannot detect changes to or it would cause
-too much overhead on every build (for example, things that are fetched from the
-network). Therefore, repos are re-fetched only if one of the following things
-changes:
-
-* The attributes passed to the repo rule invocation.
-* The Starlark code comprising the implementation of the repo rule.
-* The value of any environment variable passed to `repository_ctx`'s
-  `getenv()` method or declared with the `environ` attribute of the
-  [`repository_rule`](/rules/lib/globals/bzl#repository_rule). The values of
-  these environment variables can be hard-wired on the command line with the
-  [`--repo_env`](/reference/command-line-reference#flag--repo_env) flag.
-* The existence, contents, and type of any paths being
-  [`watch`ed](/rules/lib/builtins/repository_ctx#watch) in the implementation
-  function of the repo rule.
-    * Certain other methods of `repository_ctx` with a `watch` parameter, such
-      as `read()`, `execute()`, and `extract()`, can also cause paths to be
-      watched.
-    * Similarly, [`repository_ctx.watch_tree`](/rules/lib/builtins/repository_ctx#watch_tree)
-      and [`path.readdir`](/rules/lib/builtins/path#readdir) can cause paths
-      to be watched in other ways.
-* When `bazel fetch --force` is executed.
-
-There are two parameters of `repository_rule` that control when the repositories
-are re-fetched:
-
-* If the `configure` flag is set, the repository is re-fetched on `bazel
-  fetch --force --configure` (non-`configure` repositories are not
-  re-fetched).
-* If the `local` flag is set, in addition to the above cases, the repo is also
-  re-fetched when the Bazel server restarts.
-
-## Forcing refetch of external repos
-
-Sometimes, an external repo can become outdated without any change to its
-definition or dependencies. For example, a repo fetching sources might follow a
-particular branch of a third-party repository, and new commits are available on
-that branch. In this case, you can ask Bazel to refetch all external repos
-unconditionally by calling `bazel fetch --force --all`.
-
-Moreover, some repo rules inspect the local machine and might become outdated if
-the local machine was upgraded. In this case, you can ask Bazel to refetch only
-those external repos whose [`repository_rule`](/rules/lib/globals#repository_rule)
-definition has the `configure` attribute set by running `bazel fetch --force
---configure`.
-
-## Examples
-
-- [C++ auto-configured
-  toolchain](https://cs.opensource.google/bazel/bazel/+/master:tools/cpp/cc_configure.bzl;drc=644b7d41748e09eff9e47cbab2be2263bb71f29a;l=176):
-  it uses a repo rule to automatically create the C++ configuration files for
-  Bazel by looking for the local C++ compiler, the environment, and the flags
-  the C++ compiler supports.
-
-- [Go repositories](https://github.com/bazelbuild/rules_go/blob/67bc217b6210a0922d76d252472b87e9a6118fdf/go/private/go_repositories.bzl#L195)
-  uses several repo rules to define the list of dependencies needed to
-  use the Go rules.
-
-- [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external)
-  creates an external repository called `@maven` by default that generates
-  build targets for every Maven artifact in the transitive dependency tree.
diff --git a/8.0.1/extending/rules.mdx b/8.0.1/extending/rules.mdx
deleted file mode 100644
index c91939e..0000000
--- a/8.0.1/extending/rules.mdx
+++ /dev/null
@@ -1,1244 +0,0 @@
----
-title: 'Rules'
----
-
-
-
-A **rule** defines a series of [**actions**](#actions) that Bazel performs on
-inputs to produce a set of outputs, which are referenced in
-[**providers**](#providers) returned by the rule's
-[**implementation function**](#implementation_function). For example, a C++
-binary rule might:
-
-1. Take a set of `.cpp` source files (inputs).
-2. Run `g++` on the source files (action).
-3. Return the `DefaultInfo` provider with the executable output and other files
-   to make available at runtime.
-4. Return the `CcInfo` provider with C++-specific information gathered from the
-   target and its dependencies.
-
-From Bazel's perspective, `g++` and the standard C++ libraries are also inputs
-to this rule. As a rule writer, you must consider not only the user-provided
-inputs to a rule, but also all of the tools and libraries required to execute
-the actions.
-
-Before creating or modifying any rule, ensure you are familiar with Bazel's
-[build phases](/extending/concepts). It is important to understand the three
-phases of a build (loading, analysis, and execution). It is also useful to
-learn about [macros](/extending/macros) to understand the difference between rules and
-macros. To get started, first review the [Rules Tutorial](/rules/rules-tutorial).
-Then, use this page as a reference.
-
-A few rules are built into Bazel itself. These *native rules*, such as
-`genrule` and `filegroup`, provide some core support.
-By defining your own rules, you can add support for languages and tools
-that Bazel doesn't support natively.
- -Bazel provides an extensibility model for writing rules using the -[Starlark](/rules/language) language. These rules are written in `.bzl` files, which -can be loaded directly from `BUILD` files. - -When defining your own rule, you get to decide what attributes it supports and -how it generates its outputs. - -The rule's `implementation` function defines its exact behavior during the -[analysis phase](/extending/concepts#evaluation-model). This function doesn't run any -external commands. Rather, it registers [actions](#actions) that will be used -later during the execution phase to build the rule's outputs, if they are -needed. - -## Rule creation - -In a `.bzl` file, use the [rule](/rules/lib/globals/bzl#rule) function to define a new -rule, and store the result in a global variable. The call to `rule` specifies -[attributes](#attributes) and an -[implementation function](#implementation_function): - -```python -example_library = rule( - implementation = _example_library_impl, - attrs = { - "deps": attr.label_list(), - ... - }, -) -``` - -This defines a [rule kind](/query/language#kind) named `example_library`. - -The call to `rule` also must specify if the rule creates an -[executable](#executable-rules) output (with `executable = True`), or specifically -a test executable (with `test = True`). If the latter, the rule is a *test rule*, -and the name of the rule must end in `_test`. - -## Target instantiation - -Rules can be [loaded](/concepts/build-files#load) and called in `BUILD` files: - -```python -load('//some/pkg:rules.bzl', 'example_library') - -example_library( - name = "example_target", - deps = [":another_target"], - ... -) -``` - -Each call to a build rule returns no value, but has the side effect of defining -a target. This is called *instantiating* the rule. This specifies a name for the -new target and values for the target's [attributes](#attributes). - -Rules can also be called from Starlark functions and loaded in `.bzl` files. -Starlark functions that call rules are called [Starlark macros](/extending/macros). -Starlark macros must ultimately be called from `BUILD` files, and can only be -called during the [loading phase](/extending/concepts#evaluation-model), when `BUILD` -files are evaluated to instantiate targets. - -## Attributes - -An *attribute* is a rule argument. Attributes can provide specific values to a -target's [implementation](#implementation_function), or they can refer to other -targets, creating a graph of dependencies. - -Rule-specific attributes, such as `srcs` or `deps`, are defined by passing a map -from attribute names to schemas (created using the [`attr`](/rules/lib/toplevel/attr) -module) to the `attrs` parameter of `rule`. -[Common attributes](/reference/be/common-definitions#common-attributes), such as -`name` and `visibility`, are implicitly added to all rules. Additional -attributes are implicitly added to -[executable and test rules](#executable-rules) specifically. Attributes which -are implicitly added to a rule can't be included in the dictionary passed to -`attrs`. - -### Dependency attributes - -Rules that process source code usually define the following attributes to handle -various [types of dependencies](/concepts/dependencies#types_of_dependencies): - -* `srcs` specifies source files processed by a target's actions. Often, the - attribute schema specifies which file extensions are expected for the sort - of source file the rule processes. 
Rules for languages with header files
-generally specify a separate `hdrs` attribute for headers processed by a
-target and its consumers.
-* `deps` specifies code dependencies for a target. The attribute schema should
-  specify which [providers](#providers) those dependencies must provide. (For
-  example, `cc_library` provides `CcInfo`.)
-* `data` specifies files to be made available at runtime to any executable
-  which depends on a target. Its schema should allow arbitrary files to be
-  specified.
-
-```python
-example_library = rule(
-    implementation = _example_library_impl,
-    attrs = {
-        "srcs": attr.label_list(allow_files = [".example"]),
-        "hdrs": attr.label_list(allow_files = [".header"]),
-        "deps": attr.label_list(providers = [ExampleInfo]),
-        "data": attr.label_list(allow_files = True),
-        ...
-    },
-)
-```
-
-These are examples of *dependency attributes*. Any attribute that specifies
-an input label (those defined with
-[`attr.label_list`](/rules/lib/toplevel/attr#label_list),
-[`attr.label`](/rules/lib/toplevel/attr#label), or
-[`attr.label_keyed_string_dict`](/rules/lib/toplevel/attr#label_keyed_string_dict))
-specifies dependencies of a certain type
-between a target and the targets whose labels (or the corresponding
-[`Label`](/rules/lib/builtins/Label) objects) are listed in that attribute when the target
-is defined. The repository, and possibly the path, for these labels is resolved
-relative to the defined target.
-
-```python
-example_library(
-    name = "my_target",
-    deps = [":other_target"],
-)
-
-example_library(
-    name = "other_target",
-    ...
-)
-```
-
-In this example, `other_target` is a dependency of `my_target`, and therefore
-`other_target` is analyzed first. It is an error if there is a cycle in the
-dependency graph of targets.
-
-
-
-### Private attributes and implicit dependencies
-
-A dependency attribute with a default value creates an *implicit dependency*. It
-is implicit because it's a part of the target graph that the user doesn't
-specify in a `BUILD` file. Implicit dependencies are useful for hard-coding a
-relationship between a rule and a *tool* (a build-time dependency, such as a
-compiler), since most of the time a user is not interested in specifying what
-tool the rule uses. Inside the rule's implementation function, this is treated
-the same as other dependencies.
-
-If you want to provide an implicit dependency without allowing the user to
-override that value, you can make the attribute *private* by giving it a name
-that begins with an underscore (`_`). Private attributes must have default
-values. It generally only makes sense to use private attributes for implicit
-dependencies.
-
-```python
-example_library = rule(
-    implementation = _example_library_impl,
-    attrs = {
-        ...
-        "_compiler": attr.label(
-            default = Label("//tools:example_compiler"),
-            allow_single_file = True,
-            executable = True,
-            cfg = "exec",
-        ),
-    },
-)
-```
-
-In this example, every target of type `example_library` has an implicit
-dependency on the compiler `//tools:example_compiler`. This allows
-`example_library`'s implementation function to generate actions that invoke the
-compiler, even though the user did not pass its label as an input. Since
-`_compiler` is a private attribute, it follows that `ctx.attr._compiler`
-will always point to `//tools:example_compiler` in all targets of this rule
-type. Alternatively, you can name the attribute `compiler` without the
-underscore and keep the default value.
This allows users to substitute a different compiler if necessary, while
-those who keep the default need no awareness of the compiler's label.
-
-Implicit dependencies are generally used for tools that reside in the same
-repository as the rule implementation. If the tool comes from the
-[execution platform](/extending/platforms) or a different repository instead, the
-rule should obtain that tool from a [toolchain](/extending/toolchains).
-
-### Output attributes
-
-*Output attributes*, such as [`attr.output`](/rules/lib/toplevel/attr#output) and
-[`attr.output_list`](/rules/lib/toplevel/attr#output_list), declare an output file that the
-target generates. These differ from dependency attributes in two ways:
-
-* They define output file targets instead of referring to targets defined
-  elsewhere.
-* The output file targets depend on the instantiated rule target, instead of
-  the other way around.
-
-Typically, output attributes are only used when a rule needs to create outputs
-with user-defined names which can't be based on the target name. If a rule has
-one output attribute, it is typically named `out` or `outs`.
-
-Output attributes are the preferred way of creating *predeclared outputs*, which
-can be specifically depended upon or
-[requested at the command line](#requesting_output_files).
-
-## Implementation function
-
-Every rule requires an `implementation` function. These functions are executed
-strictly in the [analysis phase](/extending/concepts#evaluation-model) and transform the
-graph of targets generated in the loading phase into a graph of
-[actions](#actions) to be performed during the execution phase. As such,
-implementation functions can't actually read or write files.
-
-Rule implementation functions are usually private (named with a leading
-underscore). Conventionally, they are named the same as their rule, but suffixed
-with `_impl`.
-
-Implementation functions take exactly one parameter: a
-[rule context](/rules/lib/builtins/ctx), conventionally named `ctx`. They return a list of
-[providers](#providers).
-
-### Targets
-
-Dependencies are represented at analysis time as [`Target`](/rules/lib/builtins/Target)
-objects. These objects contain the [providers](#providers) generated when the
-target's implementation function was executed.
-
-[`ctx.attr`](/rules/lib/builtins/ctx#attr) has fields corresponding to the names of each
-dependency attribute, containing `Target` objects representing each direct
-dependency using that attribute. For `label_list` attributes, this is a list of
-`Targets`. For `label` attributes, this is a single `Target` or `None`.
-
-A list of provider objects is returned by a target's implementation function:
-
-```python
-return [ExampleInfo(headers = depset(...))]
-```
-
-Those can be accessed using index notation (`[]`), with the type of provider as
-a key. These can be [custom providers](#custom_providers) defined in Starlark or
-[providers for native rules](/rules/lib/providers) available as Starlark
-global variables.
-
-For example, if a rule takes header files using a `hdrs` attribute and provides
-them to the compilation actions of the target and its consumers, it could
-collect the transitive headers of its dependencies like so:
-
-```python
-def _example_library_impl(ctx):
-    ...
-    transitive_headers = [dep[ExampleInfo].headers for dep in ctx.attr.deps]
-```
-
-There's a legacy struct style, which is strongly discouraged; rules should be
-[migrated away from it](#migrating_from_legacy_providers).
-
-### Files
-
-Files are represented by [`File`](/rules/lib/builtins/File) objects.
Since Bazel doesn't -perform file I/O during the analysis phase, these objects can't be used to -directly read or write file content. Rather, they are passed to action-emitting -functions (see [`ctx.actions`](/rules/lib/builtins/actions)) to construct pieces of the -action graph. - -A `File` can either be a source file or a generated file. Each generated file -must be an output of exactly one action. Source files can't be the output of -any action. - -For each dependency attribute, the corresponding field of -[`ctx.files`](/rules/lib/builtins/ctx#files) contains a list of the default outputs of all -dependencies using that attribute: - -```python -def _example_library_impl(ctx): - ... - headers = depset(ctx.files.hdrs, transitive = transitive_headers) - srcs = ctx.files.srcs - ... -``` - -[`ctx.file`](/rules/lib/builtins/ctx#file) contains a single `File` or `None` for -dependency attributes whose specs set `allow_single_file = True`. -[`ctx.executable`](/rules/lib/builtins/ctx#executable) behaves the same as `ctx.file`, but only -contains fields for dependency attributes whose specs set `executable = True`. - -### Declaring outputs - -During the analysis phase, a rule's implementation function can create outputs. -Since all labels have to be known during the loading phase, these additional -outputs have no labels. `File` objects for outputs can be created using -[`ctx.actions.declare_file`](/rules/lib/builtins/actions#declare_file) and -[`ctx.actions.declare_directory`](/rules/lib/builtins/actions#declare_directory). -Often, the names of outputs are based on the target's name, -[`ctx.label.name`](/rules/lib/builtins/ctx#label): - -```python -def _example_library_impl(ctx): - ... - output_file = ctx.actions.declare_file(ctx.label.name + ".output") - ... -``` - -For *predeclared outputs*, like those created for -[output attributes](#output_attributes), `File` objects instead can be retrieved -from the corresponding fields of [`ctx.outputs`](/rules/lib/builtins/ctx#outputs). - -### Actions - -An action describes how to generate a set of outputs from a set of inputs, for -example "run gcc on hello.c and get hello.o". When an action is created, Bazel -doesn't run the command immediately. It registers it in a graph of dependencies, -because an action can depend on the output of another action. For example, in C, -the linker must be called after the compiler. - -General-purpose functions that create actions are defined in -[`ctx.actions`](/rules/lib/builtins/actions): - -* [`ctx.actions.run`](/rules/lib/builtins/actions#run), to run an executable. -* [`ctx.actions.run_shell`](/rules/lib/builtins/actions#run_shell), to run a shell - command. -* [`ctx.actions.write`](/rules/lib/builtins/actions#write), to write a string to a file. -* [`ctx.actions.expand_template`](/rules/lib/builtins/actions#expand_template), to - generate a file from a template. - -[`ctx.actions.args`](/rules/lib/builtins/actions#args) can be used to efficiently -accumulate the arguments for actions. It avoids flattening depsets until -execution time: - -```python -def _example_library_impl(ctx): - ... 
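-    # The remainder of this example (continued below) gathers transitive
-    # headers from `deps`, then uses ctx.actions.args() so the depsets are
-    # flattened only at execution time rather than during analysis.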
-
-    transitive_headers = [dep[ExampleInfo].headers for dep in ctx.attr.deps]
-    headers = depset(ctx.files.hdrs, transitive = transitive_headers)
-    srcs = ctx.files.srcs
-    inputs = depset(srcs, transitive = [headers])
-    output_file = ctx.actions.declare_file(ctx.label.name + ".output")
-
-    args = ctx.actions.args()
-    args.add_joined("-h", headers, join_with = ",")
-    args.add_joined("-s", srcs, join_with = ",")
-    args.add("-o", output_file)
-
-    ctx.actions.run(
-        mnemonic = "ExampleCompile",
-        executable = ctx.executable._compiler,
-        arguments = [args],
-        inputs = inputs,
-        outputs = [output_file],
-    )
-    ...
-```
-
-Actions take a list or depset of input files and generate a (non-empty) list of
-output files. The set of input and output files must be known during the
-[analysis phase](/extending/concepts#evaluation-model). It might depend on the value of
-attributes, including providers from dependencies, but it can't depend on the
-result of the execution. For example, if your action runs the unzip command, you
-must specify which files you expect to be inflated (before running unzip).
-Actions which create a variable number of files internally can wrap those in a
-single file (such as a zip, tar, or other archive format).
-
-Actions must list all of their inputs. Listing inputs that are not used is
-permitted, but inefficient.
-
-Actions must create all of their outputs. They may write other files, but
-anything not in outputs won't be available to consumers. All declared outputs
-must be written by some action.
-
-Actions are comparable to pure functions: They should depend only on the
-provided inputs, and avoid accessing system information, the username, the
-clock, the network, or I/O devices (except for reading inputs and writing
-outputs). This is important because the output will be cached and reused.
-
-Dependencies are resolved by Bazel, which decides which actions to
-execute. It is an error if there is a cycle in the dependency graph. Creating
-an action doesn't guarantee that it will be executed; that depends on whether
-its outputs are needed for the build.
-
-### Providers
-
-Providers are pieces of information that a rule exposes to other rules that
-depend on it. This data can include output files, libraries, parameters to pass
-on a tool's command line, or anything else a target's consumers should know
-about.
-
-Since a rule's implementation function can only read providers from the
-instantiated target's immediate dependencies, rules need to forward any
-information from a target's dependencies that needs to be known by a target's
-consumers, generally by accumulating that into a [`depset`](/rules/lib/builtins/depset).
-
-A target's providers are specified by a list of provider objects returned by
-the implementation function.
-
-Old implementation functions can also be written in a legacy style where the
-implementation function returns a [`struct`](/rules/lib/builtins/struct) instead of a list
-of provider objects. This style is strongly discouraged and rules should be
-[migrated away from it](#migrating_from_legacy_providers).
-
-#### Default outputs
-
-A target's *default outputs* are the outputs that are requested by default when
-the target is requested for build at the command line. For example, a
-`java_library` target `//pkg:foo` has `foo.jar` as a default output, so that
-will be built by the command `bazel build //pkg:foo`.
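-
-For instance, requesting the target builds just those default outputs (an
-illustrative sketch; the exact output paths depend on the rule and workspace):
-
-```console
-$ bazel build //pkg:foo
-...
-Target //pkg:foo up-to-date:
-  bazel-bin/pkg/foo.jar
-```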
-
-Default outputs are specified by the `files` parameter of
-[`DefaultInfo`](/rules/lib/providers/DefaultInfo):
-
-```python
-def _example_library_impl(ctx):
-    ...
-    return [
-        DefaultInfo(files = depset([output_file]), ...),
-        ...
-    ]
-```
-
-If `DefaultInfo` is not returned by a rule implementation or the `files`
-parameter is not specified, `DefaultInfo.files` defaults to all
-*predeclared outputs* (generally, those created by [output
-attributes](#output_attributes)).
-
-Rules that perform actions should provide default outputs, even if those outputs
-are not expected to be directly used. Actions that are not in the graph of the
-requested outputs are pruned. If an output is only used by a target's consumers,
-those actions won't be performed when the target is built in isolation. This
-makes debugging more difficult because rebuilding just the failing target won't
-reproduce the failure.
-
-#### Runfiles
-
-Runfiles are a set of files used by a target at runtime (as opposed to build
-time). During the [execution phase](/extending/concepts#evaluation-model), Bazel creates
-a directory tree containing symlinks pointing to the runfiles. This stages the
-environment for the binary so it can access the runfiles during runtime.
-
-Runfiles can be added manually during rule creation.
-[`runfiles`](/rules/lib/builtins/runfiles) objects can be created by the `runfiles` method
-on the rule context, [`ctx.runfiles`](/rules/lib/builtins/ctx#runfiles), and passed to the
-`runfiles` parameter on `DefaultInfo`. The executable output of
-[executable rules](#executable-rules) is implicitly added to the runfiles.
-
-Some rules specify attributes, generally named
-[`data`](/reference/be/common-definitions#common.data), whose outputs are added to
-a target's runfiles. Runfiles should also be merged in from `data`, as well as
-from any attributes which might provide code for eventual execution, generally
-`srcs` (which might contain `filegroup` targets with associated `data`) and
-`deps`.
-
-```python
-def _example_library_impl(ctx):
-    ...
-    runfiles = ctx.runfiles(files = ctx.files.data)
-    transitive_runfiles = []
-    for runfiles_attr in (
-        ctx.attr.srcs,
-        ctx.attr.hdrs,
-        ctx.attr.deps,
-        ctx.attr.data,
-    ):
-        for target in runfiles_attr:
-            transitive_runfiles.append(target[DefaultInfo].default_runfiles)
-    runfiles = runfiles.merge_all(transitive_runfiles)
-    return [
-        DefaultInfo(..., runfiles = runfiles),
-        ...
-    ]
-```
-
-#### Custom providers
-
-Providers can be defined using the [`provider`](/rules/lib/globals/bzl#provider)
-function to convey rule-specific information:
-
-```python
-ExampleInfo = provider(
-    "Info needed to compile/link Example code.",
-    fields = {
-        "headers": "depset of header Files from transitive dependencies.",
-        "files_to_link": "depset of Files from compilation.",
-    },
-)
-```
-
-Rule implementation functions can then construct and return provider instances:
-
-```python
-def _example_library_impl(ctx):
-    ...
-    return [
-        ...
-        ExampleInfo(
-            headers = headers,
-            files_to_link = depset(
-                [output_file],
-                transitive = [
-                    dep[ExampleInfo].files_to_link for dep in ctx.attr.deps
-                ],
-            ),
-        )
-    ]
-```
-
-##### Custom initialization of providers
-
-It's possible to guard the instantiation of a provider with custom
-preprocessing and validation logic. This can be used to ensure that all
-provider instances satisfy certain invariants, or to give users a cleaner API for
-obtaining an instance.
-
-This is done by passing an `init` callback to the
-[`provider`](/rules/lib/globals/bzl.html#provider) function. If this callback is given, the
-return type of `provider()` changes to be a tuple of two values: the provider
-symbol that is the ordinary return value when `init` is not used, and a "raw
-constructor".
-
-In this case, when the provider symbol is called, instead of directly returning
-a new instance, it will forward the arguments along to the `init` callback. The
-callback's return value must be a dict mapping field names (strings) to values;
-this is used to initialize the fields of the new instance. Note that the
-callback may have any signature, and if the arguments don't match the signature
-an error is reported as if the callback were invoked directly.
-
-The raw constructor, by contrast, will bypass the `init` callback.
-
-The following example uses `init` to preprocess and validate its arguments:
-
-```python
-# //pkg:exampleinfo.bzl
-
-_core_headers = [...]  # private constant representing standard library files
-
-# Keyword-only arguments are preferred.
-def _exampleinfo_init(*, files_to_link, headers = None, allow_empty_files_to_link = False):
-    if not files_to_link and not allow_empty_files_to_link:
-        fail("files_to_link may not be empty")
-    all_headers = depset(_core_headers, transitive = [headers] if headers else [])
-    return {"files_to_link": files_to_link, "headers": all_headers}
-
-ExampleInfo, _new_exampleinfo = provider(
-    fields = ["files_to_link", "headers"],
-    init = _exampleinfo_init,
-)
-```
-
-A rule implementation may then instantiate the provider as follows:
-
-```python
-ExampleInfo(
-    files_to_link = my_files_to_link,  # may not be empty
-    headers = my_headers,  # will automatically include the core headers
-)
-```
-
-The raw constructor can be used to define alternative public factory functions
-that don't go through the `init` logic. For example, `exampleinfo.bzl`
-could define:
-
-```python
-def make_barebones_exampleinfo(headers):
-    """Returns an ExampleInfo with no files_to_link and only the specified headers."""
-    return _new_exampleinfo(files_to_link = depset(), headers = headers)
-```
-
-Typically, the raw constructor is bound to a variable whose name begins with an
-underscore (`_new_exampleinfo` above), so that user code can't load it and
-generate arbitrary provider instances.
-
-Another use for `init` is to prevent the user from calling the provider
-symbol altogether, and force them to use a factory function instead:
-
-```python
-def _exampleinfo_init_banned(*args, **kwargs):
-    fail("Do not call ExampleInfo(). Use make_exampleinfo() instead.")
-
-ExampleInfo, _new_exampleinfo = provider(
-    ...
-    init = _exampleinfo_init_banned)
-
-def make_exampleinfo(...):
-    ...
-    return _new_exampleinfo(...)
-```
-
-
-
-## Executable rules and test rules
-
-Executable rules define targets that can be invoked by a `bazel run` command.
-Test rules are a special kind of executable rule whose targets can also be
-invoked by a `bazel test` command. Executable and test rules are created by
-setting the respective [`executable`](/rules/lib/globals/bzl#rule.executable) or
-[`test`](/rules/lib/globals/bzl#rule.test) argument to `True` in the call to `rule`:
-
-```python
-example_binary = rule(
-    implementation = _example_binary_impl,
-    executable = True,
-    ...
-)
-
-example_test = rule(
-    implementation = _example_binary_impl,
-    test = True,
-    ...
-)
-```
-
-Test rules must have names that end in `_test`.
(Test *target* names also often
-end in `_test` by convention, but this is not required.) Non-test rules must not
-have this suffix.
-
-Both kinds of rules must produce an executable output file (which may or may not
-be predeclared) that will be invoked by the `run` or `test` commands. To tell
-Bazel which of a rule's outputs to use as this executable, pass it as the
-`executable` argument of a returned [`DefaultInfo`](/rules/lib/providers/DefaultInfo)
-provider. That `executable` is added to the default outputs of the rule (so you
-don't need to pass that to both `executable` and `files`). It's also implicitly
-added to the [runfiles](#runfiles):
-
-```python
-def _example_binary_impl(ctx):
-    executable = ctx.actions.declare_file(ctx.label.name)
-    ...
-    return [
-        DefaultInfo(executable = executable, ...),
-        ...
-    ]
-```
-
-The action that generates this file must set the executable bit on the file. For
-a [`ctx.actions.run`](/rules/lib/builtins/actions#run) or
-[`ctx.actions.run_shell`](/rules/lib/builtins/actions#run_shell) action this should be done
-by the underlying tool that is invoked by the action. For a
-[`ctx.actions.write`](/rules/lib/builtins/actions#write) action, pass `is_executable = True`.
-
-As [legacy behavior](#deprecated_predeclared_outputs), executable rules have a
-special `ctx.outputs.executable` predeclared output. This file serves as the
-default executable if you don't specify one using `DefaultInfo`; it must not be
-used otherwise. This output mechanism is deprecated because it doesn't support
-customizing the executable file's name at analysis time.
-
-See examples of an
-[executable rule](https://github.com/bazelbuild/examples/blob/main/rules/executable/fortune.bzl)
-and a
-[test rule](https://github.com/bazelbuild/examples/blob/main/rules/test_rule/line_length.bzl).
-
-[Executable rules](/reference/be/common-definitions#common-attributes-binaries) and
-[test rules](/reference/be/common-definitions#common-attributes-tests) have additional
-attributes implicitly defined, in addition to those added for
-[all rules](/reference/be/common-definitions#common-attributes). The defaults of
-implicitly-added attributes can't be changed, though this can be worked around
-by wrapping a private rule in a [Starlark macro](/extending/macros) which alters the
-default:
-
-```python
-def example_test(size = "small", **kwargs):
-    _example_test(size = size, **kwargs)
-
-_example_test = rule(
-    ...
-)
-```
-
-### Runfiles location
-
-When an executable target is run with `bazel run` (or `test`), the root of the
-runfiles directory is adjacent to the executable. The paths relate as follows:
-
-```python
-# Given launcher_path and runfile_file:
-runfiles_root = launcher_path.path + ".runfiles"
-workspace_name = ctx.workspace_name
-runfile_path = runfile_file.short_path
-execution_root_relative_path = "%s/%s/%s" % (
-    runfiles_root, workspace_name, runfile_path)
-```
-
-The path to a `File` under the runfiles directory corresponds to
-[`File.short_path`](/rules/lib/builtins/File#short_path).
-
-The binary executed directly by `bazel` is adjacent to the root of the
-`runfiles` directory. However, binaries called *from* the runfiles can't make
-the same assumption. To mitigate this, each binary should provide a way to
-accept its runfiles root as a parameter using an environment variable,
-command-line argument, or flag. This allows a binary to pass the correct
-canonical runfiles root to the binaries it calls.
If that's not set, a binary can guess that it was the
-first binary called and look for an adjacent runfiles directory.
-
-## Advanced topics
-
-### Requesting output files
-
-A single target can have several output files. When a `bazel build` command is
-run, some of the outputs of the targets given to the command are considered to
-be *requested*. Bazel only builds these requested files and the files that they
-directly or indirectly depend on. (In terms of the action graph, Bazel only
-executes the actions that are reachable as transitive dependencies of the
-requested files.)
-
-In addition to [default outputs](#default_outputs), any *predeclared output* can
-be explicitly requested on the command line. Rules can specify predeclared
-outputs using [output attributes](#output_attributes). In that case, the user
-explicitly chooses labels for outputs when they instantiate the rule. To obtain
-[`File`](/rules/lib/builtins/File) objects for output attributes, use the corresponding
-attribute of [`ctx.outputs`](/rules/lib/builtins/ctx#outputs). Rules can
-[implicitly define predeclared outputs](#deprecated_predeclared_outputs) based
-on the target name as well, but this feature is deprecated.
-
-In addition to default outputs, there are *output groups*, which are collections
-of output files that may be requested together. These can be requested with
-[`--output_groups`](/reference/command-line-reference#flag--output_groups). For
-example, if a target `//pkg:mytarget` is of a rule type that has a `debug_files`
-output group, these files can be built by running `bazel build //pkg:mytarget
---output_groups=debug_files`. Since non-predeclared outputs don't have labels,
-they can only be requested by appearing in the default outputs or an output
-group.
-
-Output groups can be specified with the
-[`OutputGroupInfo`](/rules/lib/providers/OutputGroupInfo) provider. Note that unlike many
-built-in providers, `OutputGroupInfo` can take parameters with arbitrary names
-to define output groups with those names:
-
-```python
-def _example_library_impl(ctx):
-    ...
-    debug_file = ctx.actions.declare_file(ctx.label.name + ".pdb")
-    ...
-    return [
-        DefaultInfo(files = depset([output_file]), ...),
-        OutputGroupInfo(
-            debug_files = depset([debug_file]),
-            all_files = depset([output_file, debug_file]),
-        ),
-        ...
-    ]
-```
-
-Also unlike most providers, `OutputGroupInfo` can be returned by both an
-[aspect](/extending/aspects) and the rule target to which that aspect is applied, as
-long as they don't define the same output groups. In that case, the resulting
-providers are merged.
-
-Note that `OutputGroupInfo` generally shouldn't be used to convey specific sorts
-of files from a target to the actions of its consumers. Define
-[rule-specific providers](#custom_providers) for that instead.
-
-### Configurations
-
-Imagine that you want to build a C++ binary for a different architecture. The
-build can be complex and involve multiple steps. Some of the intermediate
-binaries, like compilers and code generators, have to run on
-[the execution platform](/extending/platforms#overview) (which could be your host,
-or a remote executor). Some binaries, like the final output, must be built for
-the target architecture.
-
-For this reason, Bazel has a concept of "configurations" and transitions. The
-topmost targets (the ones requested on the command line) are built in the
-"target" configuration, while tools that should run on the execution platform
-are built in an "exec" configuration.
Rules may generate different actions
-based on the configuration, for instance to change the CPU architecture that is
-passed to the compiler. In some cases, the same library may be needed for
-different configurations. If this happens, it will be analyzed and potentially
-built multiple times.
-
-By default, Bazel builds a target's dependencies in the same configuration as
-the target itself, in other words without transitions. When a dependency is a
-tool that's needed to help build the target, the corresponding attribute should
-specify a transition to an exec configuration. This causes the tool and all its
-dependencies to build for the execution platform.
-
-For each dependency attribute, you can use `cfg` to decide if dependencies
-should build in the same configuration or transition to an exec configuration.
-If a dependency attribute has the flag `executable = True`, `cfg` must be set
-explicitly. This is to guard against accidentally building a tool for the wrong
-configuration.
-[See example](https://github.com/bazelbuild/examples/blob/main/rules/actions_run/execute.bzl)
-
-In general, sources, dependent libraries, and executables that will be needed at
-runtime can use the same configuration.
-
-Tools that are executed as part of the build (such as compilers or code generators)
-should be built for an exec configuration. In this case, specify `cfg = "exec"` in
-the attribute.
-
-Otherwise, executables that are used at runtime (such as part of a test) should
-be built for the target configuration. In this case, specify `cfg = "target"` in
-the attribute.
-
-`cfg = "target"` doesn't actually do anything: it's purely a convenience value to
-help rule designers be explicit about their intentions. When `executable = False`,
-which means `cfg` is optional, only set this when it truly helps readability.
-
-You can also use `cfg = my_transition` to use
-[user-defined transitions](/extending/config#user-defined-transitions), which allow
-rule authors a great deal of flexibility in changing configurations, with the
-drawback of
-[making the build graph larger and less comprehensible](/extending/config#memory-and-performance-considerations).
-
-**Note**: Historically, Bazel didn't have the concept of execution platforms,
-and instead all build actions were considered to run on the host machine. Bazel
-versions before 6.0 created a distinct "host" configuration to represent this.
-If you see references to "host" in code or old documentation, that's what this
-refers to. We recommend using Bazel 6.0 or newer to avoid this extra conceptual
-overhead.
-
-
-
-### Configuration fragments
-
-Rules may access
-[configuration fragments](/rules/lib/fragments) such as
-`cpp` and `java`. However, all required fragments must be declared in
-order to avoid access errors:
-
-```python
-def _impl(ctx):
-    # Using ctx.fragments.cpp leads to an error since it was not declared.
-    x = ctx.fragments.java
-    ...
-
-my_rule = rule(
-    implementation = _impl,
-    fragments = ["java"],  # Required fragments of the target configuration
-    ...
-)
-```
-
-### Runfiles symlinks
-
-Normally, the relative path of a file in the runfiles tree is the same as the
-relative path of that file in the source tree or generated output tree. If these
-need to be different for some reason, you can specify the `root_symlinks` or
-`symlinks` arguments. `root_symlinks` is a dictionary mapping paths to
-files, where the paths are relative to the root of the runfiles directory.
The `symlinks` dictionary is the same, but paths are implicitly prefixed with
-the name of the main workspace (*not* the name of the repository containing the
-current target).
-
-```python
-    ...
-    runfiles = ctx.runfiles(
-        root_symlinks = {"some/path/here.foo": ctx.file.some_data_file2},
-        symlinks = {"some/path/here.bar": ctx.file.some_data_file3},
-    )
-    # Creates something like:
-    # sometarget.runfiles/
-    #     some/
-    #         path/
-    #             here.foo -> some_data_file2
-    #     <workspace_name>/
-    #         some/
-    #             path/
-    #                 here.bar -> some_data_file3
-```
-
-If `symlinks` or `root_symlinks` is used, be careful not to map two different
-files to the same path in the runfiles tree. This will cause the build to fail
-with an error describing the conflict. To fix, you will need to modify your
-`ctx.runfiles` arguments to remove the collision. This checking will be done for
-any targets using your rule, as well as targets of any kind that depend on those
-targets. This is especially risky if your tool is likely to be used transitively
-by another tool; symlink names must be unique across the runfiles of a tool and
-all of its dependencies.
-
-### Code coverage
-
-When the [`coverage`](/reference/command-line-reference#coverage) command is run,
-the build may need to add coverage instrumentation for certain targets. The
-build also gathers the list of source files that are instrumented. The subset of
-targets that are considered is controlled by the flag
-[`--instrumentation_filter`](/reference/command-line-reference#flag--instrumentation_filter).
-Test targets are excluded, unless
-[`--instrument_test_targets`](/reference/command-line-reference#flag--instrument_test_targets)
-is specified.
-
-If a rule implementation adds coverage instrumentation at build time, it needs
-to account for that in its implementation function.
-[ctx.coverage_instrumented](/rules/lib/builtins/ctx#coverage_instrumented) returns
-`True` in coverage mode if a target's sources should be instrumented:
-
-```python
-# Are this rule's sources instrumented?
-if ctx.coverage_instrumented():
-    # Do something to turn on coverage for this compile action
-```
-
-Logic that always needs to be on in coverage mode (whether a target's sources
-specifically are instrumented or not) can be conditioned on
-[ctx.configuration.coverage_enabled](/rules/lib/builtins/configuration#coverage_enabled).
-
-If the rule directly includes sources from its dependencies before compilation
-(such as header files), it may also need to turn on compile-time instrumentation if
-the dependencies' sources should be instrumented:
-
-```python
-# Are this rule's sources or any of the sources for its direct dependencies
-# in deps instrumented?
-if (ctx.configuration.coverage_enabled and
-    (ctx.coverage_instrumented() or
-     any([ctx.coverage_instrumented(dep) for dep in ctx.attr.deps]))):
-    # Do something to turn on coverage for this compile action
-```
-
-Rules should also provide information about which attributes are relevant for
-coverage with the `InstrumentedFilesInfo` provider, constructed using
-[`coverage_common.instrumented_files_info`](/rules/lib/toplevel/coverage_common#instrumented_files_info).
-The `dependency_attributes` parameter of `instrumented_files_info` should list
-all runtime dependency attributes, including code dependencies like `deps` and
-data dependencies like `data`. The `source_attributes` parameter should list the
-rule's source file attributes if coverage instrumentation might be added:
-
-```python
-def _example_library_impl(ctx):
-    ...
-    return [
-        ...
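-        # The provider constructed below tells coverage tooling which
-        # attributes carry runtime dependencies and which carry sources
-        # that may be instrumented.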
-        coverage_common.instrumented_files_info(
-            ctx,
-            dependency_attributes = ["deps", "data"],
-            # Omitted if coverage is not supported for this rule:
-            source_attributes = ["srcs", "hdrs"],
-        )
-        ...
-    ]
-```
-
-If `InstrumentedFilesInfo` is not returned, a default one is created that
-includes, in `dependency_attributes`, each non-tool
-[dependency attribute](#dependency_attributes) that doesn't set
-[`cfg`](#configuration) to `"exec"` in the attribute schema. (This isn't ideal
-behavior, since it puts attributes like `srcs` in `dependency_attributes`
-instead of `source_attributes`, but it avoids the need for explicit coverage
-configuration for all rules in the dependency chain.)
-
-### Validation Actions
-
-Sometimes you need to validate something about the build, and the
-information required to do that validation is available only in artifacts
-(source files or generated files). Because this information is in artifacts,
-rules can't do this validation at analysis time because rules can't read
-files. Instead, actions must do this validation at execution time. When
-validation fails, the action will fail, and hence so will the build.
-
-Examples of validations that might be run are static analysis, linting,
-dependency and consistency checks, and style checks.
-
-Validation actions can also help to improve build performance by moving parts
-of actions that are not required for building artifacts into separate actions.
-For example, if a single action that does compilation and linting can be
-separated into a compilation action and a linting action, then the linting
-action can be run as a validation action and run in parallel with other actions.
-
-These "validation actions" often don't produce anything that is used elsewhere
-in the build, since they only need to assert things about their inputs. This
-presents a problem though: If a validation action doesn't produce anything that
-is used elsewhere in the build, how does a rule get the action to run?
-Historically, the approach was to have the validation action output an empty
-file, and artificially add that output to the inputs of some other important
-action in the build, such as the compile action.
-
-This works, because Bazel will always run the validation action when the compile
-action is run, but this has significant drawbacks:
-
-1. The validation action is in the critical path of the build. Because Bazel
-thinks the empty output is required to run the compile action, it will run the
-validation action first, even though the compile action will ignore the input.
-This reduces parallelism and slows down builds.
-
-2. If other actions in the build might run instead of the
-compile action, then the empty outputs of validation actions need to be added to
-those actions as well (`java_library`'s source jar output, for example). This is
-also a problem if new actions that might run instead of the compile action are
-added later, and the empty validation output is accidentally left off.
-
-The solution to these problems is to use the Validations Output Group.
-
-#### Validations Output Group
-
-The Validations Output Group is an output group designed to hold the otherwise
-unused outputs of validation actions, so that they don't need to be artificially
-added to the inputs of other actions.
-
-This group is special in that its outputs are always requested, regardless of
-the value of the `--output_groups` flag, and regardless of how the target is
-depended upon (for example, on the command line, as a dependency, or through
-implicit outputs of the target).
Note that normal caching and incrementality -still apply: if the inputs to the validation action have not changed and the -validation action previously succeeded, then the validation action won't be -run. - - - -Using this output group still requires that validation actions output some file, -even an empty one. This might require wrapping some tools that normally don't -create outputs so that a file is created. - -A target's validation actions are not run in three cases: - -* When the target is depended upon as a tool -* When the target is depended upon as an implicit dependency (for example, an - attribute that starts with "_") -* When the target is built in the exec configuration. - -It is assumed that these targets have their own -separate builds and tests that would uncover any validation failures. - -#### Using the Validations Output Group - -The Validations Output Group is named `_validation` and is used like any other -output group: - -```python -def _rule_with_validation_impl(ctx): - - ctx.actions.write(ctx.outputs.main, "main output\n") - ctx.actions.write(ctx.outputs.implicit, "implicit output\n") - - validation_output = ctx.actions.declare_file(ctx.attr.name + ".validation") - ctx.actions.run( - outputs = [validation_output], - executable = ctx.executable._validation_tool, - arguments = [validation_output.path], - ) - - return [ - DefaultInfo(files = depset([ctx.outputs.main])), - OutputGroupInfo(_validation = depset([validation_output])), - ] - - -rule_with_validation = rule( - implementation = _rule_with_validation_impl, - outputs = { - "main": "%{name}.main", - "implicit": "%{name}.implicit", - }, - attrs = { - "_validation_tool": attr.label( - default = Label("//validation_actions:validation_tool"), - executable = True, - cfg = "exec" - ), - } -) -``` - -Notice that the validation output file is not added to the `DefaultInfo` or the -inputs to any other action. The validation action for a target of this rule kind -will still run if the target is depended upon by label, or any of the target's -implicit outputs are directly or indirectly depended upon. - -It is usually important that the outputs of validation actions only go into the -validation output group, and are not added to the inputs of other actions, as -this could defeat parallelism gains. Note however that Bazel doesn't -have any special checking to enforce this. Therefore, you should test -that validation action outputs are not added to the inputs of any actions in the -tests for Starlark rules. For example: - -```python -load("@bazel_skylib//lib:unittest.bzl", "analysistest") - -def _validation_outputs_test_impl(ctx): - env = analysistest.begin(ctx) - - actions = analysistest.target_actions(env) - target = analysistest.target_under_test(env) - validation_outputs = target.output_groups._validation.to_list() - for action in actions: - for validation_output in validation_outputs: - if validation_output in action.inputs.to_list(): - analysistest.fail(env, - "%s is a validation action output, but is an input to action %s" % ( - validation_output, action)) - - return analysistest.end(env) - -validation_outputs_test = analysistest.make(_validation_outputs_test_impl) -``` - -#### Validation Actions Flag - -Running validation actions is controlled by the `--run_validations` command line -flag, which defaults to true. 
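-
-For example, to build a target without running its validation actions, the flag
-can be negated like any boolean Bazel flag (the target label here is
-illustrative):
-
-```console
-$ bazel build --norun_validations //pkg:mytarget
-```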
- -## Deprecated features - -### Deprecated predeclared outputs - -There are two **deprecated** ways of using predeclared outputs: - -* The [`outputs`](/rules/lib/globals/bzl#rule.outputs) parameter of `rule` specifies - a mapping between output attribute names and string templates for generating - predeclared output labels. Prefer using non-predeclared outputs and - explicitly adding outputs to `DefaultInfo.files`. Use the rule target's - label as input for rules which consume the output instead of a predeclared - output's label. - -* For [executable rules](#executable-rules), `ctx.outputs.executable` refers - to a predeclared executable output with the same name as the rule target. - Prefer declaring the output explicitly, for example with - `ctx.actions.declare_file(ctx.label.name)`, and ensure that the command that - generates the executable sets its permissions to allow execution. Explicitly - pass the executable output to the `executable` parameter of `DefaultInfo`. - -### Runfiles features to avoid - -[`ctx.runfiles`](/rules/lib/builtins/ctx#runfiles) and the [`runfiles`](/rules/lib/builtins/runfiles) -type have a complex set of features, many of which are kept for legacy reasons. -The following recommendations help reduce complexity: - -* **Avoid** use of the `collect_data` and `collect_default` modes of - [`ctx.runfiles`](/rules/lib/builtins/ctx#runfiles). These modes implicitly collect - runfiles across certain hardcoded dependency edges in confusing ways. - Instead, add files using the `files` or `transitive_files` parameters of - `ctx.runfiles`, or by merging in runfiles from dependencies with - `runfiles = runfiles.merge(dep[DefaultInfo].default_runfiles)`. - -* **Avoid** use of the `data_runfiles` and `default_runfiles` of the - `DefaultInfo` constructor. Specify `DefaultInfo(runfiles = ...)` instead. - The distinction between "default" and "data" runfiles is maintained for - legacy reasons. For example, some rules put their default outputs in - `data_runfiles`, but not `default_runfiles`. Instead of using - `data_runfiles`, rules should *both* include default outputs and merge in - `default_runfiles` from attributes which provide runfiles (often - [`data`](/reference/be/common-definitions#common-attributes.data)). - -* When retrieving `runfiles` from `DefaultInfo` (generally only for merging - runfiles between the current rule and its dependencies), use - `DefaultInfo.default_runfiles`, **not** `DefaultInfo.data_runfiles`. - -### Migrating from legacy providers - -Historically, Bazel providers were simple fields on the `Target` object. They -were accessed using the dot operator, and they were created by putting the field -in a [`struct`](/rules/lib/builtins/struct) returned by the rule's -implementation function instead of a list of provider objects: - -```python -return struct(example_info = struct(headers = depset(...))) -``` - -Such providers can be retrieved from the corresponding field of the `Target` object: - -```python -transitive_headers = [hdr.example_info.headers for hdr in ctx.attr.hdrs] -``` - -*This style is deprecated and should not be used in new code;* see following for -information that may help you migrate. The new provider mechanism avoids name -clashes. It also supports data hiding, by requiring any code accessing a -provider instance to retrieve it using the provider symbol. - -For the moment, legacy providers are still supported. A rule can return both -legacy and modern providers as follows: - -```python -def _old_rule_impl(ctx): - ... 
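-    # Sketch: a single implementation can return a legacy struct provider and
-    # modern provider objects side by side while consumers are migrated.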
- legacy_data = struct(x = "foo", ...) - modern_data = MyInfo(y = "bar", ...) - # When any legacy providers are returned, the top-level returned value is a - # struct. - return struct( - # One key = value entry for each legacy provider. - legacy_info = legacy_data, - ... - # Additional modern providers: - providers = [modern_data, ...]) -``` - -If `dep` is the resulting `Target` object for an instance of this rule, the -providers and their contents can be retrieved as `dep.legacy_info.x` and -`dep[MyInfo].y`. - -In addition to `providers`, the returned struct can also take several other -fields that have special meaning (and thus don't create a corresponding legacy -provider): - -* The fields `files`, `runfiles`, `data_runfiles`, `default_runfiles`, and - `executable` correspond to the same-named fields of - [`DefaultInfo`](/rules/lib/providers/DefaultInfo). It is not allowed to specify any of - these fields while also returning a `DefaultInfo` provider. - -* The field `output_groups` takes a struct value and corresponds to an - [`OutputGroupInfo`](/rules/lib/providers/OutputGroupInfo). - -In [`provides`](/rules/lib/globals/bzl#rule.provides) declarations of rules, and in -[`providers`](/rules/lib/toplevel/attr#label_list.providers) declarations of dependency -attributes, legacy providers are passed in as strings and modern providers are -passed in by their `Info` symbol. Be sure to change from strings to symbols -when migrating. For complex or large rule sets where it is difficult to update -all rules atomically, you may have an easier time if you follow this sequence of -steps: - -1. Modify the rules that produce the legacy provider to produce both the legacy - and modern providers, using the preceding syntax. For rules that declare they - return the legacy provider, update that declaration to include both the - legacy and modern providers. - -2. Modify the rules that consume the legacy provider to instead consume the - modern provider. If any attribute declarations require the legacy provider, - also update them to instead require the modern provider. Optionally, you can - interleave this work with step 1 by having consumers accept or require either - provider: Test for the presence of the legacy provider using - `hasattr(target, 'foo')`, or the new provider using `FooInfo in target`. - -3. Fully remove the legacy provider from all rules. diff --git a/8.0.1/extending/toolchains.mdx b/8.0.1/extending/toolchains.mdx deleted file mode 100644 index b904cbe..0000000 --- a/8.0.1/extending/toolchains.mdx +++ /dev/null @@ -1,600 +0,0 @@ ---- -title: 'Toolchains' ---- - - - -This page describes the toolchain framework, which is a way for rule authors to -decouple their rule logic from platform-based selection of tools. It is -recommended to read the [rules](/extending/rules) and [platforms](/extending/platforms) -pages before continuing. This page covers why toolchains are needed, how to -define and use them, and how Bazel selects an appropriate toolchain based on -platform constraints. - -## Motivation - -Let's first look at the problem toolchains are designed to solve. Suppose you -are writing rules to support the "bar" programming language. Your `bar_binary` -rule would compile `*.bar` files using the `barc` compiler, a tool that itself -is built as another target in your workspace. Since users who write `bar_binary` -targets shouldn't have to specify a dependency on the compiler, you make it an -implicit dependency by adding it to the rule definition as a private attribute. 
- -```python -bar_binary = rule( - implementation = _bar_binary_impl, - attrs = { - "srcs": attr.label_list(allow_files = True), - ... - "_compiler": attr.label( - default = "//bar_tools:barc_linux", # the compiler running on linux - providers = [BarcInfo], - ), - }, -) -``` - -`//bar_tools:barc_linux` is now a dependency of every `bar_binary` target, so -it'll be built before any `bar_binary` target. It can be accessed by the rule's -implementation function just like any other attribute: - -```python -BarcInfo = provider( - doc = "Information about how to invoke the barc compiler.", - # In the real world, compiler_path and system_lib might hold File objects, - # but for simplicity they are strings for this example. arch_flags is a list - # of strings. - fields = ["compiler_path", "system_lib", "arch_flags"], -) - -def _bar_binary_impl(ctx): - ... - info = ctx.attr._compiler[BarcInfo] - command = "%s -l %s %s" % ( - info.compiler_path, - info.system_lib, - " ".join(info.arch_flags), - ) - ... -``` - -The issue here is that the compiler's label is hardcoded into `bar_binary`, yet -different targets may need different compilers depending on what platform they -are being built for and what platform they are being built on -- called the -*target platform* and *execution platform*, respectively. Furthermore, the rule -author does not necessarily even know all the available tools and platforms, so -it is not feasible to hardcode them in the rule's definition. - -A less-than-ideal solution would be to shift the burden onto users, by making -the `_compiler` attribute non-private. Then individual targets could be -hardcoded to build for one platform or another. - -```python -bar_binary( - name = "myprog_on_linux", - srcs = ["mysrc.bar"], - compiler = "//bar_tools:barc_linux", -) - -bar_binary( - name = "myprog_on_windows", - srcs = ["mysrc.bar"], - compiler = "//bar_tools:barc_windows", -) -``` - -You can improve on this solution by using `select` to choose the `compiler` -[based on the platform](/docs/configurable-attributes): - -```python -config_setting( - name = "on_linux", - constraint_values = [ - "@platforms//os:linux", - ], -) - -config_setting( - name = "on_windows", - constraint_values = [ - "@platforms//os:windows", - ], -) - -bar_binary( - name = "myprog", - srcs = ["mysrc.bar"], - compiler = select({ - ":on_linux": "//bar_tools:barc_linux", - ":on_windows": "//bar_tools:barc_windows", - }), -) -``` - -But this is tedious and a bit much to ask of every single `bar_binary` user. -If this style is not used consistently throughout the workspace, it leads to -builds that work fine on a single platform but fail when extended to -multi-platform scenarios. It also does not address the problem of adding support -for new platforms and compilers without modifying existing rules or targets. - -The toolchain framework solves this problem by adding an extra level of -indirection. Essentially, you declare that your rule has an abstract dependency -on *some* member of a family of targets (a toolchain type), and Bazel -automatically resolves this to a particular target (a toolchain) based on the -applicable platform constraints. Neither the rule author nor the target author -need know the complete set of available platforms and toolchains. - -## Writing rules that use toolchains - -Under the toolchain framework, instead of having rules depend directly on tools, -they instead depend on *toolchain types*. 
A toolchain type is a simple target -that represents a class of tools that serve the same role for different -platforms. For instance, you can declare a type that represents the bar -compiler: - -```python -# By convention, toolchain_type targets are named "toolchain_type" and -# distinguished by their package path. So the full path for this would be -# //bar_tools:toolchain_type. -toolchain_type(name = "toolchain_type") -``` - -The rule definition in the previous section is modified so that instead of -taking in the compiler as an attribute, it declares that it consumes a -`//bar_tools:toolchain_type` toolchain. - -```python -bar_binary = rule( - implementation = _bar_binary_impl, - attrs = { - "srcs": attr.label_list(allow_files = True), - ... - # No `_compiler` attribute anymore. - }, - toolchains = ["//bar_tools:toolchain_type"], -) -``` - -The implementation function now accesses this dependency under `ctx.toolchains` -instead of `ctx.attr`, using the toolchain type as the key. - -```python -def _bar_binary_impl(ctx): - ... - info = ctx.toolchains["//bar_tools:toolchain_type"].barcinfo - # The rest is unchanged. - command = "%s -l %s %s" % ( - info.compiler_path, - info.system_lib, - " ".join(info.arch_flags), - ) - ... -``` - -`ctx.toolchains["//bar_tools:toolchain_type"]` returns the -[`ToolchainInfo` provider](/rules/lib/toplevel/platform_common#ToolchainInfo) -of whatever target Bazel resolved the toolchain dependency to. The fields of the -`ToolchainInfo` object are set by the underlying tool's rule; in the next -section, this rule is defined such that there is a `barcinfo` field that wraps -a `BarcInfo` object. - -Bazel's procedure for resolving toolchains to targets is described -[below](#toolchain-resolution). Only the resolved toolchain target is actually -made a dependency of the `bar_binary` target, not the whole space of candidate -toolchains. - -### Mandatory and Optional Toolchains - -By default, when a rule expresses a toolchain type dependency using a bare label -(as shown above), the toolchain type is considered to be **mandatory**. If Bazel -is unable to find a matching toolchain (see -[Toolchain resolution](#toolchain-resolution) below) for a mandatory toolchain -type, this is an error and analysis halts. - -It is possible instead to declare an **optional** toolchain type dependency, as -follows: - -```python -bar_binary = rule( - ... - toolchains = [ - config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = False), - ], -) -``` - -When an optional toolchain type cannot be resolved, analysis continues, and the -result of `ctx.toolchains["//bar_tools:toolchain_type"]` is `None`. - -The [`config_common.toolchain_type`](/rules/lib/toplevel/config_common#toolchain_type) -function defaults to mandatory. - -The following forms can be used: - -- Mandatory toolchain types: - - `toolchains = ["//bar_tools:toolchain_type"]` - - `toolchains = [config_common.toolchain_type("//bar_tools:toolchain_type")]` - - `toolchains = [config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = True)]` -- Optional toolchain types: - - `toolchains = [config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = False)]` - -```python -bar_binary = rule( - ... - toolchains = [ - "//foo_tools:toolchain_type", - config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = False), - ], -) -``` - -You can mix and match forms in the same rule, also. 
However, if the same
-toolchain type is listed multiple times, the most strict form wins:
-mandatory is stricter than optional.
-
-### Writing aspects that use toolchains
-
-Aspects have access to the same toolchain API as rules: you can define required
-toolchain types, access toolchains via the context, and use them to generate new
-actions using the toolchain.
-
-```py
-bar_aspect = aspect(
-    implementation = _bar_aspect_impl,
-    attrs = {},
-    toolchains = ['//bar_tools:toolchain_type'],
-)
-
-def _bar_aspect_impl(target, ctx):
-  toolchain = ctx.toolchains['//bar_tools:toolchain_type']
-  # Use the toolchain provider like in a rule.
-  return []
-```
-
-## Defining toolchains
-
-To define some toolchains for a given toolchain type, you need three things:
-
-1. A language-specific rule representing the kind of tool or tool suite. By
-   convention this rule's name is suffixed with "\_toolchain".
-
-   1. **Note:** The `\_toolchain` rule cannot create any build actions.
-      Rather, it collects artifacts from other rules and forwards them to the
-      rule that uses the toolchain. That rule is responsible for creating all
-      build actions.
-
-2. Several targets of this rule type, representing versions of the tool or tool
-   suite for different platforms.
-
-3. For each such target, an associated target of the generic
-   [`toolchain`](/reference/be/platforms-and-toolchains#toolchain)
-   rule, to provide metadata used by the toolchain framework. This `toolchain`
-   target also refers to the `toolchain_type` associated with this toolchain.
-   This means that a given `_toolchain` rule is not inherently tied to any
-   particular `toolchain_type`; it is only the `toolchain` instance that uses
-   the `_toolchain` rule that associates it with a `toolchain_type`.
-
-For our running example, here's a definition for a `bar_toolchain` rule. Our
-example has only a compiler, but other tools such as a linker could also be
-grouped underneath it.
-
-```python
-def _bar_toolchain_impl(ctx):
-    toolchain_info = platform_common.ToolchainInfo(
-        barcinfo = BarcInfo(
-            compiler_path = ctx.attr.compiler_path,
-            system_lib = ctx.attr.system_lib,
-            arch_flags = ctx.attr.arch_flags,
-        ),
-    )
-    return [toolchain_info]
-
-bar_toolchain = rule(
-    implementation = _bar_toolchain_impl,
-    attrs = {
-        "compiler_path": attr.string(),
-        "system_lib": attr.string(),
-        "arch_flags": attr.string_list(),
-    },
-)
-```
-
-The rule must return a `ToolchainInfo` provider, which becomes the object that
-the consuming rule retrieves using `ctx.toolchains` and the label of the
-toolchain type. `ToolchainInfo`, like `struct`, can hold arbitrary field-value
-pairs. The specification of exactly what fields are added to the `ToolchainInfo`
-should be clearly documented at the toolchain type. In this example, the values
-are returned wrapped in a `BarcInfo` object to reuse the schema defined above;
-this style may be useful for validation and code reuse.
-
-Now you can define targets for specific `barc` compilers.
-
-```python
-bar_toolchain(
-    name = "barc_linux",
-    arch_flags = [
-        "--arch=Linux",
-        "--debug_everything",
-    ],
-    compiler_path = "/path/to/barc/on/linux",
-    system_lib = "/usr/lib/libbarc.so",
-)
-
-bar_toolchain(
-    name = "barc_windows",
-    arch_flags = [
-        "--arch=Windows",
-        # Different flags, no debug support on windows.
-    ],
-    compiler_path = "C:\\path\\on\\windows\\barc.exe",
-    system_lib = "C:\\path\\on\\windows\\barclib.dll",
-)
-```
-
-Finally, you create `toolchain` definitions for the two `bar_toolchain` targets.
-These definitions link the language-specific targets to the toolchain type and -provide the constraint information that tells Bazel when the toolchain is -appropriate for a given platform. - -```python -toolchain( - name = "barc_linux_toolchain", - exec_compatible_with = [ - "@platforms//os:linux", - "@platforms//cpu:x86_64", - ], - target_compatible_with = [ - "@platforms//os:linux", - "@platforms//cpu:x86_64", - ], - toolchain = ":barc_linux", - toolchain_type = ":toolchain_type", -) - -toolchain( - name = "barc_windows_toolchain", - exec_compatible_with = [ - "@platforms//os:windows", - "@platforms//cpu:x86_64", - ], - target_compatible_with = [ - "@platforms//os:windows", - "@platforms//cpu:x86_64", - ], - toolchain = ":barc_windows", - toolchain_type = ":toolchain_type", -) -``` - -The use of relative path syntax above suggests these definitions are all in the -same package, but there's no reason the toolchain type, language-specific -toolchain targets, and `toolchain` definition targets can't all be in separate -packages. - -See the [`go_toolchain`](https://github.com/bazelbuild/rules_go/blob/master/go/private/go_toolchain.bzl) -for a real-world example. - -### Toolchains and configurations - -An important question for rule authors is, when a `bar_toolchain` target is -analyzed, what [configuration](/reference/glossary#configuration) does it see, and what transitions -should be used for dependencies? The example above uses string attributes, but -what would happen for a more complicated toolchain that depends on other targets -in the Bazel repository? - -Let's see a more complex version of `bar_toolchain`: - -```python -def _bar_toolchain_impl(ctx): - # The implementation is mostly the same as above, so skipping. - pass - -bar_toolchain = rule( - implementation = _bar_toolchain_impl, - attrs = { - "compiler": attr.label( - executable = True, - mandatory = True, - cfg = "exec", - ), - "system_lib": attr.label( - mandatory = True, - cfg = "target", - ), - "arch_flags": attr.string_list(), - }, -) -``` - -The use of [`attr.label`](/rules/lib/toplevel/attr#label) is the same as for a standard rule, -but the meaning of the `cfg` parameter is slightly different. - -The dependency from a target (called the "parent") to a toolchain via toolchain -resolution uses a special configuration transition called the "toolchain -transition". The toolchain transition keeps the configuration the same, except -that it forces the execution platform to be the same for the toolchain as for -the parent (otherwise, toolchain resolution for the toolchain could pick any -execution platform, and wouldn't necessarily be the same as for parent). This -allows any `exec` dependencies of the toolchain to also be executable for the -parent's build actions. Any of the toolchain's dependencies which use `cfg = -"target"` (or which don't specify `cfg`, since "target" is the default) are -built for the same target platform as the parent. This allows toolchain rules to -contribute both libraries (the `system_lib` attribute above) and tools (the -`compiler` attribute) to the build rules which need them. The system libraries -are linked into the final artifact, and so need to be built for the same -platform, whereas the compiler is a tool invoked during the build, and needs to -be able to run on the execution platform. - -## Registering and building with toolchains - -At this point all the building blocks are assembled, and you just need to make -the toolchains available to Bazel's resolution procedure. 
This is done by -registering the toolchain, either in a `MODULE.bazel` file using -`register_toolchains()`, or by passing the toolchains' labels on the command -line using the `--extra_toolchains` flag. - -```python -register_toolchains( - "//bar_tools:barc_linux_toolchain", - "//bar_tools:barc_windows_toolchain", - # Target patterns are also permitted, so you could have also written: - # "//bar_tools:all", - # or even - # "//bar_tools/...", -) -``` - -When using target patterns to register toolchains, the order in which the -individual toolchains are registered is determined by the following rules: - -* The toolchains defined in a subpackage of a package are registered before the - toolchains defined in the package itself. -* Within a package, toolchains are registered in the lexicographical order of - their names. - -Now when you build a target that depends on a toolchain type, an appropriate -toolchain will be selected based on the target and execution platforms. - -```python -# my_pkg/BUILD - -platform( - name = "my_target_platform", - constraint_values = [ - "@platforms//os:linux", - ], -) - -bar_binary( - name = "my_bar_binary", - ... -) -``` - -```sh -bazel build //my_pkg:my_bar_binary --platforms=//my_pkg:my_target_platform -``` - -Bazel will see that `//my_pkg:my_bar_binary` is being built with a platform that -has `@platforms//os:linux` and therefore resolve the -`//bar_tools:toolchain_type` reference to `//bar_tools:barc_linux_toolchain`. -This will end up building `//bar_tools:barc_linux` but not -`//bar_tools:barc_windows`. - -## Toolchain resolution - -Note: [Some Bazel rules](/concepts/platforms#status) do not yet support -toolchain resolution. - -For each target that uses toolchains, Bazel's toolchain resolution procedure -determines the target's concrete toolchain dependencies. The procedure takes as -input a set of required toolchain types, the target platform, the list of -available execution platforms, and the list of available toolchains. Its outputs -are a selected toolchain for each toolchain type as well as a selected execution -platform for the current target. - -The available execution platforms and toolchains are gathered from the -external dependency graph via -[`register_execution_platforms`](/rules/lib/globals/module#register_execution_platforms) -and -[`register_toolchains`](/rules/lib/globals/module#register_toolchains) calls in -`MODULE.bazel` files. -Additional execution platforms and toolchains may also be specified on the -command line via -[`--extra_execution_platforms`](/reference/command-line-reference#flag--extra_execution_platforms) -and -[`--extra_toolchains`](/reference/command-line-reference#flag--extra_toolchains). -The host platform is automatically included as an available execution platform. -Available platforms and toolchains are tracked as ordered lists for determinism, -with preference given to earlier items in the list. - -The set of available toolchains, in priority order, is created from -`--extra_toolchains` and `register_toolchains`: - -1. Toolchains registered using `--extra_toolchains` are added first. (Within - these, the **last** toolchain has highest priority.) -2. Toolchains registered using `register_toolchains` in the transitive external - dependency graph, in the following order: (Within these, the **first** - mentioned toolchain has highest priority.) - 1. Toolchains registered by the root module (as in, the `MODULE.bazel` at the - workspace root); - 2. 
Toolchains registered in the user's `WORKSPACE` file, including in any - macros invoked from there; - 3. Toolchains registered by non-root modules (as in, dependencies specified by - the root module, and their dependencies, and so forth); - 4. Toolchains registered in the "WORKSPACE suffix"; this is only used by - certain native rules bundled with the Bazel installation. - -**NOTE:** [Pseudo-targets like `:all`, `:*`, and -`/...`](/run/build#specifying-build-targets) are ordered by Bazel's package -loading mechanism, which uses a lexicographic ordering. - -The resolution steps are as follows. - -1. A `target_compatible_with` or `exec_compatible_with` clause *matches* a - platform if, for each `constraint_value` in its list, the platform also has - that `constraint_value` (either explicitly or as a default). - - If the platform has `constraint_value`s from `constraint_setting`s not - referenced by the clause, these do not affect matching. - -1. If the target being built specifies the - [`exec_compatible_with` attribute](/reference/be/common-definitions#common.exec_compatible_with) - (or its rule definition specifies the - [`exec_compatible_with` argument](/rules/lib/globals/bzl#rule.exec_compatible_with)), - the list of available execution platforms is filtered to remove - any that do not match the execution constraints. - -1. The list of available toolchains is filtered to remove any toolchains - specifying `target_settings` that don't match the current configuration. - -1. For each available execution platform, you associate each toolchain type with - the first available toolchain, if any, that is compatible with this execution - platform and the target platform. - -1. Any execution platform that failed to find a compatible mandatory toolchain - for one of its toolchain types is ruled out. Of the remaining platforms, the - first one becomes the current target's execution platform, and its associated - toolchains (if any) become dependencies of the target. - -The chosen execution platform is used to run all actions that the target -generates. - -In cases where the same target can be built in multiple configurations (such as -for different CPUs) within the same build, the resolution procedure is applied -independently to each version of the target. - -If the rule uses [execution groups](/extending/exec-groups), each execution -group performs toolchain resolution separately, and each has its own execution -platform and toolchains. - -## Debugging toolchains - -If you are adding toolchain support to an existing rule, use the -`--toolchain_resolution_debug=regex` flag. During toolchain resolution, the flag -provides verbose output for toolchain types or target names that match the regex variable. You -can use `.*` to output all information. Bazel will output names of toolchains it -checks and skips during the resolution process. - -If you'd like to see which [`cquery`](/query/cquery) dependencies are from toolchain -resolution, use `cquery`'s [`--transitions`](/query/cquery#transitions) flag: - -``` -# Find all direct dependencies of //cc:my_cc_lib. This includes explicitly -# declared dependencies, implicit dependencies, and toolchain dependencies. 
-$ bazel cquery 'deps(//cc:my_cc_lib, 1)' -//cc:my_cc_lib (96d6638) -@bazel_tools//tools/cpp:toolchain (96d6638) -@bazel_tools//tools/def_parser:def_parser (HOST) -//cc:my_cc_dep (96d6638) -@local_config_platform//:host (96d6638) -@bazel_tools//tools/cpp:toolchain_type (96d6638) -//:default_host_platform (96d6638) -@local_config_cc//:cc-compiler-k8 (HOST) -//cc:my_cc_lib.cc (null) -@bazel_tools//tools/cpp:grep-includes (HOST) - -# Which of these are from toolchain resolution? -$ bazel cquery 'deps(//cc:my_cc_lib, 1)' --transitions=lite | grep "toolchain dependency" - [toolchain dependency]#@local_config_cc//:cc-compiler-k8#HostTransition -> b6df211 -``` diff --git a/8.0.1/external/advanced.mdx b/8.0.1/external/advanced.mdx deleted file mode 100644 index 26ece4d..0000000 --- a/8.0.1/external/advanced.mdx +++ /dev/null @@ -1,183 +0,0 @@ ---- -title: 'Advanced topics on external dependencies' ---- - - - -## Shadowing dependencies in WORKSPACE - -Note: This section applies to the [WORKSPACE -system](/external/overview#workspace-system) only. For -[Bzlmod](/external/overview#bzlmod), use a [multiple-version -override](/external/module#multiple-version_override). - -Whenever possible, have a single version policy in your project, which is -required for dependencies that you compile against and end up in your final -binary. For other cases, you can shadow dependencies: - -myproject/WORKSPACE - -```python -workspace(name = "myproject") - -local_repository( - name = "A", - path = "../A", -) -local_repository( - name = "B", - path = "../B", -) -``` - -A/WORKSPACE - -```python -workspace(name = "A") - -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -http_archive( - name = "testrunner", - urls = ["https://github.com/testrunner/v1.zip"], - sha256 = "...", -) -``` - -B/WORKSPACE {# This is not a buganizer link okay?? #} - -```python -workspace(name = "B") - -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -http_archive( - name = "testrunner", - urls = ["https://github.com/testrunner/v2.zip"], - sha256 = "..." -) -``` - -Both dependencies `A` and `B` depend on different versions of `testrunner`. -Include both in `myproject` without conflict by giving them distinct names in -`myproject/WORKSPACE`: - -```python -workspace(name = "myproject") - -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -http_archive( - name = "testrunner-v1", - urls = ["https://github.com/testrunner/v1.zip"], - sha256 = "..." -) -http_archive( - name = "testrunner-v2", - urls = ["https://github.com/testrunner/v2.zip"], - sha256 = "..." -) -local_repository( - name = "A", - path = "../A", - repo_mapping = {"@testrunner" : "@testrunner-v1"} -) -local_repository( - name = "B", - path = "../B", - repo_mapping = {"@testrunner" : "@testrunner-v2"} -) -``` - -You can also use this mechanism to join diamonds. For example, if `A` and `B` -have the same dependency but call it by different names, join those dependencies -in `myproject/WORKSPACE`. - -## Overriding repositories from the command line - -To override a declared repository with a local repository from the command line, -use the -[`--override_repository`](/reference/command-line-reference#flag--override_repository) -flag. Using this flag changes the contents of external repositories without -changing your source code. - -For example, to override `@foo` to the local directory `/path/to/local/foo`, -pass the `--override_repository=foo=/path/to/local/foo` flag. - -Use cases include: - -* Debugging issues. 
For example, to override an `http_archive` repository to a - local directory where you can make changes more easily. -* Vendoring. If you are in an environment where you cannot make network calls, - override the network-based repository rules to point to local directories - instead. - -Note: With [Bzlmod](/external/overview#bzlmod), remember to use canonical repo -names here. Alternatively, use the -[`--override_module`](/reference/command-line-reference#flag--override_module) -flag to override a module to a local directory, similar to the -[`local_path_override`](/rules/lib/globals/module#local_path_override) directive in -`MODULE.bazel`. - -## Using proxies - -Bazel picks up proxy addresses from the `HTTPS_PROXY` and `HTTP_PROXY` -environment variables and uses these to download `HTTP` and `HTTPS` files (if -specified). - -## Support for IPv6 - -On IPv6-only machines, Bazel can download dependencies with no changes. However, -on dual-stack IPv4/IPv6 machines Bazel follows the same convention as Java, -preferring IPv4 if enabled. In some situations, for example when the IPv4 -network cannot resolve/reach external addresses, this can cause `Network -unreachable` exceptions and build failures. In these cases, you can override -Bazel's behavior to prefer IPv6 by using the -[`java.net.preferIPv6Addresses=true` system -property](https://docs.oracle.com/javase/8/docs/api/java/net/doc-files/net-properties.html). -Specifically: - -* Use `--host_jvm_args=-Djava.net.preferIPv6Addresses=true` [startup - option](/docs/user-manual#startup-options), for example by adding the - following line in your [`.bazelrc` file](/run/bazelrc): - - `startup --host_jvm_args=-Djava.net.preferIPv6Addresses=true` - -* When running Java build targets that need to connect to the internet (such - as for integration tests), use the - `--jvmopt=-Djava.net.preferIPv6Addresses=true` [tool - flag](/docs/user-manual#jvmopt). For example, include in your [`.bazelrc` - file](/run/bazelrc): - - `build --jvmopt=-Djava.net.preferIPv6Addresses` - -* If you are using [`rules_jvm_external`](https://github.com/bazelbuild/rules_jvm_external) - for dependency version resolution, also add - `-Djava.net.preferIPv6Addresses=true` to the `COURSIER_OPTS` environment - variable to [provide JVM options for - Coursier](https://github.com/bazelbuild/rules_jvm_external#provide-jvm-options-for-coursier-with-coursier_opts). - -## Offline builds - -Sometimes you may wish to run a build offline, such as when traveling on an -airplane. For such simple use cases, prefetch the needed repositories with -`bazel fetch` or `bazel sync`. To disable fetching further repositories during -the build, use the option `--nofetch`. - -For true offline builds, where a different entity supplies all needed files, -Bazel supports the option `--distdir`. This flag tells Bazel to look first into -the directories specified by that option when a repository rule asks Bazel to -fetch a file with [`ctx.download`](/rules/lib/builtins/repository_ctx#download) or -[`ctx.download_and_extract`](/rules/lib/builtins/repository_ctx#download_and_extract). By -providing a hash sum of the file needed, Bazel looks for a file matching the -basename of the first URL, and uses the local copy if the hash matches. - -Bazel itself uses this technique to bootstrap offline from the [distribution -artifact](https://github.com/bazelbuild/bazel-website/blob/master/designs/_posts/2016-10-11-distribution-artifact.md). 
-It does so by [collecting all the needed external -dependencies](https://github.com/bazelbuild/bazel/blob/5cfa0303d6ac3b5bd031ff60272ce80a704af8c2/WORKSPACE#L116) -in an internal -[`distdir_tar`](https://github.com/bazelbuild/bazel/blob/5cfa0303d6ac3b5bd031ff60272ce80a704af8c2/distdir.bzl#L44). - -Bazel allows execution of arbitrary commands in repository rules without knowing -if they call out to the network, and so cannot enforce fully offline builds. To -test if a build works correctly offline, manually block off the network (as -Bazel does in its [bootstrap -test](https://cs.opensource.google/bazel/bazel/+/master:src/test/shell/bazel/BUILD;l=1073;drc=88c426e73cc0eb0a41c0d7995e36acd94e7c9a48)). diff --git a/8.0.1/external/lockfile.mdx b/8.0.1/external/lockfile.mdx deleted file mode 100644 index f2a75b2..0000000 --- a/8.0.1/external/lockfile.mdx +++ /dev/null @@ -1,277 +0,0 @@ -keywords: product:Bazel,lockfile,Bzlmod ---- -title: 'Bazel Lockfile' ---- - - - -The lockfile feature in Bazel enables the recording of specific versions or -dependencies of software libraries or packages required by a project. It -achieves this by storing the result of module resolution and extension -evaluation. The lockfile promotes reproducible builds, ensuring consistent -development environments. Additionally, it enhances build efficiency by allowing -Bazel to skip the parts of the resolution process that are unaffected by changes -in project dependencies. Furthermore, the lockfile improves stability by -preventing unexpected updates or breaking changes in external libraries, thereby -reducing the risk of introducing bugs. - -## Lockfile Generation - -The lockfile is generated under the workspace root with the name -`MODULE.bazel.lock`. It is created or updated during the build process, -specifically after module resolution and extension evaluation. Importantly, it -only includes dependencies that are included in the current invocation of the -build. - -When changes occur in the project that affect its dependencies, the lockfile is -automatically updated to reflect the new state. This ensures that the lockfile -remains focused on the specific set of dependencies required for the current -build, providing an accurate representation of the project's resolved -dependencies. - -## Lockfile Usage - -The lockfile can be controlled by the flag -[`--lockfile_mode`](/reference/command-line-reference#flag--lockfile_mode) to -customize the behavior of Bazel when the project state differs from the -lockfile. The available modes are: - -* `update` (Default): Use the information that is present in the lockfile to - skip downloads of known registry files and to avoid re-evaluating extensions - whose results are still up-to-date. If information is missing, it will - be added to the lockfile. In this mode, Bazel also avoids refreshing - mutable information, such as yanked versions, for dependencies that haven't - changed. -* `refresh`: Like `update`, but mutable information is always refreshed when - switching to this mode and roughly every hour while in this mode. -* `error`: Like `update`, but if any information is missing or out-of-date, - Bazel will fail with an error. This mode never changes the lockfile or - performs network requests during resolution. Module extensions that marked - themselves as `reproducible` may still perform network requests, but are - expected to always produce the same result. -* `off`: The lockfile is neither checked nor updated. 
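-
-For example, a CI job that should fail fast instead of silently re-resolving
-or updating dependencies could run in `error` mode (the invocation below is
-just an illustration):
-
-```sh
-bazel build //... --lockfile_mode=error
-```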
- -## Lockfile Benefits - -The lockfile offers several benefits and can be utilized in various ways: - -- **Reproducible builds.** By capturing the specific versions or dependencies - of software libraries, the lockfile ensures that builds are reproducible - across different environments and over time. Developers can rely on - consistent and predictable results when building their projects. - -- **Fast incremental resolutions.** The lockfile enables Bazel to avoid - downloading registry files that were already used in a previous build. - This significantly improves build efficiency, especially in scenarios where - resolution can be time-consuming. - -- **Stability and risk reduction.** The lockfile helps maintain stability by - preventing unexpected updates or breaking changes in external libraries. By - locking the dependencies to specific versions, the risk of introducing bugs - due to incompatible or untested updates is reduced. - -## Lockfile Contents - -The lockfile contains all the necessary information to determine whether the -project state has changed. It also includes the result of building the project -in the current state. The lockfile consists of two main parts: - -1. Hashes of all remote files that are inputs to module resolution. -2. For each module extension, the lockfile includes inputs that affect it, - represented by `bzlTransitiveDigest`, `usagesDigest` and other fields, as - well as the output of running that extension, referred to as - `generatedRepoSpecs` - -Here is an example that demonstrates the structure of the lockfile, along with -explanations for each section: - -```json -{ - "lockFileVersion": 10, - "registryFileHashes": { - "https://bcr.bazel.build/bazel_registry.json": "8a28e4af...5d5b3497", - "https://bcr.bazel.build/modules/foo/1.0/MODULE.bazel": "7cd0312e...5c96ace2", - "https://bcr.bazel.build/modules/foo/2.0/MODULE.bazel": "70390338... 9fc57589", - "https://bcr.bazel.build/modules/foo/2.0/source.json": "7e3a9adf...170d94ad", - "https://registry.mycorp.com/modules/foo/1.0/MODULE.bazel": "not found", - ... - }, - "selectedYankedVersions": { - "foo@2.0": "Yanked for demo purposes" - }, - "moduleExtensions": { - "//:extension.bzl%lockfile_ext": { - "general": { - "bzlTransitiveDigest": "oWDzxG/aLnyY6Ubrfy....+Jp6maQvEPxn0pBM=", - "usagesDigest": "aLmqbvowmHkkBPve05yyDNGN7oh7QE9kBADr3QIZTZs=", - ..., - "generatedRepoSpecs": { - "hello": { - "bzlFile": "@@//:extension.bzl", - ... - } - } - } - }, - "//:extension.bzl%lockfile_ext2": { - "os:macos": { - "bzlTransitiveDigest": "oWDzxG/aLnyY6Ubrfy....+Jp6maQvEPxn0pBM=", - "usagesDigest": "aLmqbvowmHkkBPve05y....yDNGN7oh7r3QIZTZs=", - ..., - "generatedRepoSpecs": { - "hello": { - "bzlFile": "@@//:extension.bzl", - ... - } - } - }, - "os:linux": { - "bzlTransitiveDigest": "eWDzxG/aLsyY3Ubrto....+Jp4maQvEPxn0pLK=", - "usagesDigest": "aLmqbvowmHkkBPve05y....yDNGN7oh7r3QIZTZs=", - ..., - "generatedRepoSpecs": { - "hello": { - "bzlFile": "@@//:extension.bzl", - ... - } - } - } - } - } -} -``` - -### Registry File Hashes - -The `registryFileHashes` section contains the hashes of all files from -remote registries accessed during module resolution. Since the resolution -algorithm is fully deterministic when given the same inputs and all remote -inputs are hashed, this ensures a fully reproducible resolution result while -avoiding excessive duplication of remote information in the lockfile. 
Note that
-this also requires recording when a particular registry didn't contain a certain
-module, but a registry with lower precedence did (see the "not found" entry in
-the example). This inherently mutable information can be updated via
-`bazel mod deps --lockfile_mode=refresh`.
-
-Bazel uses the hashes from the lockfile to look up registry files in the
-repository cache before downloading them, which speeds up subsequent
-resolutions.
-
-### Selected Yanked Versions
-
-The `selectedYankedVersions` section contains the yanked versions of modules
-that were selected by module resolution. Since this usually results in an error
-when trying to build, this section is only non-empty when yanked versions are
-explicitly allowed via `--allow_yanked_versions` or
-`BZLMOD_ALLOW_YANKED_VERSIONS`.
-
-This field is needed since, compared to module files, yanked version information
-is inherently mutable and thus can't be referenced by a hash. This information
-can be updated via `bazel mod deps --lockfile_mode=refresh`.
-
-### Module Extensions
-
-The `moduleExtensions` section is a map that includes only the extensions used
-in the current invocation or in previous invocations, while excluding any
-extensions that are no longer used. In other words, if an extension is not
-being used anymore across the dependency graph, it is removed from the
-`moduleExtensions` map.
-
-If an extension is independent of the operating system or architecture type,
-this section features only a single "general" entry. Otherwise, multiple
-entries are included, named after the OS, architecture, or both, with each
-corresponding to the result of evaluating the extension on those specifics.
-
-Each entry in the extension map corresponds to a used extension and is
-identified by its containing file and name. The corresponding value for each
-entry contains the relevant information associated with that extension:
-
-1. The `bzlTransitiveDigest` is the digest of the extension implementation
-   and the .bzl files transitively loaded by it.
-2. The `usagesDigest` is the digest of the _usages_ of the extension in the
-   dependency graph, which includes all tags.
-3. Further unspecified fields that track other inputs to the extension,
-   such as contents of files or directories it reads or environment
-   variables it uses.
-4. The `generatedRepoSpecs` encode the repositories created by the
-   extension with the current input.
-5. The optional `moduleExtensionMetadata` field contains metadata provided by
-   the extension such as whether certain repositories it created should be
-   imported via `use_repo` by the root module. This information powers the
-   `bazel mod tidy` command.
-
-Module extensions can opt out of being included in the lockfile by returning
-metadata with `reproducible = True`. By doing so, they promise that they will
-always create the same repositories when given the same inputs.
-
-## Best Practices
-
-To maximize the benefits of the lockfile feature, consider the following best
-practices:
-
-* Regularly update the lockfile to reflect changes in project dependencies or
-  configuration. This ensures that subsequent builds are based on the most
-  up-to-date and accurate set of dependencies. To lock down all extensions
-  at once, run `bazel mod deps --lockfile_mode=update`.
-
-* Include the lockfile in version control to facilitate collaboration and
-  ensure that all team members have access to the same lockfile, promoting
-  consistent development environments across the project.
- -* Use [`bazelisk`](/install/bazelisk) to run Bazel, and include a - `.bazelversion` file in version control that specifies the Bazel version - corresponding to the lockfile. Because Bazel itself is a dependency of - your build, the lockfile is specific to the Bazel version, and will - change even between [backwards compatible](/release/backward-compatibility) - Bazel releases. Using `bazelisk` ensures that all developers are using - a Bazel version that matches the lockfile. - -By following these best practices, you can effectively utilize the lockfile -feature in Bazel, leading to more efficient, reliable, and collaborative -software development workflows. - -## Merge Conflicts - -The lockfile format is designed to minimize merge conflicts, but they can still -happen. - -### Automatic Resolution - -Bazel provides a custom -[git merge driver](https://git-scm.com/docs/gitattributes#_defining_a_custom_merge_driver) -to help resolve these conflicts automatically. - -Set up the driver by adding this line to a `.gitattributes` file in the root of -your git repository: - -```gitattributes -# A custom merge driver for the Bazel lockfile. -# https://bazel.build/external/lockfile#automatic-resolution -MODULE.bazel.lock merge=bazel-lockfile-merge -``` - -Then each developer who wants to use the driver has to register it once by -following these steps: - -1. Install [jq](https://jqlang.github.io/jq/download/) (1.5 or higher). -2. Run the following commands: - -```bash -jq_script=$(curl https://raw.githubusercontent.com/bazelbuild/bazel/master/scripts/bazel-lockfile-merge.jq) -printf '%s\n' "${jq_script}" | less # to optionally inspect the jq script -git config --global merge.bazel-lockfile-merge.name "Merge driver for the Bazel lockfile (MODULE.bazel.lock)" -git config --global merge.bazel-lockfile-merge.driver "jq -s '${jq_script}' -- %O %A %B > %A.jq_tmp && mv %A.jq_tmp %A" -``` - -### Manual Resolution - -Simple merge conflicts in the `registryFileHashes` and `selectedYankedVersions` -fields can be safely resolved by keeping all the entries from both sides of the -conflict. - -Other types of merge conflicts should not be resolved manually. Instead: - -1. Restore the previous state of the lockfile - via `git reset MODULE.bazel.lock && git checkout MODULE.bazel.lock`. -2. Resolve any conflicts in the `MODULE.bazel` file. -3. Run `bazel mod deps` to update the lockfile. diff --git a/8.0.1/external/module.mdx b/8.0.1/external/module.mdx deleted file mode 100644 index 6a9cf13..0000000 --- a/8.0.1/external/module.mdx +++ /dev/null @@ -1,223 +0,0 @@ ---- -title: 'Bazel modules' ---- - - - -A Bazel **module** is a Bazel project that can have multiple versions, each of -which publishes metadata about other modules that it depends on. This is -analogous to familiar concepts in other dependency management systems, such as a -Maven *artifact*, an npm *package*, a Go *module*, or a Cargo *crate*. - -A module must have a `MODULE.bazel` file at its repo root. This file is the -module's manifest, declaring its name, version, list of direct dependencies, and -other information. For a basic example: - -```python -module(name = "my-module", version = "1.0") - -bazel_dep(name = "rules_cc", version = "0.0.1") -bazel_dep(name = "protobuf", version = "3.19.0") -``` - -See the [full list](/rules/lib/globals/module) of directives available in -`MODULE.bazel` files. 
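-
-For instance, beyond `module()` and `bazel_dep()`, a `MODULE.bazel` file often
-uses directives like the following (the module names and versions here are
-purely illustrative; overrides are covered in more detail below):
-
-```python
-# A dependency needed only for this module's own development and tests.
-bazel_dep(name = "rules_testing", version = "0.6.0", dev_dependency = True)
-
-# Force one specific version of a transitive dependency (root module only).
-single_version_override(
-    module_name = "protobuf",
-    version = "3.19.0",
-)
-```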
-
-To perform module resolution, Bazel starts by reading the root module's
-`MODULE.bazel` file, and then repeatedly requests any dependency's
-`MODULE.bazel` file from a [Bazel registry](/external/registry) until it
-discovers the entire dependency graph.
-
-By default, Bazel then [selects](#version-selection) one version of each module
-to use. Bazel represents each module with a repo, and consults the registry
-again to learn how to define each of the repos.
-
-## Version format
-
-Bazel has a diverse ecosystem and projects use various versioning schemes. The
-most popular by far is [SemVer](https://semver.org), but there are
-also prominent projects using different schemes such as
-[Abseil](https://github.com/abseil/abseil-cpp/releases), whose
-versions are date-based (for example, `20210324.2`).
-
-For this reason, Bzlmod adopts a more relaxed version of the SemVer spec. The
-differences include:
-
-* SemVer prescribes that the "release" part of the version must consist of 3
-  segments: `MAJOR.MINOR.PATCH`. In Bazel, this requirement is loosened so
-  that any number of segments is allowed.
-* In SemVer, each of the segments in the "release" part must be digits only.
-  In Bazel, this is loosened to allow letters too, and the comparison
-  semantics match the "identifiers" in the "prerelease" part.
-* Additionally, the semantics of major, minor, and patch version increases are
-  not enforced. However, see [compatibility level](#compatibility_level) for
-  details on how we denote backwards compatibility.
-
-Any valid SemVer version is a valid Bazel module version. Additionally, two
-SemVer versions `a` and `b` compare `a < b` if and only if the same holds when
-they're compared as Bazel module versions.
-
-## Version selection
-
-Consider the diamond dependency problem, a staple in the versioned dependency
-management space. Suppose you have the dependency graph:
-
-```
-       A 1.0
-      /     \
-   B 1.0    C 1.1
-     |        |
-   D 1.0    D 1.1
-```
-
-Which version of `D` should be used? To resolve this question, Bzlmod uses the
-[Minimal Version Selection](https://research.swtch.com/vgo-mvs)
-(MVS) algorithm introduced in the Go module system. MVS assumes that all new
-versions of a module are backwards compatible, and so picks the highest version
-specified by any dependent (`D 1.1` in our example). It's called "minimal"
-because `D 1.1` is the earliest version that could satisfy our requirements —
-even if `D 1.2` or newer exists, we don't select them. Using MVS creates a
-version selection process that is *high-fidelity* and *reproducible*.
-
-### Yanked versions
-
-The registry can declare certain versions as *yanked* if they should be avoided
-(such as for security vulnerabilities). Bazel throws an error when selecting a
-yanked version of a module. To fix this error, either upgrade to a newer,
-non-yanked version, or use the
-[`--allow_yanked_versions`](/reference/command-line-reference#flag--allow_yanked_versions)
-flag to explicitly allow the yanked version.
-
-## Compatibility level
-
-In Go, MVS's assumption about backwards compatibility works because it treats
-backwards incompatible versions of a module as a separate module. In terms of
-SemVer, that means `A 1.x` and `A 2.x` are considered distinct modules, and can
-coexist in the resolved dependency graph. This is, in turn, made possible by
-encoding the major version in the package path in Go, so there aren't any
-compile-time or linking-time conflicts.
- -Bazel, however, cannot provide such guarantees, so it needs the "major version" -number in order to detect backwards incompatible versions. This number is called -the *compatibility level*, and is specified by each module version in its -`module()` directive. With this information, Bazel can throw an error when it -detects that versions of the same module with different compatibility levels -exist in the resolved dependency graph. - -## Overrides - -Specify overrides in the `MODULE.bazel` file to alter the behavior of Bazel -module resolution. Only the root module's overrides take effect — if a module is -used as a dependency, its overrides are ignored. - -Each override is specified for a certain module name, affecting all of its -versions in the dependency graph. Although only the root module's overrides take -effect, they can be for transitive dependencies that the root module does not -directly depend on. - -### Single-version override - -The [`single_version_override`](/rules/lib/globals/module#single_version_override) -serves multiple purposes: - -* With the `version` attribute, you can pin a dependency to a specific - version, regardless of which versions of the dependency are requested in the - dependency graph. -* With the `registry` attribute, you can force this dependency to come from a - specific registry, instead of following the normal [registry - selection](/external/registry#selecting_registries) process. -* With the `patch*` attributes, you can specify a set of patches to apply to - the downloaded module. - -These attributes are all optional and can be mixed and matched with each other. - -### Multiple-version override - -A [`multiple_version_override`](/rules/lib/globals/module#multiple_version_override) -can be specified to allow multiple versions of the same module to coexist in the -resolved dependency graph. - -You can specify an explicit list of allowed versions for the module, which must -all be present in the dependency graph before resolution — there must exist -*some* transitive dependency depending on each allowed version. After -resolution, only the allowed versions of the module remain, while Bazel upgrades -other versions of the module to the nearest higher allowed version at the same -compatibility level. If no higher allowed version at the same compatibility -level exists, Bazel throws an error. - -For example, if versions `1.1`, `1.3`, `1.5`, `1.7`, and `2.0` exist in the -dependency graph before resolution and the major version is the compatibility -level: - -* A multiple-version override allowing `1.3`, `1.7`, and `2.0` results in - `1.1` being upgraded to `1.3`, `1.5` being upgraded to `1.7`, and other - versions remaining the same. -* A multiple-version override allowing `1.5` and `2.0` results in an error, as - `1.7` has no higher version at the same compatibility level to upgrade to. -* A multiple-version override allowing `1.9` and `2.0` results in an error, as - `1.9` is not present in the dependency graph before resolution. - -Additionally, users can also override the registry using the `registry` -attribute, similarly to single-version overrides. - -### Non-registry overrides - -Non-registry overrides completely remove a module from version resolution. Bazel -does not request these `MODULE.bazel` files from a registry, but instead from -the repo itself. 
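-
-For example, a root module can pin a dependency to a specific commit of a Git
-repository with `git_override`, one of the overrides listed below (the remote
-and commit shown are placeholders):
-
-```python
-git_override(
-    module_name = "rules_cc",
-    remote = "https://github.com/my-org/rules_cc.git",
-    commit = "0123456789abcdef0123456789abcdef01234567",
-)
-```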
-
-Bazel supports the following non-registry overrides:
-
-* [`archive_override`](/rules/lib/globals/module#archive_override)
-* [`git_override`](/rules/lib/globals/module#git_override)
-* [`local_path_override`](/rules/lib/globals/module#local_path_override)
-
-## Define repos that don't represent Bazel modules
-
-With `bazel_dep`, you can define repos that represent other Bazel modules.
-Sometimes there is a need to define a repo that does _not_ represent a Bazel
-module; for example, one that contains a plain JSON file to be read as data.
-
-In this case, you could use the [`use_repo_rule`
-directive](/rules/lib/globals/module#use_repo_rule) to directly define a repo
-by invoking a repo rule. This repo will only be visible to the module it's
-defined in.
-
-Under the hood, this is implemented using the same mechanism as [module
-extensions](/external/extension), which lets you define repos with more
-flexibility.
-
-## Repository names and strict deps
-
-The [apparent name](/external/overview#apparent-repo-name) of a repo backing a
-module to its direct dependents defaults to its module name, unless the
-`repo_name` attribute of the [`bazel_dep`](/rules/lib/globals/module#bazel_dep)
-directive says otherwise. Note that this means a module can only find its direct
-dependencies. This helps prevent accidental breakages due to changes in
-transitive dependencies.
-
-The [canonical name](/external/overview#canonical-repo-name) of a repo backing a
-module is either `module_name+version` (for example, `bazel_skylib+1.0.3`) or
-`module_name+` (for example, `bazel_features+`), depending on whether there are
-multiple versions of the module in the entire dependency graph (see
-[`multiple_version_override`](/rules/lib/globals/module#multiple_version_override)).
-Note that **the canonical name format** is not an API you should depend on and
-**is subject to change at any time**. Instead of hard-coding the canonical name,
-use a supported way to get it directly from Bazel:
-
-* In BUILD and `.bzl` files, use
-  [`Label.repo_name`](/rules/lib/builtins/Label#repo_name) on a `Label` instance
-  constructed from a label string given by the apparent name of the repo, e.g.,
-  `Label("@bazel_skylib").repo_name`.
-* When looking up runfiles, use
-  [`$(rlocationpath ...)`](https://bazel.build/reference/be/make-variables#predefined_label_variables)
-  or one of the runfiles libraries in
-  `@bazel_tools//tools/{bash,cpp,java}/runfiles` or, for a ruleset `rules_foo`,
-  in `@rules_foo//foo/runfiles`.
-* When interacting with Bazel from an external tool such as an IDE or language
-  server, use the `bazel mod dump_repo_mapping` command to get the mapping from
-  apparent names to canonical names for a given set of repositories.
-
-[Module extensions](/external/extension) can also introduce additional repos
-into the visible scope of a module.
diff --git a/8.0.1/help.mdx b/8.0.1/help.mdx
deleted file mode 100644
index b2976e6..0000000
--- a/8.0.1/help.mdx
+++ /dev/null
@@ -1,53 +0,0 @@
----
-title: 'Getting Help'
----
-
-
-
-This page lists Bazel resources beyond the documentation and covers how to get
-support from the Bazel team and community.
- -## Search existing material - -In addition to the documentation, you can find helpful information by searching: - -* [Bazel user group](https://groups.google.com/g/bazel-discuss) -* [Bazel GitHub Discussions](https://github.com/bazelbuild/bazel/discussions) -* [Bazel blog](https://blog.bazel.build/) -* [Stack Overflow](https://stackoverflow.com/questions/tagged/bazel) -* [`awesome-bazel` resources](https://github.com/jin/awesome-bazel) - -## Watch videos - -There are recordings of Bazel talks at various conferences, such as: - -* Bazel’s annual conference, BazelCon: - * [BazelCon 2023](https://www.youtube.com/playlist?list=PLxNYxgaZ8Rsefrwb_ySGRi_bvQejpO_Tj) - * [BazelCon 2022](https://youtube.com/playlist?list=PLxNYxgaZ8RsdH4GCIZ69dzxQCOPyuNlpF) - * [BazelCon 2021](https://www.youtube.com/playlist?list=PLxNYxgaZ8Rsc3auKhtfIB4qXAYf7whEux) - * [BazelCon 2020](https://www.youtube.com/playlist?list=PLxNYxgaZ8RseRybXNbopHRv6-wGmFr04n) - * [BazelCon 2019](https://youtu.be/eymphDN7No4?t=PLxNYxgaZ8Rsf-7g43Z8LyXct9ax6egdSj) - * [BazelCon 2018](https://youtu.be/DVYRg6b2UBo?t=PLxNYxgaZ8Rsd3Nmvl1W1B4I6nK1674ezp) - * [BazelCon 2017](https://youtu.be/3eFllvz8_0k?t=PLxNYxgaZ8RseY0KmkXQSt0StE71E7yizG) -* Bazel day on [Google Open Source Live](https://opensourcelive.withgoogle.com/events/bazel) - - -## Ask the Bazel community - -If there are no existing answers, you can ask the community by: - -* Emailing the [Bazel user group](https://groups.google.com/g/bazel-discuss) -* Starting a discussion on [GitHub](https://github.com/bazelbuild/bazel/discussions) -* Asking a question on [Stack Overflow](https://stackoverflow.com/questions/tagged/bazel) -* Chatting with other Bazel contributors on [Slack](https://slack.bazel.build/) -* Consulting a [Bazel community expert](/community/experts) - -## Understand Bazel's support level - -Please read the [release page](/release) to understand Bazel's release model and -what level of support Bazel provides. - -## File a bug - -If you encounter a bug or want to request a feature, file a [GitHub -Issue](https://github.com/bazelbuild/bazel/issues). diff --git a/8.0.1/install/bazelisk.mdx b/8.0.1/install/bazelisk.mdx deleted file mode 100644 index a3189cb..0000000 --- a/8.0.1/install/bazelisk.mdx +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: 'Installing / Updating Bazel using Bazelisk' ---- - - - -## Installing Bazel - -[Bazelisk](https://github.com/bazelbuild/bazelisk) is the -recommended way to install Bazel on Ubuntu, Windows, and macOS. It automatically -downloads and installs the appropriate version of Bazel. Use Bazelisk if you -need to switch between different versions of Bazel depending on the current -working directory, or to always keep Bazel updated to the latest release. - -For more details, see -[the official README](https://github.com/bazelbuild/bazelisk/blob/master/README.md). - -## Updating Bazel - -Bazel has a [backward compatibility policy](/release/backward-compatibility) -(see [guidance for rolling out incompatible -changes](/contribute/breaking-changes) if you -are the author of one). That page summarizes best practices on how to test and -migrate your project with upcoming incompatible changes and how to provide -feedback to the incompatible change authors. - -### Managing Bazel versions with Bazelisk - -[Bazelisk](https://github.com/bazelbuild/bazelisk) helps you manage -Bazel versions. - -Bazelisk can: - -* Auto-update Bazel to the latest LTS or rolling release. -* Build the project with a Bazel version specified in the .bazelversion - file. 
Check that file into your version control to ensure the reproducibility
-  of your builds.
-* Help migrate your project for incompatible changes (see above).
-* Easily try release candidates.
-
-### Recommended migration process
-
-Within minor updates to any LTS release, any
-project can be prepared for the next release without breaking
-compatibility with the current release. However, there may be
-backward-incompatible changes between major LTS versions.
-
-Follow this process to migrate from one major version to another:
-
-1. Read the release notes to get advice on how to migrate to the next version.
-1. Major incompatible changes should have an associated `--incompatible_*` flag
-   and a corresponding GitHub issue:
-    * Migration guidance is available in the associated GitHub issue.
-    * Tooling is available for migrating some incompatible changes. For
-      example, [buildifier](https://github.com/bazelbuild/buildtools/releases).
-    * Report migration problems by commenting on the associated GitHub issue.
-
-After migration, you can continue to build your projects without worrying about
-backward-compatibility until the next major release.
diff --git a/8.0.1/install/compile-source.mdx b/8.0.1/install/compile-source.mdx
deleted file mode 100644
index a228b22..0000000
--- a/8.0.1/install/compile-source.mdx
+++ /dev/null
@@ -1,299 +0,0 @@
----
-title: 'Compiling Bazel from Source'
----
-
-
-
-This page describes how to install Bazel from source and provides
-troubleshooting tips for common issues.
-
-To build Bazel from source, you can do one of the following:
-
-* Build it [using an existing Bazel binary](#build-bazel-using-bazel)
-
-* Build it [without an existing Bazel binary](#bootstrap-bazel), which is known
-  as _bootstrapping_.
-
-## Build Bazel using Bazel
-
-### Summary
-
-1. Get the latest Bazel release from the
-   [GitHub release page](https://github.com/bazelbuild/bazel/releases) or with
-   [Bazelisk](https://github.com/bazelbuild/bazelisk).
-
-2. [Download Bazel's sources from GitHub](https://github.com/bazelbuild/bazel/archive/master.zip)
-   and extract them somewhere.
-   Alternatively, you can git clone the source tree from https://github.com/bazelbuild/bazel.
-
-3. Install the same prerequisites as for bootstrapping (see
-   [for Unix-like systems](#bootstrap-unix-prereq) or
-   [for Windows](#bootstrap-windows-prereq)).
-
-4. Build a development build of Bazel using Bazel:
-   `bazel build //src:bazel-dev` (or `bazel build //src:bazel-dev.exe` on
-   Windows).
-
-5. The resulting binary is at `bazel-bin/src/bazel-dev`
-   (or `bazel-bin\src\bazel-dev.exe` on Windows). You can copy it wherever you
-   like and use it immediately without further installation.
-
-Detailed instructions follow below.
-
-### Step 1: Get the latest Bazel release
-
-**Goal**: Install or download a release version of Bazel. Make sure you can run
-it by typing `bazel` in a terminal.
-
-**Reason**: To build Bazel from a GitHub source tree, you need a pre-existing
-Bazel binary. You can install one from a package manager or download one from
-GitHub. See [Installing Bazel](/install). (Or you can [build from
-scratch (bootstrap)](#bootstrap-bazel).)
-
-**Troubleshooting**:
-
-* If you cannot run Bazel by typing `bazel` in a terminal:
-
-    * Maybe your Bazel binary's directory is not on the PATH.
-
-      This is not a big problem. Instead of typing `bazel`, you will need to
-      type the full path.
-
-    * Maybe the Bazel binary itself is not called `bazel` (on Unixes) or
-      `bazel.exe` (on Windows).
-
-      This is not a big problem.
You can either rename the binary, or type the - binary's name instead of `bazel`. - - * Maybe the binary is not executable (on Unixes). - - You must make the binary executable by running `chmod +x /path/to/bazel`. - -### Step 2: Download Bazel's sources from GitHub - -If you are familiar with Git, then just git clone https://github.com/bazelbuild/bazel - -Otherwise: - -1. Download the - [latest sources as a zip file](https://github.com/bazelbuild/bazel/archive/master.zip). - -2. Extract the contents somewhere. - - For example create a `bazel-src` directory under your home directory and - extract there. - -### Step 3: Install prerequisites - -Install the same prerequisites as for bootstrapping (see below) -- JDK, C++ -compiler, MSYS2 (if you are building on Windows), etc. - -### Step 4a: Build Bazel on Ubuntu Linux, macOS, and other Unix-like systems - -For instructions for Windows, see [Build Bazel on Windows](#build-bazel-on-windows). - -**Goal**: Run Bazel to build a custom Bazel binary (`bazel-bin/src/bazel-dev`). - -**Instructions**: - -1. Start a Bash terminal - -2. `cd` into the directory where you extracted (or cloned) Bazel's sources. - - For example if you extracted the sources under your home directory, run: - - cd ~/bazel-src - -3. Build Bazel from source: - - bazel build //src:bazel-dev - - Alternatively you can run `bazel build //src:bazel --compilation_mode=opt` - to yield a smaller binary but it's slower to build. - - You can build with `--stamp --embed_label=X.Y.Z` flag to embed a Bazel - version for the binary so that `bazel --version` outputs the given version. - -4. The output will be at `bazel-bin/src/bazel-dev` (or `bazel-bin/src/bazel`). - -### Step 4b: Build Bazel on Windows - -For instructions for Unix-like systems, see -[Ubuntu Linux, macOS, and other Unix-like systems](#build-bazel-on-unixes). - -**Goal**: Run Bazel to build a custom Bazel binary -(`bazel-bin\src\bazel-dev.exe`). - -**Instructions**: - -1. Start Command Prompt (Start Menu > Run > "cmd.exe") - -2. `cd` into the directory where you extracted (or cloned) Bazel's sources. - - For example if you extracted the sources under your home directory, run: - - cd %USERPROFILE%\bazel-src - -3. Build Bazel from source: - - bazel build //src:bazel-dev.exe - - Alternatively you can run `bazel build //src:bazel.exe - --compilation_mode=opt` to yield a smaller binary but it's slower to build. - - You can build with `--stamp --embed_label=X.Y.Z` flag to embed a Bazel - version for the binary so that `bazel --version` outputs the given version. - -4. The output will be at `bazel-bin\src\bazel-dev.exe` (or - `bazel-bin\src\bazel.exe`). - -### Step 5: Install the built binary - -Actually, there's nothing to install. - -The output of the previous step is a self-contained Bazel binary. You can copy -it to any directory and use immediately. (It's useful if that directory is on -your PATH so that you can run "bazel" everywhere.) - ---- - -## Build Bazel from scratch (bootstrapping) - -You can also build Bazel from scratch, without using an existing Bazel binary. - -### Step 1: Download Bazel's sources (distribution archive) - -(This step is the same for all platforms.) - -1. Download `bazel--dist.zip` from - [GitHub](https://github.com/bazelbuild/bazel/releases), for example - `bazel-0.28.1-dist.zip`. - - **Attention**: - - - There is a **single, architecture-independent** distribution archive. - There are no architecture-specific or OS-specific distribution archives. 
-
-    - These sources are **not the same as the GitHub source tree**. You
-      have to use the distribution archive to bootstrap Bazel. You cannot
-      use a source tree cloned from GitHub. (The distribution archive contains
-      generated source files that are required for bootstrapping and are not part
-      of the normal Git source tree.)
-
-2. Unpack the distribution archive somewhere on disk.
-
-   You should verify the signature made by Bazel's
-   [release key](https://bazel.build/bazel-release.pub.gpg) 3D5919B448457EE0.
-
-### Step 2a: Bootstrap Bazel on Ubuntu Linux, macOS, and other Unix-like systems
-
-For instructions for Windows, see [Bootstrap Bazel on Windows](#bootstrap-windows).
-
-#### 2.1. Install the prerequisites
-
-* **Bash**
-
-* **zip, unzip**
-
-* **C++ build toolchain**
-
-* **JDK.** Version 21 is required.
-
-* **Python**. Versions 2 and 3 are supported; installing one of them is
-  enough.
-
-For example on Ubuntu Linux you can install these requirements using the
-following command:
-
-```sh
-sudo apt-get install build-essential openjdk-21-jdk python zip unzip
-```
-
-#### 2.2. Bootstrap Bazel on Unix
-
-1. Open a shell or Terminal window.
-
-2. `cd` to the directory where you unpacked the distribution archive.
-
-3. Run the compilation script: `env EXTRA_BAZEL_ARGS="--tool_java_runtime_version=local_jdk" bash ./compile.sh`.
-
-The compiled output is placed into `output/bazel`. This is a self-contained
-Bazel binary, without an embedded JDK. You can copy it anywhere or use it
-in-place. For convenience, copy this binary to a directory that's on your
-`PATH` (such as `/usr/local/bin` on Linux).
-
-To build the `bazel` binary in a reproducible way, also set
-[`SOURCE_DATE_EPOCH`](https://reproducible-builds.org/specs/source-date-epoch/)
-in the "Run the compilation script" step.
-
-### Step 2b: Bootstrap Bazel on Windows
-
-For instructions for Unix-like systems, see
-[Bootstrap Bazel on Ubuntu Linux, macOS, and other Unix-like systems](#bootstrap-unix).
-
-#### 2.1. Install the prerequisites
-
-* [MSYS2 shell](https://msys2.github.io/)
-
-* **The MSYS2 packages for zip and unzip.** Run the following command in the MSYS2 shell:
-
-  ```
-  pacman -S zip unzip patch
-  ```
-
-* **The Visual C++ compiler.** Install the Visual C++ compiler either as part
-  of Visual Studio 2015 or newer, or by installing the latest [Build Tools
-  for Visual Studio 2017](https://aka.ms/BuildTools).
-
-* **JDK.** Version 21 is required.
-
-* **Python**. Versions 2 and 3 are supported; installing one of them is
-  enough. You need the Windows-native version (downloadable from
-  [https://www.python.org](https://www.python.org)). Versions installed via
-  pacman in MSYS2 will not work.
-
-#### 2.2. Bootstrap Bazel on Windows
-
-1. Open the MSYS2 shell.
-
-2. Set the following environment variables:
-    * Either `BAZEL_VS` or `BAZEL_VC` (they are *not* the same): Set to the
-      path to the Visual Studio directory (BAZEL\_VS) or to the Visual
-      C++ directory (BAZEL\_VC). Setting one of them is enough.
-    * `BAZEL_SH`: Path of the MSYS2 `bash.exe`. See the command in the
-      examples below.
-
-      Do not set this to `C:\Windows\System32\bash.exe`. (You have that file
-      if you installed Windows Subsystem for Linux.) Bazel does not support
-      this version of `bash.exe`.
-    * `PATH`: Add the Python directory.
-    * `JAVA_HOME`: Set to the JDK directory.
-
-   **Example** (using BAZEL\_VS):
-
-        export BAZEL_VS="C:/Program Files (x86)/Microsoft Visual Studio/2017/BuildTools"
-        export BAZEL_SH="$(cygpath -m $(realpath $(which bash)))"
-        export PATH="/c/python27:$PATH"
-        export JAVA_HOME="C:/Program Files/Java/jdk-21"
-
-   or (using BAZEL\_VC):
-
-        export BAZEL_VC="C:/Program Files (x86)/Microsoft Visual Studio/2017/BuildTools/VC"
-        export BAZEL_SH="$(cygpath -m $(realpath $(which bash)))"
-        export PATH="/c/python27:$PATH"
-        export JAVA_HOME="C:/Program Files/Java/jdk-21"
-
-3. `cd` to the directory where you unpacked the distribution archive.
-
-4. Run the compilation script: `env EXTRA_BAZEL_ARGS="--tool_java_runtime_version=local_jdk" ./compile.sh`
-
-The compiled output is placed into `output/bazel.exe`. This is a self-contained
-Bazel binary, without an embedded JDK. You can copy it anywhere or use it
-in-place. For convenience, copy this binary to a directory that's on
-your `PATH`.
-
-To build the `bazel.exe` binary in a reproducible way, also set
-[`SOURCE_DATE_EPOCH`](https://reproducible-builds.org/specs/source-date-epoch/)
-in the "Run the compilation script" step.
-
-You don't need to run Bazel from the MSYS2 shell. You can run Bazel from the
-Command Prompt (`cmd.exe`) or PowerShell.
diff --git a/8.0.1/install/completion.mdx b/8.0.1/install/completion.mdx
deleted file mode 100644
index 856784c..0000000
--- a/8.0.1/install/completion.mdx
+++ /dev/null
@@ -1,129 +0,0 @@
----
-title: 'Command-Line Completion'
----
-
-
-
-You can enable command-line completion (also known as tab-completion) in Bash
-and Zsh. This lets you tab-complete command names, flag names, flag values,
-and target names.
-
-## Bash
-
-Bazel comes with a Bash completion script.
-
-If you installed Bazel:
-
-* From the APT repository, then you're done -- the Bash completion script is
-  already installed in `/etc/bash_completion.d`.
-
-* From Homebrew, then you're done -- the Bash completion script is
-  already installed in `$(brew --prefix)/etc/bash_completion.d`.
-
-* From the installer downloaded from GitHub, then:
-    1. Locate the absolute path of the completion file. The installer copied it
-       to the `bin` directory.
-
-       Example: if you ran the installer with `--user`, this will be
-       `$HOME/.bazel/bin`. If you ran the installer as root, this will be
-       `/usr/local/lib/bazel/bin`.
-    2. Do one of the following:
-        * Either copy this file to your completion directory (if you have
-          one).
-
-          Example: on Ubuntu this is the `/etc/bash_completion.d` directory.
-        * Or source the completion file from Bash's RC file.
-
-          Add a line similar to the one below to your `~/.bashrc` (on Ubuntu)
-          or `~/.bash_profile` (on macOS), using your completion
-          file's absolute path:
-
-          ```
-          source /path/to/bazel-complete.bash
-          ```
-
-* Via [bootstrapping](/install/compile-source), then:
-    1. Build the completion script:
-
-       ```
-       bazel build //scripts:bazel-complete.bash
-       ```
-    2. The completion file is built under
-       `bazel-bin/scripts/bazel-complete.bash`.
-
-       Do one of the following:
-        * Copy this file to your completion directory, if you have
-          one.
-
-          Example: on Ubuntu this is the `/etc/bash_completion.d` directory.
-        * Copy it somewhere on your local disk, such as to `$HOME`, and
-          source the completion file from Bash's RC file.
- - Add a line similar to the one below to your `~/.bashrc` (on Ubuntu) - or `~/.bash_profile` (on macOS), using the path to your completion - file's absolute path: - - ``` - source /path/to/bazel-complete.bash - ``` - -## Zsh - -Bazel comes with a Zsh completion script. - -If you installed Bazel: - -* From the APT repository, then you're done -- the Zsh completion script is - already installed in `/usr/share/zsh/vendor-completions`. - - > If you have a heavily customized `.zshrc` and the autocomplete - > does not function, try one of the following solutions: - > - > Add the following to your `.zshrc`: - > - > ``` - > zstyle :compinstall filename '/home/tradical/.zshrc' - > - > autoload -Uz compinit - > compinit - > ``` - > - > or - > - > Follow the instructions - > [here](https://stackoverflow.com/questions/58331977/bazel-tab-auto-complete-in-zsh-not-working) - > - > If you are using `oh-my-zsh`, you may want to install and enable - > the `zsh-autocomplete` plugin. If you'd prefer not to, use one of the - > solutions described above. - -* From Homebrew, then you're done -- the Zsh completion script is - already installed in `$(brew --prefix)/share/zsh/site-functions`. - -* From the installer downloaded from GitHub, then: - 1. Locate the absolute path of the completion file. The installer copied it - to the `bin` directory. - - Example: if you ran the installer with `--user`, this will be - `$HOME/.bazel/bin`. If you ran the installer as root, this will be - `/usr/local/lib/bazel/bin`. - - 2. Add this script to a directory on your `$fpath`: - - ``` - fpath[1,0]=~/.zsh/completion/ - mkdir -p ~/.zsh/completion/ - cp /path/from/above/step/_bazel ~/.zsh/completion - ``` - - You may have to call `rm -f ~/.zcompdump; compinit` - the first time to make it work. - - 3. Optionally, add the following to your .zshrc. - - ``` - # This way the completion script does not have to parse Bazel's options - # repeatedly. The directory in cache-path must be created manually. - zstyle ':completion:*' use-cache on - zstyle ':completion:*' cache-path ~/.zsh/cache - ``` diff --git a/8.0.1/install/docker-container.mdx b/8.0.1/install/docker-container.mdx deleted file mode 100644 index 3a5d017..0000000 --- a/8.0.1/install/docker-container.mdx +++ /dev/null @@ -1,135 +0,0 @@ ---- -title: 'Getting Started with Bazel Docker Container' ---- - - - -This page provides details on the contents of the Bazel container, how to build -the [abseil-cpp](https://github.com/abseil/abseil-cpp) project using Bazel -inside the Bazel container, and how to build this project directly -from the host machine using the Bazel container with directory mounting. - -## Build Abseil project from your host machine with directory mounting - -The instructions in this section allow you to build using the Bazel container -with the sources checked out in your host environment. A container is started up -for each build command you execute. Build results are cached in your host -environment so they can be reused across builds. - -Clone the project to a directory in your host machine. - -```posix-terminal -git clone --depth 1 --branch 20220623.1 https://github.com/abseil/abseil-cpp.git /src/workspace -``` - -Create a folder that will have cached results to be shared across builds. - -```posix-terminal -mkdir -p /tmp/build_output/ -``` - -Use the Bazel container to build the project and make the build -outputs available in the output folder in your host machine. 
-
-```posix-terminal
-docker run \
-  -e USER="$(id -u)" \
-  -u="$(id -u)" \
-  -v /src/workspace:/src/workspace \
-  -v /tmp/build_output:/tmp/build_output \
-  -w /src/workspace \
-  gcr.io/bazel-public/bazel:latest \
-  --output_user_root=/tmp/build_output \
-  build //absl/...
-```
-
-Build the project with sanitizers by adding the `--config=asan|tsan|msan` build
-flag to select AddressSanitizer (asan), ThreadSanitizer (tsan) or
-MemorySanitizer (msan) accordingly.
-
-```posix-terminal
-docker run \
-  -e USER="$(id -u)" \
-  -u="$(id -u)" \
-  -v /src/workspace:/src/workspace \
-  -v /tmp/build_output:/tmp/build_output \
-  -w /src/workspace \
-  gcr.io/bazel-public/bazel:latest \
-  --output_user_root=/tmp/build_output \
-  build --config={asan | tsan | msan} -- //absl/... -//absl/types:variant_test
-```
-
-## Build Abseil project from inside the container
-
-The instructions in this section allow you to build using the Bazel container
-with the sources inside the container. By starting a container at the beginning
-of your development workflow and making changes in the workspace within the
-container, build results will be cached.
-
-Start a shell in the Bazel container:
-
-```posix-terminal
-docker run --interactive --entrypoint=/bin/bash gcr.io/bazel-public/bazel:latest
-```
-
-Each container id is unique. In the examples below, the container id is 5a99103747c6.
-
-Clone the project.
-
-```posix-terminal
-ubuntu@5a99103747c6:~$ git clone --depth 1 --branch 20220623.1 https://github.com/abseil/abseil-cpp.git && cd abseil-cpp/
-```
-
-Do a regular build.
-
-```posix-terminal
-ubuntu@5a99103747c6:~/abseil-cpp$ bazel build //absl/...
-```
-
-Build the project with sanitizers by adding the `--config=asan|tsan|msan`
-build flag to select AddressSanitizer (asan), ThreadSanitizer (tsan) or
-MemorySanitizer (msan) accordingly.
-
-```posix-terminal
-ubuntu@5a99103747c6:~/abseil-cpp$ bazel build --config={asan | tsan | msan} -- //absl/... -//absl/types:variant_test
-```
-
-## Explore the Bazel container
-
-If you haven't already, start an interactive shell inside the Bazel container.
-
-```posix-terminal
-docker run -it --entrypoint=/bin/bash gcr.io/bazel-public/bazel:latest
-ubuntu@5a99103747c6:~$
-```
-
-Explore the container contents.
-
-```posix-terminal
-ubuntu@5a99103747c6:~$ gcc --version
-gcc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0
-Copyright (C) 2019 Free Software Foundation, Inc.
-This is free software; see the source for copying conditions. There is NO
-warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-
-ubuntu@5a99103747c6:~$ java -version
-openjdk version "1.8.0_362"
-OpenJDK Runtime Environment (build 1.8.0_362-8u372-ga~us1-0ubuntu1~20.04-b09)
-OpenJDK 64-Bit Server VM (build 25.362-b09, mixed mode)
-
-ubuntu@5a99103747c6:~$ python -V
-Python 3.8.10
-
-ubuntu@5a99103747c6:~$ bazel version
-WARNING: Invoking Bazel in batch mode since it is not invoked from within a workspace (below a directory having a WORKSPACE file).
-Extracting Bazel installation...
-Build label: 6.2.1
-Build target: bazel-out/k8-opt/bin/src/main/java/com/google/devtools/build/lib/bazel/BazelServer_deploy.jar
-Build time: Fri Jun 2 16:59:58 2023 (1685725198)
-Build timestamp: 1685725198
-Build timestamp as int: 1685725198
-```
-
-## Explore the Bazel Dockerfile
-
-If you want to check how the Bazel Docker image is built, you can find its Dockerfile at [bazelbuild/continuous-integration/bazel/oci](https://github.com/bazelbuild/continuous-integration/tree/master/bazel/oci).
diff --git a/8.0.1/install/ide.mdx b/8.0.1/install/ide.mdx
deleted file mode 100644
index f70919b..0000000
--- a/8.0.1/install/ide.mdx
+++ /dev/null
@@ -1,122 +0,0 @@
----
-title: 'Integrating Bazel with IDEs'
----
-
-
-
-This page covers how to integrate Bazel with IDEs, such as IntelliJ, Android
-Studio, and CLion (or build your own IDE plugin). It also includes links to
-installation and plugin details.
-
-IDEs integrate with Bazel in a variety of ways, from features that allow Bazel
-executions from within the IDE to awareness of Bazel structures such as syntax
-highlighting of `BUILD` files.
-
-If you are interested in developing an editor or IDE plugin for Bazel, please
-join the `#ide` channel on the [Bazel Slack](https://slack.bazel.build) or start
-a discussion on [GitHub](https://github.com/bazelbuild/bazel/discussions).
-
-## IDEs and editors
-
-### IntelliJ, Android Studio, and CLion
-
-[Official plugin](http://ij.bazel.build) for IntelliJ, Android Studio, and
-CLion. The plugin is [open source](https://github.com/bazelbuild/intellij).
-
-This is the open source version of the plugin used internally at Google.
-
-Features:
-
-* Interop with language-specific plugins. Supported languages include Java,
-  Scala, and Python.
-* Import `BUILD` files into the IDE with semantic awareness of Bazel targets.
-* Make your IDE aware of Starlark, the language used for Bazel's `BUILD` and
-  `.bzl` files.
-* Build, test, and execute binaries directly from the IDE.
-* Create configurations for debugging and running binaries.
-
-To install, go to the IDE's plugin browser and search for `Bazel`.
-
-To manually install older versions, download the zip files from JetBrains'
-Plugin Repository and install the zip file from the IDE's plugin browser:
-
-* [Android Studio
-  plugin](https://plugins.jetbrains.com/plugin/9185-android-studio-with-bazel)
-* [IntelliJ
-  plugin](https://plugins.jetbrains.com/plugin/8609-intellij-with-bazel)
-* [CLion plugin](https://plugins.jetbrains.com/plugin/9554-clion-with-bazel)
-
-### Xcode
-
-[rules_xcodeproj](https://github.com/buildbuddy-io/rules_xcodeproj),
-[Tulsi](https://tulsi.bazel.build), and
-[XCHammer](https://github.com/pinterest/xchammer) generate Xcode
-projects from Bazel `BUILD` files.
-
-### Visual Studio Code
-
-Official plugin for VS Code.
-
-Features:
-
-* Bazel Build Targets tree
-* Starlark debugger for `.bzl` files during a build (set breakpoints, step
-  through code, inspect variables, and so on)
-
-Find [the plugin on the Visual Studio
-marketplace](https://marketplace.visualstudio.com/items?itemName=BazelBuild.vscode-bazel).
-The plugin is [open source](https://github.com/bazelbuild/vscode-bazel).
-
-See also: [Autocomplete for Source Code](#autocomplete-for-source-code)
-
-### Atom
-
-Find the [`language-bazel` package](https://atom.io/packages/language-bazel)
-on the Atom package manager.
- -See also: [Autocomplete for Source Code](#autocomplete-for-source-code) - -### Vim - -See [`bazelbuild/vim-bazel` on GitHub](https://github.com/bazelbuild/vim-bazel) - -See also: [Autocomplete for Source Code](#autocomplete-for-source-code) - -### Emacs - -See [`bazelbuild/bazel-emacs-mode` on -GitHub](https://github.com/bazelbuild/emacs-bazel-mode) - -See also: [Autocomplete for Source Code](#autocomplete-for-source-code) - -### Visual Studio - -[Lavender](https://github.com/tmandry/lavender) is an experimental project for -generating Visual Studio projects that use Bazel for building. - -### Eclipse - -[Bazel Eclipse Feature](https://github.com/salesforce/bazel-eclipse) -is a set of plugins for importing Bazel packages into an Eclipse workspace as -Eclipse projects. - -## Autocomplete for Source Code - -### C Language Family (C++, C, Objective-C, and Objective-C++) - -[`hedronvision/bazel-compile-commands-extractor`](https://github.com/hedronvision/bazel-compile-commands-extractor) enables autocomplete, smart navigation, quick fixes, and more in a wide variety of extensible editors, including VSCode, Vim, Emacs, Atom, and Sublime. It lets language servers, like clangd and ccls, and other types of tooling, draw upon Bazel's understanding of how `cc` and `objc` code will be compiled, including how it configures cross-compilation for other platforms. - -### Java - -[`georgewfraser/java-language-server`](https://github.com/georgewfraser/java-language-server) - Java Language Server (LSP) with support for Bazel-built projects - -## Automatically run build and test on file change - -[Bazel watcher](https://github.com/bazelbuild/bazel-watcher) is a -tool for building Bazel targets when source files change. - -## Building your own IDE plugin - -Read the [**IDE support** blog -post](https://blog.bazel.build/2016/06/10/ide-support.html) to learn more about -the Bazel APIs to use when building an IDE plugin. diff --git a/8.0.1/install/index.mdx b/8.0.1/install/index.mdx deleted file mode 100644 index 10f53c4..0000000 --- a/8.0.1/install/index.mdx +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: 'Installing Bazel' ---- - - - -This page describes the various platforms supported by Bazel and links -to the packages for more details. - -[Bazelisk](/install/bazelisk) is the recommended way to install Bazel on [Ubuntu Linux](/install/ubuntu), [macOS](/install/os-x), and [Windows](/install/windows). - -You can find available Bazel releases on our [release page](/release). - -## Community-supported packages - -Bazel community members maintain these packages. The Bazel team doesn't -officially support them. Contact the package maintainers for support. 
-
-* [Arch Linux][arch]
-* [CentOS 6](https://github.com/sub-mod/bazel-builds)
-* [Debian](https://qa.debian.org/developer.php?email=team%2Bbazel%40tracker.debian.org)
-* [FreeBSD](https://www.freshports.org/devel/bazel)
-* [Gentoo](https://packages.gentoo.org/packages/dev-util/bazel)
-* [Homebrew](https://formulae.brew.sh/formula/bazel)
-* [Nixpkgs](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/tools/build-managers/bazel)
-* [openSUSE](/install/suse)
-* [Parabola](https://www.parabola.nu/packages/?q=bazel)
-* [Scoop](https://github.com/scoopinstaller/scoop-main/blob/master/bucket/bazel.json)
-* [Raspberry Pi](https://github.com/koenvervloesem/bazel-on-arm/blob/master/README.md)
-
-## Community-supported architectures
-
-* [ppc64el](https://ftp2.osuosl.org/pub/ppc64el/bazel/)
-
-For other platforms, you can try to [compile from source](/install/compile-source).
-
-[arch]: https://archlinux.org/packages/extra/x86_64/bazel/
diff --git a/8.0.1/install/os-x.mdx b/8.0.1/install/os-x.mdx
deleted file mode 100644
index 9a0f3f8..0000000
--- a/8.0.1/install/os-x.mdx
+++ /dev/null
@@ -1,141 +0,0 @@
----
-title: 'Installing Bazel on macOS'
----
-
-
-
-This page describes how to install Bazel on macOS and set up your environment.
-
-You can install Bazel on macOS using one of the following methods:
-
-* *Recommended*: [Use Bazelisk](/install/bazelisk)
-* [Use Homebrew](#install-on-mac-os-x-homebrew)
-* [Use the binary installer](#install-with-installer-mac-os-x)
-* [Compile Bazel from source](/install/compile-source)
-
-Bazel comes with two completion scripts. After installing Bazel, you can:
-
-* Access the [bash completion script](/install/completion#bash)
-* Install the [zsh completion script](/install/completion#zsh)
-
-<h2 id="install-on-mac-os-x-homebrew">Installing using Homebrew</h2>
-
-### Step 1: Install Homebrew on macOS
-
-Install [Homebrew](https://brew.sh/) (a one-time step):
-
-```posix-terminal
-/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
-```
-
-### Step 2: Install Bazel via Homebrew
-
-Install the Bazel package via Homebrew as follows:
-
-```posix-terminal
-brew install bazel
-```
-
-All set! You can confirm Bazel is installed successfully by running the
-following command:
-
-```posix-terminal
-bazel --version
-```
-
-Once installed, you can upgrade to a newer version of Bazel using the
-following command:
-
-```posix-terminal
-brew upgrade bazel
-```
-
-<h2 id="install-with-installer-mac-os-x">Installing using the binary installer</h2>
-
-The binary installers are on Bazel's
-[GitHub releases page](https://github.com/bazelbuild/bazel/releases).
-
-The installer contains the Bazel binary. Some additional libraries
-must also be installed for Bazel to work.
-
-### Step 1: Install Xcode command line tools
-
-If you don't intend to use `ios_*` rules, it is sufficient to install the Xcode
-command line tools package by using `xcode-select`:
-
-```posix-terminal
-xcode-select --install
-```
-
-Otherwise, for `ios_*` rule support, you must have Xcode 6.1 or later with iOS
-SDK 8.1 installed on your system.
-
-Download Xcode from the
-[App Store](https://apps.apple.com/us/app/xcode/id497799835) or the
-[Apple Developer site](https://developer.apple.com/download/more/?=xcode).
-
-Once Xcode is installed, accept the license agreement for all users with the
-following command:
-
-```posix-terminal
-sudo xcodebuild -license accept
-```
-
-### Step 2: Download the Bazel installer
-
-Next, download the Bazel binary installer named
-`bazel-<version>-installer-darwin-x86_64.sh` from the
-[Bazel releases page on GitHub](https://github.com/bazelbuild/bazel/releases).
-
-**On macOS Catalina or newer (macOS >= 11)**, due to Apple's new app signing requirements,
-you need to download the installer from the terminal using `curl`, replacing
-the version variable with the Bazel version you want to download:
-
-```posix-terminal
-export BAZEL_VERSION=5.2.0
-
-curl -fLO "https://github.com/bazelbuild/bazel/releases/download/$BAZEL_VERSION/bazel-$BAZEL_VERSION-installer-darwin-x86_64.sh"
-```
-
-This is a temporary workaround until the macOS release flow supports
-signing ([#9304](https://github.com/bazelbuild/bazel/issues/9304)).
-
-### Step 3: Run the installer
-
-Run the Bazel installer as follows:
-
-```posix-terminal
-chmod +x "bazel-$BAZEL_VERSION-installer-darwin-x86_64.sh"
-
-./bazel-$BAZEL_VERSION-installer-darwin-x86_64.sh --user
-```
-
-The `--user` flag installs Bazel to the `$HOME/bin` directory on your system and
-sets the `.bazelrc` path to `$HOME/.bazelrc`. Use the `--help` command to see
-additional installation options.
-
-If you are **on macOS Catalina or newer (macOS >= 11)** and get an error that _**“bazel-real” cannot be
-opened because the developer cannot be verified**_, you need to re-download
-the installer from the terminal using `curl` as a workaround; see Step 2 above.
-
-### Step 4: Set up your environment
-
-If you ran the Bazel installer with the `--user` flag as above, the Bazel
-executable is installed in your `$HOME/bin` directory.
-It's a good idea to add this directory to your default paths, as follows:
-
-```posix-terminal
-export PATH="$PATH:$HOME/bin"
-```
-
-You can also add this command to your `~/.bashrc`, `~/.zshrc`, or `~/.profile`
-file.
-
-All set! You can confirm Bazel is installed successfully by running the
-following command:
-
-```posix-terminal
-bazel --version
-```
-To update to a newer release of Bazel, download and install the desired version.
-
diff --git a/8.0.1/install/suse.mdx b/8.0.1/install/suse.mdx
deleted file mode 100644
index a4d2e9e..0000000
--- a/8.0.1/install/suse.mdx
+++ /dev/null
@@ -1,35 +0,0 @@
----
-title: 'Installing Bazel on openSUSE Tumbleweed & Leap'
----
-
-
-
-This page describes how to install Bazel on openSUSE Tumbleweed and Leap.
-
-Note: The Bazel team does not officially maintain openSUSE support.
For issues
-using Bazel on openSUSE, please file a ticket at [bugzilla.opensuse.org](https://bugzilla.opensuse.org/).
-
-Packages are provided for openSUSE Tumbleweed and Leap. You can find all
-available Bazel versions via openSUSE's [software search](https://software.opensuse.org/search?utf8=%E2%9C%93&baseproject=ALL&q=bazel).
-
-The commands below must be run either via `sudo` or while logged in as `root`.
-
-## Installing Bazel on openSUSE
-
-Run the following commands to install the package. If you need a specific
-version, you can install it via the specific `bazelXXX` package; otherwise,
-just `bazel` is enough.
-
-To install the latest version of Bazel, run:
-
-```posix-terminal
-zypper install bazel
-```
-
-You can also install a specific version of Bazel by specifying the package
-version with `bazel<version>`. For example, to install
-Bazel 4.2, run:
-
-```posix-terminal
-zypper install bazel4.2
-```
diff --git a/8.0.1/install/ubuntu.mdx b/8.0.1/install/ubuntu.mdx
deleted file mode 100644
index a31bd2f..0000000
--- a/8.0.1/install/ubuntu.mdx
+++ /dev/null
@@ -1,166 +0,0 @@
----
-title: 'Installing Bazel on Ubuntu'
----
-
-
-
-This page describes the options for installing Bazel on Ubuntu.
-It also provides links to the Bazel completion scripts and the binary installer,
-if needed as a backup option (for example, if you don't have admin access).
-
-Supported Ubuntu Linux platforms:
-
-* 22.04 (LTS)
-* 20.04 (LTS)
-* 18.04 (LTS)
-
-Bazel should be compatible with other Ubuntu releases and Debian
-"stretch" and above, but this is untested and not guaranteed to work.
-
-Install Bazel on Ubuntu using one of the following methods:
-
-* *Recommended*: [Use Bazelisk](/install/bazelisk)
-* [Use our custom APT repository](#install-on-ubuntu)
-* [Use the binary installer](#binary-installer)
-* [Use the Bazel Docker container](#docker-container)
-* [Compile Bazel from source](/install/compile-source)
-
-**Note:** For Arm-based systems, the APT repository does not contain an `arm64`
-release, and there is no binary installer available. Either use Bazelisk or
-compile from source.
-
-Bazel comes with two completion scripts. After installing Bazel, you can:
-
-* Access the [bash completion script](/install/completion#bash)
-* Install the [zsh completion script](/install/completion#zsh)
-
-## Using Bazel's apt repository
-
-### Step 1: Add Bazel distribution URI as a package source
-
-**Note:** This is a one-time setup step.
-
-```posix-terminal
-sudo apt install apt-transport-https curl gnupg -y
-
-curl -fsSL https://bazel.build/bazel-release.pub.gpg | gpg --dearmor >bazel-archive-keyring.gpg
-
-sudo mv bazel-archive-keyring.gpg /usr/share/keyrings
-
-echo "deb [arch=amd64 signed-by=/usr/share/keyrings/bazel-archive-keyring.gpg] https://storage.googleapis.com/bazel-apt stable jdk1.8" | sudo tee /etc/apt/sources.list.d/bazel.list
-```
-
-The component name "jdk1.8" is kept only for legacy reasons and doesn't relate
-to supported or included JDK versions. Bazel releases are Java-version agnostic.
-Changing the "jdk1.8" component name would break existing users of the repo.
-
-### Step 2: Install and update Bazel
-
-```posix-terminal
-sudo apt update && sudo apt install bazel
-```
-
-Once installed, you can upgrade to a newer version of Bazel as part of your normal system updates:
-
-```posix-terminal
-sudo apt update && sudo apt full-upgrade
-```
-
-The `bazel` package always installs the latest stable version of Bazel.
You
-can install specific, older versions of Bazel in addition to the latest one,
-such as this:
-
-```posix-terminal
-sudo apt install bazel-1.0.0
-```
-
-This installs Bazel 1.0.0 as `/usr/bin/bazel-1.0.0` on your system. This
-can be useful if you need a specific version of Bazel to build a project, for
-example because it uses a `.bazelversion` file to explicitly state with which
-Bazel version it should be built.
-
-Optionally, you can set `bazel` to a specific version by creating a symlink:
-
-```posix-terminal
-sudo ln -s /usr/bin/bazel-1.0.0 /usr/bin/bazel
-
-bazel --version  # 1.0.0
-```
-
-### Step 3: Install a JDK (optional)
-
-Bazel includes a private, bundled JRE as its runtime and doesn't require you to
-install any specific version of Java.
-
-However, if you want to build Java code using Bazel, you have to install a JDK:
-
-```posix-terminal
-sudo apt install default-jdk
-```
-
-## Using the binary installer
-
-Generally, you should use the apt repository, but the binary installer
-can be useful if you don't have admin permissions on your machine or
-can't add custom repositories.
-
-The binary installers can be downloaded from Bazel's [GitHub releases page](https://github.com/bazelbuild/bazel/releases).
-
-The installer contains the Bazel binary and extracts it into your `$HOME/bin`
-folder. Some additional libraries must be installed manually for Bazel to work.
-
-### Step 1: Install required packages
-
-Bazel needs a C++ compiler and unzip / zip in order to work:
-
-```posix-terminal
-sudo apt install g++ unzip zip
-```
-
-If you want to build Java code using Bazel, install a JDK:
-
-```posix-terminal
-sudo apt-get install default-jdk
-```
-
-### Step 2: Run the installer
-
-Next, download the Bazel binary installer named `bazel-<version>-installer-linux-x86_64.sh`
-from the [Bazel releases page on GitHub](https://github.com/bazelbuild/bazel/releases).
-
-Run it as follows:
-
-```posix-terminal
-chmod +x bazel-<version>-installer-linux-x86_64.sh
-
-./bazel-<version>-installer-linux-x86_64.sh --user
-```
-
-The `--user` flag installs Bazel to the `$HOME/bin` directory on your system and
-sets the `.bazelrc` path to `$HOME/.bazelrc`. Use the `--help` command to see
-additional installation options.
-
-### Step 3: Set up your environment
-
-If you ran the Bazel installer with the `--user` flag as above, the Bazel
-executable is installed in your `$HOME/bin` directory.
-It's a good idea to add this directory to your default paths, as follows:
-
-```posix-terminal
-export PATH="$PATH:$HOME/bin"
-```
-
-You can also add this command to your `~/.bashrc` or `~/.zshrc` file to make it
-permanent.
-
-## Using the Bazel Docker container
-
-We publish a Docker container with Bazel installed for each Bazel version at `gcr.io/bazel-public/bazel`.
-You can use the Docker container as follows:
-
-```
-$ docker pull gcr.io/bazel-public/bazel:<version>
-```
-
-The Docker container is built by [these steps](https://github.com/bazelbuild/continuous-integration/tree/master/bazel/oci).
-
diff --git a/8.0.1/migrate/index.mdx b/8.0.1/migrate/index.mdx
deleted file mode 100644
index 5d96c4a..0000000
--- a/8.0.1/migrate/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: 'Migrating to Bazel'
----
-
-
-
-This page links to migration guides for Bazel.
- -* [Maven](/migrate/maven) -* [Xcode](/migrate/xcode) -* [CocoaPods](/migrate/cocoapods) diff --git a/8.0.1/migrate/maven.mdx b/8.0.1/migrate/maven.mdx deleted file mode 100644 index 38aaffc..0000000 --- a/8.0.1/migrate/maven.mdx +++ /dev/null @@ -1,241 +0,0 @@ ---- -title: 'Migrating from Maven to Bazel' ---- - - - -This page describes how to migrate from Maven to Bazel, including the -prerequisites and installation steps. It describes the differences between Maven -and Bazel, and provides a migration example using the Guava project. - -When migrating from any build tool to Bazel, it's best to have both build tools -running in parallel until you have fully migrated your development team, CI -system, and any other relevant systems. You can run Maven and Bazel in the same -repository. - -Note: While Bazel supports downloading and publishing Maven artifacts with -[rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external) -, it does not directly support Maven-based plugins. Maven plugins can't be -directly run by Bazel since there's no Maven compatibility layer. - -## Before you begin - -* [Install Bazel](/install) if it's not yet installed. -* If you're new to Bazel, go through the tutorial [Introduction to Bazel: - Build Java](/start/java) before you start migrating. The tutorial explains - Bazel's concepts, structure, and label syntax. - -## Differences between Maven and Bazel - -* Maven uses top-level `pom.xml` file(s). Bazel supports multiple build files - and multiple targets per `BUILD` file, allowing for builds that are more - incremental than Maven's. -* Maven takes charge of steps for the deployment process. Bazel does not - automate deployment. -* Bazel enables you to express dependencies between languages. -* As you add new sections to the project, with Bazel you may need to add new - `BUILD` files. Best practice is to add a `BUILD` file to each new Java - package. - -## Migrate from Maven to Bazel - -The steps below describe how to migrate your project to Bazel: - -1. [Create the MODULE.bazel file](#1-build) -2. [Create one BUILD file](#2-build) -3. [Create more BUILD files](#3-build) -4. [Build using Bazel](#4-build) - -Examples below come from a migration of the [Guava -project](https://github.com/google/guava) from Maven to Bazel. The -Guava project used is release `v31.1`. The examples using Guava do not walk -through each step in the migration, but they do show the files and contents that -are generated or added manually for the migration. - -``` -$ git clone https://github.com/google/guava.git && cd guava -$ git checkout v31.1 -``` - -### 1. Create the MODULE.bazel file - -Create a file named `MODULE.bazel` at the root of your project. If your project -has no external dependencies, this file can be empty. - -If your project depends on files or packages that are not in one of the -project's directories, specify these external dependencies in the MODULE.bazel -file. You can use `rules_jvm_external` to manage dependencies from Maven. For -instructions about using this ruleset, see [the -README](https://github.com/bazelbuild/rules_jvm_external/#rules_jvm_external) -. - -#### Guava project example: external dependencies - -You can list the external dependencies of the [Guava -project](https://github.com/google/guava) with the -[`rules_jvm_external`](https://github.com/bazelbuild/rules_jvm_external) -ruleset. 
- -Add the following snippet to the `MODULE.bazel` file: - -```python -bazel_dep(name = "rules_jvm_external", version = "6.2") -maven = use_extension("@rules_jvm_external//:extensions.bzl", "maven") -maven.install( - artifacts = [ - "com.google.code.findbugs:jsr305:3.0.2", - "com.google.errorprone:error_prone_annotations:2.11.0", - "com.google.j2objc:j2objc-annotations:1.3", - "org.codehaus.mojo:animal-sniffer-annotations:1.20", - "org.checkerframework:checker-qual:3.12.0", - ], - repositories = [ - "https://repo1.maven.org/maven2", - ], -) -use_repo(maven, "maven") -``` - -### 2. Create one BUILD file - -Now that you have your workspace defined and external dependencies (if -applicable) listed, you need to create `BUILD` files to describe how your -project should be built. Unlike Maven with its one `pom.xml` file, Bazel can use -many `BUILD` files to build a project. These files specify multiple build -targets, which allow Bazel to produce incremental builds. - -Add `BUILD` files in stages. Start with adding one `BUILD` file at the root of -your project and using it to do an initial build using Bazel. Then, you refine -your build by adding more `BUILD` files with more granular targets. - -1. In the same directory as your `MODULE.bazel` file, create a text file and - name it `BUILD`. - -2. In this `BUILD` file, use the appropriate rule to create one target to build - your project. Here are some tips: - - * Use the appropriate rule: - * To build projects with a single Maven module, use the - `java_library` rule as follows: - - ```python - java_library( - name = "everything", - srcs = glob(["src/main/java/**/*.java"]), - resources = glob(["src/main/resources/**"]), - deps = ["//:all-external-targets"], - ) - ``` - - * To build projects with multiple Maven modules, use the - `java_library` rule as follows: - - ```python - java_library( - name = "everything", - srcs = glob([ - "Module1/src/main/java/**/*.java", - "Module2/src/main/java/**/*.java", - ... - ]), - resources = glob([ - "Module1/src/main/resources/**", - "Module2/src/main/resources/**", - ... - ]), - deps = ["//:all-external-targets"], - ) - ``` - - * To build binaries, use the `java_binary` rule: - - ```python - java_binary( - name = "everything", - srcs = glob(["src/main/java/**/*.java"]), - resources = glob(["src/main/resources/**"]), - deps = ["//:all-external-targets"], - main_class = "com.example.Main" - ) - ``` - - * Specify the attributes: - * `name`: Give the target a meaningful name. In the examples - above, the target is called "everything." - * `srcs`: Use globbing to list all .java files in your project. - * `resources`: Use globbing to list all resources in your project. - * `deps`: You need to determine which external dependencies your - project needs. - * Take a look at the [example below of this top-level BUILD - file](#guava-2) from the migration of the Guava project. - -3. Now that you have a `BUILD` file at the root of your project, build your - project to ensure that it works. On the command line, from your workspace - directory, use `bazel build //:everything` to build your project with Bazel. - - The project has now been successfully built with Bazel. You will need to add - more `BUILD` files to allow incremental builds of the project. - -#### Guava project example: start with one BUILD file - -When migrating the Guava project to Bazel, initially one `BUILD` file is used to -build the entire project. 
Here are the contents of this initial `BUILD` file in
-the workspace directory:
-
-```python
-java_library(
-    name = "everything",
-    srcs = glob([
-        "guava/src/**/*.java",
-        "futures/failureaccess/src/**/*.java",
-    ]),
-    javacopts = ["-XepDisableAllChecks"],
-    deps = [
-        "@maven//:com_google_code_findbugs_jsr305",
-        "@maven//:com_google_errorprone_error_prone_annotations",
-        "@maven//:com_google_j2objc_j2objc_annotations",
-        "@maven//:org_checkerframework_checker_qual",
-        "@maven//:org_codehaus_mojo_animal_sniffer_annotations",
-    ],
-)
-```
-
-### 3. Create more BUILD files (optional)
-
-Bazel does work with just one `BUILD` file, as you saw after completing your
-first build. You should still consider breaking the build into smaller chunks by
-adding more `BUILD` files with granular targets.
-
-Multiple `BUILD` files with multiple targets will give the build increased
-granularity, allowing:
-
-* increased incremental builds of the project,
-* increased parallel execution of the build,
-* better maintainability of the build for future users, and
-* control over visibility of targets between packages, which can prevent
-  issues such as libraries containing implementation details leaking into
-  public APIs.
-
-Tips for adding more `BUILD` files:
-
-* You can start by adding a `BUILD` file to each Java package. Start with Java
-  packages that have the fewest dependencies and work your way up to packages
-  with the most dependencies.
-* As you add `BUILD` files and specify targets, add these new targets to the
-  `deps` sections of targets that depend on them. Note that the `glob()`
-  function does not cross package boundaries, so as the number of packages
-  grows the files matched by `glob()` will shrink.
-* Any time you add a `BUILD` file to a `main` directory, ensure that you add a
-  `BUILD` file to the corresponding `test` directory.
-* Take care to limit visibility properly between packages.
-* To simplify troubleshooting errors in your setup of `BUILD` files, ensure
-  that the project continues to build with Bazel as you add each build file.
-  Run `bazel build //...` to ensure all of your targets still build.
-
-### 4. Build using Bazel
-
-You've been building using Bazel as you add `BUILD` files to validate the setup
-of the build.
-
-When you have `BUILD` files at the desired granularity, you can use Bazel to
-produce all of your builds.
diff --git a/8.0.1/migrate/xcode.mdx b/8.0.1/migrate/xcode.mdx
deleted file mode 100644
index 986cd11..0000000
--- a/8.0.1/migrate/xcode.mdx
+++ /dev/null
@@ -1,280 +0,0 @@
----
-title: 'Migrating from Xcode to Bazel'
----
-
-
-
-This page describes how to build or test an Xcode project with Bazel. It
-describes the differences between Xcode and Bazel, and provides the steps for
-converting an Xcode project to a Bazel project. It also provides troubleshooting
-solutions to address common errors.
-
-## Differences between Xcode and Bazel
-
-* Bazel requires you to explicitly specify every build target and its
-  dependencies, plus the corresponding build settings via build rules.
-
-* Bazel requires all files on which the project depends to be present within
-  the workspace directory or specified as dependencies in the `MODULE.bazel`
-  file.
-
-* When building Xcode projects with Bazel, the `BUILD` file(s) become the
-  source of truth.
If you work on the project in Xcode, you must generate a
-  new version of the Xcode project that matches the `BUILD` files using
-  [rules_xcodeproj](https://github.com/buildbuddy-io/rules_xcodeproj/)
-  whenever you update the `BUILD` files. Certain changes to the `BUILD` files,
-  such as adding dependencies to a target, don't require regenerating the
-  project, which can speed up development. If you're not using Xcode, the
-  `bazel build` and `bazel test` commands provide build and test capabilities
-  with certain limitations described later in this guide.
-
-## Before you begin
-
-Before you begin, do the following:
-
-1. [Install Bazel](/install) if you have not already done so.
-
-2. If you're not familiar with Bazel and its concepts, complete the [iOS app
-   tutorial](/start/ios-app). You should understand the Bazel workspace,
-   including the `MODULE.bazel` and `BUILD` files, as well as the concepts of
-   targets, build rules, and Bazel packages.
-
-3. Analyze and understand the project's dependencies.
-
-### Analyze project dependencies
-
-Unlike Xcode, Bazel requires you to explicitly declare all dependencies for
-every target in the `BUILD` file.
-
-For more information on external dependencies, see [Working with external
-dependencies](/docs/external).
-
-## Build or test an Xcode project with Bazel
-
-To build or test an Xcode project with Bazel, do the following:
-
-1. [Create the `MODULE.bazel` file](#create-workspace)
-
-2. [(Experimental) Integrate SwiftPM dependencies](#integrate-swiftpm)
-
-3. [Create a `BUILD` file:](#create-build-file)
-
-   a. [Add the application target](#add-app-target)
-
-   b. [(Optional) Add the test target(s)](#add-test-target)
-
-   c. [Add the library target(s)](#add-library-target)
-
-4. [(Optional) Granularize the build](#granularize-build)
-
-5. [Run the build](#run-build)
-
-6. [Generate the Xcode project with rules_xcodeproj](#generate-the-xcode-project-with-rules_xcodeproj)
-
-### Step 1: Create the `MODULE.bazel` file
-
-Create a `MODULE.bazel` file in a new directory. This directory becomes the
-Bazel workspace root. If the project uses no external dependencies, this file
-can be empty. If the project depends on files or packages that are not in one of
-the project's directories, specify these external dependencies in the
-`MODULE.bazel` file.
-
-Note: Place the project source code within the directory tree containing the
-`MODULE.bazel` file.
-
-### Step 2: (Experimental) Integrate SwiftPM dependencies
-
-To integrate SwiftPM dependencies into the Bazel workspace with
-[swift_bazel](https://github.com/cgrindel/swift_bazel), you must
-convert them into Bazel packages as described in the [following
-tutorial](https://chuckgrindel.com/swift-packages-in-bazel-using-swift_bazel/).
-
-Note: SwiftPM support is a manual process with many variables. SwiftPM
-integration with Bazel has not been fully verified and is not officially
-supported.
-
-### Step 3: Create a `BUILD` file
-
-Once you have defined the workspace and external dependencies, you need to
-create a `BUILD` file that tells Bazel how the project is structured.
Create the
-`BUILD` file at the root of the Bazel workspace and configure it to do an
-initial build of the project as follows:
-
-* [Step 3a: Add the application target](#step-3a-add-the-application-target)
-* [Step 3b: (Optional) Add the test target(s)](#step-3b-optional-add-the-test-target-s)
-* [Step 3c: Add the library target(s)](#step-3c-add-the-library-target-s)
-
-**Tip:** To learn more about packages and other Bazel concepts, see [Workspaces,
-packages, and targets](/concepts/build-ref).
-
-#### Step 3a: Add the application target
-
-Add a
-[`macos_application`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-macos.md#macos_application)
-or an
-[`ios_application`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-ios.md#ios_application)
-rule target. This target builds a macOS or iOS application bundle, respectively.
-In the target, specify the following at the minimum:
-
-* `bundle_id` - the bundle ID (reverse-DNS path followed by app name) of the
-  binary.
-
-* `provisioning_profile` - provisioning profile from your Apple Developer
-  account (if building for an iOS device).
-
-* `families` (iOS only) - whether to build the application for iPhone, iPad,
-  or both.
-
-* `infoplists` - list of .plist files to merge into the final Info.plist file.
-
-* `minimum_os_version` - the minimum version of macOS or iOS that the
-  application supports. This ensures Bazel builds the application with the
-  correct API levels.
-
-#### Step 3b: (Optional) Add the test target(s)
-
-Bazel's [Apple build
-rules](https://github.com/bazelbuild/rules_apple) support running
-unit and UI tests on all Apple platforms. Add test targets as follows:
-
-* [`macos_unit_test`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-macos.md#macos_unit_test)
-  to run library-based and application-based unit tests on macOS.
-
-* [`ios_unit_test`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-ios.md#ios_unit_test)
-  to build and run library-based unit tests on iOS.
-
-* [`ios_ui_test`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-ios.md#ios_ui_test)
-  to build and run user interface tests in the iOS simulator.
-
-* Similar test rules exist for
-  [tvOS](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-tvos.md),
-  [watchOS](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-watchos.md)
-  and
-  [visionOS](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-visionos.md).
-
-At the minimum, specify a value for the `minimum_os_version` attribute. While
-other packaging attributes, such as `bundle_identifier` and `infoplists`,
-default to the most commonly used values, ensure that those defaults are compatible
-with the project and adjust them as necessary. For tests that require the iOS
-simulator, also specify the `ios_application` target name as the value of the
-`test_host` attribute.
-
-#### Step 3c: Add the library target(s)
-
-Add an [`objc_library`](/reference/be/objective-c#objc_library) target for each
-Objective-C library and a
-[`swift_library`](https://github.com/bazelbuild/rules_swift/blob/master/doc/rules.md#swift_library)
-target for each Swift library on which the application and/or tests depend.
-
-Add the library targets as follows:
-
-* Add the application library targets as dependencies to the application
-  targets.
-
-* Add the test library targets as dependencies to the test targets.
-
-* List the implementation sources in the `srcs` attribute.
-
-* List the headers in the `hdrs` attribute.
-
-Note: You can use the [`glob`](/reference/be/functions#glob) function to include
-all sources and/or headers of a certain type. Use it carefully as it might
-include files you do not want Bazel to build.
-
-You can browse existing examples for various types of applications directly in
-the [rules_apple examples
-directory](https://github.com/bazelbuild/rules_apple/tree/master/examples/). For
-example:
-
-* [macOS application targets](https://github.com/bazelbuild/rules_apple/tree/master/examples/macos)
-
-* [iOS application targets](https://github.com/bazelbuild/rules_apple/tree/master/examples/ios)
-
-* [Multi platform applications (macOS, iOS, watchOS, tvOS)](https://github.com/bazelbuild/rules_apple/tree/master/examples/multi_platform)
-
-For more information on build rules, see [Apple Rules for
-Bazel](https://github.com/bazelbuild/rules_apple).
-
-At this point, it is a good idea to test the build:
-
-`bazel build //:<target-name>`
-
-### Step 4: (Optional) Granularize the build
-
-If the project is large, or as it grows, consider chunking it into multiple
-Bazel packages. This increased granularity provides:
-
-* Increased incrementality of builds,
-
-* Increased parallelization of build tasks,
-
-* Better maintainability for future users,
-
-* Better control over source code visibility across targets and packages. This
-  prevents issues such as libraries containing implementation details leaking
-  into public APIs.
-
-Tips for granularizing the project:
-
-* Put each library in its own Bazel package. Start with those requiring the
-  fewest dependencies and work your way up the dependency tree.
-
-* As you add `BUILD` files and specify targets, add these new targets to the
-  `deps` attributes of targets that depend on them.
-
-* The `glob()` function does not cross package boundaries, so as the number of
-  packages grows the files matched by `glob()` will shrink.
-
-* When adding a `BUILD` file to a `main` directory, also add a `BUILD` file to
-  the corresponding `test` directory.
-
-* Enforce healthy visibility limits across packages.
-
-* Build the project after each major change to the `BUILD` files and fix build
-  errors as you encounter them.
-
-### Step 5: Run the build
-
-Run the fully migrated build to ensure it completes with no errors or warnings.
-Run every application and test target individually to more easily find sources
-of any errors that occur.
-
-For example:
-
-```posix-terminal
-bazel build //:my-target
-```
-
-### Step 6: Generate the Xcode project with rules_xcodeproj
-
-When building with Bazel, the `MODULE.bazel` and `BUILD` files become the source
-of truth about the build. To make Xcode aware of this, you must generate a
-Bazel-compatible Xcode project using
-[rules_xcodeproj](https://github.com/buildbuddy-io/rules_xcodeproj#features).
-
-### Troubleshooting
-
-Bazel errors can arise when it gets out of sync with the selected Xcode version,
-like when you apply an update. Here are some things to try if you're
-experiencing errors with Xcode, for example "Xcode version must be specified to
-use an Apple CROSSTOOL".
-
-* Manually run Xcode and accept any terms and conditions.
-
-* Use `xcode-select` to indicate the correct version, accept the license, and
-  clear Bazel's state:
-
-```posix-terminal
-  sudo xcode-select -s /Applications/Xcode.app/Contents/Developer
-
-  sudo xcodebuild -license
-
-  bazel sync --configure
-```
-
-* If this does not work, you may also try running `bazel clean --expunge`.
-
-Note: If you've saved your Xcode to a different path, you can use `xcode-select
--s` to point to that path.
diff --git a/8.0.1/query/aquery.mdx b/8.0.1/query/aquery.mdx
deleted file mode 100644
index 2176ff6..0000000
--- a/8.0.1/query/aquery.mdx
+++ /dev/null
@@ -1,385 +0,0 @@
----
-title: 'Action Graph Query (aquery)'
----
-
-
-
-The `aquery` command allows you to query for actions in your build graph.
-It operates on the post-analysis Configured Target Graph and exposes
-information about **Actions, Artifacts and their relationships.**
-
-`aquery` is useful when you are interested in the properties of the Actions/Artifacts
-generated from the Configured Target Graph, such as the actual commands run
-and their inputs/outputs/mnemonics.
-
-The tool accepts several command-line [options](#command-options).
-Notably, the aquery command runs on top of a regular Bazel build and inherits
-the set of options available during a build.
-
-It supports the same set of functions as traditional
-`query`, except for `siblings`, `buildfiles` and
-`tests`.
-
-An example `aquery` output (without specific details):
-
-```
-$ bazel aquery 'deps(//some:label)'
-action 'Writing file some_file_name'
-  Mnemonic: ...
-  Target: ...
-  Configuration: ...
-  ActionKey: ...
-  Inputs: [...]
-  Outputs: [...]
-```
-
-## Basic syntax
-
-A simple example of the syntax for `aquery` is as follows:
-
-`bazel aquery "aquery_function(function(//target))"`
-
-The query expression (in quotes) consists of the following:
-
-* `aquery_function(...)`: functions specific to `aquery`.
-  More details [below](#using-aquery-functions).
-* `function(...)`: the standard [functions](/query/language#functions)
-  as in traditional `query`.
-* `//target`: the label of the target of interest.
-
-```
-# aquery examples:
-# Get the action graph generated while building //src/target_a
-$ bazel aquery '//src/target_a'
-
-# Get the action graph generated while building all dependencies of //src/target_a
-$ bazel aquery 'deps(//src/target_a)'
-
-# Get the action graph generated while building all dependencies of //src/target_a
-# whose inputs filenames match the regex ".*cpp".
-$ bazel aquery 'inputs(".*cpp", deps(//src/target_a))'
-```
-
-## Using aquery functions
-
-There are three `aquery` functions:
-
-* `inputs`: filter actions by inputs.
-* `outputs`: filter actions by outputs.
-* `mnemonic`: filter actions by mnemonic.
-
-`expr ::= inputs(word, expr)`
-
-  The `inputs` operator returns the actions generated from building `expr`,
-  whose input filenames match the regex provided by `word`.
-
-`$ bazel aquery 'inputs(".*cpp", deps(//src/target_a))'`
-
-`outputs` and `mnemonic` functions share a similar syntax.
-
-You can also combine functions to achieve the AND operation. For example:
-
-```
-  $ bazel aquery 'mnemonic("Cpp.*", (inputs(".*cpp", inputs("foo.*", //src/target_a))))'
-```
-
-  The above command would find all actions involved in building `//src/target_a`,
-  whose mnemonics match `"Cpp.*"` and inputs match the patterns
-  `".*cpp"` and `"foo.*"`.
-
-Important: aquery functions can't be nested inside non-aquery functions.
-Conceptually, this makes sense since the output of aquery functions is Actions,
-not Configured Targets.
-
-An example of the syntax error produced:
-
-```
-  $ bazel aquery 'deps(inputs(".*cpp", //src/target_a))'
-  ERROR: aquery filter functions (inputs, outputs, mnemonic) produce actions,
-  and therefore can't be the input of other function types: deps
-        deps(inputs(".*cpp", //src/target_a))
-```
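-
-By contrast, the aquery filter functions can be nested within each other freely.
-For example, to find actions from building `//src/target_a`'s dependencies whose
-outputs match `".*\.a"` and whose mnemonics match `"Cpp.*"` (the target and
-patterns here are illustrative):
-
-```
-$ bazel aquery 'mnemonic("Cpp.*", outputs(".*\.a", deps(//src/target_a)))'
-```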
-
-## Options
-
-### Build options
-
-`aquery` runs on top of a regular Bazel build and thus inherits the set of
-[options](/reference/command-line-reference#build-options)
-available during a build.
-
-### Aquery options
-
-#### `--output=(text|summary|proto|jsonproto|textproto), default=text`
-
-The default output format (`text`) is human-readable;
-use `proto`, `textproto`, or `jsonproto` for a machine-readable format.
-The proto message is `analysis.ActionGraphContainer`.
-
-#### `--include_commandline, default=true`
-
-Includes the content of the action command lines in the output (potentially large).
-
-#### `--include_artifacts, default=true`
-
-Includes names of the action inputs and outputs in the output (potentially large).
-
-#### `--include_aspects, default=true`
-
-Whether to include Aspect-generated actions in the output.
-
-#### `--include_param_files, default=false`
-
-Include the content of the param files used in the command (potentially large).
-
-Warning: Enabling this flag will automatically enable the `--include_commandline` flag.
-
-#### `--include_file_write_contents, default=false`
-
-Include file contents for the `actions.write()` action and the contents of the
-manifest file for the `SourceSymlinkManifest` action. The file contents are
-returned in the `file_contents` field with the proto-based output formats.
-With `--output=text`, the output includes a line of the form:
-
-```
-FileWriteContents: [...]
-```
-
-#### `--skyframe_state, default=false`
-
-Without performing extra analysis, dump the Action Graph from Skyframe.
-
-Note: Specifying a target with `--skyframe_state` is currently not supported.
-This flag is only available with `--output=proto` or `--output=textproto`.
-
-## Other tools and features
-
-### Querying against the state of Skyframe
-
-[Skyframe](/reference/skyframe) is the evaluation and
-incrementality model of Bazel. On each Bazel server instance, Skyframe stores
-the dependency graph constructed from the previous runs of the
-[Analysis phase](/run/build#analysis).
-
-In some cases, it is useful to query the Action Graph on Skyframe.
-An example use case would be:
-
-1. Run `bazel build //target_a`
-2. Run `bazel build //target_b`
-3. File `foo.out` was generated.
-
-_As a Bazel user, I want to determine if `foo.out` was generated from building
-`//target_a` or `//target_b`_.
-
-One could run `bazel aquery 'outputs("foo.out", //target_a)'` and
-`bazel aquery 'outputs("foo.out", //target_b)'` to figure out the action responsible
-for creating `foo.out`, and in turn the target. However, the number of different
-targets previously built can be larger than 2, which makes running multiple `aquery`
-commands a hassle.
-
-As an alternative, the `--skyframe_state` flag can be used:
-
-```
-  # List all actions on Skyframe's action graph
-  $ bazel aquery --output=proto --skyframe_state
-
-  # or
-
-  # List all actions on Skyframe's action graph, whose output matches "foo.out"
-  $ bazel aquery --output=proto --skyframe_state 'outputs("foo.out")'
-```
-
-In `--skyframe_state` mode, `aquery` takes the content of the action graph
-that Skyframe keeps on the Bazel server instance, (optionally) filters it, and
-outputs the content without re-running the analysis phase.
-
-#### Special considerations
-
-##### Output format
-
-`--skyframe_state` is currently only available for `--output=proto`
-and `--output=textproto`.
-
-##### Non-inclusion of target labels in the query expression
-
-Currently, `--skyframe_state` queries the whole action graph that exists on Skyframe,
-regardless of the targets. Having the target label specified in the query together with
-`--skyframe_state` is considered a syntax error:
-
-```
-  # WRONG: Target Included
-  $ bazel aquery --output=proto --skyframe_state //target_a
-  ERROR: Error while parsing '//target_a)': Specifying build target(s) [//target_a] with --skyframe_state is currently not supported.
-
-  # WRONG: Target Included
-  $ bazel aquery --output=proto --skyframe_state 'inputs(".*.java", //target_a)'
-  ERROR: Error while parsing '//target_a)': Specifying build target(s) [//target_a] with --skyframe_state is currently not supported.
-
-  # CORRECT: Without Target
-  $ bazel aquery --output=proto --skyframe_state
-  $ bazel aquery --output=proto --skyframe_state 'inputs(".*.java")'
-```
-
-### Comparing aquery outputs
-
-You can compare the outputs of two different aquery invocations using the
-`aquery_differ` tool, for instance, when you make some changes to your rule
-definition and want to verify that the command lines being run did not change.
-
-The tool is available in the [bazelbuild/bazel](https://github.com/bazelbuild/bazel/tree/master/tools/aquery_differ) repository.
-To use it, clone the repository to your local machine. An example usage:
-
-```
-  $ bazel run //tools/aquery_differ -- \
-  --before=/path/to/before.proto \
-  --after=/path/to/after.proto \
-  --input_type=proto \
-  --attrs=cmdline \
-  --attrs=inputs
-```
-
-The above command returns the difference between the `before` and `after` aquery
-outputs: which actions were present in one but not the other, which actions have
-different command lines or inputs in each output, and so on. The result of
-running the above command would be:
-
-```
-  Aquery output 'after' change contains an action that generates the following outputs that aquery output 'before' change doesn't:
-  ...
-  /list of output files/
-  ...
-
-  [cmdline]
-  Difference in the action that generates the following output(s):
-    /path/to/abc.out
-  --- /path/to/before.proto
-  +++ /path/to/after.proto
-  @@ -1,3 +1,3 @@
-    ...
-    /cmdline diff, in unified diff format/
-    ...
-```
-
-#### Command options
-
-`--before, --after`: The aquery output files to be compared.
-
-`--input_type=(proto|text_proto), default=proto`: the format of the input
-files. Support is provided for `proto` and `textproto` aquery output.
-
-`--attrs=(cmdline|inputs), default=cmdline`: the attributes of actions
-to be compared.
-
-### Aspect-on-aspect
-
-It is possible for [Aspects](/extending/aspects)
-to be applied on top of each other. The aquery output of the action generated by
-these Aspects would then include the _Aspect path_, which is the sequence of
-Aspects applied to the target which generated the action.
-
-An example of Aspect-on-Aspect:
-
-```
-  t0
-  ^
-  | <- a1
-  t1
-  ^
-  | <- a2
-  t2
-```
-
-Let t<sub>i</sub> be a target of rule r<sub>i</sub>, which applies an Aspect
-a<sub>i</sub> to its dependencies.
-
-Assume that a<sub>2</sub> generates an action X when applied to target
-t<sub>0</sub>. The text output of
-`bazel aquery --include_aspects 'deps(//t2)'` for action X would be:
-
-```
-  action ...
-  Mnemonic: ...
-  Target: //my_pkg:t0
-  Configuration: ...
-  AspectDescriptors: [//my_pkg:rule.bzl%a2(foo=...)
-    -> //my_pkg:rule.bzl%a1(bar=...)]
-  ...
-```
-
-This means that action `X` was generated by Aspect `a2` applied onto
-`a1(t0)`, where `a1(t0)` is the result of Aspect `a1` applied
-onto target `t0`.
-
-Each `AspectDescriptor` has the following format:
-
-```
-  AspectClass([param=value,...])
-```
-
-`AspectClass` could be the name of the Aspect class (for native Aspects) or
-`bzl_file%aspect_name` (for Starlark Aspects). `AspectDescriptor`s are
-sorted in topological order of the
-[dependency graph](/extending/aspects#aspect_basics).
-
-### Linking with the JSON profile
-
-While aquery provides information about the actions being run in a build (why they're being run,
-their inputs/outputs), the [JSON profile](/rules/performance#performance-profiling)
-tells us the timing and duration of their execution.
-It is possible to combine these two sets of information via a common denominator: an action's primary output.
-
-To include actions' outputs in the JSON profile, generate the profile with
-`--experimental_include_primary_output --noslim_profile`.
-Slim profiles are incompatible with the inclusion of primary outputs. An action's primary output
-is included by default by aquery.
-
-We don't currently provide a canonical tool to combine these two data sources, but you should be
-able to build your own script with the above information.
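-
-As a rough starting point, the mapping from each action's primary output to its
-mnemonic can be pulled out of the `jsonproto` output and then joined against the
-primary outputs recorded in the profile. The field names below
-(`artifacts[].id`, `artifacts[].execPath`, `actions[].primaryOutputId`,
-`actions[].mnemonic`) are assumptions based on `analysis.ActionGraphContainer`;
-verify them against your Bazel version's output before relying on this sketch:
-
-```
-$ bazel aquery --output=jsonproto 'deps(//src/target_a)' > actions.json
-
-# Print "<primary output exec path><TAB><mnemonic>" per action (requires jq 1.6+).
-$ jq -r 'INDEX(.artifacts[]; .id) as $artifacts
-         | .actions[]
-         | [$artifacts[.primaryOutputId | tostring].execPath, .mnemonic]
-         | @tsv' actions.json
-```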
-
-## Known issues
-
-### Handling shared actions
-
-Sometimes actions are
-[shared](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/actions/Actions.java;l=59;drc=146d51aa1ec9dcb721a7483479ef0b1ac21d39f1)
-between configured targets.
-
-In the execution phase, those shared actions are
-[simply considered as one](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/actions/Actions.java;l=241;drc=003b8734036a07b496012730964ac220f486b61f) and only executed once.
-However, aquery operates on the pre-execution, post-analysis action graph, and hence treats these
-like separate actions whose output Artifacts have the exact same `execPath`. As a result,
-equivalent Artifacts appear duplicated.
-
-The list of aquery issues/planned features can be found on
-[GitHub](https://github.com/bazelbuild/bazel/labels/team-Performance).
-
-## FAQs
-
-### The ActionKey remains the same even though the content of an input file changed.
-
-In the context of aquery, the `ActionKey` refers to the `String` obtained from
-[ActionAnalysisMetadata#getKey](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/actions/ActionAnalysisMetadata.java;l=89;drc=8b856f5484f0117b2aebc302f849c2a15f273310):
-
-```
-  Returns a string encoding all of the significant behaviour of this Action that might affect the
-  output. The general contract of `getKey` is this: if the work to be performed by the
-  execution of this action changes, the key must change.
-
-  ...
-
-  Examples of changes that should affect the key are:
-
-  - Changes to the BUILD file that materially affect the rule which gave rise to this Action.
-  - Changes to the command-line options, environment, or other global configuration resources
-    which affect the behaviour of this kind of Action (other than changes to the names of the
-    input/output files, which are handled externally).
-  - An upgrade to the build tools which changes the program logic of this kind of Action
-    (typically this is achieved by incorporating a UUID into the key, which is changed each
-    time the program logic of this action changes).
-  Note the following exception: for actions that discover inputs, the key must change if any
-  input names change or else action validation may falsely validate.
-```
-
-This excludes changes to the content of the input files, and is not to be confused with
-[RemoteCacheClient#ActionKey](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/remote/common/RemoteCacheClient.java;l=38;drc=21577f202eb90ce94a337ebd2ede824d609537b6).
-
-## Updates
-
-For any issues/feature requests, please file an issue [here](https://github.com/bazelbuild/bazel/issues/new).
diff --git a/8.0.1/query/cquery.mdx b/8.0.1/query/cquery.mdx
deleted file mode 100644
index bd829c8..0000000
--- a/8.0.1/query/cquery.mdx
+++ /dev/null
@@ -1,646 +0,0 @@
----
-title: 'Configurable Query (cquery)'
----
-
-
-
-`cquery` is a variant of [`query`](/query/language) that correctly handles
-[`select()`](/docs/configurable-attributes) and build options' effects on the build
-graph.
-
-It achieves this by running over the results of Bazel's [analysis
-phase](/extending/concepts#evaluation-model),
-which integrates these effects. `query`, by contrast, runs over the results of
-Bazel's loading phase, before options are evaluated.
-
-For example:
-
-```
-$ cat > tree/BUILD <<EOF
-sh_library(
-    name = "ash",
-    deps = select({
-        ":excelsior": [":manna-ash"],
-        ":americana": [":white-ash"],
-        "//conditions:default": [":common-ash"],
-    }),
-)
-sh_library(name = "manna-ash")
-sh_library(name = "white-ash")
-sh_library(name = "common-ash")
-config_setting(
-    name = "excelsior",
-    values = {"define": "species=excelsior"},
-)
-config_setting(
-    name = "americana",
-    values = {"define": "species=americana"},
-)
-EOF
-```
-
-```
-# Traditional query: query doesn't know which select() branch you will choose,
-# so it conservatively lists all possible choices, including all used config_settings.
-$ bazel query "deps(//tree:ash)" --noimplicit_deps
-//tree:americana
-//tree:ash
-//tree:common-ash
-//tree:excelsior
-//tree:manna-ash
-//tree:white-ash
-
-# cquery: cquery lets you set build options at the command line and chooses
-# the exact dependencies those options imply (and also the config_setting targets).
-$ bazel cquery "deps(//tree:ash)" --define species=excelsior --noimplicit_deps
-//tree:ash (9f87702)
-//tree:manna-ash (9f87702)
-//tree:americana (9f87702)
-//tree:excelsior (9f87702)
-```
-
-Each result includes a [unique identifier](#configurations) `(9f87702)` of
-the [configuration](/reference/glossary#configuration) the
-target is built with.
-
-Since `cquery` runs over the configured target graph, it doesn't have insight
-into artifacts like build actions, nor access to [`test_suite`](/reference/be/general#test_suite)
-rules, as they are not configured targets. For the former, see [`aquery`](/query/aquery).
-
-## Basic syntax
-
-A simple `cquery` call looks like:
-
-`bazel cquery "function(//target)"`
-
-The query expression `"function(//target)"` consists of the following:
-
-* **`function(...)`** is the function to run on the target. `cquery`
-  supports most
-  of `query`'s [functions](/query/language#functions), plus a
-  few new ones.
-* **`//target`** is the expression fed to the function. In this example, the
-  expression is a simple target. But the query language also allows nesting of functions.
-  See the [Query guide](/query/guide) for examples.
-
-`cquery` requires a target to run through the [loading and analysis](/extending/concepts#evaluation-model)
-phases.
Unless otherwise specified, `cquery` parses the target(s) listed in the -query expression. See [`--universe_scope`](#universe-scope) -for querying dependencies of top-level build targets. - -## Configurations - -The line: - -``` -//tree:ash (9f87702) -``` - -means `//tree:ash` was built in a configuration with ID `9f87702`. For most -targets, this is an opaque hash of the build option values defining the -configuration. - -To see the configuration's complete contents, run: - -``` -$ bazel config 9f87702 -``` - -`9f87702` is a prefix of the complete ID. This is because complete IDs are -SHA-256 hashes, which are long and hard to follow. `cquery` understands any valid -prefix of a complete ID, similar to -[Git short hashes](https://git-scm.com/book/en/v2/Git-Tools-Revision-Selection#_revision_selection). - To see complete IDs, run `$ bazel config`. - -## Target pattern evaluation - -`//foo` has a different meaning for `cquery` than for `query`. This is because -`cquery` evaluates _configured_ targets and the build graph may have multiple -configured versions of `//foo`. - -For `cquery`, a target pattern in the query expression evaluates -to every configured target with a label that matches that pattern. Output is -deterministic, but `cquery` makes no ordering guarantee beyond the -[core query ordering contract](/query/language#graph-order). - -This produces subtler results for query expressions than with `query`. -For example, the following can produce multiple results: - -``` -# Analyzes //foo in the target configuration, but also analyzes -# //genrule_with_foo_as_tool which depends on an exec-configured -# //foo. So there are two configured target instances of //foo in -# the build graph. -$ bazel cquery //foo --universe_scope=//foo,//genrule_with_foo_as_tool -//foo (9f87702) -//foo (exec) -``` - -If you want to precisely declare which instance to query over, use -the [`config`](#config) function. - -See `query`'s [target pattern -documentation](/query/language#target-patterns) for more information on target patterns. - -## Functions - -Of the [set of functions](/query/language#functions "list of query functions") -supported by `query`, `cquery` supports all but -[`allrdeps`](/query/language#allrdeps), -[`buildfiles`](/query/language#buildfiles), -[`rbuildfiles`](/query/language#rbuildfiles), -[`siblings`](/query/language#siblings), [`tests`](/query/language#tests), and -[`visible`](/query/language#visible). - -`cquery` also introduces the following new functions: - -### config - -`expr ::= config(expr, word)` - -The `config` operator attempts to find the configured target for -the label denoted by the first argument and configuration specified by the -second argument. - -Valid values for the second argument are `null` or a -[custom configuration hash](#configurations). Hashes can be retrieved from `$ -bazel config` or a previous `cquery`'s output. - -Examples: - -``` -$ bazel cquery "config(//bar, 3732cc8)" --universe_scope=//foo -``` - -``` -$ bazel cquery "deps(//foo)" -//bar (exec) -//baz (exec) - -$ bazel cquery "config(//baz, 3732cc8)" -``` - -If not all results of the first argument can be found in the specified -configuration, only those that can be found are returned. If no results -can be found in the specified configuration, the query fails. - -## Options - -### Build options - -`cquery` runs over a regular Bazel build and thus inherits the set of -[options](/reference/command-line-reference#build-options) available during a build. 
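-
-For example, the same query can be re-run under different build flags to see how
-the configured graph changes. The flag values here are purely illustrative:
-
-```
-# Compare the configured dependencies of //foo across compilation modes.
-$ bazel cquery "deps(//foo)" --compilation_mode=opt
-$ bazel cquery "deps(//foo)" --compilation_mode=dbg
-```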
-
-### Using cquery options
-
-#### `--universe_scope` (comma-separated list)
-
-Often, the dependencies of configured targets go through
-[transitions](/extending/rules#configurations),
-which causes their configuration to differ from that of their dependents. This flag
-allows you to query a target as if it were built as a dependency or a transitive
-dependency of another target. For example:
-
-```
-# x/BUILD
-genrule(
-    name = "my_gen",
-    srcs = ["x.in"],
-    outs = ["x.cc"],
-    cmd = "$(locations :tool) $< >$@",
-    tools = [":tool"],
-)
-cc_binary(
-    name = "tool",
-    srcs = ["tool.cpp"],
-)
-```
-
-Genrules configure their tools in the
-[exec configuration](/extending/rules#configurations),
-so the following queries would produce the following outputs:
-
-Query | Target Built | Output
------- | ------------ | ------
-`bazel cquery "//x:tool"` | `//x:tool` | `//x:tool (target config)`
-`bazel cquery "//x:tool" --universe_scope="//x:my_gen"` | `//x:my_gen` | `//x:tool (exec config)`
-
-If this flag is set, its contents are built. _If it's not set, all targets
-mentioned in the query expression are built_ instead. The transitive closure of the
-built targets is used as the universe of the query. Either way, the targets to
-be built must be buildable at the top level (that is, compatible with top-level
-options). `cquery` returns results in the transitive closure of these
-top-level targets.
-
-Even if it's possible to build all targets in a query expression at the top
-level, it may be beneficial to not do so. For example, explicitly setting
-`--universe_scope` could prevent building targets multiple times in
-configurations you don't care about. It could also help specify which configuration version of a
-target you're looking for (since it's not currently possible
-to fully specify this any other way). You should set this flag
-if your query expression is more complex than `deps(//foo)`.
-
-#### `--implicit_deps` (boolean, default=True)
-
-Setting this flag to false filters out all results that aren't explicitly set in
-the BUILD file and are instead set elsewhere by Bazel. This includes filtering out
-resolved toolchains.
-
-#### `--tool_deps` (boolean, default=True)
-
-Setting this flag to false filters out all configured targets for which the
-path from the queried target to them crosses a transition between the target
-configuration and the
-[non-target configurations](/extending/rules#configurations).
-If the queried target is in the target configuration, setting `--notool_deps` will
-only return targets that also are in the target configuration. If the queried
-target is in a non-target configuration, setting `--notool_deps` will only return
-targets also in non-target configurations. This setting generally does not affect filtering
-of resolved toolchains.
-
-#### `--include_aspects` (boolean, default=True)

-Include dependencies added by [aspects](/extending/aspects).
-
-If this flag is disabled, `cquery somepath(X, Y)` and
-`cquery deps(X) | grep 'Y'` omit Y if X only depends on it through an aspect.
-
-## Output formats
-
-By default, cquery outputs results in a dependency-ordered list of label and configuration pairs.
-There are other options for exposing the results as well.
-
-### Transitions
-
-```
---transitions=lite
---transitions=full
-```
-
-Configuration [transitions](/extending/rules#configurations)
-are used to build targets underneath the top-level targets in different
-configurations than the top-level targets themselves.
-
-For example, a target might impose a transition to the exec configuration on all
-dependencies in its `tools` attribute. These are known as attribute
-transitions. Rules can also impose transitions on their own configurations,
-known as rule class transitions. This output format outputs information about
-these transitions, such as what type they are and the effect they have on build
-options.
-
-This output format is triggered by the `--transitions` flag, which by default is
-set to `NONE`. It can be set to `FULL` or `LITE` mode. `FULL` mode outputs
-information about rule class transitions and attribute transitions including a
-detailed diff of the options before and after the transition. `LITE` mode
-outputs the same information without the options diff.
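-
-For instance, to print a summary of the transitions applied underneath the
-`//x:my_gen` target from the earlier example:
-
-```
-$ bazel cquery "deps(//x:my_gen)" --transitions=lite
-```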
-
-### Protocol message output
-
-```
---output=proto
-```
-
-This option causes the resulting targets to be printed in a binary protocol
-buffer form. The definition of the protocol buffer can be found at
-[src/main/protobuf/analysis_v2.proto](https://github.com/bazelbuild/bazel/blob/master/src/main/protobuf/analysis_v2.proto).
-
-`CqueryResult` is the top level message containing the results of the cquery. It
-has a list of `ConfiguredTarget` messages and a list of `Configuration`
-messages. Each `ConfiguredTarget` has a `configuration_id` whose value is equal
-to that of the `id` field from the corresponding `Configuration` message.
-
-#### --[no]proto:include_configurations
-
-By default, cquery results return configuration information as part of each
-configured target. If you'd like to omit this information and get proto output
-that is formatted exactly like query's proto output, set this flag to false.
-
-See [query's proto output documentation](/query/language#output-formats)
-for more proto output-related options.
-
-Note: While selects are resolved both at the top level of returned
-targets and within attributes, all possible inputs for selects are still
-included as `rule_input` fields.
-
-### Graph output
-
-```
---output=graph
-```
-
-This option generates output as a Graphviz-compatible .dot file. See `query`'s
-[graph output documentation](/query/language#display-result-graph) for details. `cquery`
-also supports [`--graph:node_limit`](/query/language#graph-nodelimit) and
-[`--graph:factored`](/query/language#graph-factored).
-
-### Files output
-
-```
---output=files
-```
-
-This option prints a list of the output files produced by each target matched
-by the query, similar to the list printed at the end of a `bazel build`
-invocation. The output contains only the files advertised in the requested
-output groups as determined by the
-[`--output_groups`](/reference/command-line-reference#flag--output_groups) flag.
-It does include source files.
-
-All paths emitted by this output format are relative to the
-[execroot](https://bazel.build/remote/output-directories), which can be obtained
-via `bazel info execution_root`. If the `bazel-out` convenience symlink exists,
-paths to files in the main repository also resolve relative to the workspace
-directory.
-
-Note: The output of `bazel cquery --output=files //pkg:foo` contains the output
-files of `//pkg:foo` in *all* configurations that occur in the build (also see
-the [section on target pattern evaluation](#target-pattern-evaluation)). If that
-is not desired, wrap your query in [`config(..., target)`](#config).
-
-### Defining the output format using Starlark
-
-```
---output=starlark
-```
-
-This output format calls a [Starlark](/rules/language)
-function for each configured target in the query result, and prints the value
-returned by the call. The `--starlark:file` flag specifies the location of a
-Starlark file that defines a function named `format` with a single parameter,
-`target`. This function is called for each [Target](/rules/lib/builtins/Target)
-in the query result. Alternatively, for convenience, you may specify just the
-body of a function declared as `def format(target): return expr` by using the
-`--starlark:expr` flag.
-
-#### 'cquery' Starlark dialect
-
-The cquery Starlark environment differs from a BUILD or .bzl file. It includes
-all core Starlark
-[built-in constants and functions](https://github.com/bazelbuild/starlark/blob/master/spec.md#built-in-constants-and-functions),
-plus a few cquery-specific ones described below, but not (for example) `glob`,
-`native`, or `rule`, and it does not support load statements.
-
-##### build_options(target)
-
-`build_options(target)` returns a map whose keys are build option identifiers (see
-[Configurations](/extending/config))
-and whose values are their Starlark values. Build options whose values are not legal Starlark
-values are omitted from this map.
-
-If the target is an input file, `build_options(target)` returns None, as input file
-targets have a null configuration.
-
-##### providers(target)
-
-`providers(target)` returns a map whose keys are names of
-[providers](/extending/rules#providers)
-(for example, `"DefaultInfo"`) and whose values are their Starlark values. Providers
-whose values are not legal Starlark values are omitted from this map.
-
-#### Examples
-
-Print a space-separated list of the base names of all files produced by `//foo`:
-
-```
-  bazel cquery //foo --output=starlark \
-    --starlark:expr="' '.join([f.basename for f in target.files.to_list()])"
-```
-
-Print a space-separated list of the paths of all files produced by **rule** targets in
-`//bar` and its subpackages:
-
-```
-  bazel cquery 'kind(rule, //bar/...)' --output=starlark \
-    --starlark:expr="' '.join([f.path for f in target.files.to_list()])"
-```
-
-Print a list of the mnemonics of all actions registered by `//foo`:
-
-```
-  bazel cquery //foo --output=starlark \
-    --starlark:expr="[a.mnemonic for a in target.actions]"
-```
-
-Print a list of compilation outputs registered by a `cc_library` `//baz`:
-
-```
-  bazel cquery //baz --output=starlark \
-    --starlark:expr="[f.path for f in target.output_groups.compilation_outputs.to_list()]"
-```
-
-Print the value of the command line option `--javacopt` when building `//foo`:
-
-```
-  bazel cquery //foo --output=starlark \
-    --starlark:expr="build_options(target)['//command_line_option:javacopt']"
-```
-
-Print the label of each target with exactly one output. This example uses
-Starlark functions defined in a file.
-
-```
-  $ cat example.cquery
-
-  def has_one_output(target):
-    return len(target.files.to_list()) == 1
-
-  def format(target):
-    if has_one_output(target):
-      return target.label
-    else:
-      return ""
-
-  $ bazel cquery //baz --output=starlark --starlark:file=example.cquery
-```
-
-Print the label of each target which is strictly Python 3. This example uses
-Starlark functions defined in a file.
-
-```
-  $ cat example.cquery
-
-  def format(target):
-    p = providers(target)
-    py_info = p.get("PyInfo")
-    if py_info and py_info.has_py3_only_sources:
-      return target.label
-    else:
-      return ""
-
-  $ bazel cquery //baz --output=starlark --starlark:file=example.cquery
-```
-
-Extract a value from a user-defined provider:
-
-```
-  $ cat some_package/my_rule.bzl
-
-  MyRuleInfo = provider(fields={"color": "the name of a color"})
-
-  def _my_rule_impl(ctx):
-    ...
-    return [MyRuleInfo(color="red")]
-
-  my_rule = rule(
-    implementation = _my_rule_impl,
-    attrs = {...},
-  )
-
-  $ cat example.cquery
-
-  def format(target):
-    p = providers(target)
-    my_rule_info = p.get("//some_package:my_rule.bzl%MyRuleInfo")
-    if my_rule_info:
-      return my_rule_info.color
-    return ""
-
-  $ bazel cquery //baz --output=starlark --starlark:file=example.cquery
-```
-
-## cquery vs. query
-
-`cquery` and `query` complement each other and excel in
-different niches. Consider the following to decide which is right for you:
-
-* `cquery` follows specific `select()` branches to
-  model the exact graph you build. `query` doesn't know which
-  branch the build chooses, so overapproximates by including all branches.
-* `cquery`'s precision requires building more of the graph than
-  `query` does. Specifically, `cquery`
-  evaluates _configured targets_ while `query` only
-  evaluates _targets_. This takes more time and uses more memory.
-* `cquery`'s interpretation of
-  the [query language](/query/language) introduces ambiguity
-  that `query` avoids. For example,
-  if `"//foo"` exists in two configurations, which one
-  should `cquery "deps(//foo)"` use?
-  The [`config`](#config) function can help with this.
-* As a newer tool, `cquery` lacks support for certain use
-  cases. See [Known issues](#known-issues) for details.
-
-## Known issues
-
-**All targets that `cquery` "builds" must have the same configuration.**
-
-Before evaluating queries, `cquery` triggers a build up to just
-before the point where build actions would execute. The targets it
-"builds" are by default selected from all labels that appear in the query
-expression (this can be overridden
-with [`--universe_scope`](#universe-scope)). These
-must have the same configuration.
-
-While these generally share the top-level "target" configuration,
-rules can change their own configuration with
-[incoming edge transitions](/extending/config#incoming-edge-transitions).
-This is where `cquery` falls short.

-Workaround: If possible, set `--universe_scope` to a stricter
-scope. For example:
-
-```
-# This command attempts to build the transitive closures of both //foo and
-# //bar. //bar uses an incoming edge transition to change its --cpu flag.
-$ bazel cquery 'somepath(//foo, //bar)'
-ERROR: Error doing post analysis query: Top-level targets //foo and //bar
-have different configurations (top-level targets with different
-configurations is not supported)
-
-# This command only builds the transitive closure of //foo, under which
-# //bar should exist in the correct configuration.
-$ bazel cquery 'somepath(//foo, //bar)' --universe_scope=//foo
-```
-
-**No support for [`--output=xml`](/query/language#xml).**
-
-**Non-deterministic output.**
-
-`cquery` does not automatically wipe the build graph from
-previous commands and is therefore prone to picking up results from past
-queries. For example, `genrule` exerts an exec transition on
-its `tools` attribute - that is, it configures its tools in the
-[exec configuration](/extending/rules#configurations).
-
-You can see the lingering effects of that transition below.
-
-```
-$ cat > foo/BUILD <<EOF
-genrule(
-    name = "my_gen",
-    srcs = ["x.in"],
-    outs = ["x.cc"],
-    cmd = "$(locations :tool) $< >$@",
-    tools = [":tool"],
-)
-cc_library(
-    name = "tool",
-)
-EOF
-
-$ bazel cquery "//foo:tool"
-tool(target_config)
-
-$ bazel cquery "deps(//foo:my_gen)"
-my_gen (target_config)
-tool (exec_config)
-...
-
-$ bazel cquery "//foo:tool"
-tool(exec_config)
-```
-
-Workaround: change any startup option to force re-analysis of configured targets.
-For example, add `--test_arg=` to your build command.
-
-## Troubleshooting
-
-### Recursive target patterns (`/...`)
-
-If you encounter:
-
-```
-$ bazel cquery --universe_scope=//foo:app "somepath(//foo:app, //foo/...)"
-ERROR: Error doing post analysis query: Evaluation failed: Unable to load package '[foo]'
-because package is not in scope. Check that all target patterns in query expression are within the
---universe_scope of this query.
-```
-
-this incorrectly suggests package `//foo` isn't in scope even though
-`--universe_scope=//foo:app` includes it. This is due to design limitations in
-`cquery`.
As a workaround, explicitly include `//foo/...` in the universe -scope: - -``` -$ bazel cquery --universe_scope=//foo:app,//foo/... "somepath(//foo:app, //foo/...)" -``` - -If that doesn't work (for example, because some target in `//foo/...` can't -build with the chosen build flags), manually unwrap the pattern into its -constituent packages with a pre-processing query: - -``` -# Replace "//foo/..." with a subshell query call (not cquery!) outputting each package, piped into -# a sed call converting "<pkg>" to "//<pkg>:*", piped into a "+"-delimited line merge. -# Output looks like "//foo:*+//foo/bar:*+//foo/baz". -# -$ bazel cquery --universe_scope=//foo:app "somepath(//foo:app, $(bazel query //foo/... ---output=package | sed -e 's/^/\/\//' -e 's/$/:*/' | paste -sd "+" -))" -``` diff --git a/8.0.1/reference/glossary.mdx b/8.0.1/reference/glossary.mdx deleted file mode 100644 index 3b0b497..0000000 --- a/8.0.1/reference/glossary.mdx +++ /dev/null @@ -1,715 +0,0 @@ ---- -title: 'Bazel Glossary' ---- - - - -### Action - -A command to run during the build, for example, a call to a compiler that takes -[artifacts](#artifact) as inputs and produces other artifacts as outputs. -Includes metadata like the command line arguments, action key, environment -variables, and declared input/output artifacts. - -**See also:** [Rules documentation](/extending/rules#actions) - -### Action cache - -An on-disk cache that stores a mapping of executed [actions](#action) to the -outputs they created. The cache key is known as the [action key](#action-key). A -core component for Bazel's incrementality model. The cache is stored in the -output base directory and thus survives Bazel server restarts. - -### Action graph - -An in-memory graph of [actions](#action) and the [artifacts](#artifact) that -these actions read and generate. The graph might include artifacts that exist as -source files (for example, in the file system) as well as generated -intermediate/final artifacts that are not mentioned in `BUILD` files. Produced -during the [analysis phase](#analysis-phase) and used during the [execution -phase](#execution-phase). - -### Action graph query (aquery) - -A [query](#query-concept) tool that can query over build [actions](#action). -This provides the ability to analyze how [build rules](#rule) translate into the -actual work builds do. - -### Action key - -The cache key of an [action](#action). Computed based on action metadata, which -might include the command to be executed in the action, compiler flags, library -locations, or system headers, depending on the action. Enables Bazel to cache or -invalidate individual actions deterministically. - -### Analysis phase - -The second phase of a build. Processes the [target graph](#target-graph) -specified in [`BUILD` files](#build-file) to produce an in-memory [action -graph](#action-graph) that determines the order of actions to run during the -[execution phase](#execution-phase). This is the phase in which rule -implementations are evaluated. - -### Artifact - -A source file or a generated file. Can also be a directory of files, known as -[tree artifacts](#tree-artifact). - -An artifact may be an input to multiple actions, but must only be generated by -at most one action. - -An artifact that corresponds to a [file target](#target) can be addressed by a -label. - -### Aspect - -A mechanism for rules to create additional [actions](#action) in their -dependencies. 
For example, if target A depends on B, one can apply an aspect on -A that traverses *up* a dependency edge to B, and runs additional actions in B -to generate and collect additional output files. These additional actions are -cached and reused between targets requiring the same aspect. Created with the -`aspect()` Starlark Build API function. Can be used, for example, to generate -metadata for IDEs, and create actions for linting. - -**See also:** [Aspects documentation](/extending/aspects) - -### Aspect-on-aspect - -A composition mechanism whereby aspects can be applied to the results -of other aspects. For example, an aspect that generates information for use by -IDEs can be applied on top of an aspect that generates `.java` files from a -proto. - -For an aspect `A` to apply on top of aspect `B`, the [providers](#provider) that -`B` advertises in its [`provides`](/rules/lib/globals#aspect.provides) attribute -must match what `A` declares it wants in its [`required_aspect_providers`](/rules/lib/globals#aspect.required_aspect_providers) -attribute. - -### Attribute - -A parameter to a [rule](#rule), used to express per-target build information. -Examples include `srcs`, `deps`, and `copts`, which respectively declare a -target's source files, dependencies, and custom compiler options. The particular -attributes available for a given target depend on its rule type. - -### .bazelrc - -Bazel’s configuration file used to change the default values for [startup -flags](#startup-flags) and [command flags](#command-flags), and to define common -groups of options that can then be set together on the Bazel command line using -a `--config` flag. Bazel can combine settings from multiple bazelrc files -(systemwide, per-workspace, per-user, or from a custom location), and a -`bazelrc` file may also import settings from other `bazelrc` files. - -### Blaze - -The Google-internal version of Bazel. Google’s main build system for its -mono-repository. - -### BUILD File - -A `BUILD` file is the main configuration file that tells Bazel what software -outputs to build, what their dependencies are, and how to build them. Bazel -takes a `BUILD` file as input and uses the file to create a graph of dependencies -and to derive the actions that must be completed to build intermediate and final -software outputs. A `BUILD` file marks a directory and any sub-directories not -containing a `BUILD` file as a [package](#package), and can contain -[targets](#target) created by [rules](#rule). The file can also be named -`BUILD.bazel`. - -### BUILD.bazel File - -See [`BUILD` File](#build-file). Takes precedence over a `BUILD` file in the same -directory. - -### .bzl File - -A file that defines rules, [macros](#macro), and constants written in -[Starlark](#starlark). These can then be imported into [`BUILD` -files](#build-file) using the `load()` function. - -// TODO: ### Build event protocol - -// TODO: ### Build flag - -### Build graph - -The dependency graph that Bazel constructs and traverses to perform a build. -Includes nodes like [targets](#target), [configured -targets](#configured-target), [actions](#action), and [artifacts](#artifact). A -build is considered complete when all [artifacts](#artifact) on which a set of -requested targets depend are verified as up-to-date. - -### Build setting - -A Starlark-defined piece of [configuration](#configuration). -[Transitions](#transition) can set build settings to change a subgraph's -configuration. 
If exposed to the user as a [command-line flag](#command-flags),
-also known as a build flag.
-
-### Clean build
-
-A build that doesn't use the results of earlier builds. This is generally slower
-than an [incremental build](#incremental-build) but commonly considered to be
-more [correct](#correctness). Bazel guarantees both clean and incremental builds
-are always correct.
-
-### Client-server model
-
-The `bazel` command-line client automatically starts a background server on the
-local machine to execute Bazel [commands](#command). The server persists across
-commands but automatically stops after a period of inactivity (or explicitly via
-`bazel shutdown`). Splitting Bazel into a server and client helps amortize JVM
-startup time and supports faster [incremental builds](#incremental-build)
-because the [action graph](#action-graph) remains in memory across commands.
-
-### Command
-
-Used on the command line to invoke different Bazel functions, like `bazel
-build`, `bazel test`, `bazel run`, and `bazel query`.
-
-### Command flags
-
-A set of flags specific to a [command](#command). Command flags are specified
-*after* the command (`bazel build <command flags>`). Flags can be applicable to
-one or more commands. For example, `--configure` is a flag exclusively for the
-`bazel sync` command, but `--keep_going` is applicable to `sync`, `build`,
-`test` and more. Flags are often used for [configuration](#configuration)
-purposes, so changes in flag values can cause Bazel to invalidate in-memory
-graphs and restart the [analysis phase](#analysis-phase).
-
-### Configuration
-
-Information outside of [rule](#rule) definitions that impacts how rules generate
-[actions](#action). Every build has at least one configuration specifying the
-target platform, action environment variables, and command-line [build
-flags](#command-flags). [Transitions](#transition) may create additional
-configurations, such as for host tools or cross-compilation.
-
-**See also:** [Configurations](/extending/rules#configurations)
-
-// TODO: ### Configuration fragment
-
-### Configuration trimming
-
-The process of only including the pieces of [configuration](#configuration) a
-target actually needs. For example, if you build Java binary `//:j` with C++
-dependency `//:c`, it's wasteful to include the value of `--javacopt` in the
-configuration of `//:c` because changing `--javacopt` unnecessarily breaks C++
-build cacheability.
-
-### Configured query (cquery)
-
-A [query](#query-concept) tool that queries over [configured
-targets](#configured-target) (after the [analysis phase](#analysis-phase)
-completes). This means `select()` and [build flags](#command-flags) (such as
-`--platforms`) are accurately reflected in the results.
-
-**See also:** [cquery documentation](/query/cquery)
-
-### Configured target
-
-The result of evaluating a [target](#target) with a
-[configuration](#configuration). The [analysis phase](#analysis-phase) produces
-this by combining the build's options with the targets that need to be built.
-For example, if `//:foo` builds for two different architectures in the same
-build, it has two configured targets: `<//:foo, x86>` and `<//:foo, arm>`.
-
-### Correctness
-
-A build is correct when its output faithfully reflects the state of its
-transitive inputs. To achieve correct builds, Bazel strives to be
-[hermetic](#hermeticity) and reproducible, and to make [build
-analysis](#analysis-phase) and [action execution](#execution-phase)
-deterministic.
-
-### Dependency
-
-A directed edge between two [targets](#target).
A target `//:foo` has a *target
-dependency* on target `//:bar` if `//:foo`'s attribute values contain a
-reference to `//:bar`. `//:foo` has an *action dependency* on `//:bar` if an
-action in `//:foo` depends on an input [artifact](#artifact) created by an
-action in `//:bar`.
-
-In certain contexts, it could also refer to an _external dependency_; see
-[modules](#module).
-
-### Depset
-
-A data structure for collecting data on transitive dependencies. Optimized so
-that merging depsets is time and space efficient, because it’s common to have
-very large depsets (hundreds of thousands of files). Implemented to
-recursively refer to other depsets for space efficiency reasons. [Rule](#rule)
-implementations should not "flatten" depsets by converting them to lists unless
-the rule is at the top level of the build graph. Flattening large depsets incurs
-huge memory consumption. Also known as *nested sets* in Bazel's internal
-implementation.
-
-**See also:** [Depset documentation](/extending/depsets)
-
-### Disk cache
-
-A local on-disk blob store for the remote caching feature. Can be used in
-conjunction with an actual remote blob store.
-
-### Distdir
-
-A read-only directory containing files that Bazel would otherwise fetch from the
-internet using repository rules. Enables builds to run fully offline.
-
-### Dynamic execution
-
-An execution strategy that selects between local and remote execution based on
-various heuristics, and uses the execution results of the faster successful
-method. Certain [actions](#action) are executed faster locally (for example,
-linking) and others are faster remotely (for example, highly parallelizable
-compilation). A dynamic execution strategy can provide the best possible
-incremental and clean build times.
-
-### Execution phase
-
-The third phase of a build. Executes the [actions](#action) in the [action
-graph](#action-graph) created during the [analysis phase](#analysis-phase).
-These actions invoke executables (compilers, scripts) to read and write
-[artifacts](#artifact). *Spawn strategies* control how these actions are
-executed: locally, remotely, dynamically, sandboxed, docker, and so on.
-
-### Execution root
-
-A directory in the [workspace](#workspace)’s [output base](#output-base)
-directory where local [actions](#action) are executed in
-non-[sandboxed](#sandboxing) builds. The directory contents are mostly symlinks
-of input [artifacts](#artifact) from the workspace. The execution root also
-contains symlinks to external repositories as other inputs and the `bazel-out`
-directory to store outputs. Prepared during the [loading phase](#loading-phase)
-by creating a *symlink forest* of the directories that represent the transitive
-closure of packages on which a build depends. Accessible with `bazel info
-execution_root` on the command line.
-
-### File
-
-See [Artifact](#artifact).
-
-### Hermeticity
-
-A build is hermetic if there are no external influences on its build and test
-operations, which helps to make sure that results are deterministic and
-[correct](#correctness). For example, hermetic builds typically disallow network
-access to actions, restrict access to declared inputs, use fixed timestamps and
-timezones, restrict access to environment variables, and use fixed seeds for
-random number generators.
-
-### Incremental build
-
-An incremental build reuses the results of earlier builds to reduce build time
-and resource usage. Dependency checking and caching aim to produce correct
-results for this type of build.
An incremental build is the opposite of a clean -build. - -// TODO: ### Install base - -### Label - -An identifier for a [target](#target). Generally has the form -`@repo//path/to/package:target`, where `repo` is the (apparent) name of the -[repository](#repository) containing the target, `path/to/package` is the path -to the directory that contains the [`BUILD` file](#build-file) declaring the -target (this directory is also known as the [package](#package)), and `target` -is the name of the target itself. Depending on the situation, parts of this -syntax may be omitted. - -**See also**: [Labels](/concepts/labels) - -### Loading phase - -The first phase of a build where Bazel executes [`BUILD` files](#build-file) to -create [packages](#package). [Macros](#macro) and certain functions like -`glob()` are evaluated in this phase. Interleaved with the second phase of the -build, the [analysis phase](#analysis-phase), to build up a [target -graph](#target-graph). - -### Legacy macro - -A flavor of [macro](#macro) which is declared as an ordinary -[Starlark](#starlark) function, and which runs as a side effect of executing a -`BUILD` file. - -Legacy macros can do anything a function can. This means they can be convenient, -but they can also be harder to read, write, and use. A legacy macro might -unexpectedly mutate its arguments or fail when given a `select()` or ill-typed -argument. - -Contrast with [symbolic macros](#symbolic-macro). - -**See also:** [Legacy macro documentation](/extending/legacy-macros) - -### Macro - -A mechanism to compose multiple [rule](#rule) target declarations together under -a single [Starlark](#starlark) callable. Enables reusing common rule declaration -patterns across `BUILD` files. Expanded to the underlying rule target -declarations during the [loading phase](#loading-phase). - -Comes in two flavors: [symbolic macros](#symbolic-macro) (since Bazel 8) and -[legacy macros](#legacy-macro). - -### Mnemonic - -A short, human-readable string selected by a rule author to quickly understand -what an [action](#action) in the rule is doing. Mnemonics can be used as -identifiers for *spawn strategy* selections. Some examples of action mnemonics -are `Javac` from Java rules, `CppCompile` from C++ rules, and -`AndroidManifestMerger` from Android rules. - -### Module - -A Bazel project that can have multiple versions, each of which can have -dependencies on other modules. This is analogous to familiar concepts in other -dependency management systems, such as a Maven _artifact_, an npm _package_, a -Go _module_, or a Cargo _crate_. Modules form the backbone of Bazel's external -dependency management system. - -Each module is backed by a [repo](#repository) with a `MODULE.bazel` file at its -root. This file contains metadata about the module itself (such as its name and -version), its direct dependencies, and various other data including toolchain -registrations and [module extension](#module-extension) input. - -Module metadata is hosted in Bazel registries. - -**See also:** [Bazel modules](/external/module) - -### Module Extension - -A piece of logic that can be run to generate [repos](#repository) by reading -inputs from across the [module](#module) dependency graph and invoking [repo -rules](#repository-rule). Module extensions have capabilities similar to repo -rules, allowing them to access the internet, perform file I/O, and so on. 
-
-**See also:** [Module extensions](/external/extension)
-
-### Native rules
-
-[Rules](#rule) that are built into Bazel and implemented in Java. Such rules
-appear in [`.bzl` files](#bzl-file) as functions in the native module (for
-example, `native.cc_library` or `native.java_library`). User-defined rules
-(non-native) are created using [Starlark](#starlark).
-
-### Output base
-
-A [workspace](#workspace)-specific directory to store Bazel output files. Used
-to separate outputs from the *workspace*'s source tree (the [main
-repo](#repository)). Located in the [output user root](#output-user-root).
-
-### Output groups
-
-A group of files that is expected to be built when Bazel finishes building a
-target. [Rules](#rule) put their usual outputs in the "default output group"
-(e.g. the `.jar` file of a `java_library`, `.a` and `.so` for `cc_library`
-targets). The default output group is the output group whose
-[artifacts](#artifact) are built when a target is requested on the command line.
-Rules can define more named output groups that can be explicitly specified in
-[`BUILD` files](#build-file) (`filegroup` rule) or the command line
-(`--output_groups` flag).
-
-### Output user root
-
-A user-specific directory to store Bazel's outputs. The directory name is
-derived from the user's system username. Prevents output file collisions if
-multiple users are building the same project on the system at the same time.
-Contains subdirectories corresponding to build outputs of individual workspaces,
-also known as [output bases](#output-base).
-
-### Package
-
-The set of [targets](#target) defined by a [`BUILD` file](#build-file). A
-package's name is the `BUILD` file's path relative to the [repo](#repository)
-root. A package can contain subpackages, or subdirectories containing `BUILD`
-files, thus forming a package hierarchy.
-
-### Package group
-
-A [target](#target) representing a set of packages. Often used in `visibility`
-attribute values.
-
-### Platform
-
-A "machine type" involved in a build. This includes the machine Bazel runs on
-(the "host" platform), the machines build tools execute on ("exec" platforms),
-and the machines targets are built for ("target platforms").
-
-### Provider
-
-A schema describing a unit of information to pass between
-[rule targets](#rule-target) along dependency relationships. Typically this
-contains information like compiler options, transitive source or output files,
-and build metadata. Frequently used in conjunction with [depsets](#depset) to
-efficiently store accumulated transitive data. An example of a built-in provider
-is `DefaultInfo`.
-
-Note: The object holding specific data for a given rule target is
-referred to as a "provider instance", although sometimes this is conflated with
-"provider".
-
-**See also:** [Provider documentation](/extending/rules#providers)
-
-### Query (concept)
-
-The process of analyzing a [build graph](#build-graph) to understand
-[target](#target) properties and dependency structures. Bazel supports three
-query variants: [query](#query-command), [cquery](#configured-query), and
-[aquery](#action-graph-query).
-
-### query (command)
-
-A [query](#query-concept) tool that operates over the build's post-[loading
-phase](#loading-phase) [target graph](#target-graph). This is relatively fast,
-but can't analyze the effects of `select()`, [build flags](#command-flags),
-[artifacts](#artifact), or build [actions](#action).
- -**See also:** [Query how-to](/query/guide), [Query reference](/query/language) - -### Repository - -A directory tree with a boundary marker file at its root, containing source -files that can be used in a Bazel build. Often shortened to just **repo**. - -A repo boundary marker file can be `MODULE.bazel` (signaling that this repo -represents a Bazel module), `REPO.bazel`, or in legacy contexts, `WORKSPACE` or -`WORKSPACE.bazel`. Any repo boundary marker file will signify the boundary of a -repo; multiple such files can coexist in a directory. - -The *main repo* is the repo in which the current Bazel command is being run. - -*External repos* are defined by specifying [modules](#module) in `MODULE.bazel` -files, or invoking [repo rules](#repository-rule) in [module -extensions](#module-extension). They can be fetched on demand to a predetermined -"magical" location on disk. - -Each repo has a unique, constant *canonical* name, and potentially different -*apparent* names when viewed from other repos. - -**See also**: [External dependencies overview](/external/overview) - -### Repository cache - -A shared content-addressable cache of files downloaded by Bazel for builds, -shareable across [workspaces](#workspace). Enables offline builds after the -initial download. Commonly used to cache files downloaded through [repository -rules](#repository-rule) like `http_archive` and repository rule APIs like -`repository_ctx.download`. Files are cached only if their SHA-256 checksums are -specified for the download. - -### Repository rule - -A schema for repository definitions that tells Bazel how to materialize (or -"fetch") a [repository](#repository). Often shortened to just **repo rule**. -Repo rules are invoked by Bazel internally to define repos backed by -[modules](#module), or can be invoked by [module extensions](#module-extension). -Repo rules can access the internet or perform file I/O; the most common repo -rule is `http_archive` to download an archive containing source files from the -internet. - -**See also:** [Repo rule documentation](/extending/repo) - -### Reproducibility - -The property of a build or test that a set of inputs to the build or test will -always produce the same set of outputs every time, regardless of time, method, -or environment. Note that this does not necessarily imply that the outputs are -[correct](#correctness) or the desired outputs. - -### Rule - -A schema for defining [rule targets](#rule-target) in a `BUILD` file, such as -`cc_library`. From the perspective of a `BUILD` file author, a rule consists of -a set of [attributes](#attributes) and black box logic. The logic tells the -rule target how to produce output [artifacts](#artifact) and pass information to -other rule targets. From the perspective of `.bzl` authors, rules are the -primary way to extend Bazel to support new programming languages and -environments. - -Rules are instantiated to produce rule targets in the -[loading phase](#loading-phase). In the [analysis phase](#analysis-phase) rule -targets communicate information to their downstream dependencies in the form of -[providers](#provider), and register [actions](#action) describing how to -generate their output artifacts. These actions are run in the [execution -phase](#execution-phase). - -Note: Historically the term "rule" has been used to refer to a rule target. -This usage was inherited from tools like Make, but causes confusion and should -be avoided for Bazel. 
-
-**See also:** [Rules documentation](/extending/rules)
-
-### Rule target
-
-A [target](#target) that is an instance of a rule. Contrasts with file targets
-and package groups. Not to be confused with [rule](#rule).
-
-### Runfiles
-
-The runtime dependencies of an executable [target](#target). Most commonly, the
-executable is the executable output of a test rule, and the runfiles are runtime
-data dependencies of the test. Before the invocation of the executable (during
-bazel test), Bazel prepares the tree of runfiles alongside the test executable
-according to their source directory structure.
-
-**See also:** [Runfiles documentation](/extending/rules#runfiles)
-
-### Sandboxing
-
-A technique to isolate a running [action](#action) inside a restricted and
-temporary [execution root](#execution-root), helping to ensure that it doesn’t
-read undeclared inputs or write undeclared outputs. Sandboxing greatly improves
-[hermeticity](#hermeticity), but usually has a performance cost, and requires
-support from the operating system. The performance cost depends on the platform.
-On Linux, it's not significant, but on macOS it can make sandboxing unusable.
-
-### Skyframe
-
-[Skyframe](/reference/skyframe) is the core parallel, functional, and incremental evaluation framework of Bazel.
-
-// TODO: ### Spawn strategy
-
-### Stamping
-
-A feature to embed additional information into Bazel-built
-[artifacts](#artifact). For example, this can be used to embed source control,
-build time, and other workspace- or environment-related information in release
-builds. Enabled through the `--workspace_status_command` flag and [rules](/extending/rules) that
-support the stamp attribute.
-
-### Starlark
-
-The extension language for writing [rules](/extending/rules) and [macros](#macro). A
-restricted subset of Python (syntactically and grammatically) aimed at
-configuration and at better performance. Uses the [`.bzl`
-file](#bzl-file) extension. [`BUILD` files](#build-file) use an even more
-restricted version of Starlark (such as no `def` function definitions). Starlark
-was formerly known as Skylark.
-
-**See also:** [Starlark language documentation](/rules/language)
-
-// TODO: ### Starlark rules
-
-// TODO: ### Starlark rule sandwich
-
-### Startup flags
-
-The set of flags specified between `bazel` and the [command](#query-command),
-for example, `bazel --host_jvm_debug build`. These flags modify the
-[configuration](#configuration) of the Bazel server, so any modification to
-startup flags causes a server restart. Startup flags are not specific to any
-command.
-
-### Symbolic macro
-
-A flavor of [macro](#macro) which is declared with a [rule](#rule)-like
-[attribute](#attribute) schema, allows hiding internally declared
-[targets](#target) from their own package, and enforces a predictable naming
-pattern on the targets that the macro declares. Designed to avoid some of the
-problems seen in large [legacy macro](#legacy-macro) codebases.
-
-**See also:** [Symbolic macro documentation](/extending/macros)
-
-### Target
-
-An object that is defined in a [`BUILD` file](#build-file) and identified by a
-[label](#label). Targets represent the buildable units of a workspace from
-the perspective of the end user.
-
-A target that is declared by instantiating a [rule](#rule) is called a [rule
-target](#rule-target). Depending on the rule, these may be runnable (like
-`cc_binary`) or testable (like `cc_test`).
Rule targets typically depend on
-other targets via their [attributes](#attribute) (such as `deps`); these
-dependencies form the basis of the [target graph](#target-graph).
-
-Aside from rule targets, there are also file targets and [package group](#package-group)
-targets. File targets correspond to [artifacts](#artifact) that are referenced
-within a `BUILD` file. As a special case, the `BUILD` file of any package is
-always considered a source file target in that package.
-
-Targets are discovered during the [loading phase](#loading-phase). During the
-[analysis phase](#analysis-phase), targets are associated with [build
-configurations](#configuration) to form [configured
-targets](#configured-target).
-
-### Target graph
-
-An in-memory graph of [targets](#target) and their dependencies. Produced during
-the [loading phase](#loading-phase) and used as an input to the [analysis
-phase](#analysis-phase).
-
-### Target pattern
-
-A way to specify a group of [targets](#target) on the command line. Commonly
-used patterns are `:all` (all rule targets), `:*` (all rule + file targets),
-`...` (current [package](#package) and all subpackages recursively). Can be used
-in combination, for example, `//...:*` means all rule and file targets in all
-packages recursively from the root of the [workspace](#workspace).
-
-### Tests
-
-Rule [targets](#target) instantiated from test rules, which therefore contain a
-test executable. A return code of zero from the completion of the executable
-indicates test success. The exact contract between Bazel and tests (such as test
-environment variables, test result collection methods) is specified in the [Test
-Encyclopedia](/reference/test-encyclopedia).
-
-### Toolchain
-
-A set of tools to build outputs for a language. Typically, a toolchain includes
-compilers, linkers, interpreters, and/or linters. A toolchain can also vary by
-platform, that is, a Unix compiler toolchain's components may differ for the
-Windows variant, even though the toolchain is for the same language. Selecting
-the right toolchain for the platform is known as toolchain resolution.
-
-### Top-level target
-
-A build [target](#target) is top-level if it’s requested on the Bazel command
-line. For example, if `//:foo` depends on `//:bar`, and `bazel build //:foo` is
-called, then for this build, `//:foo` is top-level, and `//:bar` isn’t
-top-level, although both targets will need to be built. An important difference
-between top-level and non-top-level targets is that [command
-flags](#command-flags) set on the Bazel command line (or via
-[.bazelrc](#bazelrc)) will set the [configuration](#configuration) for top-level
-targets, but might be modified by a [transition](#transition) for non-top-level
-targets.
-
-### Transition
-
-A mapping of [configuration](#configuration) state from one value to another.
-Enables [targets](#target) in the [build graph](#build-graph) to have different
-configurations, even if they were instantiated from the same [rule](#rule). A
-common usage of transitions is with *split* transitions, where certain parts of
-the [target graph](#target-graph) are forked with distinct configurations for
-each fork. For example, one can build an Android APK with native binaries
-compiled for ARM and x86 using split transitions in a single build.
-
-**See also:** [User-defined transitions](/extending/config#user-defined-transitions)
-
-### Tree artifact
-
-An [artifact](#artifact) that represents a collection of files.
Since these -files are not themselves artifacts, an [action](#action) operating on them must -instead register the tree artifact as its input or output. - -### Visibility - -One of two mechanisms for preventing unwanted dependencies in the build system: -*target visibility* for controlling whether a [target](#target) can be depended -upon by other targets; and *load visibility* for controlling whether a `BUILD` -or `.bzl` file may load a given `.bzl` file. Without context, usually -"visibility" refers to target visibility. - -**See also:** [Visibility documentation](/concepts/visibility) - -### Workspace - -The environment shared by all Bazel commands run from the same [main -repository](#repository). - -Note that historically the concepts of "repository" and "workspace" have been -conflated; the term "workspace" has often been used to refer to the main -repository, and sometimes even used as a synonym of "repository". Such usage -should be avoided for clarity. diff --git a/8.0.1/reference/skyframe.mdx b/8.0.1/reference/skyframe.mdx deleted file mode 100644 index ba9149f..0000000 --- a/8.0.1/reference/skyframe.mdx +++ /dev/null @@ -1,198 +0,0 @@ ---- -title: 'Skyframe' ---- - - - -The parallel evaluation and incrementality model of Bazel. - -## Data model - -The data model consists of the following items: - -* `SkyValue`. Also called nodes. `SkyValues` are immutable objects that - contain all the data built over the course of the build and the inputs of - the build. Examples are: input files, output files, targets and configured - targets. -* `SkyKey`. A short immutable name to reference a `SkyValue`, for example, - `FILECONTENTS:/tmp/foo` or `PACKAGE://foo`. -* `SkyFunction`. Builds nodes based on their keys and dependent nodes. -* Node graph. A data structure containing the dependency relationship between - nodes. -* `Skyframe`. Code name for the incremental evaluation framework Bazel is - based on. - -## Evaluation - -A build is achieved by evaluating the node that represents the build request. - -First, Bazel finds the `SkyFunction` corresponding to the key of the top-level -`SkyKey`. The function then requests the evaluation of the nodes it needs to -evaluate the top-level node, which in turn result in other `SkyFunction` calls, -until the leaf nodes are reached. Leaf nodes are usually ones that represent -input files in the file system. Finally, Bazel ends up with the value of the -top-level `SkyValue`, some side effects (such as output files in the file -system) and a directed acyclic graph of the dependencies between the nodes -involved in the build. - -A `SkyFunction` can request `SkyKeys` in multiple passes if it cannot tell in -advance all of the nodes it needs to do its job. A simple example is evaluating -an input file node that turns out to be a symlink: the function tries to read -the file, realizes that it is a symlink, and thus fetches the file system node -representing the target of the symlink. But that itself can be a symlink, in -which case the original function will need to fetch its target, too. - -The functions are represented in the code by the interface `SkyFunction` and the -services provided to it by an interface called `SkyFunction.Environment`. These -are the things functions can do: - -* Request the evaluation of another node by way of calling `env.getValue`. If - the node is available, its value is returned, otherwise, `null` is returned - and the function itself is expected to return `null`. 
In the latter case,
-  the dependent node is evaluated, and then the original node builder is
-  invoked again, but this time the same `env.getValue` call will return a
-  non-`null` value.
-* Request the evaluation of multiple other nodes by calling `env.getValues()`.
-  This does essentially the same thing, except that the dependent nodes are
-  evaluated in parallel.
-* Do computation during their invocation.
-* Have side effects, for example, writing files to the file system. Care needs
-  to be taken that two different functions avoid stepping on each other's
-  toes. In general, write side effects (where data flows outwards from Bazel)
-  are okay, read side effects (where data flows inwards into Bazel without a
-  registered dependency) are not, because they are an unregistered dependency
-  and as such, can cause incorrect incremental builds.
-
-Well-behaved `SkyFunction` implementations avoid accessing data in any other way
-than requesting dependencies (such as by directly reading the file system),
-because that results in Bazel not registering the data dependency on the file
-that was read, thus resulting in incorrect incremental builds.
-
-Once a function has enough data to do its job, it should return a non-`null`
-value indicating completion.
-
-This evaluation strategy has a number of benefits:
-
-* Hermeticity. If functions only request input data by way of depending on
-  other nodes, Bazel can guarantee that if the input state is the same, the
-  same data is returned. If all sky functions are deterministic, this means
-  that the whole build will also be deterministic.
-* Correct and perfect incrementality. If all the input data of all functions
-  is recorded, Bazel can invalidate only the exact set of nodes that need to
-  be invalidated when the input data changes.
-* Parallelism. Since functions can only interact with each other by way of
-  requesting dependencies, functions that don't depend on each other can be
-  run in parallel and Bazel can guarantee that the result is the same as if
-  they were run sequentially.
-
-## Incrementality
-
-Since functions can only access input data by depending on other nodes, Bazel
-can build up a complete data flow graph from the input files to the output
-files, and use this information to only rebuild those nodes that actually need
-to be rebuilt: the reverse transitive closure of the set of changed input files.
-
-In particular, two possible incrementality strategies exist: the bottom-up one
-and the top-down one. Which one is optimal depends on what the dependency graph
-looks like.
-
-* During bottom-up invalidation, after a graph is built and the set of changed
-  inputs is known, all nodes that transitively depend on the changed files are
-  invalidated. This is optimal if the same top-level node will be built
-  again. Note that bottom-up invalidation requires running `stat()` on all
-  input files of the previous build to determine if they were changed. This
-  can be improved by using `inotify` or a similar mechanism to learn about
-  changed files.
-
-* During top-down invalidation, the transitive closure of the top-level node
-  is checked and only those nodes are kept whose transitive closure is clean.
-  This is better if the node graph is large, but the next build only needs a
-  small subset of it: bottom-up invalidation would invalidate the larger graph
-  of the first build, unlike top-down invalidation, which just walks the small
-  graph of the second build.
-
-Bazel only does bottom-up invalidation.
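-
-As an illustration, bottom-up invalidation amounts to a reverse graph
-traversal. The following sketch (not Bazel code; it assumes the node graph is
-available as a map from each node to the set of nodes that directly depend on
-it) computes the set of nodes to invalidate:
-
-```python
-from collections import deque
-
-def invalidate_bottom_up(reverse_deps, changed_inputs):
-    """Returns all nodes that transitively depend on the changed inputs."""
-    invalidated = set(changed_inputs)
-    queue = deque(changed_inputs)
-    while queue:
-        node = queue.popleft()
-        # Walk from each changed node to the nodes that depend on it.
-        for parent in reverse_deps.get(node, ()):
-            if parent not in invalidated:
-                invalidated.add(parent)
-                queue.append(parent)
-    return invalidated
-```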
-
-To get further incrementality, Bazel uses _change pruning_: if a node is
-invalidated, but upon rebuild, it is discovered that its new value is the same
-as its old value, the nodes that were invalidated due to a change in this node
-are "resurrected".
-
-This is useful, for example, if one changes a comment in a C++ file: then the
-`.o` file generated from it will be the same, thus, it is unnecessary to call
-the linker again.
-
-## Incremental Linking / Compilation
-
-The main limitation of this model is that the invalidation of a node is an
-all-or-nothing affair: when a dependency changes, the dependent node is always
-rebuilt from scratch, even if a better algorithm would exist that would mutate
-the old value of the node based on the changes. A few examples where this would
-be useful:
-
-* Incremental linking
-* When a single class file changes in a JAR file, it is possible to
-  modify the JAR file in-place instead of building it from scratch again.
-
-The reason why Bazel does not support these things in a principled way
-is twofold:
-
-* The potential performance gains were limited.
-* It is difficult to validate that the result of the mutation is the same as
-  that of a clean rebuild, and Google values builds that are bit-for-bit
-  repeatable.
-
-Until now, it has been possible to achieve good enough performance by
-decomposing an expensive build step and achieving partial re-evaluation that
-way. For example, in an Android app, you can split all the classes into
-multiple groups and dex them separately. This way, if classes in a group are
-unchanged, the dexing does not have to be redone.
-
-## Mapping to Bazel concepts
-
-This is a high-level summary of the key `SkyFunction` and `SkyValue`
-implementations Bazel uses to perform a build:
-
-* **FileStateValue**. The result of an `lstat()`. For existent files, the
-  function also computes additional information in order to detect changes to
-  the file. This is the lowest level node in the Skyframe graph and has no
-  dependencies.
-* **FileValue**. Used by anything that cares about the actual contents or
-  resolved path of a file. Depends on the corresponding `FileStateValue` and
-  any symlinks that need to be resolved (such as the `FileValue` for `a/b`
-  needs the resolved path of `a` and the resolved path of `a/b`). The
-  distinction between `FileValue` and `FileStateValue` is important because
-  the latter can be used in cases where the contents of the file are not
-  actually needed. For example, the file contents are irrelevant when
-  evaluating file system globs (such as `srcs=glob(["*/*.java"])`).
-* **DirectoryListingStateValue**. The result of `readdir()`. Like
-  `FileStateValue`, this is the lowest level node and has no dependencies.
-* **DirectoryListingValue**. Used by anything that cares about the entries of
-  a directory. Depends on the corresponding `DirectoryListingStateValue`, as
-  well as the associated `FileValue` of the directory.
-* **PackageValue**. Represents the parsed version of a BUILD file. Depends on
-  the `FileValue` of the associated `BUILD` file, and also transitively on any
-  `DirectoryListingValue` that is used to resolve the globs in the package
-  (the data structure representing the contents of a `BUILD` file internally).
-* **ConfiguredTargetValue**. Represents a configured target, which is a tuple
-  of the set of actions generated during the analysis of a target and
-  information provided to dependent configured targets.
Depends on the
-  `PackageValue` the corresponding target is in, the `ConfiguredTargetValues`
-  of direct dependencies, and a special node representing the build
-  configuration.
-* **ArtifactValue**. Represents a file in the build, be it a source or an
-  output artifact. Artifacts are almost equivalent to files, and are used to
-  refer to files during the actual execution of build steps. Source files
-  depend on the `FileValue` of the associated node, and output artifacts
-  depend on the `ActionExecutionValue` of whatever action generates the
-  artifact.
-* **ActionExecutionValue**. Represents the execution of an action. Depends on
-  the `ArtifactValues` of its input files. The action it executes is contained
-  within its SkyKey, which is contrary to the concept that SkyKeys should be
-  small. Note that `ActionExecutionValue` and `ArtifactValue` are unused if
-  the execution phase does not run.
-
-As a visual aid, this diagram shows the relationships between
-SkyFunction implementations after a build of Bazel itself:
-
-![A graph of SkyFunction implementation relationships](/reference/skyframe.png)
diff --git a/8.0.1/release/backward-compatibility.mdx b/8.0.1/release/backward-compatibility.mdx
deleted file mode 100644
index af653cc..0000000
--- a/8.0.1/release/backward-compatibility.mdx
+++ /dev/null
@@ -1,76 +0,0 @@
----
-title: 'Backward Compatibility'
----
-
-
-
-This page provides information about how to handle backward compatibility,
-including migrating from one release to another and how to communicate
-incompatible changes.
-
-Bazel is evolving. Minor versions released as part of an [LTS major
-version](/release#bazel-versioning) are fully backward-compatible. New major LTS
-releases may contain incompatible changes that require some migration effort.
-For more information about Bazel's release model, please check out the [Release
-Model](/release) page.
-
-## Summary
-
-1. It is recommended to use `--incompatible_*` flags for breaking changes.
-1. For every `--incompatible_*` flag, a GitHub issue explains the change in
-   behavior and aims to provide a migration recipe.
-1. Incompatible flags are recommended to be back-ported to the latest LTS
-   release without enabling the flag by default.
-1. APIs and behavior guarded by an `--experimental_*` flag can change at any
-   time.
-1. Never run production builds with `--experimental_*` or `--incompatible_*`
-   flags.
-
-## How to follow this policy
-
-* [For Bazel users - how to update Bazel](/install/bazelisk)
-* [For contributors - best practices for incompatible changes](/contribute/breaking-changes)
-* [For release managers - how to update issue labels and release](https://github.com/bazelbuild/continuous-integration/tree/master/docs/release-playbook.md)
-
-## What is stable functionality?
-
-In general, APIs or behaviors without `--experimental_...` flags are considered
-stable, supported features in Bazel.
-
-This includes:
-
-* Starlark language and APIs
-* Rules bundled with Bazel
-* Bazel APIs such as Remote Execution APIs or Build Event Protocol
-* Flags and their semantics
-
-## Incompatible changes and migration recipes
-
-For every incompatible change in a new release, the Bazel team aims to provide a
-_migration recipe_ that helps you update your code (`BUILD` and `.bzl` files, as
-well as any Bazel usage in scripts, usage of Bazel API, and so on).
-
-Incompatible changes should have an associated `--incompatible_*` flag and a
-corresponding GitHub issue.
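-
-For example, to check ahead of time whether a project already builds with a
-given incompatible change enabled, you can pass the corresponding flag
-explicitly (the flag below is just one illustrative member of the
-`--incompatible_*` family):
-
-```bash
-# Dry-run a migration: build everything with one incompatible flag enabled.
-bazel build --incompatible_disallow_empty_glob //...
-```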
-
-The incompatible flag and relevant changes are recommended to be back-ported to
-the latest LTS release without enabling the flag by default. This allows users
-to migrate for the incompatible changes before the next LTS release is
-available.
-
-## Communicating incompatible changes
-
-The primary source of information about incompatible changes is the set of
-GitHub issues marked with an ["incompatible-change"
-label](https://github.com/bazelbuild/bazel/issues?q=label%3Aincompatible-change).
-
-For every incompatible change, the issue specifies the following:
-
-* Name of the flag controlling the incompatible change
-* Description of the changed functionality
-* Migration recipe
-
-When an incompatible change is ready for migration with Bazel at HEAD
-(therefore, also with the next Bazel rolling release), it should be marked with
-the `migration-ready` label. The incompatible change issue is closed when the
-incompatible flag is flipped at HEAD.
diff --git a/8.0.1/release/index.mdx b/8.0.1/release/index.mdx
deleted file mode 100644
index 2956bb9..0000000
--- a/8.0.1/release/index.mdx
+++ /dev/null
@@ -1,215 +0,0 @@
----
-title: 'Release Model'
----
-
-
-
-As announced in [the original blog
-post](https://blog.bazel.build/2020/11/10/long-term-support-release.html), Bazel
-4.0 and higher versions provide support for two release tracks: rolling
-releases and long term support (LTS) releases. This page covers the latest
-information about Bazel's release model.
-
-## Support matrix
-
-| LTS release | Support stage | Latest version | End of support |
-| ----------- | ------------- | -------------- | -------------- |
-| Bazel 8 | Rolling | [Check rolling release page](https://bazel.build/release/rolling) | N/A |
-| Bazel 7 | Active | [7.4.1](https://github.com/bazelbuild/bazel/releases/tag/7.4.1) | Dec 2026 |
-| Bazel 6 | Maintenance | [6.5.0](https://github.com/bazelbuild/bazel/releases/tag/6.5.0) | Dec 2025 |
-| Bazel 5 | Maintenance | [5.4.1](https://github.com/bazelbuild/bazel/releases/tag/5.4.1) | Jan 2025 |
-| Bazel 4 | Deprecated | [4.2.4](https://github.com/bazelbuild/bazel/releases/tag/4.2.4) | Jan 2024 |
-
-All Bazel LTS releases can be found on the [release
-page](https://github.com/bazelbuild/bazel/releases) on GitHub.
-
-Note: Bazel versions older than Bazel 5 are no longer supported. Bazel users are
-recommended to upgrade to the latest LTS release, or to use rolling releases if
-they want to keep up with the latest changes at HEAD.
-
-## Release versioning
-
-Bazel uses a _major.minor.patch_ [Semantic
-Versioning](https://semver.org/) scheme.
-
-* A _major release_ contains features that are not backward compatible with
-  the previous release. Each major Bazel version is an LTS release.
-* A _minor release_ contains backward-compatible bug fixes and features
-  back-ported from the main branch.
-* A _patch release_ contains critical bug fixes.
-
-Additionally, pre-release versions are indicated by appending a hyphen and a
-date suffix to the next major version number.
-
-For example, a new release of each type would result in these version numbers:
-
-* Major: 6.0.0
-* Minor: 6.1.0
-* Patch: 6.1.2
-* Pre-release: 7.0.0-pre.20230502.1
-
-## Support stages
-
-For each major Bazel version, there are four support stages:
-
-* **Rolling**: This major version is still in pre-release; the Bazel team
-  publishes rolling releases from HEAD.
-* **Active**: This major version is the current active LTS release. The Bazel
-  team backports important features and bug fixes into its minor releases.
-* **Maintenance**: This major version is an old LTS release in maintenance
-  mode. The Bazel team only promises to backport critical bug fixes for
-  security issues and OS-compatibility issues into this LTS release.
-* **Deprecated**: The Bazel team no longer provides support for this major
-  version; all users should migrate to newer Bazel LTS releases.
-
-## Release cadence
-
-Bazel regularly publishes releases on two release tracks.
-
-### Rolling releases
-
-* Rolling releases are coordinated with the Google-internal Blaze release and
-  are cut from HEAD around every two weeks. Each rolling release is a preview
-  of the next Bazel LTS release.
-* Rolling releases can ship incompatible changes. Incompatible flags are
-  recommended for major breaking changes, and rolling out incompatible changes
-  should follow our [backward compatibility
-  policy](/release/backward-compatibility).
-
-### LTS releases
-
-* _Major release_: A new LTS release is expected to be cut from HEAD roughly
-  every 12 months. Once a new LTS release is out, it immediately enters the
-  Active stage, and the previous LTS release enters the Maintenance stage.
-* _Minor release_: New minor versions on the Active LTS track are expected to
-  be released once every 2 months.
-* _Patch release_: New patch versions for LTS releases in Active and
-  Maintenance stages are expected to be released on demand for critical bug
-  fixes.
-* A Bazel LTS release enters the Deprecated stage after being in the
-  Maintenance stage for 2 years.
-
-For planned releases, please check our [release
-issues](https://github.com/bazelbuild/bazel/issues?q=is%3Aopen+is%3Aissue+label%3Arelease)
-on GitHub.
-
-## Release procedure & policies
-
-For rolling releases, the process is straightforward: about every two weeks, a
-new release is created, aligning with the same baseline as the Google internal
-Blaze release. Due to the rapid release schedule, we don't backport any changes
-to rolling releases.
-
-For LTS releases, the procedure and policies below are followed:
-
-1. Determine a baseline commit for the release.
-    * For a new major LTS release, the baseline commit is the HEAD of the main
-      branch.
-    * For a minor or patch release, the baseline commit is the HEAD of the
-      current latest version of the same LTS release.
-1. Create a release branch in the name of `release-` from the baseline
-   commit.
-1. Backport changes via PRs to the release branch.
-    * The community can suggest certain commits to be back-ported by replying
-      "`@bazel-io flag`" on relevant GitHub issues or PRs to mark them as
-      potential release blockers; the Bazel team triages them and decides
-      whether to back-port the commits.
-    * Only backward-compatible commits on the main branch can be back-ported;
-      additional minor changes to resolve merge conflicts are acceptable.
-1. Backport changes using Cherry-Pick Request Issues for Bazel maintainers.
-    * Bazel maintainers can request to cherry-pick specific commit(s)
-      to a release branch. This process is initiated by creating a
-      cherry-pick request on GitHub. Here's how to do it.
-        1. Open the [cherry-pick request](https://github.com/bazelbuild/bazel/issues/new?assignees=&labels=&projects=&template=cherry_pick_request.yml)
-        2. Fill in the request details
-            * Title: Provide a concise and descriptive title for the request.
-            * Commit ID(s): Enter the ID(s) of the commit(s) you want to
-              cherry-pick. If there are multiple commits, then separate
-              them with commas.
-            * Category: Specify the category of the request.
-            * Reviewer(s): For multiple reviewers, separate their GitHub
-              IDs with commas.
-        3. Set the milestone
-            * Find the "Milestone" section and click the setting.
-            * Select the appropriate X.Y.Z release blockers. This action
-              triggers the cherry-pick bot to process your request
-              for the "release-X.Y.Z" branch.
-        4. Submit the issue
-            * Once all details are filled in and the milestone is set,
-              submit the issue.
-
-    * The cherry-pick bot will process the request and notify you whether
-      the commit(s) are eligible for cherry-picking. If
-      the commits are cherry-pickable, which means there's no
-      merge conflict while cherry-picking the commit, then
-      the bot will create a new pull request. When the pull
-      request is approved by a member of the Bazel team, the
-      commits are cherry-picked and merged to the release branch.
-      For a visual example of a completed cherry-pick request,
-      refer to this
-      [example](https://github.com/bazelbuild/bazel/issues/20230).
-
-1. Identify release blockers and fix issues found on the release branch.
-    * The release branch is tested with the same test suite in
-      [postsubmit](https://buildkite.com/bazel/bazel-bazel) and the
-      [downstream test pipeline](https://buildkite.com/bazel/bazel-at-head-plus-downstream)
-      on Bazel CI. The Bazel team monitors testing results of the release
-      branch and fixes any regressions found.
-1. Create a new release candidate from the release branch when all known
-   release blockers are resolved.
-    * The release candidate is announced on
-      [bazel-discuss](https://groups.google.com/g/bazel-discuss);
-      the Bazel team monitors community bug reports for the candidate.
-    * If new release blockers are identified, go back to the last step and
-      create a new release candidate after resolving all the issues.
-    * New features are not allowed to be added to the release branch after the
-      first release candidate is created; cherry-picks are limited to critical
-      fixes only. If a cherry-pick is needed, the requester must answer the
-      following questions: Why is this change critical, and what benefits does
-      it provide? What is the likelihood of this change introducing a
-      regression?
-1. Push the release candidate as the official release if no further release
-   blockers are found.
-    * For patch releases, push the release at least two business days after
-      the last release candidate is out.
-    * For major and minor releases, push the release two business days after
-      the last release candidate is out, but not earlier than one week after
-      the first release candidate is out.
-    * The release is only pushed on a day when the next day is a business
-      day.
-    * The release is announced on
-      [bazel-discuss](https://groups.google.com/g/bazel-discuss);
-      the Bazel team monitors and addresses community bug reports for the new
-      release.
-
-## Report regressions
-
-If a user finds a regression in a new Bazel release, release candidate or even
-Bazel at HEAD, please file a bug on
-[GitHub](https://github.com/bazelbuild/bazel/issues). You can use
-Bazelisk to bisect the culprit commit and include this information in the bug
-report.
-
-For example, if your build succeeds with Bazel 6.1.0 but fails with the second
-release candidate of 6.2.0, you can bisect via
-
-```bash
-bazelisk --bisect=6.1.0..release-6.2.0rc2 build //foo:bar
-```
-
-You can set the `BAZELISK_SHUTDOWN` or `BAZELISK_CLEAN` environment variables to
-run the corresponding bazel commands to reset the build state, if that is needed
-to reproduce the issue.
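-
-For example, assuming a regression that only reproduces from a clean state, a
-bisect run might look like this (the target label is a placeholder, and the
-exact reset behavior is described in the Bazelisk documentation):
-
-```bash
-# Setting BAZELISK_CLEAN to a non-empty value makes Bazelisk reset the build
-# state between bisect steps by running the corresponding bazel clean command.
-BAZELISK_CLEAN=1 bazelisk --bisect=6.1.0..release-6.2.0rc2 build //foo:bar
-```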
For more details, check out the documentation about the Bazelisk
-[bisect feature](https://github.com/bazelbuild/bazelisk#--bisect).
-
-Remember to upgrade Bazelisk to the latest version to use the bisect
-feature.
-
-## Rule compatibility
-
-If you are a rule author and want to maintain compatibility with different
-Bazel versions, please check out the [Rule
-Compatibility](/release/rule-compatibility) page.
diff --git a/8.0.1/release/rule-compatibility.mdx b/8.0.1/release/rule-compatibility.mdx
deleted file mode 100644
index 05a8a95..0000000
--- a/8.0.1/release/rule-compatibility.mdx
+++ /dev/null
@@ -1,90 +0,0 @@
----
-title: 'Rule Compatibility'
----
-
-
-
-Bazel Starlark rules can break compatibility with Bazel LTS releases in the
-following two scenarios:
-
-1. The rule breaks compatibility with future LTS releases because a feature it
-   depends on is removed from Bazel at HEAD.
-1. The rule breaks compatibility with the current or older LTS releases because
-   a feature it depends on is only available in newer Bazel LTS releases.
-
-Meanwhile, the rule itself can ship incompatible changes for its users as
-well. When combined with breaking changes in Bazel, upgrading the rule version
-and Bazel version can often be a source of frustration for Bazel users. This
-page covers how rules authors should maintain rule compatibility with Bazel to
-make it easier for users to upgrade Bazel and rules.
-
-## Manageable migration process
-
-While it's obviously not feasible to guarantee compatibility between every
-version of Bazel and every version of the rule, our aim is to ensure that the
-migration process remains manageable for Bazel users. A manageable migration
-process is defined as a process where **users are not forced to upgrade the
-rule's major version and Bazel's major version simultaneously**, thereby
-allowing users to handle incompatible changes from one source at a time.
-
-For example, with the following compatibility matrix:
-
-* Migrating from rules_foo 1.x + Bazel 4.x to rules_foo 2.x + Bazel 5.x is not
-  considered manageable, as the users need to upgrade the major version of
-  rules_foo and Bazel at the same time.
-* Migrating from rules_foo 2.x + Bazel 5.x to rules_foo 3.x + Bazel 6.x is
-  considered manageable, as the users can first upgrade rules_foo from 2.x to
-  3.x without changing the major Bazel version, then upgrade Bazel from 5.x to
-  6.x.
-
-| | rules_foo 1.x | rules_foo 2.x | rules_foo 3.x | HEAD |
-| --- | --- | --- | --- | --- |
-| Bazel 4.x | ✅ | ❌ | ❌ | ❌ |
-| Bazel 5.x | ❌ | ✅ | ✅ | ❌ |
-| Bazel 6.x | ❌ | ❌ | ✅ | ✅ |
-| HEAD | ❌ | ❌ | ❌ | ✅ |
-
-❌: No version of the major rule version is compatible with the Bazel LTS
-release.
-
-✅: At least one version of the rule is compatible with the latest version of the
-Bazel LTS release.
-
-## Best practices
-
-As Bazel rules authors, you can ensure a manageable migration process for users
-by following these best practices:
-
-1. The rule should follow [Semantic
-   Versioning](https://semver.org/): minor versions of the same
-   major version are backward compatible.
-1. The rule at HEAD should be compatible with the latest Bazel LTS release.
-1. The rule at HEAD should be compatible with Bazel at HEAD.
To achieve this, - you can - * Set up your own CI testing with Bazel at HEAD - * Add your project to [Bazel downstream - testing](https://github.com/bazelbuild/continuous-integration/blob/master/docs/downstream-testing.md); - the Bazel team files issues to your project if breaking changes in Bazel - affect your project, and you must follow our [downstream project - policies](https://github.com/bazelbuild/continuous-integration/blob/master/docs/downstream-testing.md#downstream-project-policies) - to address issues timely. -1. The latest major version of the rule must be compatible with the latest - Bazel LTS release. -1. A new major version of the rule should be compatible with the last Bazel LTS - release supported by the previous major version of the rule. - -Achieving 2. and 3. is the most important task since it allows achieving 4. and -5. naturally. - -To make it easier to keep compatibility with both Bazel at HEAD and the latest -Bazel LTS release, rules authors can: - -* Request backward-compatible features to be back-ported to the latest LTS - release, check out [release process](/release#release-procedure-policies) - for more details. -* Use [bazel_features](https://github.com/bazel-contrib/bazel_features) - to do Bazel feature detection. - -In general, with the recommended approaches, rules should be able to migrate for -Bazel incompatible changes and make use of new Bazel features at HEAD without -dropping compatibility with the latest Bazel LTS release. diff --git a/8.0.1/remote/bep-examples.mdx b/8.0.1/remote/bep-examples.mdx deleted file mode 100644 index faf11bf..0000000 --- a/8.0.1/remote/bep-examples.mdx +++ /dev/null @@ -1,200 +0,0 @@ ---- -title: 'Build Event Protocol Examples' ---- - - - -The full specification of the Build Event Protocol can be found in its protocol -buffer definition. However, it might be helpful to build up some intuition -before looking at the specification. - -Consider a simple Bazel workspace that consists of two empty shell scripts -`foo.sh` and `foo_test.sh` and the following `BUILD` file: - -```bash -sh_library( - name = "foo_lib", - srcs = ["foo.sh"], -) - -sh_test( - name = "foo_test", - srcs = ["foo_test.sh"], - deps = [":foo_lib"], -) -``` - -When running `bazel test ...` on this project the build graph of the generated -build events will resemble the graph below. The arrows indicate the -aforementioned parent and child relationship. Note that some build events and -most fields have been omitted for brevity. - -![bep-graph](/docs/images/bep-graph.png "BEP graph") - -**Figure 1.** BEP graph. - -Initially, a `BuildStarted` event is published. The event informs us that the -build was invoked through the `bazel test` command and announces child events: - -* `OptionsParsed` -* `WorkspaceStatus` -* `CommandLine` -* `UnstructuredCommandLine` -* `BuildMetadata` -* `BuildFinished` -* `PatternExpanded` -* `Progress` - -The first three events provide information about how Bazel was invoked. - -The `PatternExpanded` build event provides insight -into which specific targets the `...` pattern expanded to: -`//foo:foo_lib` and `//foo:foo_test`. It does so by declaring two -`TargetConfigured` events as children. Note that the `TargetConfigured` event -declares the `Configuration` event as a child event, even though `Configuration` -has been posted before the `TargetConfigured` event. - -Besides the parent and child relationship, events may also refer to each other -using their build event identifiers. 
For example, in the above graph the -`TargetComplete` event refers to the `NamedSetOfFiles` event in its `fileSets` -field. - -Build events that refer to files don’t usually embed the file -names and paths in the event. Instead, they contain the build event identifier -of a `NamedSetOfFiles` event, which will then contain the actual file names and -paths. The `NamedSetOfFiles` event allows a set of files to be reported once and -referred to by many targets. This structure is necessary because otherwise in -some cases the Build Event Protocol output size would grow quadratically with -the number of files. A `NamedSetOfFiles` event may also not have all its files -embedded, but instead refer to other `NamedSetOfFiles` events through their -build event identifiers. - -Below is an instance of the `TargetComplete` event for the `//foo:foo_lib` -target from the above graph, printed in protocol buffer’s JSON representation. -The build event identifier contains the target as an opaque string and refers to -the `Configuration` event using its build event identifier. The event does not -announce any child events. The payload contains information about whether the -target was built successfully, the set of output files, and the kind of target -built. - -```json -{ - "id": { - "targetCompleted": { - "label": "//foo:foo_lib", - "configuration": { - "id": "544e39a7f0abdb3efdd29d675a48bc6a" - } - } - }, - "completed": { - "success": true, - "outputGroup": [{ - "name": "default", - "fileSets": [{ - "id": "0" - }] - }], - "targetKind": "sh_library rule" - } -} -``` - -## Aspect Results in BEP - -Ordinary builds evaluate actions associated with `(target, configuration)` -pairs. When building with [aspects](/extending/aspects) enabled, Bazel -additionally evaluates targets associated with `(target, configuration, -aspect)` triples, for each target affected by a given enabled aspect. - -Evaluation results for aspects are available in BEP despite the absence of -aspect-specific event types. For each `(target, configuration)` pair with an -applicable aspect, Bazel publishes an additional `TargetConfigured` and -`TargetComplete` event bearing the result from applying the aspect to the -target. For example, if `//:foo_lib` is built with -`--aspects=aspects/myaspect.bzl%custom_aspect`, this event would also appear in -the BEP: - -```json -{ - "id": { - "targetCompleted": { - "label": "//foo:foo_lib", - "configuration": { - "id": "544e39a7f0abdb3efdd29d675a48bc6a" - }, - "aspect": "aspects/myaspect.bzl%custom_aspect" - } - }, - "completed": { - "success": true, - "outputGroup": [{ - "name": "default", - "fileSets": [{ - "id": "1" - }] - }] - } -} -``` - -Note: The only difference between the IDs is the presence of the `aspect` -field. A tool that does not check the `aspect` ID field and accumulates output -files by target may conflate target outputs with aspect outputs. - -## Consuming `NamedSetOfFiles` - -Determining the artifacts produced by a given target (or aspect) is a common -BEP use-case that can be done efficiently with some preparation. This section -discusses the recursive, shared structure offered by the `NamedSetOfFiles` -event, which matches the structure of a Starlark [Depset](/extending/depsets). - -Consumers must take care to avoid quadratic algorithms when processing -`NamedSetOfFiles` events because large builds can contain tens of thousands of -such events, requiring hundreds of millions operations in a traversal with -quadratic complexity. 
- -![namedsetoffiles-bep-graph](/docs/images/namedsetoffiles-bep-graph.png "NamedSetOfFiles BEP graph") - -**Figure 2.** `NamedSetOfFiles` BEP graph. - -A `NamedSetOfFiles` event always appears in the BEP stream *before* a -`TargetComplete` or `NamedSetOfFiles` event that references it. This is the -inverse of the "parent-child" event relationship, where all but the first event -appears after at least one event announcing it. A `NamedSetOfFiles` event is -announced by a `Progress` event with no semantics. - -Given these ordering and sharing constraints, a typical consumer must buffer all -`NamedSetOfFiles` events until the BEP stream is exhausted. The following JSON -event stream and Python code demonstrate how to populate a map from -target/aspect to built artifacts in the "default" output group, and how to -process the outputs for a subset of built targets/aspects: - -```python -named_sets = {} # type: dict[str, NamedSetOfFiles] -outputs = {} # type: dict[str, dict[str, set[str]]] - -for event in stream: - kind = event.id.WhichOneof("id") - if kind == "named_set": - named_sets[event.id.named_set.id] = event.named_set_of_files - elif kind == "target_completed": - tc = event.id.target_completed - target_id = (tc.label, tc.configuration.id, tc.aspect) - outputs[target_id] = {} - for group in event.completed.output_group: - outputs[target_id][group.name] = {fs.id for fs in group.file_sets} - -for result_id in relevant_subset(outputs.keys()): - visit = outputs[result_id].get("default", []) - seen_sets = set(visit) - while visit: - set_name = visit.pop() - s = named_sets[set_name] - for f in s.files: - process_file(result_id, f) - for fs in s.file_sets: - if fs.id not in seen_sets: - visit.add(fs.id) - seen_sets.add(fs.id) -``` diff --git a/8.0.1/remote/bep-glossary.mdx b/8.0.1/remote/bep-glossary.mdx deleted file mode 100644 index 3bd11ee..0000000 --- a/8.0.1/remote/bep-glossary.mdx +++ /dev/null @@ -1,416 +0,0 @@ ---- -title: 'Build Event Protocol Glossary' ---- - - - -Each BEP event type has its own semantics, minimally documented in -[build\_event\_stream.proto](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto). -The following glossary describes each event type. - -## Aborted - -Unlike other events, `Aborted` does not have a corresponding ID type, because -the `Aborted` event *replaces* events of other types. This event indicates that -the build terminated early and the event ID it appears under was not produced -normally. `Aborted` contains an enum and human-friendly description to explain -why the build did not complete. - -For example, if a build is evaluating a target when the user interrupts Bazel, -BEP contains an event like the following: - -```json -{ - "id": { - "targetCompleted": { - "label": "//:foo", - "configuration": { - "id": "544e39a7f0abdb3efdd29d675a48bc6a" - } - } - }, - "aborted": { - "reason": "USER_INTERRUPTED" - } -} -``` - -## ActionExecuted - -Provides details about the execution of a specific -[Action](/rules/lib/actions) in a build. By default, this event is -included in the BEP only for failed actions, to support identifying the root cause -of build failures. Users may set the `--build_event_publish_all_actions` flag -to include all `ActionExecuted` events. - -## BuildFinished - -A single `BuildFinished` event is sent after the command is complete and -includes the exit code for the command. This event provides authoritative -success/failure information. 
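-
-For illustration, a `BuildFinished` event for a successful command might look
-roughly like the following (a sketch based on the field names in
-`build_event_stream.proto`; exact fields vary across Bazel versions, and the
-timestamp value is a placeholder):
-
-```json
-{
-  "id": {
-    "buildFinished": {}
-  },
-  "finished": {
-    "overallSuccess": true,
-    "exitCode": {
-      "name": "SUCCESS"
-    },
-    "finishTimeMillis": "1700000000000"
-  }
-}
-```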
-
-## BuildMetadata
-
-Contains the parsed contents of the `--build_metadata` flag. This event exists
-to support Bazel integration with other tooling by plumbing external data (such as
-identifiers).
-
-## BuildMetrics
-
-A single `BuildMetrics` event is sent at the end of every command and includes
-counters/gauges useful for quantifying the build tool's behavior during the
-command. These metrics indicate work actually done and do not count cached
-work that is reused.
-
-Note that `memory_metrics` may not be populated if there was no Java garbage
-collection during the command's execution. Users may set the
-`--memory_profile=/dev/null` option, which forces the garbage
-collector to run at the end of the command to populate `memory_metrics`.
-
-```json
-{
-  "id": {
-    "buildMetrics": {}
-  },
-  "buildMetrics": {
-    "actionSummary": {
-      "actionsExecuted": "1"
-    },
-    "memoryMetrics": {},
-    "targetMetrics": {
-      "targetsLoaded": "9",
-      "targetsConfigured": "19"
-    },
-    "packageMetrics": {
-      "packagesLoaded": "5"
-    },
-    "timingMetrics": {
-      "cpuTimeInMs": "1590",
-      "wallTimeInMs": "359"
-    }
-  }
-}
-```
-
-## BuildStarted
-
-The first event in a BEP stream, `BuildStarted` includes metadata describing the
-command before any meaningful work begins.
-
-## BuildToolLogs
-
-A single `BuildToolLogs` event is sent at the end of a command, including URIs
-of files generated by the build tool that may aid in understanding or debugging
-build tool behavior. Some information may be included inline.
-
-```json
-{
-  "id": {
-    "buildToolLogs": {}
-  },
-  "lastMessage": true,
-  "buildToolLogs": {
-    "log": [
-      {
-        "name": "elapsed time",
-        "contents": "MC4xMjEwMDA="
-      },
-      {
-        "name": "process stats",
-        "contents": "MSBwcm9jZXNzOiAxIGludGVybmFsLg=="
-      },
-      {
-        "name": "command.profile.gz",
-        "uri": "file:///tmp/.cache/bazel/_bazel_foo/cde87985ad0bfef34eacae575224b8d1/command.profile.gz"
-      }
-    ]
-  }
-}
-```
-
-## CommandLine
-
-The BEP contains multiple `CommandLine` events containing representations of all
-command-line arguments (including options and uninterpreted arguments).
-Each `CommandLine` event has a label in its `StructuredCommandLineId` that
-indicates which representation it conveys; three such events appear in the BEP:
-
-* `"original"`: Reconstructed commandline as Bazel received it from the Bazel
-  client, without startup options sourced from .rc files.
-* `"canonical"`: The effective commandline with .rc files expanded and
-  invocation policy applied.
-* `"tool"`: Populated from the `--experimental_tool_command_line` option. This
-  is useful to convey the command-line of a tool wrapping Bazel through the BEP.
-  This could be a base64-encoded `CommandLine` binary protocol buffer message
-  which is used directly, or a string which is parsed but not interpreted (as
-  the tool's options may differ from Bazel's).
-
-## Configuration
-
-A `Configuration` event is sent for every [`configuration`](/extending/config)
-used in the top-level targets in a build. At least one configuration event is
-always present. The `id` is reused by the `TargetConfigured` and
-`TargetComplete` event IDs and is necessary to disambiguate those events in
-multi-configuration builds.
-
-```json
-{
-  "id": {
-    "configuration": {
-      "id": "a5d130b0966b4a9ca2d32725aa5baf40e215bcfc4d5cdcdc60f5cc5b4918903b"
-    }
-  },
-  "configuration": {
-    "mnemonic": "k8-fastbuild",
-    "platformName": "k8",
-    "cpu": "k8",
-    "makeVariable": {
-      "COMPILATION_MODE": "fastbuild",
-      "TARGET_CPU": "k8",
-      "GENDIR": "bazel-out/k8-fastbuild/bin",
-      "BINDIR": "bazel-out/k8-fastbuild/bin"
-    }
-  }
-}
-```
-
-## ConvenienceSymlinksIdentified
-
-**Experimental.** If the `--experimental_convenience_symlinks_bep_event`
-option is set, a single `ConvenienceSymlinksIdentified` event is produced by
-`build` commands to indicate how symlinks in the workspace should be managed.
-This enables building tools that invoke Bazel remotely and then arrange the
-local workspace as if Bazel had been run locally.
-
-```json
-{
-  "id": {
-    "convenienceSymlinksIdentified":{}
-  },
-  "convenienceSymlinksIdentified": {
-    "convenienceSymlinks": [
-      {
-        "path": "bazel-bin",
-        "action": "CREATE",
-        "target": "execroot/google3/bazel-out/k8-fastbuild/bin"
-      },
-      {
-        "path": "bazel-genfiles",
-        "action": "CREATE",
-        "target": "execroot/google3/bazel-out/k8-fastbuild/genfiles"
-      },
-      {
-        "path": "bazel-out",
-        "action": "CREATE",
-        "target": "execroot/google3/bazel-out"
-      }
-    ]
-  }
-}
-```
-
-## Fetch
-
-Indicates that a Fetch operation occurred as part of the command execution.
-Unlike other events, if a cached fetch result is re-used, this event does not
-appear in the BEP stream.
-
-## NamedSetOfFiles
-
-`NamedSetOfFiles` events report a structure matching a
-[`depset`](/extending/depsets) of files produced during command evaluation.
-Transitively included depsets are identified by `NamedSetOfFilesId`.
-
-For more information on interpreting a stream's `NamedSetOfFiles` events, see the
-[BEP examples page](/remote/bep-examples#consuming-namedsetoffiles).
-
-## OptionsParsed
-
-A single `OptionsParsed` event lists all options applied to the command,
-separating startup options from command options. It also includes the
-[InvocationPolicy](/reference/command-line-reference#flag--invocation_policy), if any.
-
-```json
-{
-  "id": {
-    "optionsParsed": {}
-  },
-  "optionsParsed": {
-    "startupOptions": [
-      "--max_idle_secs=10800",
-      "--noshutdown_on_low_sys_mem",
-      "--connect_timeout_secs=30",
-      "--output_user_root=/tmp/.cache/bazel/_bazel_foo",
-      "--output_base=/tmp/.cache/bazel/_bazel_foo/a61fd0fbee3f9d6c1e30d54b68655d35",
-      "--deep_execroot",
-      "--idle_server_tasks",
-      "--write_command_log",
-      "--nowatchfs",
-      "--nofatal_event_bus_exceptions",
-      "--nowindows_enable_symlinks",
-      "--noclient_debug"
-    ],
-    "cmdLine": [
-      "--enable_platform_specific_config",
-      "--build_event_json_file=/tmp/bep.json"
-    ],
-    "explicitCmdLine": [
-      "--build_event_json_file=/tmp/bep.json"
-    ],
-    "invocationPolicy": {}
-  }
-}
-```
-
-## PatternExpanded
-
-`PatternExpanded` events indicate the set of all targets that match the patterns
-supplied on the commandline. For successful commands, a single event is present
-with all patterns in the `PatternExpandedId` and all targets in the
-`PatternExpanded` event's *children*. If the pattern expands to any
-`test_suite`s, the *children* also include the set of test targets contained in
-the `test_suite`. For each pattern that fails to resolve, BEP contains an
-additional [`Aborted`](#aborted) event with a `PatternExpandedId` identifying
-the pattern.
- -```json -{ - "id": { - "pattern": { - "pattern":["//base:all"] - } - }, - "children": [ - {"targetConfigured":{"label":"//base:foo"}}, - {"targetConfigured":{"label":"//base:foobar"}} - ], - "expanded": { - "testSuiteExpansions": { - "suiteLabel": "//base:suite", - "testLabels": "//base:foo_test" - } - } -} -``` - -## Progress - -Progress events contain the standard output and standard error produced by Bazel -during command execution. These events are also auto-generated as needed to -announce events that have not been announced by a logical "parent" event (in -particular, [NamedSetOfFiles](#namedsetoffiles).) - -## TargetComplete - -For each `(target, configuration, aspect)` combination that completes the -execution phase, a `TargetComplete` event is included in BEP. The event contains -the target's success/failure and the target's requested output groups. - -```json -{ - "id": { - "targetCompleted": { - "label": "//examples/py:bep", - "configuration": { - "id": "a5d130b0966b4a9ca2d32725aa5baf40e215bcfc4d5cdcdc60f5cc5b4918903b" - } - } - }, - "completed": { - "success": true, - "outputGroup": [ - { - "name": "default", - "fileSets": [ - { - "id": "0" - } - ] - } - ] - } -} -``` - -## TargetConfigured - -For each Target that completes the analysis phase, a `TargetConfigured` event is -included in BEP. This is the authoritative source for a target's "rule kind" -attribute. The configuration(s) applied to the target appear in the announced -*children* of the event. - -For example, building with the `--experimental_multi_cpu` options may produce -the following `TargetConfigured` event for a single target with two -configurations: - -```json -{ - "id": { - "targetConfigured": { - "label": "//starlark_configurations/multi_arch_binary:foo" - } - }, - "children": [ - { - "targetCompleted": { - "label": "//starlark_configurations/multi_arch_binary:foo", - "configuration": { - "id": "c62b30c8ab7b9fc51a05848af9276529842a11a7655c71327ade26d7c894c818" - } - } - }, - { - "targetCompleted": { - "label": "//starlark_configurations/multi_arch_binary:foo", - "configuration": { - "id": "eae0379b65abce68d54e0924c0ebcbf3d3df26c6e84ef7b2be51e8dc5b513c99" - } - } - } - ], - "configured": { - "targetKind": "foo_binary rule" - } -} -``` - -## TargetSummary - -For each `(target, configuration)` pair that is executed, a `TargetSummary` -event is included with an aggregate success result encompassing the configured -target's execution and all aspects applied to that configured target. - -## TestResult - -If testing is requested, a `TestResult` event is sent for each test attempt, -shard, and run per test. This allows BEP consumers to identify precisely which -test actions failed their tests and identify the test outputs (such as logs, -test.xml files) for each test action. - -## TestSummary - -If testing is requested, a `TestSummary` event is sent for each test `(target, -configuration)`, containing information necessary to interpret the test's -results. The number of attempts, shards and runs per test are included to enable -BEP consumers to differentiate artifacts across these dimensions. The attempts -and runs per test are considered while producing the aggregate `TestStatus` to -differentiate `FLAKY` tests from `FAILED` tests. - -## UnstructuredCommandLine - -Unlike [CommandLine](#commandline), this event carries the unparsed commandline -flags in string form as encountered by the build tool after expanding all -[`.bazelrc`](/run/bazelrc) files and -considering the `--config` flag. 
- -The `UnstructuredCommandLine` event may be relied upon to precisely reproduce a -given command execution. - -## WorkspaceConfig - -A single `WorkspaceConfig` event contains configuration information regarding the -workspace, such as the execution root. - -## WorkspaceStatus - -A single `WorkspaceStatus` event contains the result of the [workspace status -command](/docs/user-manual#workspace-status). diff --git a/8.0.1/remote/bep.mdx b/8.0.1/remote/bep.mdx deleted file mode 100644 index bafdaa9..0000000 --- a/8.0.1/remote/bep.mdx +++ /dev/null @@ -1,148 +0,0 @@ ---- -title: 'Build Event Protocol' ---- - - - -The [Build Event -Protocol](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto) -(BEP) allows third-party programs to gain insight into a Bazel invocation. For -example, you could use the BEP to gather information for an IDE -plugin or a dashboard that displays build results. - -The protocol is a set of [protocol -buffer](https://developers.google.com/protocol-buffers/) messages with some -semantics defined on top of it. It includes information about build and test -results, build progress, the build configuration and much more. The BEP is -intended to be consumed programmatically and makes parsing Bazel’s -command line output a thing of the past. - -The Build Event Protocol represents information about a build as events. A -build event is a protocol buffer message consisting of a build event identifier, -a set of child event identifiers, and a payload. - -* __Build Event Identifier:__ Depending on the kind of build event, it might be -an [opaque -string](https://github.com/bazelbuild/bazel/blob/7.1.0/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto#L131-L140) -or [structured -information](https://github.com/bazelbuild/bazel/blob/7.1.0/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto#L194-L205) -revealing more about the build event. A build event identifier is unique within -a build. - -* __Children:__ A build event may announce other build events, by including -their build event identifiers in its [children -field](https://github.com/bazelbuild/bazel/blob/7.1.0/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto#L1276). -For example, the `PatternExpanded` build event announces the targets it expands -to as children. The protocol guarantees that all events, except for the first -event, are announced by a previous event. - -* __Payload:__ The payload contains structured information about a build event, -encoded as a protocol buffer message specific to that event. Note that the -payload might not be the expected type, but could be an `Aborted` message -if the build aborted prematurely. - -### Build event graph - -All build events form a directed acyclic graph through their parent and child -relationship. Every build event except for the initial build event has one or -more parent events. Please note that not all parent events of a child event must -necessarily be posted before it. When a build is complete (succeeded or failed) -all announced events will have been posted. In case of a Bazel crash or a failed -network transport, some announced build events may never be posted. - -The event graph's structure reflects the lifecycle of a command. Every BEP -graph has the following characteristic shape: - -1. 
The root event is always a [`BuildStarted`](/remote/bep-glossary#buildstarted) - event. All other events are its descendants. -1. Immediate children of the BuildStarted event contain metadata about the - command. -1. Events containing data produced by the command, such as files built and test - results, appear before the [`BuildFinished`](/remote/bep-glossary#buildfinished) - event. -1. The [`BuildFinished`](/remote/bep-glossary#buildfinished) event *may* be followed - by events containing summary information about the build (for example, metric - or profiling data). - -## Consuming Build Event Protocol - -### Consume in binary format - -To consume the BEP in a binary format: - -1. Have Bazel serialize the protocol buffer messages to a file by specifying the - option `--build_event_binary_file=/path/to/file`. The file will contain - serialized protocol buffer messages with each message being length delimited. - Each message is prefixed with its length encoded as a variable length integer. - This format can be read using the protocol buffer library’s - [`parseDelimitedFrom(InputStream)`](https://developers.google.com/protocol-buffers/docs/reference/java/com/google/protobuf/AbstractParser#parseDelimitedFrom-java.io.InputStream-) - method. - -2. Then, write a program that extracts the relevant information from the - serialized protocol buffer message. - -### Consume in text or JSON formats - -The following Bazel command line flags will output the BEP in -human-readable formats, such as text and JSON: - -``` ---build_event_text_file ---build_event_json_file -``` - -## Build Event Service - -The [Build Event -Service](https://github.com/googleapis/googleapis/blob/master/google/devtools/build/v1/publish_build_event.proto) -Protocol is a generic [gRPC](https://www.grpc.io) service for publishing build events. The Build Event -Service protocol is independent of the BEP and treats BEP events as opaque bytes. -Bazel ships with a gRPC client implementation of the Build Event Service protocol that -publishes Build Event Protocol events. One can specify the endpoint to send the -events to using the `--bes_backend=HOST:PORT` flag. If your backend uses gRPC, -you must prefix the address with the appropriate scheme: `grpc://` for plaintext -gRPC and `grpcs://` for gRPC with TLS enabled. - -### Build Event Service flags - -Bazel has several flags related to the Build Event Service protocol, including: - -* `--bes_backend` -* `--[no]bes_lifecycle_events` -* `--bes_results_url` -* `--bes_timeout` -* `--bes_instance_name` - -For a description of each of these flags, see the -[Command-Line Reference](/reference/command-line-reference). - -### Authentication and security - -Bazel’s Build Event Service implementation also supports authentication and TLS. -These settings can be controlled using the below flags. Please note that these -flags are also used for Bazel’s Remote Execution. This implies that the Build -Event Service and Remote Execution Endpoints need to share the same -authentication and TLS infrastructure. - -* `--[no]google_default_credentials` -* `--google_credentials` -* `--google_auth_scopes` -* `--tls_certificate` -* `--[no]tls_enabled` - -For a description of each of these flags, see the -[Command-Line Reference](/reference/command-line-reference). - -### Build Event Service and remote caching - -The BEP typically contains many references to log files (test.log, test.xml, -etc. ) stored on the machine where Bazel is running. 
A remote BES server -typically can't access these files as they are on different machines. A way to -work around this issue is to use Bazel with [remote -caching](/remote/caching). -Bazel will upload all output files to the remote cache (including files -referenced in the BEP) and the BES server can then fetch the referenced files -from the cache. - -See [GitHub issue 3689](https://github.com/bazelbuild/bazel/issues/3689) for -more details. diff --git a/8.0.1/remote/cache-local.mdx b/8.0.1/remote/cache-local.mdx deleted file mode 100644 index e6dc0c0..0000000 --- a/8.0.1/remote/cache-local.mdx +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: 'Debugging Remote Cache Hits for Local Execution' ---- - - - -This page describes how to investigate cache misses in the context of local -execution. - -This page assumes that you have a build and/or test that successfully builds -locally and is set up to utilize remote caching, and that you want to ensure -that the remote cache is being effectively utilized. - -For tips on how to check your cache hit rate and how to compare the execution -logs between two Bazel invocations, see -[Debugging Remote Cache Hits for Remote Execution](/remote/cache-remote). -Everything presented in that guide also applies to remote caching with local -execution. However, local execution presents some additional challenges. - -## Checking your cache hit rate - -Successful remote cache hits will show up in the status line, similar to -[Cache Hits rate with Remote -Execution](/remote/cache-remote#check-cache-hits). - -In the standard output of your Bazel run, you will see something like the -following: - -```none {:.devsite-disable-click-to-copy} - INFO: 7 processes: 3 remote cache hit, 4 linux-sandbox. -``` - -This means that out of 7 attempted actions, 3 got a remote cache hit and 4 -actions did not have cache hits and were executed locally using `linux-sandbox` -strategy. Local cache hits are not included in this summary. If you are getting -0 processes (or a number lower than expected), run `bazel clean` followed by -your build/test command. - -## Troubleshooting cache hits - -If you are not getting the cache hit rate you are expecting, do the following: - -### Ensure successful communication with the remote endpoint - -To ensure your build is successfully communicating with the remote cache, follow -the steps in this section. - -1. Check your output for warnings - - With remote execution, a failure to talk to the remote endpoint would fail - your build. On the other hand, a cacheable local build would not fail if it - cannot cache. Check the output of your Bazel invocation for warnings, such - as: - - ```none {:.devsite-disable-click-to-copy} - WARNING: Error reading from the remote cache: - ``` - - - or - - ```none {:.devsite-disable-click-to-copy} - WARNING: Error writing to the remote cache: - ``` - - - Such warnings will be followed by the error message detailing the connection - problem that should help you debug: for example, mistyped endpoint name or - incorrectly set credentials. Find and address any such errors. If the error - message you see does not give you enough information, try adding - `--verbose_failures`. - -2. Follow the steps from [Troubleshooting cache hits for remote - execution](/remote/cache-remote#troubleshooting_cache_hits) to - ensure that your cache-writing Bazel invocations are able to get cache hits - on the same machine and across machines. - -3. Ensure your cache-reading Bazel invocations can get cache hits. - - a. 
Since cache-reading Bazel invocations will have a different command-line set - up, take additional care to ensure that they are properly set up to - communicate with the remote cache. Ensure the `--remote_cache` flag is set - and there are no warnings in the output. - - b. Ensure your cache-reading Bazel invocations build the same targets as the - cache-writing Bazel invocations. - - c. Follow the same steps as to [ensure caching across - machines](/remote/cache-remote#caching-across-machines), - to ensure caching from your cache-writing Bazel invocation to your - cache-reading Bazel invocation. diff --git a/8.0.1/remote/cache-remote.mdx b/8.0.1/remote/cache-remote.mdx deleted file mode 100644 index a614f4f..0000000 --- a/8.0.1/remote/cache-remote.mdx +++ /dev/null @@ -1,179 +0,0 @@ ---- -title: 'Debugging Remote Cache Hits for Remote Execution' ---- - - - -This page describes how to check your cache hit rate and how to investigate -cache misses in the context of remote execution. - -This page assumes that you have a build and/or test that successfully -utilizes remote execution, and you want to ensure that you are effectively -utilizing remote cache. - -## Checking your cache hit rate - -In the standard output of your Bazel run, look at the `INFO` line that lists -processes, which roughly correspond to Bazel actions. That line details -where the action was run. Look for the `remote` label, which indicates an action -executed remotely, `linux-sandbox` for actions executed in a local sandbox, -and other values for other execution strategies. An action whose result came -from a remote cache is displayed as `remote cache hit`. - -For example: - -```none {:.devsite-disable-click-to-copy} -INFO: 11 processes: 6 remote cache hit, 3 internal, 2 remote. -``` - -In this example there were 6 remote cache hits, and 2 actions did not have -cache hits and were executed remotely. The 3 internal part can be ignored. -It is typically tiny internal actions, such as creating symbolic links. Local -cache hits are not included in this summary. If you are getting 0 processes -(or a number lower than expected), run `bazel clean` followed by your build/test -command. - -## Troubleshooting cache hits - -If you are not getting the cache hit rate you are expecting, do the following: - -### Ensure re-running the same build/test command produces cache hits - -1. Run the build(s) and/or test(s) that you expect to populate the cache. The - first time a new build is run on a particular stack, you can expect no remote - cache hits. As part of remote execution, action results are stored in the - cache and a subsequent run should pick them up. - -2. Run `bazel clean`. This command cleans your local cache, which allows - you to investigate remote cache hits without the results being masked by - local cache hits. - -3. Run the build(s) and test(s) that you are investigating again (on the same - machine). - -4. Check the `INFO` line for cache hit rate. If you see no processes except - `remote cache hit` and `internal`, then your cache is being correctly populated and - accessed. In that case, skip to the next section. - -5. A likely source of discrepancy is something non-hermetic in the build causing - the actions to receive different action keys across the two runs. To find - those actions, do the following: - - a. 
Re-run the build(s) or test(s) in question to obtain execution logs:
-
-      ```posix-terminal
-      bazel clean
-
-      bazel --optional-flags build //your:target --execution_log_compact_file=/tmp/exec1.log
-      ```
-
-   b. [Compare the execution logs](#compare-logs) between the
-      two runs. Ensure that the actions are identical across the two log files.
-      Discrepancies provide a clue about the changes that occurred between the
-      runs. Update your build to eliminate those discrepancies.
-
-   If you are able to resolve the caching problems and the repeated run now
-   produces all cache hits, skip to the next section.
-
-   If your action IDs are identical but there are no cache hits, then something
-   in your configuration is preventing caching. Continue with this section to
-   check for common problems.
-
-6. Check that all actions in the execution log have `cacheable` set to true. If
-   `cacheable` does not appear in the execution log for a given action, that
-   means that the corresponding rule may have a `no-cache` tag in its
-   definition in the `BUILD` file. Look at the `mnemonic` and `target_label`
-   fields in the execution log to help determine where the action is coming
-   from.
-
-7. If the actions are identical and `cacheable` but there are no cache hits, it
-   is possible that your command line includes `--noremote_accept_cached`, which
-   would disable cache lookups for a build.
-
-   If figuring out the actual command line is difficult, use the canonical
-   command line from the
-   [Build Event Protocol](/remote/bep)
-   as follows:
-
-   a. Add `--build_event_text_file=/tmp/bep.txt` to your Bazel command to get
-      the text version of the log.
-
-   b. Open the text version of the log and search for the
-      `structured_command_line` message with `command_line_label: "canonical"`.
-      It will list all the options after expansion.
-
-   c. Search for `remote_accept_cached` and check whether it's set to `false`.
-
-   d. If `remote_accept_cached` is `false`, determine where it is being
-      set to `false`: either at the command line or in a
-      [bazelrc](/run/bazelrc#bazelrc-file-locations) file.
-
-### Ensure caching across machines
-
-After cache hits are happening as expected on the same machine, run the
-same build(s)/test(s) on a different machine. If you suspect that caching is
-not happening across machines, do the following:
-
-1. Make a small modification to your build to avoid hitting existing caches.
-
-2. Run the build on the first machine:
-
-   ```posix-terminal
-   bazel clean
-
-   bazel ... build ... --execution_log_compact_file=/tmp/exec1.log
-   ```
-
-3. Run the build on the second machine, ensuring the modification from step 1
-   is included:
-
-   ```posix-terminal
-   bazel clean
-
-   bazel ... build ... --execution_log_compact_file=/tmp/exec2.log
-   ```
-
-4. [Compare the execution logs](#compare-logs) for the two
-   runs. If the logs are not identical, investigate your build configurations
-   for discrepancies as well as properties from the host environment leaking
-   into either of the builds.
-
-## Comparing the execution logs
-
-The execution log contains records of actions executed during the build.
-Each record describes both the inputs (not only files, but also command line
-arguments, environment variables, etc.) and the outputs of the action. Thus,
-examination of the log can reveal why an action was reexecuted.
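-
-Once you have converted both logs to the human-readable text format with the
-parser tool described below, a short script can surface the differences
-programmatically. This is a minimal sketch using only Python's standard
-library; the input paths are assumed to be the text files produced by the
-parser step:
-
-```python
-import difflib
-
-# Text logs assumed to have been produced by the execlog parser steps below.
-with open("/tmp/exec1.log.txt") as f1, open("/tmp/exec2.log.txt") as f2:
-    lines1, lines2 = f1.readlines(), f2.readlines()
-
-# Any hunk in the unified diff points at an action that differs between runs,
-# such as a changed input digest, environment variable, or command line flag.
-for line in difflib.unified_diff(lines1, lines2, fromfile="exec1", tofile="exec2"):
-    print(line, end="")
-```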
- -The execution log can be produced in one of three formats: -compact (`--execution_log_compact_file`), -binary (`--execution_log_binary_file`) or JSON (`--execution_log_json_file`). -The compact format is recommended, as it produces much smaller files with very -little runtime overhead. The following instructions work for any format. You -can also convert between them using the `//src/tools/execlog:converter` tool. - -To compare logs for two builds that are not sharing cache hits as expected, -do the following: - -1. Get the execution logs from each build and store them as `/tmp/exec1.log` and - `/tmp/exec2.log`. - -2. Download the Bazel source code and build the `//src/tools/execlog:parser` - tool: - - git clone https://github.com/bazelbuild/bazel.git - cd bazel - bazel build //src/tools/execlog:parser - -3. Use the `//src/tools/execlog:parser` tool to convert the logs into a - human-readable text format. In this format, the actions in the second log are - sorted to match the order in the first log, making a comparison easier. - - bazel-bin/src/tools/execlog/parser \ - --log_path=/tmp/exec1.log \ - --log_path=/tmp/exec2.log \ - --output_path=/tmp/exec1.log.txt \ - --output_path=/tmp/exec2.log.txt - -4. Use your favourite text differ to diff `/tmp/exec1.log.txt` and - `/tmp/exec2.log.txt`. diff --git a/8.0.1/remote/caching.mdx b/8.0.1/remote/caching.mdx deleted file mode 100644 index 8fd6adc..0000000 --- a/8.0.1/remote/caching.mdx +++ /dev/null @@ -1,380 +0,0 @@ ---- -title: 'Remote Caching' ---- - - - -This page covers remote caching, setting up a server to host the cache, and -running builds using the remote cache. - -A remote cache is used by a team of developers and/or a continuous integration -(CI) system to share build outputs. If your build is reproducible, the -outputs from one machine can be safely reused on another machine, which can -make builds significantly faster. - -## Overview - -Bazel breaks a build into discrete steps, which are called actions. Each action -has inputs, output names, a command line, and environment variables. Required -inputs and expected outputs are declared explicitly for each action. - -You can set up a server to be a remote cache for build outputs, which are these -action outputs. These outputs consist of a list of output file names and the -hashes of their contents. With a remote cache, you can reuse build outputs -from another user's build rather than building each new output locally. - -To use remote caching: - -* Set up a server as the cache's backend -* Configure the Bazel build to use the remote cache -* Use Bazel version 0.10.0 or later - -The remote cache stores two types of data: - -* The action cache, which is a map of action hashes to action result metadata. -* A content-addressable store (CAS) of output files. - -Note that the remote cache additionally stores the stdout and stderr for every -action. Inspecting the stdout/stderr of Bazel thus is not a good signal for -[estimating cache hits](/remote/cache-local). - -### How a build uses remote caching - -Once a server is set up as the remote cache, you use the cache in multiple -ways: - -* Read and write to the remote cache -* Read and/or write to the remote cache except for specific targets -* Only read from the remote cache -* Not use the remote cache at all - -When you run a Bazel build that can read and write to the remote cache, -the build follows these steps: - -1. Bazel creates the graph of targets that need to be built, and then creates -a list of required actions. 
Each of these actions has declared inputs
-and output filenames.
-2. Bazel checks your local machine for existing build outputs and reuses any
-that it finds.
-3. Bazel checks the cache for existing build outputs. If the output is found,
-Bazel retrieves the output. This is a cache hit.
-4. For required actions where the outputs were not found, Bazel executes the
-actions locally and creates the required build outputs.
-5. New build outputs are uploaded to the remote cache.
-
-## Setting up a server as the cache's backend
-
-You need to set up a server to act as the cache's backend. An HTTP/1.1
-server can treat Bazel's data as opaque bytes, so many existing servers
-can be used as a remote caching backend. Bazel's
-[HTTP Caching Protocol](#http-caching) is what supports remote
-caching.
-
-You are responsible for choosing, setting up, and maintaining the backend
-server that will store the cached outputs. When choosing a server, consider:
-
-* Networking speed. For example, if your team is in the same office, you may
-want to run your own local server.
-* Security. The remote cache will have your binaries and so needs to be secure.
-* Ease of management. For example, Google Cloud Storage is a fully managed service.
-
-There are many backends that can be used for a remote cache. Some options
-include:
-
-* [nginx](#nginx)
-* [bazel-remote](#bazel-remote)
-* [Google Cloud Storage](#cloud-storage)
-
-### nginx
-
-nginx is an open source web server. With its
-[WebDAV module](https://nginx.org/en/docs/http/ngx_http_dav_module.html), it can be
-used as a remote cache for Bazel. On Debian and Ubuntu you can install the
-`nginx-extras` package. On macOS nginx is available via Homebrew:
-
-```posix-terminal
-brew tap denji/nginx
-
-brew install nginx-full --with-webdav
-```
-
-Below is an example configuration for nginx. Note that you will need to
-change `/path/to/cache/dir` to a valid directory where nginx has permission
-to write and read. You may need to change the `client_max_body_size` option to a
-larger value if you have larger output files. The server will require other
-configuration such as authentication.
-
-Example configuration for the `server` section in `nginx.conf`:
-
-```nginx
-location /cache/ {
-  # The path to the directory where nginx should store the cache contents.
-  root /path/to/cache/dir;
-  # Allow PUT
-  dav_methods PUT;
-  # Allow nginx to create the /ac and /cas subdirectories.
-  create_full_put_path on;
-  # The maximum size of a single file.
-  client_max_body_size 1G;
-  allow all;
-}
-```
-
-### bazel-remote
-
-bazel-remote is an open source remote build cache that you can use on
-your infrastructure. It has been successfully used in production at
-several companies since early 2018. Note that the Bazel project does
-not provide technical support for bazel-remote.
-
-This cache stores contents on disk and also provides garbage collection
-to enforce an upper storage limit and clean unused artifacts. The cache is
-available as a
-[Docker image](https://hub.docker.com/r/buchgr/bazel-remote-cache/) and its code
-is available on [GitHub](https://github.com/buchgr/bazel-remote/).
-Both the REST and gRPC remote cache APIs are supported.
-
-Refer to the [GitHub](https://github.com/buchgr/bazel-remote/)
-page for instructions on how to use it.
-
-### Google Cloud Storage
-
-[Google Cloud Storage](https://cloud.google.com/storage) is a fully managed
-object store which provides an
-HTTP API that is compatible with Bazel's remote caching protocol. It requires
-that you have a Google Cloud account with billing enabled.
-
-To use Cloud Storage as the cache:
-
-1. [Create a storage bucket](https://cloud.google.com/storage/docs/creating-buckets).
-Ensure that you select a bucket location that's closest to you, as network bandwidth
-is important for the remote cache.
-
-2. Create a service account for Bazel to authenticate to Cloud Storage. See
-[Creating a service account](https://cloud.google.com/iam/docs/creating-managing-service-accounts#creating_a_service_account).
-
-3. Generate a secret JSON key and then pass it to Bazel for authentication. Store
-the key securely, as anyone with the key can read and write arbitrary data
-to/from your GCS bucket.
-
-4. Connect to Cloud Storage by adding the following flags to your Bazel command:
-   * Pass the following URL to Bazel by using the flag:
-     `--remote_cache=https://storage.googleapis.com/bucket-name`, where `bucket-name` is the name of your storage bucket.
-   * Pass the authentication key using the flag: `--google_credentials=/path/to/your/secret-key.json`, or
-     `--google_default_credentials` to use [Application Authentication](https://cloud.google.com/docs/authentication/production).
-
-5. You can configure Cloud Storage to automatically delete old files. To do so, see
-[Managing Object Lifecycles](https://cloud.google.com/storage/docs/managing-lifecycles).
-
-### Other servers
-
-You can set up any HTTP/1.1 server that supports PUT and GET as the cache's
-backend. Users have reported success with caching backends such as [Hazelcast](https://hazelcast.com),
-[Apache httpd](http://httpd.apache.org), and [AWS S3](https://aws.amazon.com/s3).
-
-## Authentication
-
-Bazel supports HTTP Basic Authentication as of version 0.11.0.
-You can pass a username and password to Bazel via the remote cache URL. The
-syntax is `https://username:password@hostname.com:port/path`. Note that
-HTTP Basic Authentication transmits the username and password in plaintext over the
-network, so it's critical to always use it with HTTPS.
-
-## HTTP caching protocol
-
-Bazel supports remote caching via HTTP/1.1. The protocol is conceptually simple:
-Binary data (BLOB) is uploaded via PUT requests and downloaded via GET requests.
-Action result metadata is stored under the path `/ac/` and output files are stored
-under the path `/cas/`.
-
-For example, consider a remote cache running under `http://localhost:8080/cache`.
-A Bazel request to download action result metadata for an action with the SHA256
-hash `01ba4719...` will look as follows:
-
-```http
-GET /cache/ac/01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b HTTP/1.1
-Host: localhost:8080
-Accept: */*
-Connection: Keep-Alive
-```
-
-A Bazel request to upload an output file with the SHA256 hash `15e2b0d3...` to
-the CAS will look as follows:
-
-```http
-PUT /cache/cas/15e2b0d3c33891ebb0f1ef609ec419420c20e320ce94c65fbc8c3312448eb225 HTTP/1.1
-Host: localhost:8080
-Accept: */*
-Content-Length: 9
-Connection: Keep-Alive
-
-0x310x320x330x340x350x360x370x380x39
-```
-
-## Run Bazel using the remote cache
-
-Once a server is set up as the remote cache, to use the remote cache you
-need to add flags to your Bazel command. See the list of configurations and
-their flags below.
-
-You may also need to configure authentication, which is specific to your
-chosen server.
-
-You may want to add these flags in a `.bazelrc` file so that you don't
-need to specify them every time you run Bazel. Depending on your project and
-team dynamics, you can add flags to a `.bazelrc` file that is:
-
-* On your local machine
-* In your project's workspace, shared with the team
-* On the CI system
-
-### Read from and write to the remote cache
-
-Take care in deciding who has the ability to write to the remote cache. You may want
-only your CI system to be able to write to the remote cache.
-
-Use the following flag to read from and write to the remote cache:
-
-```posix-terminal
-build --remote_cache=http://your.host:port
-```
-
-Besides `HTTP`, the following protocols are also supported: `HTTPS`, `grpc`, `grpcs`.
-
-Use the following flag in addition to the one above to only read from the
-remote cache:
-
-```posix-terminal
-build --remote_upload_local_results=false
-```
-
-### Exclude specific targets from using the remote cache
-
-To exclude specific targets from using the remote cache, tag the target with
-`no-remote-cache`. For example:
-
-```starlark
-java_library(
-    name = "target",
-    tags = ["no-remote-cache"],
-)
-```
-
-### Delete content from the remote cache
-
-Deleting content from the remote cache is part of managing your server.
-How you delete content from the remote cache depends on the server you have
-set up as the cache. When deleting outputs, either delete the entire cache,
-or delete old outputs.
-
-The cached outputs are stored as a set of names and hashes. When deleting
-content, there's no way to distinguish which output belongs to a specific
-build.
-
-You may want to delete content from the cache to:
-
-* Create a clean cache after a cache was poisoned
-* Reduce the amount of storage used by deleting old outputs
-
-### Unix sockets
-
-The remote HTTP cache supports connecting over unix domain sockets. The behavior
-is similar to curl's `--unix-socket` flag. Use the following to configure a unix
-domain socket:
-
-```posix-terminal
-build --remote_cache=http://your.host:port
-build --remote_proxy=unix:/path/to/socket
-```
-
-This feature is unsupported on Windows.
-
-## Disk cache
-
-Bazel can use a directory on the file system as a remote cache. This is
-useful for sharing build artifacts when switching branches and/or working
-on multiple workspaces of the same project, such as multiple checkouts.
-Enable the disk cache as follows:
-
-```posix-terminal
-build --disk_cache=path/to/build/cache
-```
-
-You can pass a user-specific path to the `--disk_cache` flag using the `~` alias
-(Bazel will substitute the current user's home directory). This comes in handy
-when enabling the disk cache for all developers of a project via the project's
-checked-in `.bazelrc` file.
-
-### Garbage collection
-
-Starting with Bazel 7.4, you can use `--experimental_disk_cache_gc_max_size` and
-`--experimental_disk_cache_gc_max_age` to set a maximum size for the disk cache
-or for the age of individual cache entries. Bazel will automatically garbage
-collect the disk cache while idling between builds; the idle timer can be set
-with `--experimental_disk_cache_gc_idle_delay` (defaulting to 5 minutes).
-
-As an alternative to automatic garbage collection, we also provide a [tool](
-https://github.com/bazelbuild/bazel/tree/master/src/tools/diskcache) to run a
-garbage collection on demand.
-
-## Known issues
-
-**Input file modification during a build**
-
-When an input file is modified during a build, Bazel might upload invalid
-results to the remote cache.
You can enable a change detection with -the `--experimental_guard_against_concurrent_changes` flag. There -are no known issues and it will be enabled by default in a future release. -See [issue #3360] for updates. Generally, avoid modifying source files during a -build. - -**Environment variables leaking into an action** - -An action definition contains environment variables. This can be a problem for -sharing remote cache hits across machines. For example, environments with -different `$PATH` variables won't share cache hits. Only environment variables -explicitly whitelisted via `--action_env` are included in an action -definition. Bazel's Debian/Ubuntu package used to install `/etc/bazel.bazelrc` -with a whitelist of environment variables including `$PATH`. If you are getting -fewer cache hits than expected, check that your environment doesn't have an old -`/etc/bazel.bazelrc` file. - -**Bazel does not track tools outside a workspace** - -Bazel currently does not track tools outside a workspace. This can be a -problem if, for example, an action uses a compiler from `/usr/bin/`. Then, -two users with different compilers installed will wrongly share cache hits -because the outputs are different but they have the same action hash. See -[issue #4558](https://github.com/bazelbuild/bazel/issues/4558) for updates. - -**Incremental in-memory state is lost when running builds inside docker containers** -Bazel uses server/client architecture even when running in single docker container. -On the server side, Bazel maintains an in-memory state which speeds up builds. -When running builds inside docker containers such as in CI, the in-memory state is lost -and Bazel must rebuild it before using the remote cache. - -## External links - -* **Your Build in a Datacenter:** The Bazel team gave a [talk](https://fosdem.org/2018/schedule/event/datacenter_build/) about remote caching and execution at FOSDEM 2018. - -* **Faster Bazel builds with remote caching: a benchmark:** Nicolò Valigi wrote a [blog post](https://nicolovaligi.com/faster-bazel-remote-caching-benchmark.html) -in which he benchmarks remote caching in Bazel. 
- -* [Adapting Rules for Remote Execution](/remote/rules) -* [Troubleshooting Remote Execution](/remote/sandbox) -* [WebDAV module](https://nginx.org/en/docs/http/ngx_http_dav_module.html) -* [Docker image](https://hub.docker.com/r/buchgr/bazel-remote-cache/) -* [bazel-remote](https://github.com/buchgr/bazel-remote/) -* [Google Cloud Storage](https://cloud.google.com/storage) -* [Google Cloud Console](https://cloud.google.com/console) -* [Bucket locations](https://cloud.google.com/storage/docs/bucket-locations) -* [Hazelcast](https://hazelcast.com) -* [Apache httpd](http://httpd.apache.org) -* [AWS S3](https://aws.amazon.com/s3) -* [issue #3360](https://github.com/bazelbuild/bazel/issues/3360) -* [gRPC](https://grpc.io/) -* [gRPC protocol](https://github.com/bazelbuild/remote-apis/blob/main/build/bazel/remote/execution/v2/remote_execution.proto) -* [Buildbarn](https://github.com/buildbarn) -* [Buildfarm](https://github.com/bazelbuild/bazel-buildfarm) -* [BuildGrid](https://gitlab.com/BuildGrid/buildgrid) -* [issue #4558](https://github.com/bazelbuild/bazel/issues/4558) -* [Application Authentication](https://cloud.google.com/docs/authentication/production) -* [NativeLink](https://github.com/TraceMachina/nativelink) diff --git a/8.0.1/remote/creating.mdx b/8.0.1/remote/creating.mdx deleted file mode 100644 index 0e46a07..0000000 --- a/8.0.1/remote/creating.mdx +++ /dev/null @@ -1,261 +0,0 @@ ---- -title: 'Creating Persistent Workers' ---- - - - -[Persistent workers](/remote/persistent) can make your build faster. If -you have repeated actions in your build that have a high startup cost or would -benefit from cross-action caching, you may want to implement your own persistent -worker to perform these actions. - -The Bazel server communicates with the worker using `stdin`/`stdout`. It -supports the use of protocol buffers or JSON strings. - -The worker implementation has two parts: - -* The [worker](#making-worker). -* The [rule that uses the worker](#rule-uses-worker). - -## Making the worker - -A persistent worker upholds a few requirements: - -* It reads - [WorkRequests](https://github.com/bazelbuild/bazel/blob/54a547f30fd582933889b961df1d6e37a3e33d85/src/main/protobuf/worker_protocol.proto#L36) - from its `stdin`. -* It writes - [WorkResponses](https://github.com/bazelbuild/bazel/blob/54a547f30fd582933889b961df1d6e37a3e33d85/src/main/protobuf/worker_protocol.proto#L77) - (and only `WorkResponse`s) to its `stdout`. -* It accepts the `--persistent_worker` flag. The wrapper must recognize the - `--persistent_worker` command-line flag and only make itself persistent if - that flag is passed, otherwise it must do a one-shot compilation and exit. - -If your program upholds these requirements, it can be used as a persistent -worker! - -### Work requests - -A `WorkRequest` contains a list of arguments to the worker, a list of -path-digest pairs representing the inputs the worker can access (this isn’t -enforced, but you can use this info for caching), and a request id, which is 0 -for singleplex workers. - -NOTE: While the protocol buffer specification uses "snake case" (`request_id`), -the JSON protocol uses "camel case" (`requestId`). This document uses camel case -in the JSON examples, but snake case when talking about the field regardless of -protocol. 
-
-```json
-{
-  "arguments" : ["--some_argument"],
-  "inputs" : [
-    { "path": "/path/to/my/file/1", "digest": "fdk3e2ml23d"},
-    { "path": "/path/to/my/file/2", "digest": "1fwqd4qdd" }
-  ],
-  "requestId" : 12
-}
-```
-
-The optional `verbosity` field can be used to request extra debugging output
-from the worker. It is entirely up to the worker what and how to output. Higher
-values indicate more verbose output. Passing the `--worker_verbose` flag to
-Bazel sets the `verbosity` field to 10, but smaller or larger values can be used
-manually for different amounts of output.
-
-The optional `sandbox_dir` field is used only by workers that support
-[multiplex sandboxing](/remote/multiplex).
-
-### Work responses
-
-A `WorkResponse` contains a request id, a zero or nonzero exit code, and an
-output message describing any errors encountered in processing or executing
-the request. A worker should capture the `stdout` and `stderr` of any tool it
-calls and report them through the `WorkResponse`. Writing this output to the `stdout` of
-the worker process is unsafe, as it will interfere with the worker protocol.
-Writing it to the `stderr` of the worker process is safe, but the result is
-collected in a per-worker log file instead of ascribed to individual actions.
-
-```json
-{
-  "exitCode" : 1,
-  "output" : "Action failed with the following message:\nCould not find input
-    file \"/path/to/my/file/1\"",
-  "requestId" : 12
-}
-```
-
-As per the norm for protobufs, all fields are optional. However, Bazel requires
-the `WorkRequest` and the corresponding `WorkResponse` to have the same request
-id, so the request id must be specified if it is nonzero. This is a valid
-`WorkResponse`:
-
-```json
-{
-  "requestId" : 12
-}
-```
-
-A `request_id` of 0 indicates a "singleplex" request, used when this request
-cannot be processed in parallel with other requests. The server guarantees that
-a given worker receives requests with either only `request_id` 0 or only
-`request_id` greater than zero. Singleplex requests are sent serially: the
-server doesn't send another request until it has received a
-response (except for cancel requests, see below).
-
-**Notes**
-
-* Each protocol buffer is preceded by its length in `varint` format (see
-  [`MessageLite.writeDelimitedTo()`](https://developers.google.com/protocol-buffers/docs/reference/java/com/google/protobuf/MessageLite.html#writeDelimitedTo-java.io.OutputStream-)).
-* JSON requests and responses are not preceded by a size indicator.
-* JSON requests uphold the same structure as the protobuf, but use standard
-  JSON and use camel case for all field names.
-* In order to maintain the same backward and forward compatibility properties
-  as protobuf, JSON workers must tolerate unknown fields in these messages,
-  and use the protobuf defaults for missing values.
-* Bazel stores requests as protobufs and converts them to JSON using
-  [protobuf's JSON format](https://cs.opensource.google/protobuf/protobuf/+/master:java/util/src/main/java/com/google/protobuf/util/JsonFormat.java).
-
-### Cancellation
-
-Workers can optionally allow work requests to be cancelled before they finish.
-This is particularly useful in connection with dynamic execution, where local
-execution can regularly be interrupted by a faster remote execution. To allow
-cancellation, add `supports-worker-cancellation: 1` to the
-`execution-requirements` field (see below) and set the
-`--experimental_worker_cancellation` flag.
-
-A **cancel request** is a `WorkRequest` with the `cancel` field set (and
-similarly a **cancel response** is a `WorkResponse` with the `was_cancelled`
-field set). The only other field that must be in a cancel request or cancel
-response is `request_id`, indicating which request to cancel. The `request_id`
-field will be 0 for singleplex workers or the non-0 `request_id` of a previously
-sent `WorkRequest` for multiplex workers. The server may send cancel requests
-for requests that the worker has already responded to, in which case the cancel
-request must be ignored.
-
-Each non-cancel `WorkRequest` message must be answered exactly once, whether or
-not it was cancelled. Once the server has sent a cancel request, the worker may
-respond with a `WorkResponse` with the `request_id` set and the `was_cancelled`
-field set to true. Sending a regular `WorkResponse` is also accepted, but the
-`output` and `exit_code` fields will be ignored.
-
-Once a response has been sent for a `WorkRequest`, the worker must not touch the
-files in its working directory. The server is free to clean up the files,
-including temporary files.
-
-## Making the rule that uses the worker
-
-You'll also need to create a rule that generates actions to be performed by the
-worker. Making a Starlark rule that uses a worker is just like
-[creating any other rule](https://github.com/bazelbuild/examples/tree/master/rules).
-
-In addition, the rule needs to contain a reference to the worker itself, and
-there are some requirements for the actions it produces.
-
-### Referring to the worker
-
-The rule that uses the worker needs to contain a field that refers to the worker
-itself, so you'll need to create an instance of a `*_binary` rule to define
-your worker. If your worker is called `MyWorker.Java`, this might be the
-associated rule:
-
-```python
-java_binary(
-    name = "worker",
-    srcs = ["MyWorker.Java"],
-)
-```
-
-This creates the "worker" label, which refers to the worker binary. You'll then
-define a rule that *uses* the worker. This rule should define an attribute that
-refers to the worker binary.
-
-If the worker binary you built is in a package named "work", which is at the top
-level of the build, this might be the attribute definition:
-
-```python
-"worker": attr.label(
-    default = Label("//work:worker"),
-    executable = True,
-    cfg = "exec",
-)
-```
-
-`cfg = "exec"` indicates that the worker should be built to run on your
-execution platform rather than on the target platform (i.e., the worker is used
-as a tool during the build).
-
-### Work action requirements
-
-The rule that uses the worker creates actions for the worker to perform. These
-actions have a couple of requirements.
-
-* The *"arguments"* field. This takes a list of strings, all but the last of
-  which are arguments passed to the worker upon startup. The last element in
-  the "arguments" list is a `flag-file` (@-preceded) argument. Workers read
-  the arguments from the specified flagfile on a per-WorkRequest basis. Your
-  rule can write non-startup arguments for the worker to this flagfile.
-
-* The *"execution-requirements"* field, which takes a dictionary containing
-  `"supports-workers" : "1"`, `"supports-multiplex-workers" : "1"`, or both.
-
-  The "arguments" and "execution-requirements" fields are required for all
-  actions sent to workers. Additionally, actions that should be executed by
-  JSON workers need to include `"requires-worker-protocol" : "json"` in the
-  execution requirements field.
`"requires-worker-protocol" : "proto"` is also - a valid execution requirement, though it’s not required for proto workers, - since they are the default. - - You can also set a `worker-key-mnemonic` in the execution requirements. This - may be useful if you're reusing the executable for multiple action types and - want to distinguish actions by this worker. - -* Temporary files generated in the course of the action should be saved to the - worker's directory. This enables sandboxing. - -Note: To pass an argument starting with a literal `@`, start the argument with -`@@` instead. If an argument is also an external repository label, it will not -be considered a flagfile argument. - -Assuming a rule definition with "worker" attribute described above, in addition -to a "srcs" attribute representing the inputs, an "output" attribute -representing the outputs, and an "args" attribute representing the worker -startup args, the call to `ctx.actions.run` might be: - -```python -ctx.actions.run( - inputs=ctx.files.srcs, - outputs=[ctx.outputs.output], - executable=ctx.executable.worker, - mnemonic="someMnemonic", - execution_requirements={ - "supports-workers" : "1", - "requires-worker-protocol" : "json"}, - arguments=ctx.attr.args + ["@flagfile"] - ) -``` - -For another example, see -[Implementing persistent workers](/remote/persistent#implementation). - -## Examples - -The Bazel code base uses -[Java compiler workers](https://github.com/bazelbuild/bazel/blob/a4251eab6988d6cf4f5e35681fbe2c1b0abe48ef/src/java_tools/buildjar/java/com/google/devtools/build/buildjar/BazelJavaBuilder.java), -in addition to an -[example JSON worker](https://github.com/bazelbuild/bazel/blob/c65f768fec9889bbf1ee934c61d0dc061ea54ca2/src/test/java/com/google/devtools/build/lib/worker/ExampleWorker.java) -that is used in our integration tests. - -You can use their -[scaffolding](https://github.com/bazelbuild/bazel/blob/a4251eab6988d6cf4f5e35681fbe2c1b0abe48ef/src/main/java/com/google/devtools/build/lib/worker/WorkRequestHandler.java) -to make any Java-based tool into a worker by passing in the correct callback. - -For an example of a rule that uses a worker, take a look at Bazel's -[worker integration test](https://github.com/bazelbuild/bazel/blob/22b4dbcaf05756d506de346728db3846da56b775/src/test/shell/integration/bazel_worker_test.sh#L106). - -External contributors have implemented workers in a variety of languages; take a -look at -[Polyglot implementations of Bazel persistent workers](https://github.com/Ubehebe/bazel-worker-examples). -You can -[find many more examples on GitHub](https://github.com/search?q=bazel+workrequest&type=Code)! diff --git a/8.0.1/remote/multiplex.mdx b/8.0.1/remote/multiplex.mdx deleted file mode 100644 index b4b0a0d..0000000 --- a/8.0.1/remote/multiplex.mdx +++ /dev/null @@ -1,113 +0,0 @@ ---- -title: 'Multiplex Workers (Experimental Feature)' ---- - - - -This page describes multiplex workers, how to write multiplex-compatible -rules, and workarounds for certain limitations. - -Caution: Experimental features are subject to change at any time. - -_Multiplex workers_ allow Bazel to handle multiple requests with a single worker -process. For multi-threaded workers, Bazel can use fewer resources to -achieve the same, or better performance. For example, instead of having one -worker process per worker, Bazel can have four multiplexed workers talking to -the same worker process, which can then handle requests in parallel. 
For
-languages like Java and Scala, this saves JVM warm-up time and JIT compilation
-time, and in general it allows using one shared cache between all workers of
-the same type.
-
-## Overview
-
-There are two layers between the Bazel server and the worker process. For certain
-mnemonics that can run processes in parallel, Bazel gets a `WorkerProxy` from
-the worker pool. The `WorkerProxy` forwards requests to the worker process
-sequentially along with a `request_id`; the worker process processes each request
-and sends responses to the `WorkerMultiplexer`. When the `WorkerMultiplexer`
-receives a response, it parses the `request_id` and then forwards the response
-back to the correct `WorkerProxy`. Just as with non-multiplexed workers, all
-communication is done over standard in/out, but the tool cannot just use
-`stderr` for user-visible output ([see below](#output)).
-
-Each worker has a key. Bazel uses the key's hash code (composed of environment
-variables, the execution root, and the mnemonic) to determine which
-`WorkerMultiplexer` to use. `WorkerProxy`s communicate with the same
-`WorkerMultiplexer` if they have the same hash code. Therefore, assuming
-environment variables and the execution root are the same in a single Bazel
-invocation, each unique mnemonic can only have one `WorkerMultiplexer` and one
-worker process. The total number of workers, including regular workers and
-`WorkerProxy`s, is still limited by `--worker_max_instances`.
-
-## Writing multiplex-compatible rules
-
-The rule's worker process should be multi-threaded to take advantage of
-multiplex workers. Protobuf allows a ruleset to parse a single request even
-though there might be multiple requests piling up in the stream. Whenever the
-worker process parses a request from the stream, it should handle the request in
-a new thread. Because different threads could complete and write to the stream at
-the same time, the worker process needs to make sure the responses are written
-atomically (messages don't overlap). Responses must contain the
-`request_id` of the request they're handling. A minimal sketch of such a
-request-handling loop is shown below.
-
-### Handling multiplex output
-
-Multiplex workers need to be more careful about handling their output than
-singleplex workers. Anything sent to `stderr` will go into a single log file
-shared among all `WorkerProxy`s of the same type,
-randomly interleaved between concurrent requests. While redirecting `stdout`
-into `stderr` is a good idea, do not collect that output into the `output`
-field of `WorkResponse`, as that could show the user mangled pieces of output.
-If your tool only sends user-oriented output to `stdout` or `stderr`, you will
-need to change that behaviour before you can enable multiplex workers.
-
-## Enabling multiplex workers
-
-Multiplex workers are not enabled by default. A ruleset can turn on multiplex
-workers by using the `supports-multiplex-workers` tag in the
-`execution_requirements` of an action (just like the `supports-workers` tag
-enables regular workers). As is the case when using regular workers, a worker
-strategy needs to be specified, either at the ruleset level (for example,
-`--strategy=[some_mnemonic]=worker`) or generally at the strategy level (for
-example, `--dynamic_local_strategy=worker,standalone`). No additional flags are
-necessary, and `supports-multiplex-workers` takes precedence over
-`supports-workers`, if both are set. You can turn off multiplex workers
-globally by passing `--noworker_multiplex`.
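-
-The following is a minimal sketch of the request-handling loop described in
-the sections above, assuming the JSON variant of the worker protocol with one
-`WorkRequest` per line on `stdin`. It is illustrative only: a real worker
-would also honor the `--persistent_worker` startup flag and do actual work
-per request.
-
-```python
-import json
-import sys
-import threading
-
-# Responses from concurrent threads must not interleave on stdout,
-# so all writes are serialized through a single lock.
-_write_lock = threading.Lock()
-
-def _handle(request):
-    # Perform the real work for this request here (compile, lint, ...).
-    response = {"requestId": request.get("requestId", 0), "exitCode": 0}
-    with _write_lock:
-        sys.stdout.write(json.dumps(response) + "\n")
-        sys.stdout.flush()
-
-def main():
-    # Handle each request on its own thread so that a slow request
-    # doesn't block the ones piling up behind it in the stream.
-    for line in sys.stdin:
-        threading.Thread(target=_handle, args=(json.loads(line),)).start()
-
-if __name__ == "__main__":
-    main()
-```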
- -A ruleset is encouraged to use multiplex workers if possible, to reduce memory -pressure and improve performance. However, multiplex workers are not currently -compatible with [dynamic execution](/remote/dynamic) unless they -implement multiplex sandboxing. Attempting to run non-sandboxed multiplex -workers with dynamic execution will silently use sandboxed -singleplex workers instead. - -## Multiplex sandboxing - -Multiplex workers can be sandboxed by adding explicit support for it in the -worker implementations. While singleplex worker sandboxing can be done by -running each worker process in its own sandbox, multiplex workers share the -process working directory between multiple parallel requests. To allow -sandboxing of multiplex workers, the worker must support reading from and -writing to a subdirectory specified in each request, instead of directly in -its working directory. - -To support multiplex sandboxing, the worker must use the `sandbox_dir` field -from the `WorkRequest` and use that as a prefix for all file reads and writes. -While the `arguments` and `inputs` fields remain unchanged from an unsandboxed -request, the actual inputs are relative to the `sandbox_dir`. The worker must -translate file paths found in `arguments` and `inputs` to read from this -modified path, and must also write all outputs relative to the `sandbox_dir`. -This includes paths such as '.', as well as paths found in files specified -in the arguments (such as ["argfile"](https://docs.oracle.com/javase/7/docs/technotes/tools/windows/javac.html#commandlineargfile) arguments). - -Once a worker supports multiplex sandboxing, the ruleset can declare this -support by adding `supports-multiplex-sandboxing` to the -`execution_requirements` of an action. Bazel will then use multiplex sandboxing -if the `--experimental_worker_multiplex_sandboxing` flag is passed, or if -the worker is used with dynamic execution. - -The worker files of a sandboxed multiplex worker are still relative to the -working directory of the worker process. Thus, if a file is -used both for running the worker and as an input, it must be specified both as -an input in the flagfile argument as well as in `tools`, `executable`, or -`runfiles`. diff --git a/8.0.1/remote/output-directories.mdx b/8.0.1/remote/output-directories.mdx deleted file mode 100644 index bdbe029..0000000 --- a/8.0.1/remote/output-directories.mdx +++ /dev/null @@ -1,144 +0,0 @@ ---- -title: 'Output Directory Layout' ---- - - - -This page covers requirements and layout for output directories. - -## Requirements - -Requirements for an output directory layout: - -* Doesn't collide if multiple users are building on the same box. -* Supports building in multiple workspaces at the same time. -* Supports building for multiple target configurations in the same workspace. -* Doesn't collide with any other tools. -* Is easy to access. -* Is easy to clean, even selectively. -* Is unambiguous, even if the user relies on symbolic links when changing into - their client directory. -* All the build state per user should be underneath one directory ("I'd like to - clean all the .o files from all my clients.") - -## Current layout - -The solution that's currently implemented: - -* Bazel must be invoked from a directory containing a repo boundary file, or a - subdirectory thereof. In other words, Bazel must be invoked from inside a - [repository](../external/overview#repository). Otherwise, an error is - reported. 
-* The _outputRoot_ directory defaults to `${XDG_CACHE_HOME}/bazel` (or - `~/.cache/bazel`, if the `XDG_CACHE_HOME` environment variable is not set) on - Linux, `/private/var/tmp` on macOS, and on Windows it defaults to `%HOME%` if - set, else `%USERPROFILE%` if set, else the result of calling - `SHGetKnownFolderPath()` with the `FOLDERID_Profile` flag set. If the - environment variable `$TEST_TMPDIR` is set, as in a test of Bazel itself, - then that value overrides the default. -* The Bazel user's build state is located beneath `outputRoot/_bazel_$USER`. - This is called the _outputUserRoot_ directory. -* Beneath the `outputUserRoot` directory there is an `install` directory, and in - it is an `installBase` directory whose name is the MD5 hash of the Bazel - installation manifest. -* Beneath the `outputUserRoot` directory, an `outputBase` directory - is also created whose name is the MD5 hash of the path name of the workspace - root. So, for example, if Bazel is running in the workspace root - `/home/user/src/my-project` (or in a directory symlinked to that one), then - an output base directory is created called: - `/home/user/.cache/bazel/_bazel_user/7ffd56a6e4cb724ea575aba15733d113`. You - can also run `echo -n $(pwd) | md5sum` in the workspace root to get the MD5. -* You can use Bazel's `--output_base` startup option to override the default - output base directory. For example, - `bazel --output_base=/tmp/bazel/output build x/y:z`. -* You can also use Bazel's `--output_user_root` startup option to override the - default install base and output base directories. For example: - `bazel --output_user_root=/tmp/bazel build x/y:z`. - -The symlinks for "bazel-<workspace-name>", "bazel-out", "bazel-testlogs", -and "bazel-bin" are put in the workspace directory; these symlinks point to some -directories inside a target-specific directory inside the output directory. -These symlinks are only for the user's convenience, as Bazel itself does not -use them. Also, this is done only if the workspace root is writable. - -## Layout diagram - -The directories are laid out as follows: - -``` -<workspace-name>/ <== The workspace root - bazel-my-project => <..._main> <== Symlink to execRoot - bazel-out => <...bazel-out> <== Convenience symlink to outputPath - bazel-bin => <...bin> <== Convenience symlink to most recent written bin dir $(BINDIR) - bazel-testlogs => <...testlogs> <== Convenience symlink to the test logs directory - -/home/user/.cache/bazel/ <== Root for all Bazel output on a machine: outputRoot - _bazel_$USER/ <== Top level directory for a given user depends on the user name: - outputUserRoot - install/ - fba9a2c87ee9589d72889caf082f1029/ <== Hash of the Bazel install manifest: installBase - _embedded_binaries/ <== Contains binaries and scripts unpacked from the data section of - the bazel executable on first run (such as helper scripts and the - main Java file BazelServer_deploy.jar) - 7ffd56a6e4cb724ea575aba15733d113/ <== Hash of the client's workspace root (such as - /home/user/src/my-project): outputBase - action_cache/ <== Action cache directory hierarchy - This contains the persistent record of the file - metadata (timestamps, and perhaps eventually also MD5 - sums) used by the FilesystemValueChecker. - command.log <== A copy of the stdout/stderr output from the most - recent bazel command. - external/ <== The directory that remote repositories are - downloaded/symlinked into. - server/ <== The Bazel server puts all server-related files (such - as socket file, logs, etc) here. 
- jvm.out <== The debugging output for the server. - execroot/ <== The working directory for all actions. For special - cases such as sandboxing and remote execution, the - actions run in a directory that mimics execroot. - Implementation details, such as where the directories - are created, are intentionally hidden from the action. - Every action can access its inputs and outputs relative - to the execroot directory. - _main/ <== Working tree for the Bazel build & root of symlink forest: execRoot - _bin/ <== Helper tools are linked from or copied to here. - - bazel-out/ <== All actual output of the build is under here: outputPath - _tmp/actions/ <== Action output directory. This contains a file with the - stdout/stderr for every action from the most recent - bazel run that produced output. - local_linux-fastbuild/ <== one subdirectory per unique target BuildConfiguration instance; - this is currently encoded - bin/ <== Bazel outputs binaries for target configuration here: $(BINDIR) - foo/bar/_objs/baz/ <== Object files for a cc_* rule named //foo/bar:baz - foo/bar/baz1.o <== Object files from source //foo/bar:baz1.cc - other_package/other.o <== Object files from source //other_package:other.cc - foo/bar/baz <== foo/bar/baz might be the artifact generated by a cc_binary named - //foo/bar:baz - foo/bar/baz.runfiles/ <== The runfiles symlink farm for the //foo/bar:baz executable. - MANIFEST - _main/ - ... - genfiles/ <== Bazel puts generated source for the target configuration here: - $(GENDIR) - foo/bar.h such as foo/bar.h might be a headerfile generated by //foo:bargen - testlogs/ <== Bazel internal test runner puts test log files here - foo/bartest.log such as foo/bar.log might be an output of the //foo:bartest test with - foo/bartest.status foo/bartest.status containing exit status of the test (such as - PASSED or FAILED (Exit 1), etc) - include/ <== a tree with include symlinks, generated as needed. The - bazel-include symlinks point to here. This is used for - linkstamp stuff, etc. - host/ <== BuildConfiguration for build host (user's workstation), for - building prerequisite tools, that will be used in later stages - of the build (ex: Protocol Compiler) - <packages>/ <== Packages referenced in the build appear as if under a regular workspace -``` - -The layout of the \*.runfiles directories is documented in more detail in the places pointed to by RunfilesSupport. - -## `bazel clean` - -`bazel clean` does an `rm -rf` on the `outputPath` and the `action_cache` -directory. It also removes the workspace symlinks. The `--expunge` option -will clean the entire outputBase. diff --git a/8.0.1/remote/persistent.mdx b/8.0.1/remote/persistent.mdx deleted file mode 100644 index 1a56946..0000000 --- a/8.0.1/remote/persistent.mdx +++ /dev/null @@ -1,265 +0,0 @@ ---- -title: 'Persistent Workers' ---- - - - -This page covers how to use persistent workers, the benefits, requirements, and -how workers affect sandboxing. - -A persistent worker is a long-running process started by the Bazel server, which -functions as a *wrapper* around the actual *tool* (typically a compiler), or is -the *tool* itself. In order to benefit from persistent workers, the tool must -support doing a sequence of compilations, and the wrapper needs to translate -between the tool's API and the request/response format described below. 
The same worker might be called with and without the `--persistent_worker` flag
in the same build, and is responsible for appropriately starting and talking to
the tool, as well as shutting down workers on exit. Each worker instance is
assigned (but not chrooted to) a separate working directory under
`<outputBase>/bazel-workers`.

Using persistent workers is an
[execution strategy](/docs/user-manual#execution-strategy) that decreases
start-up overhead, allows more JIT compilation, and enables caching of, for
example, abstract syntax trees during action execution. This strategy achieves
these improvements by sending multiple requests to a long-running process.

Persistent workers are implemented for multiple languages, including Java,
[Scala](https://github.com/bazelbuild/rules_scala),
[Kotlin](https://github.com/bazelbuild/rules_kotlin), and more.

Programs using a NodeJS runtime can use the
[@bazel/worker](https://www.npmjs.com/package/@bazel/worker) helper library to
implement the worker protocol.

## Using persistent workers

[Bazel 0.27 and higher](https://blog.bazel.build/2019/06/19/list-strategy.html)
uses persistent workers by default when executing builds, though remote
execution takes precedence. For actions that do not support persistent workers,
Bazel falls back to starting a tool instance for each action. You can explicitly
set your build to use persistent workers by setting the `worker`
[strategy](/docs/user-manual#execution-strategy) for the applicable tool
mnemonics. As a best practice, this example includes specifying `local` as a
fallback to the `worker` strategy:

```posix-terminal
bazel build //my:target --strategy=Javac=worker,local
```

Using the worker strategy instead of the local strategy can boost compilation
speed significantly, depending on the implementation. For Java, builds can be
2–4 times faster, sometimes more for incremental compilation. Compiling Bazel is
about 2.5 times as fast with workers. For more details, see the
"[Choosing number of workers](#number-of-workers)" section.

If you also have a remote build environment that matches your local build
environment, you can use the experimental
[*dynamic* strategy](https://blog.bazel.build/2019/02/01/dynamic-spawn-scheduler.html),
which races a remote execution and a worker execution. To enable the dynamic
strategy, pass the
[--experimental_spawn_scheduler](/reference/command-line-reference#flag--experimental_spawn_scheduler)
flag. This strategy automatically enables workers, so there is no need to
specify the `worker` strategy, but you can still use `local` or `sandboxed` as
fallbacks.

## Choosing number of workers

The default number of worker instances per mnemonic is 4, but it can be adjusted
with the
[`worker_max_instances`](/reference/command-line-reference#flag--worker_max_instances)
flag. There is a trade-off between making good use of the available CPUs and the
amount of JIT compilation and cache hits you get. With more workers, more
targets will pay start-up costs of running non-JITted code and hitting cold
caches. If you have a small number of targets to build, a single worker may give
the best trade-off between compilation speed and resource usage (for example,
see [issue #8586](https://github.com/bazelbuild/bazel/issues/8586)).
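As a sketch, a build that pins the `Javac` mnemonic to a single worker instance,
with `local` kept as a fallback strategy, would look like this (the target label
is illustrative):

```posix-terminal
bazel build //my:target --strategy=Javac=worker,local --worker_max_instances=Javac=1
```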
The `worker_max_instances` flag sets the maximum number of worker instances per
mnemonic and flag set (see below), so in a mixed system you could end up using
quite a lot of memory if you keep the default value. For incremental builds the
benefit of multiple worker instances is even smaller.

This graph shows the from-scratch compilation times for Bazel (target
`//src:bazel`) on a 6-core hyper-threaded Intel Xeon 3.5 GHz Linux workstation
with 64 GB of RAM. For each worker configuration, five clean builds are run and
the average of the last four is taken.

![Graph of performance improvements of clean builds](/docs/images/workers-clean-chart.png "Performance improvements of clean builds")

**Figure 1.** Graph of performance improvements of clean builds.

For this configuration, two workers give the fastest compile, though at only a
14% improvement compared to one worker. One worker is a good option if you want
to use less memory.

Incremental compilation typically benefits even more. Clean builds are
relatively rare, but changing a single file between compiles is common, in
particular in test-driven development. The above example also includes some
non-Java packaging actions that can overshadow the incremental compile time.

Recompiling the Java sources only
(`//src/main/java/com/google/devtools/build/lib/bazel:BazelServer_deploy.jar`)
after changing an internal string constant in
[AbstractContainerizingSandboxedSpawn.java](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/sandbox/AbstractContainerizingSandboxedSpawn.java)
gives a 3x speed-up (average of 20 incremental builds with one warmup build
discarded):

![Graph of performance improvements of incremental builds](/docs/images/workers-incremental-chart.png "Performance improvements of incremental builds")

**Figure 2.** Graph of performance improvements of incremental builds.

The speed-up depends on the change being made. A speed-up of a factor of 6 was
measured in the above situation when a commonly used constant was changed.

## Modifying persistent workers

You can pass the
[`--worker_extra_flag`](/reference/command-line-reference#flag--worker_extra_flag)
flag to specify start-up flags for workers, keyed by mnemonic. For instance,
passing `--worker_extra_flag=javac=--debug` turns on debugging for Javac only.
Only one worker flag can be set per use of this flag, and only for one mnemonic.
Workers are not just created separately for each mnemonic, but also for
variations in their start-up flags. Each combination of mnemonic and start-up
flags is combined into a `WorkerKey`, and for each `WorkerKey` up to
`worker_max_instances` workers may be created. See the next section for how the
action configuration can also specify set-up flags.

Passing the
[`--worker_sandboxing`](/reference/command-line-reference#flag--worker_sandboxing)
flag makes each worker request use a separate sandbox directory for all its
inputs. Setting up the [sandbox](/docs/sandboxing) takes some extra time,
especially on macOS, but gives a better correctness guarantee.

The
[`--worker_quit_after_build`](/reference/command-line-reference#flag--worker_quit_after_build)
flag is mainly useful for debugging and profiling. This flag forces all workers
to quit once a build is done. You can also pass
[`--worker_verbose`](/reference/command-line-reference#flag--worker_verbose) to
get more output about what the workers are doing.
This flag is reflected in the `verbosity` field in `WorkRequest`, allowing
worker implementations to also be more verbose.

Workers store their logs in the `<outputBase>/bazel-workers` directory, for
example
`/tmp/_bazel_larsrc/191013354bebe14fdddae77f2679c3ef/bazel-workers/worker-1-Javac.log`.
The file name includes the worker id and the mnemonic. Since there can be more
than one `WorkerKey` per mnemonic, you may see more than `worker_max_instances`
log files for a given mnemonic.

For Android builds, see details at the
[Android Build Performance page](/docs/android-build-performance).

## Implementing persistent workers

See the [creating persistent workers](/remote/creating) page for more
information on how to make a worker.

This example shows a Starlark configuration for a worker that uses JSON:

```python
args_file = ctx.actions.declare_file(ctx.label.name + "_args_file")
ctx.actions.write(
    output = args_file,
    # File objects must be converted to their paths before joining.
    content = "\n".join(["-g", "-source", "1.5"] + [f.path for f in ctx.files.srcs]),
)
ctx.actions.run(
    mnemonic = "SomeCompiler",
    executable = "bin/some_compiler_wrapper",
    inputs = inputs,
    outputs = outputs,
    arguments = ["-max_mem=4G", "@%s" % args_file.path],
    execution_requirements = {
        "supports-workers": "1",
        "requires-worker-protocol": "json",
    },
)
```

With this definition, the first use of this action would start with executing
the command line `bin/some_compiler_wrapper -max_mem=4G --persistent_worker`. A
request to compile `Foo.java` would then look like:

NOTE: While the protocol buffer specification uses "snake case" (`request_id`),
the JSON protocol uses "camel case" (`requestId`). In this document, we will use
camel case in the JSON examples, but snake case when talking about the field
regardless of protocol.

```json
{
  "arguments": ["-g", "-source", "1.5", "Foo.java"],
  "inputs": [
    { "path": "symlinkfarm/input1", "digest": "d49a..." },
    { "path": "symlinkfarm/input2", "digest": "093d..." }
  ]
}
```

The worker receives this on `stdin` in newline-delimited JSON format (because
`requires-worker-protocol` is set to JSON). The worker then performs the action,
and sends a JSON-formatted `WorkResponse` to Bazel on its stdout. Bazel then
parses this response and manually converts it to a `WorkResponse` proto. To
communicate with the associated worker using binary-encoded protobuf instead of
JSON, `requires-worker-protocol` would be set to `proto`, like this:

```
  execution_requirements = {
      "supports-workers": "1",
      "requires-worker-protocol": "proto",
  }
```

If you do not include `requires-worker-protocol` in the execution requirements,
Bazel will default the worker communication to use protobuf.

Bazel derives the `WorkerKey` from the mnemonic and the shared flags, so if this
configuration allowed changing the `max_mem` parameter, a separate worker would
be spawned for each value used. This can lead to excessive memory consumption if
too many variations are used.

Each worker can currently only process one request at a time. The experimental
[multiplex workers](/remote/multiplex) feature allows using multiple
threads, if the underlying tool is multithreaded and the wrapper is set up to
understand this.

In
[this GitHub repo](https://github.com/Ubehebe/bazel-worker-examples),
you can see example worker wrappers written in Java as well as in Python.
If you are working in JavaScript or TypeScript, the
[@bazel/worker package](https://www.npmjs.com/package/@bazel/worker)
and the
[nodejs worker example](https://github.com/bazelbuild/rules_nodejs/tree/stable/examples/worker)
might be helpful.

## How do workers affect sandboxing?

Using the `worker` strategy by default does not run the action in a
[sandbox](/docs/sandboxing), similar to the `local` strategy. You can set the
`--worker_sandboxing` flag to run all workers inside sandboxes, making sure each
execution of the tool only sees the input files it's supposed to have. The tool
may still leak information between requests internally, for instance through a
cache. Using the `dynamic` strategy
[requires workers to be sandboxed](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/exec/SpawnStrategyRegistry.java).

To allow correct use of compiler caches with workers, a digest is passed along
with each input file. Thus the compiler or the wrapper can check if the input is
still valid without having to read the file.

Even when using the input digests to guard against unwanted caching, sandboxed
workers offer less strict sandboxing than a pure sandbox, because the tool may
keep other internal state that has been affected by previous requests.

Multiplex workers can only be sandboxed if the worker implementation supports
it, and this sandboxing must be separately enabled with the
`--experimental_worker_multiplex_sandboxing` flag. See more details in
[the design doc](https://docs.google.com/document/d/1ncLW0hz6uDhNvci1dpzfEoifwTiNTqiBEm1vi-bIIRM/edit).

## Further reading

For more information on persistent workers, see:

* [Original persistent workers blog post](https://blog.bazel.build/2015/12/10/java-workers.html)
* [Haskell implementation description](https://www.tweag.io/blog/2019-09-25-bazel-ghc-persistent-worker-internship/)
* [Blog post by Mike Morearty](https://medium.com/@mmorearty/how-to-create-a-persistent-worker-for-bazel-7738bba2cabb)
* [Front End Development with Bazel: Angular/TypeScript and Persistent Workers
  w/ Asana](https://www.youtube.com/watch?v=0pgERydGyqo)
* [Bazel strategies explained](https://jmmv.dev/2019/12/bazel-strategies.html)
* [Informative worker strategy discussion on the bazel-discuss mailing list](https://groups.google.com/forum/#!msg/bazel-discuss/oAEnuhYOPm8/ol7hf4KWJgAJ)

diff --git a/8.0.1/remote/rbe.mdx b/8.0.1/remote/rbe.mdx
deleted file mode 100644
index 75d4a15..0000000
--- a/8.0.1/remote/rbe.mdx
+++ /dev/null
@@ -1,33 +0,0 @@
---
title: 'Remote Execution Overview'
---

This page covers the benefits, requirements, and options for running Bazel
with remote execution.

By default, Bazel executes builds and tests on your local machine. Remote
execution of a Bazel build allows you to distribute build and test actions
across multiple machines, such as a datacenter.

Remote execution provides the following benefits:

* Faster build and test execution through scaling of nodes available
  for parallel actions
* A consistent execution environment for a development team
* Reuse of build outputs across a development team

Bazel uses an open-source
[gRPC protocol](https://github.com/bazelbuild/remote-apis)
to allow for remote execution and remote caching.
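As a minimal sketch, a build is pointed at a remote execution service with a
single flag; the endpoint below is a placeholder, and real services typically
also require authentication flags:

```posix-terminal
bazel build //my:target --remote_executor=grpcs://remote.example.com
```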
For a list of commercially supported remote execution services as well as
self-service tools, see
[Remote Execution Services](https://www.bazel.build/remote-execution-services.html).

## Requirements

Remote execution of Bazel builds imposes a set of mandatory configuration
constraints on the build. For more information, see
[Adapting Bazel Rules for Remote Execution](/remote/rules).

diff --git a/8.0.1/remote/rules.mdx b/8.0.1/remote/rules.mdx
deleted file mode 100644
index 340ab02..0000000
--- a/8.0.1/remote/rules.mdx
+++ /dev/null
@@ -1,180 +0,0 @@
---
title: 'Adapting Bazel Rules for Remote Execution'
---

This page is intended for Bazel users writing custom build and test rules
who want to understand the requirements for Bazel rules in the context of
remote execution.

Remote execution allows Bazel to execute actions on a separate platform, such as
a datacenter. Bazel uses a
[gRPC protocol](https://github.com/bazelbuild/remote-apis/blob/main/build/bazel/remote/execution/v2/remote_execution.proto)
for its remote execution. You can try remote execution with
[bazel-buildfarm](https://github.com/bazelbuild/bazel-buildfarm),
an open-source project that aims to provide a distributed remote execution
platform.

This page uses the following terminology when referring to different
environment types or *platforms*:

* **Host platform** - where Bazel runs.
* **Execution platform** - where Bazel actions run.
* **Target platform** - where the build outputs (and some actions) run.

## Overview

When configuring a Bazel build for remote execution, you must follow the
guidelines described in this page to ensure the build executes remotely
error-free. This is due to the nature of remote execution, namely:

* **Isolated build actions.** Build tools do not retain state and dependencies
  cannot leak between them.

* **Diverse execution environments.** Local build configuration is not always
  suitable for remote execution environments.

This page describes the issues that can arise when implementing custom Bazel
build and test rules for remote execution and how to avoid them. It covers the
following topics:

* [Invoking build tools through toolchain rules](#toolchain-rules)
* [Managing implicit dependencies](#manage-dependencies)
* [Managing platform-dependent binaries](#manage-binaries)
* [Managing configure-style WORKSPACE rules](#manage-workspace-rules)

## Invoking build tools through toolchain rules

A Bazel toolchain rule is a configuration provider that tells a build rule what
build tools, such as compilers and linkers, to use and how to configure them
using parameters defined by the rule's creator. A toolchain rule allows build
and test rules to invoke build tools in a predictable, preconfigured manner
that's compatible with remote execution. For example, use a toolchain rule
instead of invoking build tools via the `PATH`, `JAVA_HOME`, or other local
variables that may not be set to equivalent values (or at all) in the remote
execution environment.
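As a minimal sketch of this pattern (all names here are hypothetical, not an
existing API), a toolchain rule can carry the compiler as an explicitly declared
label, so that consuming rules never consult `PATH` or `JAVA_HOME`:

```python
# my_toolchain.bzl (hypothetical): expose a compiler binary through
# ToolchainInfo instead of locating it on the host at action time.
def _my_toolchain_impl(ctx):
    return [platform_common.ToolchainInfo(
        # Because the compiler is a declared dependency, Bazel can ship it
        # (or a remotely built equivalent) to the execution platform.
        compiler = ctx.attr.compiler,
    )]

my_toolchain = rule(
    implementation = _my_toolchain_impl,
    attrs = {
        "compiler": attr.label(
            executable = True,
            cfg = "exec",
        ),
    },
)
```

A build rule that declares the matching toolchain type can then retrieve the
tool through `ctx.toolchains` during analysis, and Bazel selects the toolchain
that is compatible with the chosen execution platform.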
Toolchain rules currently exist for Bazel build and test rules for
[Scala](https://github.com/bazelbuild/rules_scala/blob/master/scala/scala_toolchain.bzl),
[Rust](https://github.com/bazelbuild/rules_rust/blob/main/rust/toolchain.bzl),
and [Go](https://github.com/bazelbuild/rules_go/blob/master/go/toolchains.rst),
and new toolchain rules are under way for other languages and tools such as
[bash](https://docs.google.com/document/d/e/2PACX-1vRCSB_n3vctL6bKiPkIa_RN_ybzoAccSe0ic8mxdFNZGNBJ3QGhcKjsL7YKf-ngVyjRZwCmhi_5KhcX/pub).
If a toolchain rule does not exist for the tool your rule uses, consider
[creating a toolchain rule](/extending/toolchains#creating-a-toolchain-rule).

## Managing implicit dependencies

If a build tool can access dependencies across build actions, those actions will
fail when remotely executed because each remote build action is executed
separately from the others. Some build tools retain state across build actions
and access dependencies that have not been explicitly included in the tool
invocation, which will cause remotely executed build actions to fail.

For example, when Bazel instructs a stateful compiler to locally build _foo_,
the compiler retains references to foo's build outputs. When Bazel then
instructs the compiler to build _bar_, which depends on _foo_, without
explicitly stating that dependency in the BUILD file for inclusion in the
compiler invocation, the action executes successfully as long as the same
compiler instance executes for both actions (as is typical for local execution).
However, since in a remote execution scenario each build action executes a
separate compiler instance, compiler state and _bar_'s implicit dependency on
_foo_ will be lost and the build will fail.

To help detect and eliminate these dependency problems, Bazel 0.14.1 offers the
local Docker sandbox, which has the same restrictions for dependencies as remote
execution. Use the sandbox to prepare your build for remote execution by
identifying and resolving dependency-related build errors. See [Troubleshooting Bazel Remote Execution with Docker Sandbox](/remote/sandbox)
for more information.

## Managing platform-dependent binaries

Typically, a binary built on the host platform cannot safely execute on an
arbitrary remote execution platform due to potentially mismatched dependencies.
For example, the SingleJar binary supplied with Bazel targets the host platform.
However, for remote execution, SingleJar must be compiled as part of the process
of building your code so that it targets the remote execution platform. (See the
[target selection logic](https://github.com/bazelbuild/bazel/blob/130aeadfd660336572c3da397f1f107f0c89aa8d/tools/jdk/BUILD#L115).)

Do not ship binaries of build tools required by your build with your source code
unless you are sure they will safely run in your execution platform. Instead, do
one of the following:

* Ship or externally reference the source code for the tool so that it can be
  built for the remote execution platform.

* Pre-install the tool into the remote execution environment (for example, a
  toolchain container) if it's stable enough and use toolchain rules to run it
  in your build.

## Managing configure-style WORKSPACE rules

Bazel's `WORKSPACE` rules can be used for probing the host platform for tools
and libraries required by the build, which, for local builds, is also Bazel's
execution platform.
If the build explicitly depends on local build tools and
artifacts, it will fail during remote execution if the remote execution platform
is not identical to the host platform.

The following actions performed by `WORKSPACE` rules are not compatible with
remote execution:

* **Building binaries.** Executing compilation actions in `WORKSPACE` rules
  results in binaries that are incompatible with the remote execution platform
  if different from the host platform.

* **Installing `pip` packages.** `pip` packages installed via `WORKSPACE`
  rules require that their dependencies be pre-installed on the host platform.
  Such packages, built specifically for the host platform, will be
  incompatible with the remote execution platform if different from the host
  platform.

* **Symlinking to local tools or artifacts.** Symlinks to tools or libraries
  installed on the host platform created via `WORKSPACE` rules will cause the
  build to fail on the remote execution platform as Bazel will not be able to
  locate them. Instead, create symlinks using standard build actions so that
  the symlinked tools and libraries are accessible from Bazel's `runfiles`
  tree. Do not use [`repository_ctx.symlink`](/rules/lib/builtins/repository_ctx#symlink)
  to symlink target files outside of the external repo directory.

* **Mutating the host platform.** Avoid creating files outside of the Bazel
  `runfiles` tree, creating environment variables, and similar actions, as
  they may behave unexpectedly on the remote execution platform.

To help find potential non-hermetic behavior, you can use the [Workspace rules log](/remote/workspace).

If an external dependency executes specific operations dependent on the host
platform, you should split those operations between `WORKSPACE` and build
rules as follows:

* **Platform inspection and dependency enumeration.** These operations are
  safe to execute locally via `WORKSPACE` rules, which can check which
  libraries are installed, download packages that must be built, and prepare
  required artifacts for compilation. For remote execution, these rules must
  also support using pre-checked artifacts to provide the information that
  would normally be obtained during host platform inspection. Pre-checked
  artifacts allow Bazel to describe dependencies as if they were local. Use
  conditional statements or the `--override_repository` flag for this.

* **Generating or compiling target-specific artifacts and platform mutation.**
  These operations must be executed via regular build rules. Actions that
  produce target-specific artifacts for external dependencies must execute
  during the build.

To more easily generate pre-checked artifacts for remote execution, you can use
`WORKSPACE` rules to emit generated files. You can run those rules on each new
execution environment, such as inside each toolchain container, and check the
outputs of your remote execution build into your source repo to reference.

For example, for Tensorflow's rules for [`cuda`](https://github.com/tensorflow/tensorflow/blob/master/third_party/gpus/cuda_configure.bzl)
and [`python`](https://github.com/tensorflow/tensorflow/blob/master/third_party/py/python_configure.bzl),
the `WORKSPACE` rules produce the following [`BUILD` files](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/third_party/toolchains/cpus/py).
For local execution, files produced by checking the host environment are used.
For remote execution, a [conditional statement](https://github.com/tensorflow/tensorflow/blob/master/third_party/py/python_configure.bzl#L304)
on an environment variable allows the rule to use files that are checked into
the repo.

The `BUILD` files declare [`genrules`](https://github.com/tensorflow/tensorflow/blob/master/third_party/py/python_configure.bzl#L84)
that can run both locally and remotely, and perform the necessary processing
that was previously done via `repository_ctx.symlink` as shown [here](https://github.com/tensorflow/tensorflow/blob/d1ba01f81d8fa1d0171ba9ce871599063d5c7eb9/third_party/gpus/cuda_configure.bzl#L730).

diff --git a/8.0.1/remote/sandbox.mdx b/8.0.1/remote/sandbox.mdx
deleted file mode 100644
index cfb9be4..0000000
--- a/8.0.1/remote/sandbox.mdx
+++ /dev/null
@@ -1,260 +0,0 @@
---
title: 'Troubleshooting Bazel Remote Execution with Docker Sandbox'
---

Bazel builds that succeed locally may fail when executed remotely due to
restrictions and requirements that do not affect local builds. The most common
causes of such failures are described in [Adapting Bazel Rules for Remote Execution](/remote/rules).

This page describes how to identify and resolve the most common issues that
arise with remote execution using the Docker sandbox feature, which imposes
restrictions upon the build equal to those of remote execution. This allows you
to troubleshoot your build without the need for a remote execution service.

The Docker sandbox feature mimics the restrictions of remote execution as
follows:

* **Build actions execute in toolchain containers.** You can use the same
  toolchain containers to run your build locally and remotely via a service
  supporting containerized remote execution.

* **No extraneous data crosses the container boundary.** Only explicitly
  declared inputs and outputs enter and leave the container, and only after
  the associated build action successfully completes.

* **Each action executes in a fresh container.** A new, unique container is
  created for each spawned build action.

Note: Builds take noticeably more time to complete when the Docker sandbox
feature is enabled. This is normal.

You can troubleshoot these issues using one of the following methods:

* **[Troubleshooting natively.](#troubleshooting-natively)** With this method,
  Bazel and its build actions run natively on your local machine. The Docker
  sandbox feature imposes restrictions upon the build equal to those of remote
  execution. However, this method will not detect local tools, states, and
  data leaking into your build, which will cause problems with remote execution.

* **[Troubleshooting in a Docker container.](#troubleshooting-docker-container)**
  With this method, Bazel and its build actions run inside a Docker container,
  which allows you to detect tools, states, and data leaking from the local
  machine into the build, in addition to imposing restrictions
  equal to those of remote execution. This method provides insight into your
  build even if portions of the build are failing. This method is experimental
  and not officially supported.

## Prerequisites

Before you begin troubleshooting, do the following if you have not already done so:

* Install Docker and configure the permissions required to run it.
* Install Bazel 0.14.1 or later. Earlier versions do not support the Docker
  sandbox feature.
* Add the [bazel-toolchains](https://releases.bazel.build/bazel-toolchains.html)
  repo, pinned to the latest release version, to your build's `WORKSPACE` file
  as described [here](https://releases.bazel.build/bazel-toolchains.html).
* Add flags to your `.bazelrc` file to enable the feature. Create the file in
  the root directory of your Bazel project if it does not exist. The flags below
  are a reference sample. Please see the latest
  [`.bazelrc`](https://github.com/bazelbuild/bazel-toolchains/tree/master/bazelrc)
  file in the bazel-toolchains repo and copy the values of the flags defined
  there for config `docker-sandbox`.

```
# Docker Sandbox Mode
build:docker-sandbox --host_javabase=<...>
build:docker-sandbox --javabase=<...>
build:docker-sandbox --crosstool_top=<...>
build:docker-sandbox --experimental_docker_image=<...>
build:docker-sandbox --spawn_strategy=docker --strategy=Javac=docker --genrule_strategy=docker
build:docker-sandbox --define=EXECUTOR=remote
build:docker-sandbox --experimental_docker_verbose
build:docker-sandbox --experimental_enable_docker_sandbox
```

Note: The flags referenced in the `.bazelrc` file shown above are configured
to run within the [`rbe-ubuntu16-04`](https://console.cloud.google.com/launcher/details/google/rbe-ubuntu16-04)
container.

If your rules require additional tools, do the following:

1. Create a custom Docker container by installing tools using a [Dockerfile](https://docs.docker.com/engine/reference/builder/)
   and [building](https://docs.docker.com/engine/reference/commandline/build/)
   the image locally.

2. Replace the value of the `--experimental_docker_image` flag above with the
   name of your custom container image.

## Troubleshooting natively

This method executes Bazel and all of its build actions directly on the local
machine and is a reliable way to confirm whether your build will succeed when
executed remotely.

However, with this method, locally installed tools, binaries, and data may leak
into your build, especially if it uses [configure-style WORKSPACE rules](/remote/rules#manage-workspace-rules).
Such leaks will cause problems with remote execution; to detect them, [troubleshoot in a Docker container](#troubleshooting-docker-container)
in addition to troubleshooting natively.

### Step 1: Run the build

1. Add the `--config=docker-sandbox` flag to the Bazel command that executes
   your build. For example:

   ```posix-terminal
   bazel --bazelrc=.bazelrc build --config=docker-sandbox <target>
   ```

2. Run the build and wait for it to complete. The build will run up to four
   times slower than normal due to the Docker sandbox feature.

You may encounter the following error:

```none {:.devsite-disable-click-to-copy}
ERROR: 'docker' is an invalid value for docker spawn strategy.
```

If you do, run the build again with the `--experimental_docker_verbose` flag.
This flag enables verbose error messages. This error is typically caused by a
faulty Docker installation or lack of permissions to execute it under the
current user account. See the [Docker documentation](https://docs.docker.com/install/linux/linux-postinstall/)
for more information. If problems persist, skip ahead to [Troubleshooting in a Docker container](#troubleshooting-docker-container).

### Step 2: Resolve detected issues

The following are the most commonly encountered issues and their workarounds.
* **A file, tool, binary, or resource referenced by the Bazel runfiles tree is
  missing.** Confirm that all dependencies of the affected targets have been
  [explicitly declared](/concepts/dependencies). See
  [Managing implicit dependencies](/remote/rules#manage-dependencies)
  for more information.

* **A file, tool, binary, or resource referenced by an absolute path or the `PATH`
  variable is missing.** Confirm that all required tools are installed within
  the toolchain container and use [toolchain rules](/extending/toolchains) to properly
  declare dependencies pointing to the missing resource. See
  [Invoking build tools through toolchain rules](/remote/rules#invoking-build-tools-through-toolchain-rules)
  for more information.

* **A binary execution fails.** One of the build rules is referencing a binary
  incompatible with the execution environment (the Docker container). See
  [Managing platform-dependent binaries](/remote/rules#manage-binaries)
  for more information. If you cannot resolve the issue, contact [bazel-discuss@google.com](mailto:bazel-discuss@google.com)
  for help.

* **A file from `@local_jdk` is missing or causing errors.** The Java binaries
  on your local machine are leaking into the build while being incompatible with
  it. Use [`java_toolchain`](/reference/be/java#java_toolchain)
  in your rules and targets instead of `@local_jdk`. Contact [bazel-discuss@google.com](mailto:bazel-discuss@google.com) if you need further help.

* **Other errors.** Contact [bazel-discuss@google.com](mailto:bazel-discuss@google.com) for help.

## Troubleshooting in a Docker container

With this method, Bazel runs inside a host Docker container, and Bazel's build
actions execute inside individual toolchain containers spawned by the Docker
sandbox feature. The sandbox spawns a brand new toolchain container for each
build action and only one action executes in each toolchain container.

This method provides more granular control of tools installed in the host
environment. By separating the execution of the build from the execution of its
build actions and keeping the installed tooling to a minimum, you can verify
whether your build has any dependencies on the local execution environment.

### Step 1: Build the container

Note: The commands below are tailored specifically for a `debian:stretch` base.
For other bases, modify them as necessary.

1. Create a `Dockerfile` that creates the Docker container and installs Bazel
   with a minimal set of build tools:

   ```
   FROM debian:stretch

   RUN apt-get update && apt-get install -y apt-transport-https curl software-properties-common git gcc gnupg2 g++ openjdk-8-jdk-headless python-dev zip wget vim

   RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add -

   RUN add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian $(lsb_release -cs) stable"

   RUN apt-get update && apt-get install -y docker-ce

   RUN wget https://releases.bazel.build/<bazel-version>/release/bazel-<bazel-version>-installer-linux-x86_64.sh -O ./bazel-installer.sh && chmod 755 ./bazel-installer.sh

   RUN ./bazel-installer.sh
   ```

2. Build the container as `bazel_container`:

   ```posix-terminal
   docker build -t bazel_container - < Dockerfile
   ```

### Step 2: Start the container

Start the Docker container using the command shown below. In the command,
substitute the path to the source code on your host that you want to build.
```posix-terminal
docker run -it \
  -v /var/run/docker.sock:/var/run/docker.sock \
  -v /tmp:/tmp \
  -v <your source code directory>:/src \
  -w /src \
  bazel_container \
  /bin/bash
```

This command runs the container as root, maps the Docker socket, and mounts
the `/tmp` directory. This allows Bazel to spawn other Docker containers and to
use directories under `/tmp` to share files with those containers. Your source
code is available at `/src` inside the container.

The command intentionally starts from a `debian:stretch` base container that
includes binaries incompatible with the `rbe-ubuntu16-04` container used as a
toolchain container. If binaries from the local environment are leaking into the
toolchain container, they will cause build errors.

### Step 3: Test the container

Run the following commands from inside the Docker container to test it:

```posix-terminal
docker ps

bazel version
```

### Step 4: Run the build

Run the build as shown below. The output user is root so that it corresponds to
a directory that is accessible with the same absolute path from inside the host
container in which Bazel runs, from the toolchain containers spawned by the Docker
sandbox feature in which Bazel's build actions are running, and from the local
machine on which the host and action containers run.

```posix-terminal
bazel --output_user_root=/tmp/bazel_docker_root --bazelrc=.bazelrc \
  build --config=docker-sandbox <target>
```

### Step 5: Resolve detected issues

You can resolve build failures as follows:

* If the build fails with an "out of disk space" error, you can increase this
  limit by starting the host container with the flag `--memory=XX`, where `XX`
  is the allocated disk space in gigabytes. This is experimental and may
  result in unpredictable behavior.

* If the build fails during the analysis or loading phases, one or more of
  your build rules declared in the WORKSPACE file are not compatible with
  remote execution. See [Adapting Bazel Rules for Remote Execution](/remote/rules)
  for possible causes and workarounds.

* If the build fails for any other reason, see the troubleshooting steps in [Step 2: Resolve detected issues](#start-container).

diff --git a/8.0.1/remote/workspace.mdx b/8.0.1/remote/workspace.mdx
deleted file mode 100644
index ae0aea5..0000000
--- a/8.0.1/remote/workspace.mdx
+++ /dev/null
@@ -1,127 +0,0 @@
---
title: 'Finding Non-Hermetic Behavior in WORKSPACE Rules'
---

In the following, a host machine is the machine where Bazel runs.

When using remote execution, the actual build and/or test steps are not
happening on the host machine, but are instead sent off to the remote execution
system. However, the steps involved in resolving workspace rules are happening
on the host machine. If your workspace rules access information about the
host machine for use during execution, your build is likely to break due to
incompatibilities between the environments.

As part of [adapting Bazel rules for remote
execution](/remote/rules), you need to find such workspace rules
and fix them. This page describes how to find potentially problematic workspace
rules using the workspace log.

## Finding non-hermetic rules

[Workspace rules](/reference/be/workspace) allow the developer to add dependencies to
external workspaces, but they are rich enough to allow arbitrary processing to
happen in the process.
All related commands are happening locally and can be a
potential source of non-hermeticity. Usually non-hermetic behavior is
introduced through
[`repository_ctx`](/rules/lib/builtins/repository_ctx), which allows interacting
with the host machine.

Starting with Bazel 0.18, you can get a log of some potentially non-hermetic
actions by adding the flag `--experimental_workspace_rules_log_file=[PATH]` to
your Bazel command. Here `[PATH]` is a filename under which the log will be
created.

Things to note:

* The log captures the events as they are executed. If some steps are
  cached, they will not show up in the log, so to get a full result, don't
  forget to run `bazel clean --expunge` beforehand.

* Sometimes functions might be re-executed, in which case the related
  events will show up in the log multiple times.

* Workspace rules currently only log Starlark events.

  Note: These particular rules do not cause hermeticity concerns as long
  as a hash is specified.

To find what was executed during workspace initialization:

1. Run `bazel clean --expunge`. This command will clean your local cache and
   any cached repositories, ensuring that all initialization will be re-run.

2. Add `--experimental_workspace_rules_log_file=/tmp/workspacelog` to your
   Bazel command and run the build.

   This produces a binary proto file listing messages of type
   [WorkspaceEvent](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/bazel/debug/workspace_log.proto?q=WorkspaceEvent).

3. Download the Bazel source code and navigate to the Bazel folder by using
   the command below. You need the source code to be able to parse the
   workspace log with the
   [workspacelog parser](https://source.bazel.build/bazel/+/master:src/tools/workspacelog/).

   ```posix-terminal
   git clone https://github.com/bazelbuild/bazel.git

   cd bazel
   ```

4. In the Bazel source code repo, convert the whole workspace log to text.

   ```posix-terminal
   bazel build src/tools/workspacelog:parser

   bazel-bin/src/tools/workspacelog/parser --log_path=/tmp/workspacelog > /tmp/workspacelog.txt
   ```

5. The output may be quite verbose and include output from built-in Bazel
   rules.

   To exclude specific rules from the output, use the `--exclude_rule` option.
   For example:

   ```posix-terminal
   bazel build src/tools/workspacelog:parser

   bazel-bin/src/tools/workspacelog/parser --log_path=/tmp/workspacelog \
       --exclude_rule "//external:local_config_cc" \
       --exclude_rule "//external:dep" > /tmp/workspacelog.txt
   ```

6. Open `/tmp/workspacelog.txt` and check for unsafe operations.

The log consists of
[WorkspaceEvent](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/bazel/debug/workspace_log.proto?q=WorkspaceEvent)
messages outlining certain potentially non-hermetic actions performed on a
[`repository_ctx`](/rules/lib/builtins/repository_ctx).

The actions that have been highlighted as potentially non-hermetic are as follows:

* `execute`: executes an arbitrary command on the host environment. Check if
  these may introduce any dependencies on the host environment.

* `download`, `download_and_extract`: to ensure hermetic builds, make sure
  that sha256 is specified.

* `file`, `template`: this is not non-hermetic in itself, but may be a mechanism
  for introducing dependencies on the host environment into the repository.
  Ensure that you understand where the input comes from, and that it does not
  depend on the host environment.

* `os`: this is not non-hermetic in itself, but an easy way to get dependencies
  on the host environment. A hermetic build would generally not call this.
  In evaluating whether your usage is hermetic, keep in mind that this is
  running on the host and not on the workers. Getting environment specifics
  from the host is generally not a good idea for remote builds.

* `symlink`: this is normally safe, but look for red flags. Any symlinks to
  outside the repository or to an absolute path would cause problems on the
  remote worker. If the symlink is created based on host machine properties
  it would probably be problematic as well.

* `which`: checking for programs installed on the host is usually problematic
  since the workers may have different configurations.

diff --git a/8.0.1/rules/bzl-style.mdx b/8.0.1/rules/bzl-style.mdx
deleted file mode 100644
index 941028a..0000000
--- a/8.0.1/rules/bzl-style.mdx
+++ /dev/null
@@ -1,212 +0,0 @@
---
title: '.bzl style guide'
---

This page covers basic style guidelines for Starlark and also includes
information on macros and rules.

[Starlark](/rules/language) is a
language that defines how software is built, and as such it is both a
programming and a configuration language.

You will use Starlark to write `BUILD` files, macros, and build rules. Macros and
rules are essentially meta-languages - they define how `BUILD` files are written.
`BUILD` files are intended to be simple and repetitive.

All software is read more often than it is written. This is especially true for
Starlark, as engineers read `BUILD` files to understand dependencies of their
targets and details of their builds. This reading will often happen in passing,
in a hurry, or in parallel to accomplishing some other task. Consequently,
simplicity and readability are very important so that users can parse and
comprehend `BUILD` files quickly.

When a user opens a `BUILD` file, they quickly want to know the list of targets in
the file; or review the list of sources of that C++ library; or remove a
dependency from that Java binary. Each time you add a layer of abstraction, you
make it harder for a user to do these tasks.

`BUILD` files are also analyzed and updated by many different tools. Tools may not
be able to edit your `BUILD` file if it uses abstractions. Keeping your `BUILD`
files simple will allow you to get better tooling. As a code base grows, it
becomes increasingly common to make changes across many `BUILD` files in order to
update a library or do a cleanup.

Important: Do not create a variable or macro just to avoid some amount of
repetition in `BUILD` files. Your `BUILD` file should be easily readable both by
developers and tools. The
[DRY](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself) principle doesn't
really apply here.

## General advice

* Use [Buildifier](https://github.com/bazelbuild/buildtools/tree/master/buildifier#linter)
  as a formatter and linter.
* Follow [testing guidelines](/rules/testing).

## Style

### Python style

When in doubt, follow the
[PEP 8 style guide](https://www.python.org/dev/peps/pep-0008/) where possible.
In particular, use four rather than two spaces for indentation to follow the
Python convention.

Since
[Starlark is not Python](/rules/language#differences-with-python),
some aspects of Python style do not apply.
For example, PEP 8 advises that
comparisons to singletons be done with `is`, which is not an operator in
Starlark.

### Docstring

Document files and functions using [docstrings](https://github.com/bazelbuild/buildtools/blob/master/WARNINGS.md#function-docstring).
Use a docstring at the top of each `.bzl` file, and a docstring for each public
function.

### Document rules and aspects

Rules and aspects, along with their attributes, as well as providers and their
fields, should be documented using the `doc` argument.

### Naming convention

* Variable and function names use lowercase with words separated by
  underscores (`[a-z][a-z0-9_]*`), such as `cc_library`.
* Top-level private values start with one underscore. Bazel enforces that
  private values cannot be used from other files. Local variables should not
  use the underscore prefix.

### Line length

As in `BUILD` files, there is no strict line length limit as labels can be long.
When possible, try to use at most 79 characters per line (following Python's
style guide, [PEP 8](https://www.python.org/dev/peps/pep-0008/)). This guideline
should not be enforced strictly: editors should display more than 80 columns,
automated changes will frequently introduce longer lines, and humans shouldn't
spend time splitting lines that are already readable.

### Keyword arguments

In keyword arguments, spaces around the equal sign are preferred:

```python
def fct(name, srcs):
    filtered_srcs = my_filter(source = srcs)
    native.cc_library(
        name = name,
        srcs = filtered_srcs,
        testonly = True,
    )
```

### Boolean values

Prefer values `True` and `False` (rather than `1` and `0`) for boolean values
(such as when using a boolean attribute in a rule).

### Use print only for debugging

Do not use the `print()` function in production code; it is only intended for
debugging, and will spam all direct and indirect users of your `.bzl` file. The
only exception is that you may submit code that uses `print()` if it is disabled
by default and can only be enabled by editing the source -- for example, if all
uses of `print()` are guarded by `if DEBUG:` where `DEBUG` is hardcoded to
`False`. Be mindful of whether these statements are useful enough to justify
their impact on readability.

## Macros

A macro is a function which instantiates one or more rules during the loading
phase. In general, use rules whenever possible instead of macros. The build
graph seen by the user is not the same as the one used by Bazel during the
build - macros are expanded *before Bazel does any build graph analysis.*

Because of this, when something goes wrong, the user will need to understand
your macro's implementation to troubleshoot build problems. Additionally, `bazel
query` results can be hard to interpret because targets shown in the results
come from macro expansion. Finally, aspects are not aware of macros, so tooling
depending on aspects (IDEs and others) might fail.

A safe use for macros is for defining additional targets intended to be
referenced directly at the Bazel CLI or in BUILD files: In that case, only the
*end users* of those targets need to know about them, and any build problems
introduced by macros are never far from their usage.
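For instance, a small macro of this kind might bundle a library with its test;
the rule choices and naming scheme here are illustrative:

```python
# A loading-phase macro: one call site instantiates a library plus its test.
def my_cc_suite(name, srcs, deps = [], visibility = None):
    native.cc_library(
        name = name,
        srcs = srcs,
        deps = deps,
        visibility = visibility,
    )
    native.cc_test(
        name = name + "_test",
        srcs = ["%s_test.cc" % name],
        deps = [":" + name],
    )
```

Note how the test's name is derived from `name`; the best practices below make
this convention explicit.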
For macros that define generated targets (implementation details of the macro
which are not supposed to be referred to at the CLI or depended on by targets
not instantiated by that macro), follow these best practices:

* A macro should take a `name` argument and define a target with that name.
  That target becomes that macro's *main target*.
* Generated targets, that is all other targets defined by a macro, should:
    * Have their names prefixed by `<name>` or `_<name>`. For example, using
      `name = '%s_bar' % (name)`.
    * Have restricted visibility (`//visibility:private`), and
    * Have a `manual` tag to avoid expansion in wildcard targets (`:all`,
      `...`, `:*`, etc).
* The `name` should only be used to derive names of targets defined by the
  macro, and not for anything else. For example, don't use the name to derive
  a dependency or input file that is not generated by the macro itself.
* All the targets created in the macro should be coupled in some way to the
  main target.
* Conventionally, `name` should be the first argument when defining a macro.
* Keep the parameter names in the macro consistent. If a parameter is passed
  as an attribute value to the main target, keep its name the same. If a macro
  parameter serves the same purpose as a common rule attribute, such as
  `deps`, name it as you would the attribute (see below).
* When calling a macro, use only keyword arguments. This is consistent with
  rules, and greatly improves readability.

Engineers often write macros when the Starlark API of relevant rules is
insufficient for their specific use case, regardless of whether the rule is
defined within Bazel in native code, or in Starlark. If you're facing this
problem, ask the rule author if they can extend the API to accomplish your
goals.

As a rule of thumb, the more macros resemble the rules, the better.

See also [macros](/extending/macros#conventions).

## Rules

* Rules, aspects, and their attributes should use lower_case names ("snake
  case").
* Rule names are nouns that describe the main kind of artifact produced by the
  rule, from the point of view of its dependencies (or for leaf rules, the
  user). This is not necessarily a file suffix. For instance, a rule that
  produces C++ artifacts meant to be used as Python extensions might be called
  `py_extension`. For most languages, typical rules include:
    * `*_library` - a compilation unit or "module".
    * `*_binary` - a target producing an executable or a deployment unit.
    * `*_test` - a test target. This can include multiple tests. Expect all
      tests in a `*_test` target to be variations on the same theme, for
      example, testing a single library.
    * `*_import` - a target encapsulating a pre-compiled artifact, such as a
      `.jar`, or a `.dll` that is used during compilation.
* Use consistent names and types for attributes. Some generally applicable
  attributes include:
    * `srcs`: `label_list`, allowing files: source files, typically
      human-authored.
    * `deps`: `label_list`, typically *not* allowing files: compilation
      dependencies.
    * `data`: `label_list`, allowing files: data files, such as test data etc.
    * `runtime_deps`: `label_list`: runtime dependencies that are not needed
      for compilation.
* For any attributes with non-obvious behavior (for example, string templates
  with special substitutions, or tools that are invoked with specific
  requirements), provide documentation using the `doc` keyword argument to the
  attribute's declaration (`attr.label_list()` or similar).
* Rule implementation functions should almost always be private functions
  (named with a leading underscore). A common style is to give the
  implementation function for `myrule` the name `_myrule_impl`.
* Pass information between your rules using a well-defined
  [provider](/extending/rules#providers) interface. Declare and document provider
  fields.
* Design your rule with extensibility in mind. Consider that other rules might
  want to interact with your rule, access your providers, and reuse the
  actions you create.
* Follow [performance guidelines](/rules/performance) in your rules.

diff --git a/8.0.1/rules/challenges.mdx b/8.0.1/rules/challenges.mdx
deleted file mode 100644
index 10ff737..0000000
--- a/8.0.1/rules/challenges.mdx
+++ /dev/null
@@ -1,223 +0,0 @@
---
title: 'Challenges of Writing Rules'
---

This page gives a high-level overview of the specific issues and challenges
of writing efficient Bazel rules.

## Summary Requirements

* Assumption: Aim for Correctness, Throughput, Ease of Use & Latency
* Assumption: Large Scale Repositories
* Assumption: BUILD-like Description Language
* Historic: Hard Separation between Loading, Analysis, and Execution is
  Outdated, but still affects the API
* Intrinsic: Remote Execution and Caching are Hard
* Intrinsic: Using Change Information for Correct and Fast Incremental Builds
  requires Unusual Coding Patterns
* Intrinsic: Avoiding Quadratic Time and Memory Consumption is Hard

## Assumptions

Here are some assumptions made about the build system, such as the need for
correctness, ease of use, throughput, and large scale repositories. The
following sections address these assumptions and offer guidelines to ensure
rules are written in an effective manner.

### Aim for correctness, throughput, ease of use & latency

We assume that the build system needs to be first and foremost correct with
respect to incremental builds. For a given source tree, the output of the
same build should always be the same, regardless of what the output tree looks
like. To a first approximation, this means Bazel needs to know every single
input that goes into a given build step, such that it can rerun that step if any
of the inputs change. There are limits to how correct Bazel can get, as it leaks
some information such as the date / time of the build, and ignores certain types
of changes such as changes to file attributes. [Sandboxing](/docs/sandboxing)
helps ensure correctness by preventing reads to undeclared input files. Besides
the intrinsic limits of the system, there are a few known correctness issues,
most of which are related to Fileset or the C++ rules, which are both hard
problems. We have long-term efforts to fix these.

The second goal of the build system is to have high throughput; we are
constantly pushing the boundaries of what can be done within the current
machine allocation for a remote execution service. If the remote execution
service gets overloaded, nobody can get work done.

Ease of use comes next. Of multiple correct approaches with the same (or
similar) footprint of the remote execution service, we choose the one that is
easier to use.

Latency denotes the time it takes from starting a build to getting the intended
result, whether that is a test log from a passing or failing test, or an error
message that a `BUILD` file has a typo.
Note that these goals often overlap; for example, latency is partly a function
of the throughput of the remote execution service, just as correctness
contributes to ease of use.

### Large scale repositories

The build system needs to operate at the scale of large repositories, where
large scale means that the repository does not fit on a single hard drive, so it
is impossible to do a full checkout on virtually all developer machines. A
medium-sized build will need to read and parse tens of thousands of `BUILD`
files, and evaluate hundreds of thousands of globs. While it is theoretically
possible to read all `BUILD` files on a single machine, we have not yet been
able to do so within a reasonable amount of time and memory. As such, it is
critical that `BUILD` files can be loaded and parsed independently.

### BUILD-like description language

In this context, we assume a configuration language that is
roughly similar to `BUILD` files in declaration of library and binary rules
and their interdependencies. `BUILD` files can be read and parsed independently,
and we avoid even looking at source files whenever we can (except for
existence).

## Historic

There are differences between Bazel versions that cause challenges and some
of these are outlined in the following sections.

### Hard separation between loading, analysis, and execution is outdated but still affects the API

Technically, it is sufficient for a rule to know the input and output files of
an action just before the action is sent to remote execution. However, the
original Bazel code base had a strict separation of loading packages, then
analyzing rules using a configuration (command-line flags, essentially), and
only then running any actions. This distinction is still part of the rules API
today, even though the core of Bazel no longer requires it (more details below).

That means that the rules API requires a declarative description of the rule
interface (what attributes it has, types of attributes). There are some
exceptions where the API allows custom code to run during the loading phase to
compute implicit names of output files and implicit values of attributes. For
example, a `java_library` rule named 'foo' implicitly generates an output named
'libfoo.jar', which can be referenced from other rules in the build graph.

Furthermore, the analysis of a rule cannot read any source files or inspect the
output of an action; instead, it needs to generate a partial directed bipartite
graph of build steps and output file names that is only determined from the rule
itself and its dependencies.

## Intrinsic

There are some intrinsic properties that make writing rules challenging and
some of the most common ones are described in the following sections.

### Remote execution and caching are hard

Remote execution and caching improve build times in large repositories by
roughly two orders of magnitude compared to running the build on a single
machine. However, the scale at which it needs to perform is staggering: Google's
remote execution service is designed to handle a huge number of requests per
second, and the protocol carefully avoids unnecessary roundtrips as well as
unnecessary work on the service side.

At this time, the protocol requires that the build system knows all inputs to a
given action ahead of time; the build system then computes a unique action
fingerprint, and asks the scheduler for a cache hit.
If a cache hit is found,
-the scheduler replies with the digests of the output files; the files themselves are
-addressed by digest later on. However, this imposes restrictions on the Bazel
-rules, which need to declare all input files ahead of time.
-
-### Using change information for correct and fast incremental builds requires unusual coding patterns
-
-Above, we argued that in order to be correct, Bazel needs to know all the input
-files that go into a build step in order to detect whether that build step is
-still up-to-date. The same is true for package loading and rule analysis, and we
-have designed [Skyframe](/reference/skyframe) to handle this
-in general. Skyframe is a graph library and evaluation framework that takes a
-goal node (such as 'build //foo with these options'), and breaks it down into
-its constituent parts, which are then evaluated and combined to yield this
-result. As part of this process, Skyframe reads packages, analyzes rules, and
-executes actions.
-
-At each node, Skyframe tracks exactly which nodes any given node used to compute
-its own output, all the way from the goal node down to the input files (which
-are also Skyframe nodes). Having this graph explicitly represented in memory
-allows the build system to identify exactly which nodes are affected by a given
-change to an input file (including creation or deletion of an input file), doing
-the minimal amount of work to restore the output tree to its intended state.
-
-As part of this, each node performs a dependency discovery process. Each
-node can declare dependencies, and then use the contents of those dependencies
-to declare even further dependencies. In principle, this maps well to a
-thread-per-node model. However, medium-sized builds contain hundreds of
-thousands of Skyframe nodes, and that many threads aren't easily possible with
-current Java technology (and for historical reasons, we're currently tied to
-using Java, so no lightweight threads and no continuations).
-
-Instead, Bazel uses a fixed-size thread pool. However, that means that if a node
-declares a dependency that isn't available yet, we may have to abort that
-evaluation and restart it (possibly in another thread), when the dependency is
-available. This, in turn, means that nodes should not do this excessively; a
-node that declares N dependencies serially can potentially be restarted N times,
-costing O(N^2) time. Instead, we aim for up-front bulk declaration of
-dependencies, which sometimes requires reorganizing the code, or even splitting
-a node into multiple nodes to limit the number of restarts.
-
-Note that this technology isn't currently available in the rules API; instead,
-the rules API is still defined using the legacy concepts of loading, analysis,
-and execution phases. However, a fundamental restriction is that all accesses to
-other nodes have to go through the framework so that it can track the
-corresponding dependencies. Regardless of the language in which the build system
-is implemented or in which the rules are written (they don't have to be the
-same), rule authors must not use standard libraries or patterns that bypass
-Skyframe. For Java, that means avoiding java.io.File as well as any form of
-reflection, and any library that does either. Libraries that support dependency
-injection of these low-level interfaces still need to be set up correctly for
-Skyframe.
-
-This strongly suggests not exposing rule authors to a full language runtime
-in the first place.
The danger of accidental use of such APIs is just too big -
-several Bazel bugs in the past were caused by rules using unsafe APIs, even
-though the rules were written by the Bazel team or other domain experts.
-
-### Avoiding quadratic time and memory consumption is hard
-
-To make matters worse, apart from the requirements imposed by Skyframe, the
-historical constraints of using Java, and the outdatedness of the rules API,
-accidentally introducing quadratic time or memory consumption is a fundamental
-problem in any build system based on library and binary rules. There are two
-very common patterns that introduce quadratic memory consumption (and therefore
-quadratic time consumption).
-
-1. Chains of Library Rules
-
-Consider the case of a chain of library rules: A depends on B, which depends on
-C, and so on. Then, we want to compute some property over the transitive closure
-of these rules, such as the Java runtime classpath, or the C++ linker command for
-each library. Naively, we might take a standard list implementation; however,
-this already introduces quadratic memory consumption: the first library
-contains one entry on the classpath, the second two, the third three, and so
-on, for a total of 1+2+3+...+N = O(N^2) entries.
-
-2. Binary Rules Depending on the Same Library Rules
-
-Consider the case where a set of binaries depends on the same library
-rules — for example, when a number of test rules test the same
-library code. Let's say out of N rules, half the rules are binary rules, and
-the other half library rules. Now consider that each binary makes a copy of
-some property computed over the transitive closure of library rules, such as
-the Java runtime classpath, or the C++ linker command line. For example, it
-could expand the command line string representation of the C++ link action. N/2
-copies of N/2 elements is O(N^2) memory.
-
-#### Custom collections classes to avoid quadratic complexity
-
-Bazel is heavily affected by both of these scenarios, so we introduced a set of
-custom collection classes that effectively compress the information in memory by
-avoiding the copy at each step. Almost all of these data structures have set
-semantics, so we called the result
-[depset](/rules/lib/depset)
-(also known as `NestedSet` in the internal implementation). The majority of
-changes to reduce Bazel's memory consumption over the past several years were
-changes to use depsets instead of whatever was previously used.
-
-Unfortunately, usage of depsets does not automatically solve all the issues;
-in particular, even just iterating over a depset in each rule re-introduces
-quadratic time consumption. Internally, `NestedSet` also has some helper methods
-to facilitate interoperability with normal collections classes; unfortunately,
-accidentally passing a NestedSet to one of these methods leads to copying
-behavior, and reintroduces quadratic memory consumption.
diff --git a/8.0.1/rules/deploying.mdx b/8.0.1/rules/deploying.mdx
deleted file mode 100644
index 3fe2c86..0000000
--- a/8.0.1/rules/deploying.mdx
+++ /dev/null
@@ -1,223 +0,0 @@
----
-title: 'Deploying Rules'
----
-
-
-
-This page is for rule writers who are planning to make their rules available
-to others.
-
-We recommend you start a new ruleset from the template repository:
-https://github.com/bazel-contrib/rules-template
-That template follows the recommendations below, includes API documentation
-generation, and sets up a CI/CD pipeline to make it trivial to distribute your
-ruleset.
-
-## Hosting and naming rules
-
-New rules should go into their own GitHub repository under your organization.
-Start a thread on [GitHub](https://github.com/bazelbuild/bazel/discussions)
-if you feel like your rules belong in the [bazelbuild](https://github.com/bazelbuild)
-organization.
-
-Repository names for Bazel rules are standardized on the following format:
-`$ORGANIZATION/rules_$NAME`.
-See [examples on GitHub](https://github.com/search?q=rules+bazel&type=Repositories).
-For consistency, you should follow this same format when publishing your Bazel rules.
-
-Make sure to use a descriptive GitHub repository description and `README.md`
-title, for example:
-
-* Repository name: `bazelbuild/rules_go`
-* Repository description: *Go rules for Bazel*
-* Repository tags: `golang`, `bazel`
-* `README.md` header: *Go rules for [Bazel](https://bazel.build)*
-(note the link to https://bazel.build which will guide users who are unfamiliar
-with Bazel to the right place)
-
-Rules can be grouped either by language (such as Scala), runtime platform
-(such as Android), or framework (such as Spring).
-
-## Repository content
-
-Every rule repository should have a certain layout so that users can quickly
-understand new rules.
-
-For example, when writing new rules for the (make-believe)
-`mockascript` language, the rule repository would have the following structure:
-
-```
-/
-  LICENSE
-  README
-  MODULE.bazel
-  mockascript/
-    constraints/
-      BUILD
-    runfiles/
-      BUILD
-      runfiles.mocs
-    BUILD
-    defs.bzl
-  tests/
-    BUILD
-    some_test.sh
-    another_test.py
-  examples/
-    BUILD
-    bin.mocs
-    lib.mocs
-    test.mocs
-```
-
-### MODULE.bazel
-
-In the project's `MODULE.bazel`, you should define the name that users will use
-to reference your rules. If your rules belong to the
-[bazelbuild](https://github.com/bazelbuild) organization, you must use
-`rules_<lang>` (such as `rules_mockascript`). Otherwise, you should name your
-repository `<org>_rules_<lang>` (such as `build_stack_rules_proto`). Please
-start a thread on [GitHub](https://github.com/bazelbuild/bazel/discussions)
-if you feel like your rules should follow the convention for rules in the
-[bazelbuild](https://github.com/bazelbuild) organization.
-
-In the following sections, assume the repository belongs to the
-[bazelbuild](https://github.com/bazelbuild) organization.
-
-```
-module(name = "rules_mockascript")
-```
-
-### README
-
-At the top level, there should be a `README` that contains a brief description
-of your ruleset and the API that users should expect.
-
-### Rules
-
-Often there will be multiple rules provided by your repository. Create a
-directory named by the language and provide an entry point: a `defs.bzl` file
-exporting all rules (also include a `BUILD` file so the directory is a package).
-For `rules_mockascript` that means there will be a directory named
-`mockascript`, and a `BUILD` file and a `defs.bzl` file inside:
-
-```
-/
-  mockascript/
-    BUILD
-    defs.bzl
-```
-
-### Constraints
-
-If your rule defines
-[toolchain](/extending/toolchains) rules,
-it's possible that you'll need to define custom `constraint_setting`s and/or
-`constraint_value`s. Put these into a `//<LANG>/constraints` package. Your
-directory structure will look like this:
-
-```
-/
-  mockascript/
-    constraints/
-      BUILD
-    BUILD
-    defs.bzl
-```
-
-Please read
-[github.com/bazelbuild/platforms](https://github.com/bazelbuild/platforms)
-for best practices, and to see what constraints are already present, and
-consider contributing your constraints there if they are language independent.
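-
-For illustration, here is a minimal sketch of what such a package could contain
-for the make-believe `mockascript` language (the setting and value names are
-hypothetical, not taken from any real ruleset):
-
-```
-# mockascript/constraints/BUILD (hypothetical sketch)
-
-# A dimension along which mockascript platforms can differ.
-constraint_setting(name = "mockascript_runtime")
-
-# The possible values of that dimension.
-constraint_value(
-    name = "interpreted",
-    constraint_setting = ":mockascript_runtime",
-)
-
-constraint_value(
-    name = "compiled",
-    constraint_setting = ":mockascript_runtime",
-)
-```
-
-Users could then match on these values in `platform` definitions or in a
-`select()`.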
-Be mindful when introducing custom constraints: all users of your rules will
-use them to perform platform-specific logic in their `BUILD` files (for example,
-using [selects](/reference/be/functions#select)).
-With custom constraints, you define a language that the whole Bazel ecosystem
-will speak.
-
-### Runfiles library
-
-If your rule provides a standard library for accessing runfiles, it should be
-in the form of a library target located at `//<LANG>/runfiles` (an abbreviation
-of `//<LANG>/runfiles:runfiles`). User targets that need to access their data
-dependencies will typically add this target to their `deps` attribute.
-
-### Repository rules
-
-#### Dependencies
-
-Your rules might have external dependencies, which you'll need to specify in
-your MODULE.bazel file.
-
-#### Registering toolchains
-
-Your rules might also register toolchains, which you can also specify in the
-MODULE.bazel file.
-
-Note that in order to resolve toolchains in the analysis phase, Bazel needs to
-analyze all `toolchain` targets that are registered. Bazel will not need to
-analyze all targets referenced by the `toolchain.toolchain` attribute. If
-registering toolchains requires complex computation in the repository, consider
-splitting the repository with `toolchain` targets from the repository with
-`<LANG>_toolchain` targets. The former will always be fetched, and the latter
-will only be fetched when the user actually needs to build `<LANG>` code.
-
-
-#### Release snippet
-
-In your release announcement provide a snippet that your users can copy-paste
-into their `MODULE.bazel` file. This snippet in general will look as follows:
-
-```
-bazel_dep(name = "rules_<lang>", version = "<version>")
-```
-
-
-### Tests
-
-There should be tests that verify that the rules are working as expected. This
-can either be in the standard location for the language the rules are for or a
-`tests/` directory at the top level.
-
-### Examples (optional)
-
-It is useful to have an `examples/` directory that shows users a couple
-of basic ways that the rules can be used.
-
-## CI/CD
-
-Many rulesets use GitHub Actions. See the configuration used in the
-[rules-template](https://github.com/bazel-contrib/rules-template/tree/main/.github/workflows)
-repo, which is simplified using a "reusable workflow" hosted in the bazel-contrib
-org. `ci.yaml` runs tests on each PR and `main` commit, and `release.yaml` runs
-anytime you push a tag to the repository.
-See comments in the rules-template repo for more information.
-
-If your repository is under the [bazelbuild organization](https://github.com/bazelbuild),
-you can [ask to add](https://github.com/bazelbuild/continuous-integration/issues/new?template=adding-your-project-to-bazel-ci.md&title=Request+to+add+new+project+%5BPROJECT_NAME%5D&labels=new-project)
-it to [ci.bazel.build](http://ci.bazel.build).
-
-## Documentation
-
-See the [Stardoc documentation](https://github.com/bazelbuild/stardoc) for
-instructions on how to comment your rules so that documentation can be generated
-automatically.
-
-The [rules-template docs/ folder](https://github.com/bazel-contrib/rules-template/tree/main/docs)
-shows a simple way to ensure the Markdown content in the `docs/` folder is always up-to-date
-as Starlark files are updated.
-
-## FAQs
-
-### Why can't we add our rule to the main Bazel GitHub repository?
-
-We want to decouple rules from Bazel releases as much as possible. It's clearer
-who owns individual rules, reducing the load on Bazel developers.
For our users,
-decoupling makes it easier to modify, upgrade, downgrade, and replace rules.
-Contributing to rules can be lighter weight than contributing to Bazel
-(depending on the rules, it can include full submit access to the corresponding
-GitHub repository). Getting submit access to Bazel itself is a much more involved
-process.
-
-The downside is a more complicated one-time installation process for our users:
-they have to add a dependency on your ruleset in their `MODULE.bazel` file.
-
-We used to have all of the rules in the Bazel repository (under
-`//tools/build_rules` or `//tools/build_defs`). We still have a couple of rules
-there, but we are working on moving the remaining rules out.
diff --git a/8.0.1/rules/errors/read-only-variable.mdx b/8.0.1/rules/errors/read-only-variable.mdx
deleted file mode 100644
index 2bfde65..0000000
--- a/8.0.1/rules/errors/read-only-variable.mdx
+++ /dev/null
@@ -1,30 +0,0 @@
----
-title: 'Error: Variable x is read only'
----
-
-
-
-A global variable cannot be reassigned. It will always point to the same object.
-However, its content might change if the value is mutable (for example, the
-content of a list). Local variables don't have this restriction.
-
-```python
-a = [1, 2]
-
-a[1] = 3
-
-b = 3
-
-b = 4 # forbidden
-```
-
-`ERROR: /path/ext.bzl:7:1: Variable b is read only`
-
-You will get a similar error if you try to redefine a function (function
-overloading is not supported), for example:
-
-```python
-def foo(x): return x + 1
-
-def foo(x, y): return x + y # forbidden
-```
diff --git a/8.0.1/rules/faq.mdx b/8.0.1/rules/faq.mdx
deleted file mode 100644
index 5321f0b..0000000
--- a/8.0.1/rules/faq.mdx
+++ /dev/null
@@ -1,80 +0,0 @@
----
-title: 'Frequently Asked Questions'
----
-
-
-
-These are some common issues and questions with writing extensions.
-
-## Why is my file not produced / my action never executed?
-
-Bazel only executes the actions needed to produce the *requested* output files.
-
-* If the file you want has a label, you can request it directly:
-  `bazel build //pkg:myfile.txt`
-
-* If the file is in an output group of the target, you may need to specify that
-  output group on the command line:
-  `bazel build //pkg:mytarget --output_groups=foo`
-
-* If you want the file to be built automatically whenever your target is
-  mentioned on the command line, add it to your rule's default outputs by
-  returning a [`DefaultInfo`](lib/globals#DefaultInfo) provider.
-
-See the [Rules page](/extending/rules#requesting-output-files) for more information.
-
-## Why is my implementation function not executed?
-
-Bazel analyzes only the targets that are requested for the build. You should
-either name the target on the command line, or name something that depends on
-the target.
-
-## A file is missing when my action or binary is executed
-
-Make sure that 1) the file has been registered as an input to the action or
-binary, and 2) the script or tool being executed is accessing the file using the
-correct path.
-
-For actions, you declare inputs by passing them to the `ctx.actions.*` function
-that creates the action. The proper path for the file can be obtained using
-[`File.path`](lib/File#path).
-
-For binaries (the executable outputs run by a `bazel run` or `bazel test`
-command), you declare inputs by including them in the
-[runfiles](/extending/rules#runfiles). Instead of using the `path` field, use
-[`File.short_path`](lib/File#short_path), which is the file's path relative to
-the runfiles directory in which the binary executes.
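-
-For the action case, here is a minimal sketch (the `src` attribute and `_tool`
-names are hypothetical) showing a file being registered as an action input:
-
-```python
-def _impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name + ".out")
-    ctx.actions.run(
-        executable = ctx.executable._tool,
-        # The tool reads the input file and writes the output file.
-        arguments = [ctx.file.src.path, out.path],
-        # Listing the file here is what registers it as an input.
-        inputs = [ctx.file.src],
-        outputs = [out],
-    )
-    return [DefaultInfo(files = depset([out]))]
-```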
- -## How can I control which files are built by `bazel build //pkg:mytarget`? - -Use the [`DefaultInfo`](lib/globals#DefaultInfo) provider to -[set the default outputs](/extending/rules#requesting-output-files). - -## How can I run a program or do file I/O as part of my build? - -A tool can be declared as a target, just like any other part of your build, and -run during the execution phase to help build other targets. To create an action -that runs a tool, use [`ctx.actions.run`](lib/actions#run) and pass in the -tool as the `executable` parameter. - -During the loading and analysis phases, a tool *cannot* run, nor can you perform -file I/O. This means that tools and file contents (except the contents of BUILD -and .bzl files) cannot affect how the target and action graphs get created. - -## What if I need to access the same structured data both before and during the execution phase? - -You can format the structured data as a .bzl file. You can `load()` the file to -access it during the loading and analysis phases. You can pass it as an input or -runfile to actions and executables that need it during the execution phase. - -## How should I document Starlark code? - -For rules and rule attributes, you can pass a docstring literal (possibly -triple-quoted) to the `doc` parameter of `rule` or `attr.*()`. For helper -functions and macros, use a triple-quoted docstring literal following the format -given [here](https://github.com/bazelbuild/buildtools/blob/master/WARNINGS.md#function-docstring). -Rule implementation functions generally do not need their own docstring. - -Using string literals in the expected places makes it easier for automated -tooling to extract documentation. Feel free to use standard non-string comments -wherever it may help the reader of your code. diff --git a/8.0.1/rules/index.mdx b/8.0.1/rules/index.mdx deleted file mode 100644 index 2a6c3eb..0000000 --- a/8.0.1/rules/index.mdx +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: 'Rules' ---- - - - -The Bazel ecosystem has a growing and evolving set of rules to support popular -languages and packages. Much of Bazel's strength comes from the ability to -[define new rules](/extending/concepts) that can be used by others. - -This page describes the recommended, native, and non-native Bazel rules. - -## Recommended rules - -Here is a selection of recommended rules: - -* [Android](/docs/bazel-and-android) -* [C / C++](/docs/bazel-and-cpp) -* [Docker/OCI](https://github.com/bazel-contrib/rules_oci) -* [Go](https://github.com/bazelbuild/rules_go) -* [Haskell](https://github.com/tweag/rules_haskell) -* [Java](/docs/bazel-and-java) -* [JavaScript / NodeJS](https://github.com/bazelbuild/rules_nodejs) -* [Maven dependency management](https://github.com/bazelbuild/rules_jvm_external) -* [Objective-C](/docs/bazel-and-apple) -* [Package building](https://github.com/bazelbuild/rules_pkg) -* [Protocol Buffers](https://github.com/bazelbuild/rules_proto#protobuf-rules-for-bazel) -* [Python](https://github.com/bazelbuild/rules_python) -* [Rust](https://github.com/bazelbuild/rules_rust) -* [Scala](https://github.com/bazelbuild/rules_scala) -* [Shell](/reference/be/shell) -* [Webtesting](https://github.com/bazelbuild/rules_webtesting) (Webdriver) - -The repository [Skylib](https://github.com/bazelbuild/bazel-skylib) contains -additional functions that can be useful when writing new rules and new -macros. - -The rules above were reviewed and follow our -[requirements for recommended rules](/community/recommended-rules). 
-
-Contact the respective rule set's maintainers regarding issues and feature
-requests.
-
-To find more Bazel rules, use a search engine, take a look at
-[awesomebazel.com](https://awesomebazel.com/), or search on
-[GitHub](https://github.com/search?o=desc&q=bazel+rules&s=stars&type=Repositories).
-
-## Native rules that do not apply to a specific programming language
-
-Native rules are shipped with the Bazel binary; they are always available in
-BUILD files without a `load` statement.
-
-* Extra actions
-  - [`extra_action`](/reference/be/extra-actions#extra_action)
-  - [`action_listener`](/reference/be/extra-actions#action_listener)
-* General
-  - [`filegroup`](/reference/be/general#filegroup)
-  - [`genquery`](/reference/be/general#genquery)
-  - [`test_suite`](/reference/be/general#test_suite)
-  - [`alias`](/reference/be/general#alias)
-  - [`config_setting`](/reference/be/general#config_setting)
-  - [`genrule`](/reference/be/general#genrule)
-* Platform
-  - [`constraint_setting`](/reference/be/platforms-and-toolchains#constraint_setting)
-  - [`constraint_value`](/reference/be/platforms-and-toolchains#constraint_value)
-  - [`platform`](/reference/be/platforms-and-toolchains#platform)
-  - [`toolchain`](/reference/be/platforms-and-toolchains#toolchain)
-  - [`toolchain_type`](/reference/be/platforms-and-toolchains#toolchain_type)
-* Workspace
-  - [`bind`](/reference/be/workspace#bind)
-  - [`local_repository`](/reference/be/workspace#local_repository)
-  - [`new_local_repository`](/reference/be/workspace#new_local_repository)
-  - [`xcode_config`](/reference/be/objective-c#xcode_config)
-  - [`xcode_version`](/reference/be/objective-c#xcode_version)
-
-## Embedded non-native rules
-
-Bazel also embeds additional rules written in [Starlark](/rules/language). Those can be loaded from
-the `@bazel_tools` built-in external repository.
-
-* Repository rules
-  - [`git_repository`](/rules/lib/repo/git#git_repository)
-  - [`http_archive`](/rules/lib/repo/http#http_archive)
-  - [`http_file`](/rules/lib/repo/http#http_file)
-  - [`http_jar`](/rules/lib/repo/http#http_jar)
-  - [Utility functions on patching](/rules/lib/repo/utils)
diff --git a/8.0.1/rules/legacy-macro-tutorial.mdx b/8.0.1/rules/legacy-macro-tutorial.mdx
deleted file mode 100644
index 44cdcfb..0000000
--- a/8.0.1/rules/legacy-macro-tutorial.mdx
+++ /dev/null
@@ -1,98 +0,0 @@
----
-title: 'Creating a Legacy Macro'
----
-
-
-
-IMPORTANT: This tutorial is for [*legacy macros*](/extending/legacy-macros). If
-you only need to support Bazel 8 or newer, we recommend using [symbolic
-macros](/extending/macros) instead; take a look at [Creating a Symbolic
-Macro](../macro-tutorial).
-
-Imagine that you need to run a tool as part of your build. For example, you
-may want to generate or preprocess a source file, or compress a binary. In this
-tutorial, you are going to create a legacy macro that resizes an image.
-
-Macros are suitable for simple tasks. If you want to do anything more
-complicated, for example add support for a new programming language, consider
-creating a [rule](/extending/rules). Rules give you more control and flexibility.
-
-The easiest way to create a macro that resizes an image is to use a `genrule`:
-
-```starlark
-genrule(
-    name = "logo_miniature",
-    srcs = ["logo.png"],
-    outs = ["small_logo.png"],
-    cmd = "convert $< -resize 100x100 $@",
-)
-
-cc_binary(
-    name = "my_app",
-    srcs = ["my_app.cc"],
-    data = [":logo_miniature"],
-)
-```
-
-If you need to resize more images, you may want to reuse the code.
To do that,
-define a function in a separate `.bzl` file, and call the file `miniature.bzl`:
-
-```starlark
-def miniature(name, src, size = "100x100", **kwargs):
-    """Create a miniature of the src image.
-
-    The generated file is prefixed with 'small_'.
-    """
-    native.genrule(
-        name = name,
-        srcs = [src],
-        # Note that the line below will fail if `src` is not a filename string
-        outs = ["small_" + src],
-        cmd = "convert $< -resize " + size + " $@",
-        **kwargs
-    )
-```
-
-A few remarks:
-
-  * By convention, legacy macros have a `name` argument, just like rules.
-
-  * To document the behavior of a legacy macro, use a
-    [docstring](https://www.python.org/dev/peps/pep-0257/) like in Python.
-
-  * To call a `genrule`, or any other native rule, prefix with `native.`.
-
-  * Use `**kwargs` to forward the extra arguments to the underlying `genrule`
-    (it works just like in
-    [Python](https://docs.python.org/3/tutorial/controlflow.html#keyword-arguments)).
-    This is useful so that a user can use standard attributes like
-    `visibility` or `tags`.
-
-Now, use the macro from the `BUILD` file:
-
-```starlark
-load("//path/to:miniature.bzl", "miniature")
-
-miniature(
-    name = "logo_miniature",
-    src = "image.png",
-)
-
-cc_binary(
-    name = "my_app",
-    srcs = ["my_app.cc"],
-    data = [":logo_miniature"],
-)
-```
-
-And finally, a **warning note**: the macro assumes that `src` is a filename
-string (otherwise, `outs = ["small_" + src]` will fail). So `src = "image.png"`
-works; but what happens if the `BUILD` file instead used `src =
-"//other/package:image.png"`, or even `src = select(...)`?
-
-You should make sure to declare such assumptions in your macro's documentation.
-Unfortunately, legacy macros, especially large ones, tend to be fragile because
-it can be hard to notice and document all such assumptions in your code – and,
-of course, some users of the macro won't read the documentation. We recommend,
-if possible, instead using [symbolic macros](/extending/macros), which have
-built-in checks on attribute types.
diff --git a/8.0.1/rules/macro-tutorial.mdx b/8.0.1/rules/macro-tutorial.mdx
deleted file mode 100644
index 93825aa..0000000
--- a/8.0.1/rules/macro-tutorial.mdx
+++ /dev/null
@@ -1,116 +0,0 @@
----
-title: 'Creating a Symbolic Macro'
----
-
-
-
-IMPORTANT: This tutorial is for [*symbolic macros*](/extending/macros) – the new
-macro system introduced in Bazel 8. If you need to support older Bazel versions,
-you will want to write a [legacy macro](/extending/legacy-macros) instead; take
-a look at [Creating a Legacy Macro](../legacy-macro-tutorial).
-
-Imagine that you need to run a tool as part of your build. For example, you
-may want to generate or preprocess a source file, or compress a binary. In this
-tutorial, you are going to create a symbolic macro that resizes an image.
-
-Macros are suitable for simple tasks. If you want to do anything more
-complicated, for example add support for a new programming language, consider
-creating a [rule](/extending/rules). Rules give you more control and flexibility.
-
-The easiest way to create a macro that resizes an image is to use a `genrule`:
-
-```starlark
-genrule(
-    name = "logo_miniature",
-    srcs = ["logo.png"],
-    outs = ["small_logo.png"],
-    cmd = "convert $< -resize 100x100 $@",
-)
-
-cc_binary(
-    name = "my_app",
-    srcs = ["my_app.cc"],
-    data = [":logo_miniature"],
)
-```
-
-If you need to resize more images, you may want to reuse the code.
To do that,
-define an *implementation function* and a *macro declaration* in a separate
-`.bzl` file, and call the file `miniature.bzl`:
-
-```starlark
-# Implementation function
-def _miniature_impl(name, visibility, src, size, **kwargs):
-    native.genrule(
-        name = name,
-        visibility = visibility,
-        srcs = [src],
-        outs = [name + "_small_" + src.name],
-        cmd = "convert $< -resize " + size + " $@",
-        **kwargs,
-    )
-
-# Macro declaration
-miniature = macro(
-    doc = """Create a miniature of the src image.
-
-    The generated file name will be prefixed with `name + "_small_"`.
-    """,
-    implementation = _miniature_impl,
-    # Inherit most of genrule's attributes (such as tags and testonly)
-    inherit_attrs = native.genrule,
-    attrs = {
-        "src": attr.label(
-            doc = "Image file",
-            allow_single_file = True,
-            # Non-configurable because our genrule's output filename is
-            # suffixed with src's name. (We want to suffix the output file with
-            # src's name because some tools that operate on image files expect
-            # the files to have the right file extension.)
-            configurable = False,
-        ),
-        "size": attr.string(
-            doc = "Output size in WxH format",
-            default = "100x100",
-        ),
-        # Do not allow callers of miniature() to set srcs, cmd, or outs -
-        # _miniature_impl overrides their values when calling native.genrule()
-        "srcs": None,
-        "cmd": None,
-        "outs": None,
-    },
-)
-```
-
-A few remarks:
-
-  * Symbolic macro implementation functions must have `name` and `visibility`
-    parameters. They should be used for the macro's main target.
-
-  * To document the behavior of a symbolic macro, use `doc` parameters for
-    `macro()` and its attributes.
-
-  * To call a `genrule`, or any other native rule, use `native.`.
-
-  * Use `**kwargs` to forward the extra inherited arguments to the underlying
-    `genrule` (it works just like in
-    [Python](https://docs.python.org/3/tutorial/controlflow.html#keyword-arguments)).
-    This is useful so that a user can set standard attributes like `tags` or
-    `testonly`.
-
-Now, use the macro from the `BUILD` file:
-
-```starlark
-load("//path/to:miniature.bzl", "miniature")
-
-miniature(
-    name = "logo_miniature",
-    src = "image.png",
-)
-
-cc_binary(
-    name = "my_app",
-    srcs = ["my_app.cc"],
-    data = [":logo_miniature"],
-)
-```
diff --git a/8.0.1/rules/performance.mdx b/8.0.1/rules/performance.mdx
deleted file mode 100644
index 5c4fd44..0000000
--- a/8.0.1/rules/performance.mdx
+++ /dev/null
@@ -1,302 +0,0 @@
----
-title: 'Optimizing Performance'
----
-
-
-
-When writing rules, the most common performance pitfall is to traverse or copy
-data that is accumulated from dependencies. When aggregated over the whole
-build, these operations can easily take O(N^2) time or space. To avoid this, it
-is crucial to understand how to use depsets effectively.
-
-This can be hard to get right, so Bazel also provides a memory profiler that
-assists you in finding spots where you might have made a mistake. Be warned:
-The cost of writing an inefficient rule may not be evident until it is in
-widespread use.
-
-## Use depsets
-
-Whenever you are rolling up information from rule dependencies you should use
-[depsets](lib/depset). Only use plain lists or dicts to publish information
-local to the current rule.
-
-A depset represents information as a nested graph which enables sharing.
-
-Consider the following graph:
-
-```
-C -> B -> A
-D ---^
-```
-
-Each node publishes a single string.
With depsets the data looks like this:
-
-```
-a = depset(direct=['a'])
-b = depset(direct=['b'], transitive=[a])
-c = depset(direct=['c'], transitive=[b])
-d = depset(direct=['d'], transitive=[b])
-```
-
-Note that each item is only mentioned once. With lists you would get this:
-
-```
-a = ['a']
-b = ['b', 'a']
-c = ['c', 'b', 'a']
-d = ['d', 'b', 'a']
-```
-
-Note that in this case `'a'` is mentioned four times! With larger graphs this
-problem will only get worse.
-
-Here is an example of a rule implementation that uses depsets correctly to
-publish transitive information. Note that it is OK to publish rule-local
-information using lists if you want since this is not O(N^2).
-
-```
-MyProvider = provider()
-
-def _impl(ctx):
-    my_things = ctx.attr.things
-    all_things = depset(
-        direct=my_things,
-        transitive=[dep[MyProvider].all_things for dep in ctx.attr.deps]
-    )
-    ...
-    return [MyProvider(
-        my_things=my_things,  # OK, a flat list of rule-local things only
-        all_things=all_things,  # OK, a depset containing dependencies
-    )]
-```
-
-See the [depset overview](/extending/depsets) page for more information.
-
-### Avoid calling `depset.to_list()`
-
-You can coerce a depset to a flat list using
-[`to_list()`](lib/depset#to_list), but doing so usually results in O(N^2)
-cost. If at all possible, avoid any flattening of depsets except for debugging
-purposes.
-
-A common misconception is that you can freely flatten depsets if you only do it
-at top-level targets, such as an `<xx>_binary` rule, since then the cost is not
-accumulated over each level of the build graph. But this is *still* O(N^2) when
-you build a set of targets with overlapping dependencies. This happens when
-building your tests `//foo/tests/...`, or when importing an IDE project.
-
-### Reduce the number of calls to `depset`
-
-Calling `depset` inside a loop is often a mistake. It can lead to depsets with
-very deep nesting, which perform poorly. For example:
-
-```python
-x = depset()
-for i in inputs:
-    # Do not do that.
-    x = depset(transitive = [x, i.deps])
-```
-
-This code can be replaced easily. First, collect the transitive depsets and
-merge them all at once:
-
-```python
-transitive = []
-
-for i in inputs:
-    transitive.append(i.deps)
-
-x = depset(transitive = transitive)
-```
-
-This can sometimes be reduced using a list comprehension:
-
-```python
-x = depset(transitive = [i.deps for i in inputs])
-```
-
-## Use ctx.actions.args() for command lines
-
-When building command lines you should use [ctx.actions.args()](lib/Args).
-This defers expansion of any depsets to the execution phase.
-
-Apart from being strictly faster, this will reduce the memory consumption of
-your rules -- sometimes by 90% or more.
-
-Here are some tricks:
-
-* Pass depsets and lists directly as arguments, instead of flattening them
-yourself. They will get expanded by `ctx.actions.args()` for you.
-If you need any transformations on the depset contents, look at
-[ctx.actions.args#add](lib/Args#add) to see if anything fits the bill.
-
-* Are you passing `File#path` as arguments? No need. Any
-[File](lib/File) is automatically turned into its
-[path](lib/File#path), deferred to expansion time.
-
-* Avoid constructing strings by concatenating them together.
-The best string argument is a constant as its memory will be shared between
-all instances of your rule.
-
-* If the args are too long for the command line, a `ctx.actions.args()` object
-can be conditionally or unconditionally written to a param file using
-[`ctx.actions.args#use_param_file`](lib/Args#use_param_file). This is
-done behind the scenes when the action is executed. If you need to explicitly
-control the params file, you can write it manually using
-[`ctx.actions.write`](lib/actions#write).
-
-Example:
-
-```
-def _impl(ctx):
-    ...
-    args = ctx.actions.args()
-    file = ctx.actions.declare_file(...)
-    files = depset(...)
-
-    # Bad, constructs a full string "--foo=<file path>" for each rule instance
-    args.add("--foo=" + file.path)
-
-    # Good, shares "--foo" among all rule instances, and defers file.path to later
-    # It will however pass ["--foo", <file path>] to the action command line,
-    # instead of ["--foo=<file path>"]
-    args.add("--foo", file)
-
-    # Use format if you prefer ["--foo=<file path>"] to ["--foo", <file path>]
-    args.add(file, format = "--foo=%s")
-
-    # Bad, makes a giant string of a whole depset
-    args.add(" ".join(["-I%s" % file.short_path for file in files]))
-
-    # Good, only stores a reference to the depset
-    args.add_all(files, format_each="-I%s", map_each=_to_short_path)
-
-# Function passed to map_each above
-def _to_short_path(f):
-    return f.short_path
-```
-
-## Transitive action inputs should be depsets
-
-When building an action using [ctx.actions.run](lib/actions#run), do not
-forget that the `inputs` field accepts a depset. Use this whenever inputs are
-collected from dependencies transitively.
-
-```
-inputs = depset(...)
-ctx.actions.run(
-    inputs = inputs,  # Do *not* turn inputs into a list
-    ...
-)
-```
-
-## Hanging
-
-If Bazel appears to be hung, you can hit Ctrl-\ or send
-Bazel a `SIGQUIT` signal (`kill -3 $(bazel info server_pid)`) to get a thread
-dump in the file `$(bazel info output_base)/server/jvm.out`.
-
-Since you may not be able to run `bazel info` if Bazel is hung, the
-`output_base` directory is usually the parent of the `bazel-<workspace>`
-symlink in your workspace directory.
-
-## Performance profiling
-
-The [JSON trace profile](/advanced/performance/json-trace-profile) can be very
-useful to quickly understand what Bazel spent time on during the invocation.
-
-The [`--experimental_command_profile`](https://bazel.build/reference/command-line-reference#flag--experimental_command_profile)
-flag may be used to capture Java Flight Recorder profiles of various kinds
-(CPU time, wall time, memory allocations, and lock contention).
-
-The [`--starlark_cpu_profile`](https://bazel.build/reference/command-line-reference#flag--starlark_cpu_profile)
-flag may be used to write a pprof profile of CPU usage by all Starlark threads.
-
-## Memory profiling
-
-Bazel comes with a built-in memory profiler that can help you check your rule’s
-memory use. If there is a problem you can dump the heap to find the
-exact line of code that is causing the problem.
-
-### Enabling memory tracking
-
-You must pass these two startup flags to *every* Bazel invocation:
-
-  ```
-  STARTUP_FLAGS=\
-  --host_jvm_args=-javaagent:<path to java-allocation-instrumenter.jar> \
-  --host_jvm_args=-DRULE_MEMORY_TRACKER=1
-  ```
-Note: You can download the allocation instrumenter jar file from [Maven Central
-Repository][allocation-instrumenter-link].
-
-[allocation-instrumenter-link]: https://repo1.maven.org/maven2/com/google/code/java-allocation-instrumenter/java-allocation-instrumenter/3.3.0
-
-These start the server in memory tracking mode. If you forget these for even
-one Bazel invocation, the server will restart and you will have to start over.
- -### Using the Memory Tracker - -As an example, look at the target `foo` and see what it does. To only -run the analysis and not run the build execution phase, add the -`--nobuild` flag. - -``` -$ bazel $(STARTUP_FLAGS) build --nobuild //foo:foo -``` - -Next, see how much memory the whole Bazel instance consumes: - -``` -$ bazel $(STARTUP_FLAGS) info used-heap-size-after-gc -> 2594MB -``` - -Break it down by rule class by using `bazel dump --rules`: - -``` -$ bazel $(STARTUP_FLAGS) dump --rules -> - -RULE COUNT ACTIONS BYTES EACH -genrule 33,762 33,801 291,538,824 8,635 -config_setting 25,374 0 24,897,336 981 -filegroup 25,369 25,369 97,496,272 3,843 -cc_library 5,372 73,235 182,214,456 33,919 -proto_library 4,140 110,409 186,776,864 45,115 -android_library 2,621 36,921 218,504,848 83,366 -java_library 2,371 12,459 38,841,000 16,381 -_gen_source 719 2,157 9,195,312 12,789 -_check_proto_library_deps 719 668 1,835,288 2,552 -... (more output) -``` - -Look at where the memory is going by producing a `pprof` file -using `bazel dump --skylark_memory`: - -``` -$ bazel $(STARTUP_FLAGS) dump --skylark_memory=$HOME/prof.gz -> Dumping Starlark heap to: /usr/local/google/home/$USER/prof.gz -``` - -Use the `pprof` tool to investigate the heap. A good starting point is -getting a flame graph by using `pprof -flame $HOME/prof.gz`. - -Get `pprof` from [https://github.com/google/pprof](https://github.com/google/pprof). - -Get a text dump of the hottest call sites annotated with lines: - -``` -$ pprof -text -lines $HOME/prof.gz -> - flat flat% sum% cum cum% - 146.11MB 19.64% 19.64% 146.11MB 19.64% android_library :-1 - 113.02MB 15.19% 34.83% 113.02MB 15.19% genrule :-1 - 74.11MB 9.96% 44.80% 74.11MB 9.96% glob :-1 - 55.98MB 7.53% 52.32% 55.98MB 7.53% filegroup :-1 - 53.44MB 7.18% 59.51% 53.44MB 7.18% sh_test :-1 - 26.55MB 3.57% 63.07% 26.55MB 3.57% _generate_foo_files /foo/tc/tc.bzl:491 - 26.01MB 3.50% 66.57% 26.01MB 3.50% _build_foo_impl /foo/build_test.bzl:78 - 22.01MB 2.96% 69.53% 22.01MB 2.96% _build_foo_impl /foo/build_test.bzl:73 - ... (more output) -``` diff --git a/8.0.1/rules/rules-tutorial.mdx b/8.0.1/rules/rules-tutorial.mdx deleted file mode 100644 index 4c6698e..0000000 --- a/8.0.1/rules/rules-tutorial.mdx +++ /dev/null @@ -1,367 +0,0 @@ ---- -title: 'Rules Tutorial' ---- - - - - -[Starlark](https://github.com/bazelbuild/starlark) is a Python-like -configuration language originally developed for use in Bazel and since adopted -by other tools. Bazel's `BUILD` and `.bzl` files are written in a dialect of -Starlark properly known as the "Build Language", though it is often simply -referred to as "Starlark", especially when emphasizing that a feature is -expressed in the Build Language as opposed to being a built-in or "native" part -of Bazel. Bazel augments the core language with numerous build-related functions -such as `glob`, `genrule`, `java_binary`, and so on. - -See the -[Bazel](/start/) and [Starlark](/extending/concepts) documentation for -more details, and the -[Rules SIG template](https://github.com/bazel-contrib/rules-template) as a -starting point for new rulesets. - -## The empty rule - -To create your first rule, create the file `foo.bzl`: - -```python -def _foo_binary_impl(ctx): - pass - -foo_binary = rule( - implementation = _foo_binary_impl, -) -``` - -When you call the [`rule`](lib/globals#rule) function, you -must define a callback function. The logic will go there, but you -can leave the function empty for now. 
The [`ctx`](lib/ctx) argument -provides information about the target. - -You can load the rule and use it from a `BUILD` file. - -Create a `BUILD` file in the same directory: - -```python -load(":foo.bzl", "foo_binary") - -foo_binary(name = "bin") -``` - -Now, the target can be built: - -``` -$ bazel build bin -INFO: Analyzed target //:bin (2 packages loaded, 17 targets configured). -INFO: Found 1 target... -Target //:bin up-to-date (nothing to build) -``` - -Even though the rule does nothing, it already behaves like other rules: it has a -mandatory name, it supports common attributes like `visibility`, `testonly`, and -`tags`. - -## Evaluation model - -Before going further, it's important to understand how the code is evaluated. - -Update `foo.bzl` with some print statements: - -```python -def _foo_binary_impl(ctx): - print("analyzing", ctx.label) - -foo_binary = rule( - implementation = _foo_binary_impl, -) - -print("bzl file evaluation") -``` - -and BUILD: - -```python -load(":foo.bzl", "foo_binary") - -print("BUILD file") -foo_binary(name = "bin1") -foo_binary(name = "bin2") -``` - -[`ctx.label`](lib/ctx#label) -corresponds to the label of the target being analyzed. The `ctx` object has -many useful fields and methods; you can find an exhaustive list in the -[API reference](lib/ctx). - -Query the code: - -``` -$ bazel query :all -DEBUG: /usr/home/bazel-codelab/foo.bzl:8:1: bzl file evaluation -DEBUG: /usr/home/bazel-codelab/BUILD:2:1: BUILD file -//:bin2 -//:bin1 -``` - -Make a few observations: - -* "bzl file evaluation" is printed first. Before evaluating the `BUILD` file, - Bazel evaluates all the files it loads. If multiple `BUILD` files are loading - foo.bzl, you would see only one occurrence of "bzl file evaluation" because - Bazel caches the result of the evaluation. -* The callback function `_foo_binary_impl` is not called. Bazel query loads - `BUILD` files, but doesn't analyze targets. - -To analyze the targets, use the [`cquery`](/query/cquery) ("configured -query") or the `build` command: - -``` -$ bazel build :all -DEBUG: /usr/home/bazel-codelab/foo.bzl:2:5: analyzing //:bin1 -DEBUG: /usr/home/bazel-codelab/foo.bzl:2:5: analyzing //:bin2 -INFO: Analyzed 2 targets (0 packages loaded, 0 targets configured). -INFO: Found 2 targets... -``` - -As you can see, `_foo_binary_impl` is now called twice - once for each target. - -Notice that neither "bzl file evaluation" nor "BUILD file" are printed again, -because the evaluation of `foo.bzl` is cached after the call to `bazel query`. -Bazel only emits `print` statements when they are actually executed. - -## Creating a file - -To make your rule more useful, update it to generate a file. First, declare the -file and give it a name. In this example, create a file with the same name as -the target: - -```python -ctx.actions.declare_file(ctx.label.name) -``` - -If you run `bazel build :all` now, you will get an error: - -``` -The following files have no generating action: -bin2 -``` - -Whenever you declare a file, you have to tell Bazel how to generate it by -creating an action. Use [`ctx.actions.write`](lib/actions#write), -to create a file with the given content. 
-
-```python
-def _foo_binary_impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name)
-    ctx.actions.write(
-        output = out,
-        content = "Hello\n",
-    )
-```
-
-The code is valid, but it won't do anything:
-
-```
-$ bazel build bin1
-Target //:bin1 up-to-date (nothing to build)
-```
-
-The `ctx.actions.write` function registered an action, which taught Bazel
-how to generate the file. But Bazel won't create the file until it is
-actually requested. So the last thing to do is tell Bazel that the file
-is an output of the rule, and not a temporary file used within the rule
-implementation.
-
-```python
-def _foo_binary_impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name)
-    ctx.actions.write(
-        output = out,
-        content = "Hello!\n",
-    )
-    return [DefaultInfo(files = depset([out]))]
-```
-
-Look at the `DefaultInfo` and `depset` functions later. For now,
-assume that the last line is the way to choose the outputs of a rule.
-
-Now, run Bazel:
-
-```
-$ bazel build bin1
-INFO: Found 1 target...
-Target //:bin1 up-to-date:
-  bazel-bin/bin1
-
-$ cat bazel-bin/bin1
-Hello!
-```
-
-You have successfully generated a file!
-
-## Attributes
-
-To make the rule more useful, add new attributes using
-[the `attr` module](lib/attr) and update the rule definition.
-
-Add a string attribute called `username`:
-
-```python
-foo_binary = rule(
-    implementation = _foo_binary_impl,
-    attrs = {
-        "username": attr.string(),
-    },
-)
-```
-
-Next, set it in the `BUILD` file:
-
-```python
-foo_binary(
-    name = "bin",
-    username = "Alice",
-)
-```
-
-To access the value in the callback function, use `ctx.attr.username`. For
-example:
-
-```python
-def _foo_binary_impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name)
-    ctx.actions.write(
-        output = out,
-        content = "Hello {}!\n".format(ctx.attr.username),
-    )
-    return [DefaultInfo(files = depset([out]))]
-```
-
-Note that you can make the attribute mandatory or set a default value. Look at
-the documentation of [`attr.string`](lib/attr#string).
-You may also use other types of attributes, such as [boolean](lib/attr#bool)
-or [list of integers](lib/attr#int_list).
-
-## Dependencies
-
-Dependency attributes, such as [`attr.label`](lib/attr#label)
-and [`attr.label_list`](lib/attr#label_list),
-declare a dependency from the target that owns the attribute to the target whose
-label appears in the attribute's value. This kind of attribute forms the basis
-of the target graph.
-
-In the `BUILD` file, the target label appears as a string object, such as
-`//pkg:name`. In the implementation function, the target will be accessible as a
-[`Target`](lib/Target) object. For example, view the files returned
-by the target using [`Target.files`](lib/Target#modules.Target.files).
-
-### Multiple files
-
-By default, only targets created by rules may appear as dependencies (such as a
-`foo_library()` target). If you want the attribute to accept targets that are
-input files (such as source files in the repository), you can do it with
-`allow_files` and specify the list of accepted file extensions (or `True` to
-allow any file extension):
-
-```python
-"srcs": attr.label_list(allow_files = [".java"]),
-```
-
-The list of files can be accessed with `ctx.files.<attribute name>`.
For
-example, the list of files in the `srcs` attribute can be accessed through
-
-```python
-ctx.files.srcs
-```
-
-### Single file
-
-If you need only one file, use `allow_single_file`:
-
-```python
-"src": attr.label(allow_single_file = [".java"])
-```
-
-This file is then accessible under `ctx.file.<attribute name>`:
-
-```python
-ctx.file.src
-```
-
-## Create a file with a template
-
-You can create a rule that generates a .cc file based on a template. You could
-use `ctx.actions.write` to output a string constructed in the rule
-implementation function, but this has two problems. First, as the template gets
-bigger, it becomes more memory-efficient to put it in a separate file and avoid
-constructing large strings during the analysis phase. Second, using a separate
-file is more convenient for the user. Instead, use
-[`ctx.actions.expand_template`](lib/actions#expand_template),
-which performs substitutions on a template file.
-
-Create a `template` attribute to declare a dependency on the template
-file:
-
-```python
-def _hello_world_impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name + ".cc")
-    ctx.actions.expand_template(
-        output = out,
-        template = ctx.file.template,
-        substitutions = {"{NAME}": ctx.attr.username},
-    )
-    return [DefaultInfo(files = depset([out]))]
-
-hello_world = rule(
-    implementation = _hello_world_impl,
-    attrs = {
-        "username": attr.string(default = "unknown person"),
-        "template": attr.label(
-            allow_single_file = [".cc.tpl"],
-            mandatory = True,
-        ),
-    },
-)
-```
-
-Users can use the rule like this:
-
-```python
-hello_world(
-    name = "hello",
-    username = "Alice",
-    template = "file.cc.tpl",
-)
-
-cc_binary(
-    name = "hello_bin",
-    srcs = [":hello"],
-)
-```
-
-If you don't want to expose the template to the end-user and always use the
-same one, you can set a default value and make the attribute private:
-
-```python
-    "_template": attr.label(
-        allow_single_file = True,
-        default = "file.cc.tpl",
-    ),
-```
-
-Attributes that start with an underscore are private and cannot be set in a
-`BUILD` file. The template is now an _implicit dependency_: Every `hello_world`
-target has a dependency on this file. Don't forget to make this file visible
-to other packages by updating the `BUILD` file and using
-[`exports_files`](/reference/be/functions#exports_files):
-
-```python
-exports_files(["file.cc.tpl"])
-```
-
-## Going further
-
-* Take a look at the [reference documentation for rules](/extending/rules#contents).
-* Get familiar with [depsets](/extending/depsets).
-* Check out the [examples repository](https://github.com/bazelbuild/examples/tree/master/rules)
-  which includes additional examples of rules.
diff --git a/8.0.1/rules/testing.mdx b/8.0.1/rules/testing.mdx
deleted file mode 100644
index 2996e08..0000000
--- a/8.0.1/rules/testing.mdx
+++ /dev/null
@@ -1,474 +0,0 @@
----
-title: 'Testing'
----
-
-
-
-There are several different approaches to testing Starlark code in Bazel. This
-page gathers the current best practices and frameworks by use case.
-
-## Testing rules
-
-[Skylib](https://github.com/bazelbuild/bazel-skylib) has a test framework called
-[`unittest.bzl`](https://github.com/bazelbuild/bazel-skylib/blob/main/lib/unittest.bzl)
-for checking the analysis-time behavior of rules, such as their actions and
-providers. Such tests are called "analysis tests" and are currently the best
-option for testing the inner workings of rules.
-
-Some caveats:
-
-* Test assertions occur within the build, not a separate test runner process.
- Targets that are created by the test must be named such that they do not - collide with targets from other tests or from the build. An error that - occurs during the test is seen by Bazel as a build breakage rather than a - test failure. - -* It requires a fair amount of boilerplate to set up the rules under test and - the rules containing test assertions. This boilerplate may seem daunting at - first. It helps to [keep in mind](/extending/concepts#evaluation-model) that macros - are evaluated and targets generated during the loading phase, while rule - implementation functions don't run until later, during the analysis phase. - -* Analysis tests are intended to be fairly small and lightweight. Certain - features of the analysis testing framework are restricted to verifying - targets with a maximum number of transitive dependencies (currently 500). - This is due to performance implications of using these features with larger - tests. - -The basic principle is to define a testing rule that depends on the -rule-under-test. This gives the testing rule access to the rule-under-test's -providers. - -The testing rule's implementation function carries out assertions. If there are -any failures, these are not raised immediately by calling `fail()` (which would -trigger an analysis-time build error), but rather by storing the errors in a -generated script that fails at test execution time. - -See below for a minimal toy example, followed by an example that checks actions. - -### Minimal example - -`//mypkg/myrules.bzl`: - -```python -MyInfo = provider(fields = { - "val": "string value", - "out": "output File", -}) - -def _myrule_impl(ctx): - """Rule that just generates a file and returns a provider.""" - out = ctx.actions.declare_file(ctx.label.name + ".out") - ctx.actions.write(out, "abc") - return [MyInfo(val="some value", out=out)] - -myrule = rule( - implementation = _myrule_impl, -) -``` - -`//mypkg/myrules_test.bzl`: - - -```python -load("@bazel_skylib//lib:unittest.bzl", "asserts", "analysistest") -load(":myrules.bzl", "myrule", "MyInfo") - -# ==== Check the provider contents ==== - -def _provider_contents_test_impl(ctx): - env = analysistest.begin(ctx) - - target_under_test = analysistest.target_under_test(env) - # If preferred, could pass these values as "expected" and "actual" keyword - # arguments. - asserts.equals(env, "some value", target_under_test[MyInfo].val) - - # If you forget to return end(), you will get an error about an analysis - # test needing to return an instance of AnalysisTestResultInfo. - return analysistest.end(env) - -# Create the testing rule to wrap the test logic. This must be bound to a global -# variable, not called in a macro's body, since macros get evaluated at loading -# time but the rule gets evaluated later, at analysis time. Since this is a test -# rule, its name must end with "_test". -provider_contents_test = analysistest.make(_provider_contents_test_impl) - -# Macro to setup the test. -def _test_provider_contents(): - # Rule under test. Be sure to tag 'manual', as this target should not be - # built using `:all` except as a dependency of the test. - myrule(name = "provider_contents_subject", tags = ["manual"]) - # Testing rule. - provider_contents_test(name = "provider_contents_test", - target_under_test = ":provider_contents_subject") - # Note the target_under_test attribute is how the test rule depends on - # the real rule target. 
-
-# Entry point from the BUILD file; macro for running each test case's macro and
-# declaring a test suite that wraps them together.
-def myrules_test_suite(name):
-    # Call all test functions and wrap their targets in a suite.
-    _test_provider_contents()
-    # ...
-
-    native.test_suite(
-        name = name,
-        tests = [
-            ":provider_contents_test",
-            # ...
-        ],
-    )
-```
-
-`//mypkg/BUILD`:
-
-```python
-load(":myrules.bzl", "myrule")
-load(":myrules_test.bzl", "myrules_test_suite")
-
-# Production use of the rule.
-myrule(
-    name = "mytarget",
-)
-
-# Call a macro that defines targets that perform the tests at analysis time,
-# and that can be executed with "bazel test" to return the result.
-myrules_test_suite(name = "myrules_test")
-```
-
-The test can be run with `bazel test //mypkg:myrules_test`.
-
-Aside from the initial `load()` statements, there are two main parts to the
-file:
-
-* The tests themselves, each of which consists of 1) an analysis-time
-  implementation function for the testing rule, 2) a declaration of the
-  testing rule via `analysistest.make()`, and 3) a loading-time function
-  (macro) for declaring the rule-under-test (and its dependencies) and testing
-  rule. If the assertions do not change between test cases, 1) and 2) may be
-  shared by multiple test cases.
-
-* The test suite function, which calls the loading-time functions for each
-  test, and declares a `test_suite` target bundling all tests together.
-
-For consistency, follow the recommended naming convention: Let `foo` stand for
-the part of the test name that describes what the test is checking
-(`provider_contents` in the above example). For example, a JUnit test method
-would be named `testFoo`.
-
-Then:
-
-* the macro which generates the test and target under test should be
-  named `_test_foo` (`_test_provider_contents`)
-
-* its test rule type should be named `foo_test` (`provider_contents_test`)
-
-* the label of the target of this rule type should be `foo_test`
-  (`provider_contents_test`)
-
-* the implementation function for the testing rule should be named
-  `_foo_test_impl` (`_provider_contents_test_impl`)
-
-* the labels of the targets of the rules under test and their dependencies
-  should be prefixed with `foo_` (`provider_contents_`)
-
-Note that the labels of all targets can conflict with other labels in the same
-BUILD package, so it's helpful to use a unique name for the test.
-
-### Failure testing
-
-It may be useful to verify that a rule fails given certain inputs or in a
-certain state. This can be done using the analysis test framework:
-
-The test rule created with `analysistest.make` should specify `expect_failure`:
-
-```python
-failure_testing_test = analysistest.make(
-    _failure_testing_test_impl,
-    expect_failure = True,
-)
-```
-
-The test rule implementation should make assertions on the nature of the failure
-that took place (specifically, the failure message):
-
-```python
-def _failure_testing_test_impl(ctx):
-    env = analysistest.begin(ctx)
-    asserts.expect_failure(env, "This rule should never work")
-    return analysistest.end(env)
-```
-
-Also make sure that your target under test is specifically tagged 'manual'.
-Without this, building all targets in your package using `:all` will result in a
-build of the intentionally-failing target and will exhibit a build failure.
With -'manual', your target under test will build only if explicitly specified, or as -a dependency of a non-manual target (such as your test rule): - -```python -def _test_failure(): - myrule(name = "this_should_fail", tags = ["manual"]) - - failure_testing_test(name = "failure_testing_test", - target_under_test = ":this_should_fail") - -# Then call _test_failure() in the macro which generates the test suite and add -# ":failure_testing_test" to the suite's test targets. -``` - -### Verifying registered actions - -You may want to write tests which make assertions about the actions that your -rule registers, for example, using `ctx.actions.run()`. This can be done in your -analysis test rule implementation function. An example: - -```python -def _inspect_actions_test_impl(ctx): - env = analysistest.begin(ctx) - - target_under_test = analysistest.target_under_test(env) - actions = analysistest.target_actions(env) - asserts.equals(env, 1, len(actions)) - action_output = actions[0].outputs.to_list()[0] - asserts.equals( - env, target_under_test.label.name + ".out", action_output.basename) - return analysistest.end(env) -``` - -Note that `analysistest.target_actions(env)` returns a list of -[`Action`](lib/Action) objects which represent actions registered by the -target under test. - -### Verifying rule behavior under different flags - -You may want to verify your real rule behaves a certain way given certain build -flags. For example, your rule may behave differently if a user specifies: - -```shell -bazel build //mypkg:real_target -c opt -``` - -versus - -```shell -bazel build //mypkg:real_target -c dbg -``` - -At first glance, this could be done by testing the target under test using the -desired build flags: - -```shell -bazel test //mypkg:myrules_test -c opt -``` - -But then it becomes impossible for your test suite to simultaneously contain a -test which verifies the rule behavior under `-c opt` and another test which -verifies the rule behavior under `-c dbg`. Both tests would not be able to run -in the same build! - -This can be solved by specifying the desired build flags when defining the test -rule: - -```python -myrule_c_opt_test = analysistest.make( - _myrule_c_opt_test_impl, - config_settings = { - "//command_line_option:compilation_mode": "opt", - }, -) -``` - -Normally, a target under test is analyzed given the current build flags. -Specifying `config_settings` overrides the values of the specified command line -options. (Any unspecified options will retain their values from the actual -command line). - -In the specified `config_settings` dictionary, command line flags must be -prefixed with a special placeholder value `//command_line_option:`, as is shown -above. - - -## Validating artifacts - -The main ways to check that your generated files are correct are: - -* You can write a test script in shell, Python, or another language, and - create a target of the appropriate `*_test` rule type. - -* You can use a specialized rule for the kind of test you want to perform. - -### Using a test target - -The most straightforward way to validate an artifact is to write a script and -add a `*_test` target to your BUILD file. The specific artifacts you want to -check should be data dependencies of this target. If your validation logic is -reusable for multiple tests, it should be a script that takes command line -arguments that are controlled by the test target's `args` attribute. Here's an -example that validates that the output of `myrule` from above is `"abc"`. 
-
-`//mypkg/myrule_validator.sh`:
-
-```shell
-if [ "$(cat $1)" = "abc" ]; then
-  echo "Passed"
-  exit 0
-else
-  echo "Failed"
-  exit 1
-fi
-```
-
-`//mypkg/BUILD`:
-
-```python
-...
-
-myrule(
-    name = "mytarget",
-)
-
-...
-
-# Needed for each target whose artifacts are to be checked.
-sh_test(
-    name = "validate_mytarget",
-    srcs = [":myrule_validator.sh"],
-    args = ["$(location :mytarget.out)"],
-    data = [":mytarget.out"],
-)
-```
-
-### Using a custom rule
-
-A more complicated alternative is to write the shell script as a template that
-gets instantiated by a new rule. This involves more indirection and Starlark
-logic, but leads to cleaner BUILD files. As a side-benefit, any argument
-preprocessing can be done in Starlark instead of the script, and the script is
-slightly more self-documenting since it uses symbolic placeholders (for
-substitutions) instead of numeric ones (for arguments).
-
-`//mypkg/myrule_validator.sh.template`:
-
-```shell
-if [ "$(cat %TARGET%)" = "abc" ]; then
-  echo "Passed"
-  exit 0
-else
-  echo "Failed"
-  exit 1
-fi
-```
-
-`//mypkg/myrule_validation.bzl`:
-
-```python
-def _myrule_validation_test_impl(ctx):
-    """Rule for instantiating myrule_validator.sh.template for a given target."""
-    exe = ctx.outputs.executable
-    target = ctx.file.target
-    ctx.actions.expand_template(output = exe,
-                                template = ctx.file._script,
-                                is_executable = True,
-                                substitutions = {
-                                    "%TARGET%": target.short_path,
-                                })
-    # This is needed to make sure the output file of myrule is visible to the
-    # resulting instantiated script.
-    return [DefaultInfo(runfiles=ctx.runfiles(files=[target]))]
-
-myrule_validation_test = rule(
-    implementation = _myrule_validation_test_impl,
-    attrs = {"target": attr.label(allow_single_file=True),
-             # You need an implicit dependency in order to access the template.
-             # A target could potentially override this attribute to modify
-             # the test logic.
-             "_script": attr.label(allow_single_file=True,
-                                   default=Label("//mypkg:myrule_validator"))},
-    test = True,
-)
-```
-
-`//mypkg/BUILD`:
-
-```python
-...
-
-myrule(
-    name = "mytarget",
-)
-
-...
-
-# Needed just once, to expose the template. Could have also used exports_files(),
-# and made the _script attribute set allow_files=True.
-filegroup(
-    name = "myrule_validator",
-    srcs = [":myrule_validator.sh.template"],
-)
-
-# Needed for each target whose artifacts are to be checked. Notice that you no
-# longer have to specify the output file name in a data attribute, or its
-# $(location) expansion in an args attribute, or the label for the script
-# (unless you want to override it).
-myrule_validation_test(
-    name = "validate_mytarget",
-    target = ":mytarget",
-)
-```
-
-Alternatively, instead of using a template expansion action, you could have
-inlined the template into the .bzl file as a string and expanded it during the
-analysis phase using the `str.format` method or `%`-formatting.
-
-## Testing Starlark utilities
-
-[Skylib](https://github.com/bazelbuild/bazel-skylib)'s
-[`unittest.bzl`](https://github.com/bazelbuild/bazel-skylib/blob/main/lib/unittest.bzl)
-framework can be used to test utility functions (that is, functions that are
-neither macros nor rule implementations). Instead of using `unittest.bzl`'s
-`analysistest` library, `unittest` may be used. For such test suites, the
-convenience function `unittest.suite()` can be used to reduce boilerplate.
- -`//mypkg/myhelpers.bzl`: - -```python -def myhelper(): - return "abc" -``` - -`//mypkg/myhelpers_test.bzl`: - - -```python -load("@bazel_skylib//lib:unittest.bzl", "asserts", "unittest") -load(":myhelpers.bzl", "myhelper") - -def _myhelper_test_impl(ctx): - env = unittest.begin(ctx) - asserts.equals(env, "abc", myhelper()) - return unittest.end(env) - -myhelper_test = unittest.make(_myhelper_test_impl) - -# No need for a test_myhelper() setup function. - -def myhelpers_test_suite(name): - # unittest.suite() takes care of instantiating the testing rules and creating - # a test_suite. - unittest.suite( - name, - myhelper_test, - # ... - ) -``` - -`//mypkg/BUILD`: - -```python -load(":myhelpers_test.bzl", "myhelpers_test_suite") - -myhelpers_test_suite(name = "myhelpers_tests") -``` - -For more examples, see Skylib's own [tests](https://github.com/bazelbuild/bazel-skylib/blob/main/tests/BUILD). diff --git a/8.0.1/rules/verbs-tutorial.mdx b/8.0.1/rules/verbs-tutorial.mdx deleted file mode 100644 index db7757e..0000000 --- a/8.0.1/rules/verbs-tutorial.mdx +++ /dev/null @@ -1,177 +0,0 @@ ---- -title: 'Using Macros to Create Custom Verbs' ---- - - - -Day-to-day interaction with Bazel happens primarily through a few commands: -`build`, `test`, and `run`. At times, though, these can feel limited: you may -want to push packages to a repository, publish documentation for end-users, or -deploy an application with Kubernetes. But Bazel doesn't have a `publish` or -`deploy` command – where do these actions fit in? - -## The bazel run command - -Bazel's focus on hermeticity, reproducibility, and incrementality means the -`build` and `test` commands aren't helpful for the above tasks. These actions -may run in a sandbox, with limited network access, and aren't guaranteed to be -re-run with every `bazel build`. - -Instead, rely on `bazel run`: the workhorse for tasks that you *want* to have -side effects. Bazel users are accustomed to rules that create executables, and -rule authors can follow a common set of patterns to extend this to -"custom verbs". - -### In the wild: rules_k8s -For example, consider [`rules_k8s`](https://github.com/bazelbuild/rules_k8s), -the Kubernetes rules for Bazel. Suppose you have the following target: - -```python -# BUILD file in //application/k8s -k8s_object( - name = "staging", - kind = "deployment", - cluster = "testing", - template = "deployment.yaml", -) -``` - -The [`k8s_object` rule](https://github.com/bazelbuild/rules_k8s#usage) builds a -standard Kubernetes YAML file when `bazel build` is used on the `staging` -target. However, the additional targets are also created by the `k8s_object` -macro with names like `staging.apply` and `:staging.delete`. These build -scripts to perform those actions, and when executed with `bazel run -staging.apply`, these behave like our own `bazel k8s-apply` or `bazel -k8s-delete` commands. - -### Another example: ts_api_guardian_test - -This pattern can also be seen in the Angular project. The -[`ts_api_guardian_test` macro](https://github.com/angular/angular/blob/16ac611a8410e6bcef8ffc779f488ca4fa102155/tools/ts-api-guardian/index.bzl#L22) -produces two targets. The first is a standard `nodejs_test` target which compares -some generated output against a "golden" file (that is, a file containing the -expected output). This can be built and run with a normal `bazel -test` invocation. 
In `angular-cli`, you can run [one such -target](https://github.com/angular/angular-cli/blob/e1269cb520871ee29b1a4eec6e6c0e4a94f0b5fc/etc/api/BUILD) -with `bazel test //etc/api:angular_devkit_core_api`. - -Over time, this golden file may need to be updated for legitimate reasons. -Updating this manually is tedious and error-prone, so this macro also provides -a `nodejs_binary` target that updates the golden file, instead of comparing -against it. Effectively, the same test script can be written to run in "verify" -or "accept" mode, based on how it's invoked. This follows the same pattern -you've learned already: there is no native `bazel test-accept` command, but the -same effect can be achieved with -`bazel run //etc/api:angular_devkit_core_api.accept`. - -This pattern can be quite powerful, and turns out to be quite common once you -learn to recognize it. - -## Adapting your own rules - -[Macros](/extending/macros) are the heart of this pattern. Macros are used like -rules, but they can create several targets. Typically, they will create a -target with the specified name which performs the primary build action: perhaps -it builds a normal binary, a Docker image, or an archive of source code. In -this pattern, additional targets are created to produce scripts performing side -effects based on the output of the primary target, like publishing the -resulting binary or updating the expected test output. - -To illustrate this, wrap an imaginary rule that generates a website with -[Sphinx](https://www.sphinx-doc.org) with a macro to create an additional -target that allows the user to publish it when ready. Consider the following -existing rule for generating a website with Sphinx: - -```python -_sphinx_site = rule( - implementation = _sphinx_impl, - attrs = {"srcs": attr.label_list(allow_files = [".rst"])}, -) -``` - -Next, consider a rule like the following, which builds a script that, when run, -publishes the generated pages: - -```python -_sphinx_publisher = rule( - implementation = _publish_impl, - attrs = { - "site": attr.label(), - "_publisher": attr.label( - default = "//internal/sphinx:publisher", - executable = True, - ), - }, - executable = True, -) -``` - -Finally, define the following symbolic macro (available in Bazel 8 or newer) to -create targets for both of the above rules together: - -```starlark -def _sphinx_site_impl(name, visibility, srcs, **kwargs): - # This creates the primary target, producing the Sphinx-generated HTML. We - # set `visibility = visibility` to make it visible to callers of the - # macro. - _sphinx_site(name = name, visibility = visibility, srcs = srcs, **kwargs) - # This creates the secondary target, which produces a script for publishing - # the site generated above. We don't want it to be visible to callers of - # our macro, so we omit visibility for it. - _sphinx_publisher(name = "%s.publish" % name, site = name, **kwargs) - -sphinx_site = macro( - implementation = _sphinx_site_impl, - attrs = {"srcs": attr.label_list(allow_files = [".rst"])}, - # Inherit common attributes like tags and testonly - inherit_attrs = "common", -) -``` - -Or, if you need to support Bazel releases older than Bazel 8, you would instead -define a legacy macro: - -```starlark -def sphinx_site(name, srcs = [], **kwargs): - # This creates the primary target, producing the Sphinx-generated HTML. - _sphinx_site(name = name, srcs = srcs, **kwargs) - # This creates the secondary target, which produces a script for publishing - # the site generated above. 
-    _sphinx_publisher(name = "%s.publish" % name, site = name, **kwargs)
-```
-
-In the `BUILD` files, use the macro as though it just creates the primary
-target:
-
-```python
-sphinx_site(
-    name = "docs",
-    srcs = ["index.md", "providers.md"],
-)
-```
-
-In this example, a "docs" target is created, just as though the macro were a
-standard, single Bazel rule. When built, the rule generates some configuration
-and runs Sphinx to produce an HTML site, ready for manual inspection. However,
-an additional "docs.publish" target is also created, which builds a script for
-publishing the site. Once you check the output of the primary target, you can
-use `bazel run :docs.publish` to publish it for public consumption, just like
-an imaginary `bazel publish` command.
-
-It's not immediately obvious what the implementation of the `_sphinx_publisher`
-rule might look like. Often, actions like this write a _launcher_ shell script.
-This method typically involves using
-[`ctx.actions.expand_template`](lib/actions#expand_template)
-to write a very simple shell script, in this case invoking the publisher binary
-with a path to the output of the primary target. This way, the publisher
-implementation can remain generic, the `_sphinx_site` rule can just produce
-HTML, and this small script is all that's necessary to combine the two
-together.
-
-In `rules_k8s`, this is indeed what `.apply` does:
-[`expand_template`](https://github.com/bazelbuild/rules_k8s/blob/f10e7025df7651f47a76abf1db5ade1ffeb0c6ac/k8s/object.bzl#L213-L241)
-writes a very simple Bash script, based on
-[`apply.sh.tpl`](https://github.com/bazelbuild/rules_k8s/blob/f10e7025df7651f47a76abf1db5ade1ffeb0c6ac/k8s/apply.sh.tpl),
-which runs `kubectl` with the output of the primary target. This script can
-then be built and run with `bazel run :staging.apply`, effectively providing a
-`k8s-apply` command for `k8s_object` targets.
diff --git a/8.0.1/run/bazelrc.mdx b/8.0.1/run/bazelrc.mdx
deleted file mode 100644
index 15f89c8..0000000
--- a/8.0.1/run/bazelrc.mdx
+++ /dev/null
@@ -1,260 +0,0 @@
----
-title: 'Write bazelrc configuration files'
----
-
-
-Bazel accepts many options. Some options are varied frequently (for example,
-`--subcommands`) while others stay the same across several builds (such as
-`--package_path`). To avoid specifying these unchanged options for every build
-(and other commands), you can specify options in a configuration file, called
-`.bazelrc`.
-
-### Where are the `.bazelrc` files?
-
-Bazel looks for optional configuration files in the following locations,
-in the order shown below. The options are interpreted in this order, so
-options in later files can override a value from an earlier file if a
-conflict arises. All options that control which of these files are loaded are
-startup options, which means they must occur after `bazel` and
-before the command (`build`, `test`, etc.).
-
-1. **The system RC file**, unless `--nosystem_rc` is present.
-
-   Path:
-
-   - On Linux/macOS/Unixes: `/etc/bazel.bazelrc`
-   - On Windows: `%ProgramData%\bazel.bazelrc`
-
-   It is not an error if this file does not exist.
-
-   If another system-specified location is required, you must build a custom
-   Bazel binary, overriding the `BAZEL_SYSTEM_BAZELRC_PATH` value in
-   [`//src/main/cpp:option_processor`](https://github.com/bazelbuild/bazel/blob/0.28.0/src/main/cpp/BUILD#L141).
-   The system-specified location may contain environment variable references,
-   such as `${VAR_NAME}` on Unix or `%VAR_NAME%` on Windows.
-
-2. 
**The workspace RC file**, unless `--noworkspace_rc` is present. - - Path: `.bazelrc` in your workspace directory (next to the main - `MODULE.bazel` file). - - It is not an error if this file does not exist. - -3. **The home RC file**, unless `--nohome_rc` is present. - - Path: - - - On Linux/macOS/Unixes: `$HOME/.bazelrc` - - On Windows: `%USERPROFILE%\.bazelrc` if exists, or `%HOME%/.bazelrc` - - It is not an error if this file does not exist. - -4. **The user-specified RC file**, if specified with - --bazelrc=file - - This flag is optional but can also be specified multiple times. - - `/dev/null` indicates that all further `--bazelrc`s will be ignored, which - is useful to disable the search for a user rc file, such as in release - builds. - - For example: - - ``` - --bazelrc=x.rc --bazelrc=y.rc --bazelrc=/dev/null --bazelrc=z.rc - ``` - - - `x.rc` and `y.rc` are read. - - `z.rc` is ignored due to the prior `/dev/null`. - -In addition to this optional configuration file, Bazel looks for a global rc -file. For more details, see the [global bazelrc section](#global-bazelrc). - - -### `.bazelrc` syntax and semantics - -Like all UNIX "rc" files, the `.bazelrc` file is a text file with a line-based -grammar. Empty lines and lines starting with `#` (comments) are ignored. Each -line contains a sequence of words, which are tokenized according to the same -rules as the Bourne shell. - -#### Imports - -Lines that start with `import` or `try-import` are special: use these to load -other "rc" files. To specify a path that is relative to the workspace root, -write `import %workspace%/path/to/bazelrc`. - -The difference between `import` and `try-import` is that Bazel fails if the -`import`'ed file is missing (or can't be read), but not so for a `try-import`'ed -file. - -Import precedence: - -- Options in the imported file take precedence over options specified before - the import statement. -- Options specified after the import statement take precedence over the - options in the imported file. -- Options in files imported later take precedence over files imported earlier. - -#### Option defaults - -Most lines of a bazelrc define default option values. The first word on each -line specifies when these defaults are applied: - -- `startup`: startup options, which go before the command, and are described - in `bazel help startup_options`. -- `common`: options that should be applied to all Bazel commands that support - them. If a command does not support an option specified in this way, the - option is ignored so long as it is valid for *some* other Bazel command. - Note that this only applies to option names: If the current command accepts - an option with the specified name, but doesn't support the specified value, - it will fail. -- `always`: options that apply to all Bazel commands. If a command does not - support an option specified in this way, it will fail. -- _`command`_: Bazel command, such as `build` or `query` to which the options - apply. These options also apply to all commands that inherit from the - specified command. (For example, `test` inherits from `build`.) - -Each of these lines may be used more than once and the arguments that follow the -first word are combined as if they had appeared on a single line. (Users of CVS, -another tool with a "Swiss army knife" command-line interface, will find the -syntax similar to that of `.cvsrc`.) 
For example, the lines:
-
-```posix-terminal
-build --test_tmpdir=/tmp/foo --verbose_failures
-
-build --test_tmpdir=/tmp/bar
-```
-
-are combined as:
-
-```posix-terminal
-build --test_tmpdir=/tmp/foo --verbose_failures --test_tmpdir=/tmp/bar
-```
-
-so the effective flags are `--verbose_failures` and `--test_tmpdir=/tmp/bar`.
-
-Option precedence:
-
-- Options on the command line always take precedence over those in rc files.
-  For example, if a rc file says `build -c opt` but the command line flag is
-  `-c dbg`, the command line flag takes precedence.
-- Within the rc file, precedence is governed by specificity: lines for a more
-  specific command take precedence over lines for a less specific command.
-
-  Specificity is defined by inheritance. Some commands inherit options from
-  other commands, making the inheriting command more specific than the base
-  command. For example `test` inherits from the `build` command, so all `bazel
-  build` flags are valid for `bazel test`, and all `build` lines apply also to
-  `bazel test` unless there's a `test` line for the same option. If the rc
-  file says:
-
-  ```posix-terminal
-  test -c dbg --test_env=PATH
-
-  build -c opt --verbose_failures
-  ```
-
-  then `bazel build //foo` will use `-c opt --verbose_failures`, and `bazel
-  test //foo` will use `--verbose_failures -c dbg --test_env=PATH`.
-
-  The inheritance (specificity) graph is:
-
-  * Every command inherits from `common`
-  * The following commands inherit from (and are more specific than)
-    `build`: `test`, `run`, `clean`, `mobile-install`, `info`,
-    `print_action`, `config`, `cquery`, and `aquery`
-  * `coverage`, `fetch`, and `vendor` inherit from `test`
-
-- Two lines specifying options for the same command at equal specificity are
-  parsed in the order in which they appear within the file.
-
-- Because this precedence rule does not match the file order, it helps
-  readability if you follow the precedence order within rc files: start with
-  `common` options at the top, and end with the most-specific commands at the
-  bottom of the file. This way, the order in which the options are read is the
-  same as the order in which they are applied, which is more intuitive.
-
-The arguments specified on a line of an rc file may include arguments that are
-not options, such as the names of build targets, and so on. These, like the
-options specified in the same files, have lower precedence than their siblings
-on the command line, and are always prepended to the explicit list of
-non-option arguments.
-
-#### `--config`
-
-In addition to setting option defaults, the rc file can be used to group options
-and provide a shorthand for common groupings. This is done by adding a `:name`
-suffix to the command. These options are ignored by default, but will be
-included when the option `--config=name` is present,
-either on the command line or in a `.bazelrc` file, recursively, even inside of
-another config definition. The options specified by `command:name` will only be
-expanded for applicable commands, in the precedence order described above.
-
-Note: Configs can be defined in any `.bazelrc` file, and all lines of the form
-`command:name` (for applicable commands) will be expanded across the
-different rc files. In order to avoid name conflicts, we suggest that configs
-defined in personal rc files start with an underscore (`_`) to avoid
-unintentional name sharing.
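-
-As a concrete sketch, a hypothetical `ci` config (the name is illustrative,
-not part of the surrounding example) might group options that should only
-apply on a continuous-integration machine:
-
-```
-# Only applied when --config=ci is given:
-build:ci --verbose_failures
-build:ci --keep_going
-test:ci --test_output=errors
-```
-
-Running `bazel test --config=ci //...` then expands the matching `build:ci`
-and `test:ci` lines for that invocation.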
- -`--config=foo` expands to the options defined in -[the rc files](#bazelrc-file-locations) "in-place" so that the options -specified for the config have the same precedence that the `--config=foo` option -had. - -This syntax does not extend to the use of `startup` to set -[startup options](#option-defaults). Setting -`startup:config-name --some_startup_option` in the .bazelrc will be ignored. - -#### `--enable_platform_specific_config` - -Platform specific configs in the `.bazelrc` can be automatically enabled using -`--enable_platform_specific_config`. For example, if the host OS is Linux and -the `build` command is run, the `build:linux` configuration will be -automatically enabled. Supported OS identifiers are `linux`, `macos`, `windows`, -`freebsd`, and `openbsd`. Enabling this flag is equivalent to using -`--config=linux` on Linux, `--config=windows` on Windows, and so on. - -See [--enable_platform_specific_config](/reference/command-line-reference#flag--enable_platform_specific_config). - -#### Example - -Here's an example `~/.bazelrc` file: - -``` -# Bob's Bazel option defaults - -startup --host_jvm_args=-XX:-UseParallelGC -import /home/bobs_project/bazelrc -build --show_timestamps --keep_going --jobs 600 -build --color=yes -query --keep_going - -# Definition of --config=memcheck -build:memcheck --strip=never --test_timeout=3600 -``` - -### Other files governing Bazel's behavior - -#### `.bazelignore` - -You can specify directories within the workspace -that you want Bazel to ignore, such as related projects -that use other build systems. Place a file called -`.bazelignore` at the root of the workspace -and add the directories you want Bazel to ignore, one per -line. Entries are relative to the workspace root. - -### The global bazelrc file - -Bazel reads optional bazelrc files in this order: - -1. System rc-file located at `etc/bazel.bazelrc`. -2. Workspace rc-file located at `$workspace/tools/bazel.rc`. -3. Home rc-file located at `$HOME/.bazelrc` - -Each bazelrc file listed here has a corresponding flag which can be used to -disable them (e.g. `--nosystem_rc`, `--noworkspace_rc`, `--nohome_rc`). You can -also make Bazel ignore all bazelrcs by passing the `--ignore_all_rc_files` -startup option. diff --git a/8.0.1/run/client-server.mdx b/8.0.1/run/client-server.mdx deleted file mode 100644 index 1868635..0000000 --- a/8.0.1/run/client-server.mdx +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: 'Client/server implementation' ---- - - - -The Bazel system is implemented as a long-lived server process. This allows it -to perform many optimizations not possible with a batch-oriented implementation, -such as caching of BUILD files, dependency graphs, and other metadata from one -build to the next. This improves the speed of incremental builds, and allows -different commands, such as `build` and `query` to share the same cache of -loaded packages, making queries very fast. Each server can handle at most one -invocation at a time; further concurrent invocations will either block or -fail-fast (see `--block_for_lock`). - -When you run `bazel`, you're running the client. The client finds the server -based on the [output base](/run/scripts#output-base-option), which by default is -determined by the path of the base workspace directory and your userid, so if -you build in multiple workspaces, you'll have multiple output bases and thus -multiple Bazel server processes. 
Multiple users on the same workstation can
-build concurrently in the same workspace because their output bases will differ
-(different userids).
-
-If the client cannot find a running server instance, it starts a new one. It
-does this by checking if the output base already exists, implying the Bazel
-archive has already been unpacked. Otherwise, if the output base doesn't exist,
-the client unzips the archive's files and sets their `mtime`s to a date 9 years
-in the future. Once installed, the client confirms that the `mtime`s of the
-unzipped files are equal to the far-off date to ensure no installation tampering
-has occurred.
-
-The server process will stop after a period of inactivity (3 hours, by default,
-which can be modified using the startup option `--max_idle_secs`). For the most
-part, the fact that there is a server running is invisible to the user, but
-sometimes it helps to bear this in mind. For example, if you're running scripts
-that perform a lot of automated builds in different directories, it's important
-to ensure that you don't accumulate a lot of idle servers; you can do this by
-explicitly shutting them down when you're finished with them, or by specifying
-a short timeout period.
-
-The name of a Bazel server process appears in the output of `ps x` or `ps -e f`
-as `bazel(dirname)`, where _dirname_ is the basename of the
-directory enclosing the root of your workspace directory. For example:
-
-```posix-terminal
-ps -e f
-16143 ?        Sl     3:00 bazel(src-johndoe2) -server -Djava.library.path=...
-```
-
-This makes it easier to find out which server process belongs to a given
-workspace. (Beware that with certain other options to `ps`, Bazel server
-processes may be named just `java`.) Bazel servers can be stopped using the
-[shutdown](/docs/user-manual#shutdown) command.
-
-When running `bazel`, the client first checks that the server is the appropriate
-version; if not, the server is stopped and a new one started. This ensures that
-the use of a long-running server process doesn't interfere with proper
-versioning.
diff --git a/8.0.1/run/scripts.mdx b/8.0.1/run/scripts.mdx
deleted file mode 100644
index f267c90..0000000
--- a/8.0.1/run/scripts.mdx
+++ /dev/null
@@ -1,131 +0,0 @@
----
-title: 'Calling Bazel from scripts'
----
-
-
-You can call Bazel from scripts to perform a build, run tests, or query
-the dependency graph. Bazel has been designed to enable effective scripting, but
-this section lists some details to bear in mind to make your scripts more
-robust.
-
-### Choosing the output base
-
-The `--output_base` option controls where the Bazel process should write the
-outputs of a build to, as well as various working files used internally by
-Bazel, one of which is a lock that guards against concurrent mutation of the
-output base by multiple Bazel processes.
-
-Choosing the correct output base directory for your script depends on several
-factors. If you need to put the build outputs in a specific location, this will
-dictate the output base you need to use. If you are making a "read only" call to
-Bazel (such as `bazel query`), the locking factors will be more important. In
-particular, if you need to run multiple instances of your script concurrently,
-you should be mindful that each Bazel server process can handle at most one
-invocation [at a time](/run/client-server#clientserver-implementation).
-
-Depending on your situation it may make sense for each instance of your script
-to wait its turn, or it may make sense to use `--output_base` to run multiple
-Bazel servers and use those.
-
-If you use the default output base value, you will be contending for the same
-lock used by the user's interactive Bazel commands. If the user issues
-long-running commands such as builds, your script will have to wait for those
-commands to complete before it can continue.
-
-### Notes about server mode
-
-By default, Bazel uses a long-running [server process](/run/client-server) as an
-optimization. When running Bazel in a script, don't forget to call `shutdown`
-when you're finished with the server, or, specify `--max_idle_secs=5` so that
-idle servers shut themselves down promptly.
-
-### What exit code will I get?
-
-Bazel attempts to differentiate failures due to the source code under
-consideration from external errors that prevent Bazel from executing properly.
-Bazel execution can result in the following exit codes:
-
-**Exit codes common to all commands:**
-
-- `0` - Success
-- `2` - Command Line Problem, Bad or Illegal flags or command combination, or
-  Bad Environment Variables. Your command line must be modified.
-- `8` - Build Interrupted but we terminated with an orderly shutdown.
-- `9` - The server lock is held and `--noblock_for_lock` was passed.
-- `32` - External Environment Failure not on this machine.
-- `33` - Bazel ran out of memory and crashed. You need to modify your command line.
-- `34` - Reserved for Google-internal use.
-- `35` - Reserved for Google-internal use.
-- `36` - Local Environmental Issue, suspected permanent.
-- `37` - Unhandled Exception / Internal Bazel Error.
-- `38` - Transient error publishing results to the Build Event Service.
-- `39` - Blobs required by Bazel are evicted from Remote Cache.
-- `41-44` - Reserved for Google-internal use.
-- `45` - Persistent error publishing results to the Build Event Service.
-- `47` - Reserved for Google-internal use.
-- `49` - Reserved for Google-internal use.
-
-**Return codes for commands `bazel build`, `bazel test`:**
-
-- `1` - Build failed.
-- `3` - Build OK, but some tests failed or timed out.
-- `4` - Build successful but no tests were found even though testing was
-  requested.
-
-**For `bazel run`:**
-
-- `1` - Build failed.
-- If the build succeeds but the executed subprocess returns a non-zero exit
-  code it will be the exit code of the command as well.
-
-**For `bazel query`:**
-
-- `3` - Partial success, but the query encountered 1 or more errors in the
-  input BUILD file set and therefore the results of the operation are not 100%
-  reliable. This is likely due to a `--keep_going` option on the command line.
-- `7` - Command failure.
-
-Future Bazel versions may add additional exit codes, replacing generic failure
-exit code `1` with a different non-zero value with a particular meaning.
-However, all non-zero exit values will always constitute an error.
-
-### Reading the .bazelrc file
-
-By default, Bazel reads the [`.bazelrc` file](/run/bazelrc) from the base
-workspace directory or the user's home directory. Whether or not this is
-desirable is a choice for your script; if your script needs to be perfectly
-hermetic (such as when doing release builds), you should disable reading the
-.bazelrc file by using the option `--bazelrc=/dev/null`. If you want to perform
-a build using the user's preferred settings, the default behavior is better.
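-
-Putting these pieces together, here is a minimal sketch of a scripted
-invocation (the output base path and the target label are placeholders, not
-part of any real project):
-
-```shell
-#!/bin/bash
-# Startup options go before the command: skip all bazelrc files for
-# hermeticity, use a private output base to avoid contending for the
-# user's lock, and let the idle server shut down quickly.
-bazel --bazelrc=/dev/null --output_base=/tmp/my_script_output_base \
-    --max_idle_secs=5 test //some/package:some_target
-exit_code=$?
-
-case "$exit_code" in
-  0) echo "Build and tests succeeded" ;;
-  3) echo "Build OK, but some tests failed or timed out" ;;
-  *) echo "Failed with exit code $exit_code" >&2; exit "$exit_code" ;;
-esac
-```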
-
-### Command log
-
-The Bazel output is also available in a command log file which you can find with
-the following command:
-
-```posix-terminal
-bazel info command_log
-```
-
-The command log file contains the interleaved stdout and stderr streams of the
-most recent Bazel command. Note that running `bazel info` will overwrite the
-contents of this file, since it then becomes the most recent Bazel command.
-However, the location of the command log file will not change unless you change
-the setting of the `--output_base` or `--output_user_root` options.
-
-### Parsing output
-
-The Bazel output is quite easy to parse for many purposes. Two options that may
-be helpful for your script are `--noshow_progress`, which suppresses progress
-messages, and `--show_result n`, which controls whether or
-not "build up-to-date" messages are printed; these messages may be parsed to
-discover which targets were successfully built, and the location of the output
-files they created. Be sure to specify a very large value of _n_ if you rely on
-these messages.
-
-## Troubleshooting performance by profiling
-
-See the [Performance Profiling](/rules/performance#performance-profiling) section.
diff --git a/8.0.1/start/android-app.mdx b/8.0.1/start/android-app.mdx
deleted file mode 100644
index b0e6f1b..0000000
--- a/8.0.1/start/android-app.mdx
+++ /dev/null
@@ -1,391 +0,0 @@
----
-title: 'Bazel Tutorial: Build an Android App'
----
-
-**Note:** There are known limitations on using Bazel for building Android apps.
-Visit the GitHub [team-Android hotlist](https://github.com/bazelbuild/bazel/issues?q=is%3Aissue+is%3Aopen+label%3Ateam-Android) to see the list of known issues. While the Bazel team and Open Source Software (OSS) contributors work actively to address known issues, users should be aware that Android Studio does not officially support Bazel projects.
-
-This tutorial covers how to build a simple Android app using Bazel.
-
-Bazel supports building Android apps using the
-[Android rules](/reference/be/android).
-
-This tutorial is intended for Windows, macOS and Linux users and does not
-require experience with Bazel or Android app development. You do not need to
-write any Android code in this tutorial.
-
-## What you'll learn
-
-In this tutorial you learn how to:
-
-* Set up your environment by installing Bazel and Android Studio, and
-  downloading the sample project.
-* Set up a Bazel workspace that contains the source code
-  for the app and a `MODULE.bazel` file that identifies the top level of the
-  workspace directory.
-* Update the `MODULE.bazel` file to contain references to the required
-  external dependencies, like the Android SDK.
-* Create a `BUILD` file.
-* Build the app with Bazel.
-* Deploy and run the app on an Android emulator or physical device.
-
-## Before you begin
-
-### Install Bazel
-
-Before you begin the tutorial, install the following software:
-
-* **Bazel.** To install, follow the [installation instructions](/install).
-* **Android Studio.** To install, follow the steps to [download Android
-  Studio](https://developer.android.com/sdk/index.html).
-  Execute the setup wizard to download the SDK and configure your environment.
-* (Optional) **Git.** Use `git` to download the Android app project.
-
-### Get the sample project
-
-For the sample project, use a basic Android app project in
-[Bazel's examples repository](https://github.com/bazelbuild/examples).
- -This app has a single button that prints a greeting when clicked: - -![Button greeting](/docs/images/android_tutorial_app.png "Tutorial app button greeting") - -**Figure 1.** Android app button greeting. - -Clone the repository with `git` (or [download the ZIP file -directly](https://github.com/bazelbuild/examples/archive/master.zip)): - -```posix-terminal -git clone https://github.com/bazelbuild/examples -``` - -The sample project for this tutorial is in `examples/android/tutorial`. For -the rest of the tutorial, you will be executing commands in this directory. - -### Review the source files - -Take a look at the source files for the app. - -``` -. -├── README.md -└── src - └── main - ├── AndroidManifest.xml - └── java - └── com - └── example - └── bazel - ├── AndroidManifest.xml - ├── Greeter.java - ├── MainActivity.java - └── res - ├── layout - │ └── activity_main.xml - └── values - ├── colors.xml - └── strings.xml -``` - -The key files and directories are: - -| Name | Location | -| ----------------------- | ---------------------------------------------------------------------------------------- | -| Android manifest files | `src/main/AndroidManifest.xml` and `src/main/java/com/example/bazel/AndroidManifest.xml` | -| Android source files | `src/main/java/com/example/bazel/MainActivity.java` and `Greeter.java` | -| Resource file directory | `src/main/java/com/example/bazel/res/` | - - -## Build with Bazel - -### Set up the workspace - -A [workspace](/concepts/build-ref#workspace) is a directory that contains the -source files for one or more software projects, and has a `MODULE.bazel` file at -its root. - -The `MODULE.bazel` file may be empty or may contain references to [external -dependencies](/external/overview) required to build your project. - -First, run the following command to create an empty `MODULE.bazel` file: - -| OS | Command | -| ------------------------ | ----------------------------------- | -| Linux, macOS | `touch MODULE.bazel` | -| Windows (Command Prompt) | `type nul > MODULE.bazel` | -| Windows (PowerShell) | `New-Item MODULE.bazel -ItemType file` | - -### Running Bazel - -You can now check if Bazel is running correctly with the command: - -```posix-terminal -bazel info workspace -``` - -If Bazel prints the path of the current directory, you're good to go! If the -`MODULE.bazel` file does not exist, you may see an error message like: - -``` -ERROR: The 'info' command is only supported from within a workspace. -``` - -### Integrate with the Android SDK - -Bazel needs to run the Android SDK -[build tools](https://developer.android.com/tools/revisions/build-tools.html) -to build the app. This means that you need to add some information to your -`MODULE.bazel` file so that Bazel knows where to find them. - -Add the following line to your `MODULE.bazel` file: - -```python -bazel_dep(name = "rules_android", version = "0.5.1") -``` - -This will use the Android SDK at the path referenced by the `ANDROID_HOME` -environment variable, and automatically detect the highest API level and the -latest version of build tools installed within that location. - -You can set the `ANDROID_HOME` variable to the location of the Android SDK. Find -the path to the installed SDK using Android Studio's [SDK -Manager](https://developer.android.com/studio/intro/update#sdk-manager). 
-Assuming the SDK is installed to default locations, you can use the following -commands to set the `ANDROID_HOME` variable: - -| OS | Command | -| ------------------------ | --------------------------------------------------- | -| Linux | `export ANDROID_HOME=$HOME/Android/Sdk/` | -| macOS | `export ANDROID_HOME=$HOME/Library/Android/sdk` | -| Windows (Command Prompt) | `set ANDROID_HOME=%LOCALAPPDATA%\Android\Sdk` | -| Windows (PowerShell) | `$env:ANDROID_HOME="$env:LOCALAPPDATA\Android\Sdk"` | - -The above commands set the variable only for the current shell session. To make -them permanent, run the following commands: - -| OS | Command | -| ------------------------ | --------------------------------------------------- | -| Linux | `echo "export ANDROID_HOME=$HOME/Android/Sdk/" >> ~/.bashrc` | -| macOS | `echo "export ANDROID_HOME=$HOME/Library/Android/Sdk/" >> ~/.bashrc` | -| Windows (Command Prompt) | `setx ANDROID_HOME "%LOCALAPPDATA%\Android\Sdk"` | -| Windows (PowerShell) | `[System.Environment]::SetEnvironmentVariable('ANDROID_HOME', "$env:LOCALAPPDATA\Android\Sdk", [System.EnvironmentVariableTarget]::User)` | - - -**Optional:** If you want to compile native code into your Android app, you -also need to download the [Android -NDK](https://developer.android.com/ndk/downloads/index.html) -and use `rules_android_ndk` by adding the following line to your `MODULE.bazel` file: - -```python -bazel_dep(name = "rules_android_ndk", version = "0.1.2") -``` - - -For more information, read [Using the Android Native Development Kit with -Bazel](/docs/android-ndk). - -It's not necessary to set the API levels to the same value for the SDK and NDK. -[This page](https://developer.android.com/ndk/guides/stable_apis.html) -contains a map from Android releases to NDK-supported API levels. - -### Create a BUILD file - -A [`BUILD` file](/concepts/build-files) describes the relationship -between a set of build outputs, like compiled Android resources from `aapt` or -class files from `javac`, and their dependencies. These dependencies may be -source files (Java, C++) in your workspace or other build outputs. `BUILD` files -are written in a language called **Starlark**. - -`BUILD` files are part of a concept in Bazel known as the *package hierarchy*. -The package hierarchy is a logical structure that overlays the directory -structure in your workspace. Each [package](/concepts/build-ref#packages) is a -directory (and its subdirectories) that contains a related set of source files -and a `BUILD` file. The package also includes any subdirectories, excluding -those that contain their own `BUILD` file. The *package name* is the path to the -`BUILD` file relative to the `MODULE.bazel` file. - -Note that Bazel's package hierarchy is conceptually different from the Java -package hierarchy of your Android App directory where the `BUILD` file is -located, although the directories may be organized identically. - -For the simple Android app in this tutorial, the source files in `src/main/` -comprise a single Bazel package. A more complex project may have many nested -packages. - -#### Add an android_library rule - -A `BUILD` file contains several different types of declarations for Bazel. The -most important type is the -[build rule](/concepts/build-files#types-of-build-rules), which tells -Bazel how to build an intermediate or final software output from a set of source -files or other dependencies. 
Bazel provides two build rules,
-[`android_library`](/reference/be/android#android_library) and
-[`android_binary`](/reference/be/android#android_binary), that you can use to
-build an Android app.
-
-For this tutorial, you'll first use the
-`android_library` rule to tell Bazel to build an [Android library
-module](http://developer.android.com/tools/projects/index.html#LibraryProjects)
-from the app source code and resource files. You'll then use the
-`android_binary` rule to tell Bazel how to build the Android application package.
-
-Create a new `BUILD` file in the `src/main/java/com/example/bazel` directory,
-and declare a new `android_library` target:
-
-`src/main/java/com/example/bazel/BUILD`:
-
-```python
-package(
-    default_visibility = ["//src:__subpackages__"],
-)
-
-android_library(
-    name = "greeter_activity",
-    srcs = [
-        "Greeter.java",
-        "MainActivity.java",
-    ],
-    manifest = "AndroidManifest.xml",
-    resource_files = glob(["res/**"]),
-)
-```
-
-The `android_library` build rule contains a set of attributes that specify the
-information that Bazel needs to build a library module from the source files.
-Note also that the name of the rule is `greeter_activity`. You'll reference the
-rule using this name as a dependency in the `android_binary` rule.
-
-#### Add an android_binary rule
-
-The [`android_binary`](/reference/be/android#android_binary) rule builds
-the Android application package (`.apk` file) for your app.
-
-Create a new `BUILD` file in the `src/main/` directory,
-and declare a new `android_binary` target:
-
-`src/main/BUILD`:
-
-```python
-android_binary(
-    name = "app",
-    manifest = "AndroidManifest.xml",
-    deps = ["//src/main/java/com/example/bazel:greeter_activity"],
-)
-```
-
-Here, the `deps` attribute references the output of the `greeter_activity` rule
-you added to the `BUILD` file above. This means that when Bazel builds the
-output of this rule it checks first to see if the output of the
-`greeter_activity` library rule has been built and is up-to-date. If not, Bazel
-builds it and then uses that output to build the application package file.
-
-Now, save and close the file.
-
-### Build the app
-
-Try building the app! Run the following command to build the
-`android_binary` target:
-
-```posix-terminal
-bazel build //src/main:app
-```
-
-The [`build`](/docs/user-manual#build) subcommand instructs Bazel to build the
-target that follows. The target is specified as the name of a build rule inside
-a `BUILD` file, along with the package path relative to your workspace
-directory. For this example, the target is `app` and the package path is
-`//src/main/`.
-
-Note that you can sometimes omit the package path or target name, depending on
-your current working directory at the command line and the name of the target.
-For more details about target labels and paths, see [Labels](/concepts/labels).
-
-Bazel will start to build the sample app. During the build process, its output
-will appear similar to the following:
-
-```bash
-INFO: Analysed target //src/main:app (0 packages loaded, 0 targets configured).
-INFO: Found 1 target...
-Target //src/main:app up-to-date:
-  bazel-bin/src/main/app_deploy.jar
-  bazel-bin/src/main/app_unsigned.apk
-  bazel-bin/src/main/app.apk
-```
-
-#### Locate the build outputs
-
-Bazel puts the outputs of both intermediate and final build operations in a set
-of per-user, per-workspace output directories.
These directories are symlinked -from the following locations at the top-level of the project directory, where -the `MODULE.bazel` file is: - -* `bazel-bin` stores binary executables and other runnable build outputs -* `bazel-genfiles` stores intermediary source files that are generated by - Bazel rules -* `bazel-out` stores other types of build outputs - -Bazel stores the Android `.apk` file generated using the `android_binary` rule -in the `bazel-bin/src/main` directory, where the subdirectory name `src/main` is -derived from the name of the Bazel package. - -At a command prompt, list the contents of this directory and find the `app.apk` -file: - -| OS | Command | -| ------------------------ | ------------------------ | -| Linux, macOS | `ls bazel-bin/src/main` | -| Windows (Command Prompt) | `dir bazel-bin\src\main` | -| Windows (PowerShell) | `ls bazel-bin\src\main` | - - -### Run the app - -You can now deploy the app to a connected Android device or emulator from the -command line using the [`bazel -mobile-install`](/docs/user-manual#mobile-install) command. This command uses -the Android Debug Bridge (`adb`) to communicate with the device. You must set up -your device to use `adb` following the instructions in [Android Debug -Bridge](http://developer.android.com/tools/help/adb.html) before deployment. You -can also choose to install the app on the Android emulator included in Android -Studio. Make sure the emulator is running before executing the command below. - -Enter the following: - -```posix-terminal -bazel mobile-install //src/main:app -``` - -Next, find and launch the "Bazel Tutorial App": - -![Bazel tutorial app](/docs/images/android_tutorial_before.png "Bazel tutorial app") - -**Figure 2.** Bazel tutorial app. - -**Congratulations! You have just installed your first Bazel-built Android app.** - -Note that the `mobile-install` subcommand also supports the -[`--incremental`](/docs/user-manual#mobile-install) flag that can be used to -deploy only those parts of the app that have changed since the last deployment. - -It also supports the `--start_app` flag to start the app immediately upon -installing it. - -## Further reading - -For more details, see these pages: - -* Open issues on [GitHub](https://github.com/bazelbuild/bazel/issues) -* More information on [mobile-install](/docs/mobile-install) -* Integrate external dependencies like AppCompat, Guava and JUnit from Maven - repositories using [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external) -* Run Robolectric tests with the [robolectric-bazel](https://github.com/robolectric/robolectric-bazel) - integration. -* Testing your app with [Android instrumentation tests](/docs/android-instrumentation-test) -* Integrating C and C++ code into your Android app with the [NDK](/docs/android-ndk) -* See more Bazel example projects of: - * [a Kotlin app](https://github.com/bazelbuild/rules_jvm_external/tree/master/examples/android_kotlin_app) - * [Robolectric testing](https://github.com/bazelbuild/rules_jvm_external/tree/master/examples/android_local_test) - * [Espresso testing](https://github.com/bazelbuild/rules_jvm_external/tree/master/examples/android_instrumentation_test) - -Happy building! diff --git a/8.0.1/start/cpp.mdx b/8.0.1/start/cpp.mdx deleted file mode 100644 index adb7c71..0000000 --- a/8.0.1/start/cpp.mdx +++ /dev/null @@ -1,411 +0,0 @@ ---- -title: 'Bazel Tutorial: Build a C++ Project' ---- - - - -## Introduction - -New to Bazel? You're in the right place. 
Follow this First Build tutorial for a
-simplified introduction to using Bazel. This tutorial defines key terms as they
-are used in Bazel's context and walks you through the basics of the Bazel
-workflow. Starting with the tools you need, you will build and run three
-projects with increasing complexity and learn how and why they get more complex.
-
-While Bazel is a [build system](https://bazel.build/basics/build-systems) that
-supports multi-language builds, this tutorial uses a C++ project as an example
-and provides the general guidelines and flow that apply to most languages.
-
-Estimated completion time: 30 minutes.
-
-### Prerequisites
-
-Start by [installing Bazel](https://bazel.build/install), if you haven't
-already. This tutorial uses Git for source control, so for best results [install
-Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) as well.
-
-Next, retrieve the sample project from Bazel's GitHub repository by running the
-following in your command-line tool of choice:
-
-```posix-terminal
-git clone https://github.com/bazelbuild/examples
-```
-
-The sample project for this tutorial is in the `examples/cpp-tutorial`
-directory.
-
-Take a look at how it's structured:
-
-```none
-examples
-└── cpp-tutorial
-    ├──stage1
-    │  ├── main
-    │  │   ├── BUILD
-    │  │   └── hello-world.cc
-    │  └── MODULE.bazel
-    ├──stage2
-    │  ├── main
-    │  │   ├── BUILD
-    │  │   ├── hello-world.cc
-    │  │   ├── hello-greet.cc
-    │  │   └── hello-greet.h
-    │  └── MODULE.bazel
-    └──stage3
-        ├── main
-        │   ├── BUILD
-        │   ├── hello-world.cc
-        │   ├── hello-greet.cc
-        │   └── hello-greet.h
-        ├── lib
-        │   ├── BUILD
-        │   ├── hello-time.cc
-        │   └── hello-time.h
-        └── MODULE.bazel
-```
-
-There are three sets of files, each set representing a stage in this tutorial.
-In the first stage, you will build a single
-[target](https://bazel.build/reference/glossary#target) residing in a single
-[package](https://bazel.build/reference/glossary#package). In the second stage,
-you will build both a binary and a library from a single package. In the third
-and final stage, you will build a project with multiple packages and build it
-with multiple targets.
-
-### Summary: Introduction
-
-By installing Bazel (and Git) and cloning the repository for this tutorial, you
-have laid the foundation for your first build with Bazel. Continue to the next
-section to define some terms and set up your
-[workspace](https://bazel.build/reference/glossary#workspace).
-
-## Getting started
-
-Before you can build a project, you need to set up its workspace. A workspace
-is a directory that holds your project's source files and Bazel's build outputs.
-It also contains these significant files:
-
-* The `MODULE.bazel` file, which identifies the directory and its contents as
-  a Bazel workspace and lives at the root of the project's directory
-  structure. It's also where you specify your external dependencies.
-* One or more [`BUILD`
-  files](https://bazel.build/reference/glossary#build-file), which tell Bazel
-  how to build different parts of the project. A directory within the
-  workspace that contains a `BUILD` file is a
-  [package](https://bazel.build/reference/glossary#package). (More on packages
-  later in this tutorial.)
-
-In future projects, to designate a directory as a Bazel workspace, create an
-empty file named `MODULE.bazel` in that directory. For the purposes of this
-tutorial, a `MODULE.bazel` file is already present in each stage.
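-
-An empty `MODULE.bazel` is all this tutorial needs, but it helps to see what a
-non-empty one might look like. Here is a sketch (the module name and the
-dependency version are illustrative, not taken from the tutorial project):
-
-```python
-module(
-    name = "my_project",
-    version = "0.1.0",
-)
-
-# Declare an external dependency from the Bazel Central Registry.
-bazel_dep(name = "rules_cc", version = "0.0.9")
-```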
-
-### Understand the BUILD file
-
-A `BUILD` file contains several different types of instructions for Bazel. Each
-`BUILD` file requires at least one
-[rule](https://bazel.build/reference/glossary#rule) as a set of instructions,
-which tells Bazel how to build the outputs you want, such as executable binaries
-or libraries. Each instance of a build rule in the `BUILD` file is called a
-[target](https://bazel.build/reference/glossary#target) and points to a specific
-set of source files and
-[dependencies](https://bazel.build/reference/glossary#dependency). A target can
-also point to other targets.
-
-Take a look at the `BUILD` file in the `cpp-tutorial/stage1/main` directory:
-
-```bazel
-cc_binary(
-    name = "hello-world",
-    srcs = ["hello-world.cc"],
-)
-```
-
-In our example, the `hello-world` target instantiates Bazel's built-in
-[`cc_binary` rule](https://bazel.build/reference/be/c-cpp#cc_binary). The rule
-tells Bazel to build a self-contained executable binary from the
-`hello-world.cc` source file with no dependencies.
-
-### Summary: getting started
-
-Now you are familiar with some key terms, and what they mean in the context of
-this project and Bazel in general. In the next section, you will build and test
-Stage 1 of the project.
-
-## Stage 1: single target, single package
-
-It's time to build the first part of the project. For a visual reference, the
-structure of the Stage 1 section of the project is:
-
-```none
-examples
-└── cpp-tutorial
-    └──stage1
-        ├── main
-        │   ├── BUILD
-        │   └── hello-world.cc
-        └── MODULE.bazel
-```
-
-Run the following to move to the `cpp-tutorial/stage1` directory:
-
-```posix-terminal
-cd cpp-tutorial/stage1
-```
-
-Next, run:
-
-```posix-terminal
-bazel build //main:hello-world
-```
-
-In the target label, the `//main:` part is the location of the `BUILD` file
-relative to the root of the workspace, and `hello-world` is the target name in
-the `BUILD` file.
-
-Bazel produces something that looks like this:
-
-```none
-INFO: Found 1 target...
-Target //main:hello-world up-to-date:
-  bazel-bin/main/hello-world
-INFO: Elapsed time: 2.267s, Critical Path: 0.25s
-```
-
-You just built your first Bazel target. Bazel places build outputs in the
-`bazel-bin` directory at the root of the workspace.
-
-Now test your freshly built binary by running:
-
-```posix-terminal
-bazel-bin/main/hello-world
-```
-
-This results in a printed "`Hello world`" message.
-
-Here's the dependency graph of Stage 1:
-
-![Dependency graph for hello-world displays a single target with a single source
-file.](/docs/images/cpp-tutorial-stage1.png "Dependency graph for hello-world
-displays a single target with a single source file.")
-
-### Summary: stage 1
-
-Now that you have completed your first build, you have a basic idea of how a
-build is structured. In the next stage, you will add complexity by adding
-another target.
-
-## Stage 2: multiple build targets
-
-While a single target is sufficient for small projects, you may want to split
-larger projects into multiple targets and packages. This allows for fast
-incremental builds – that is, Bazel only rebuilds what's changed – and speeds up
-your builds by building multiple parts of a project at once. This stage of the
-tutorial adds a target, and the next adds a package.
-
-
-This is the directory you are working with for Stage 2:
-
-```none
-    ├──stage2
-    │  ├── main
-    │  │   ├── BUILD
-    │  │   ├── hello-world.cc
-    │  │   ├── hello-greet.cc
-    │  │   └── hello-greet.h
-    │  └── MODULE.bazel
-```
-
-Take a look at the `BUILD` file in the `cpp-tutorial/stage2/main` directory:
-
-```bazel
-cc_library(
-    name = "hello-greet",
-    srcs = ["hello-greet.cc"],
-    hdrs = ["hello-greet.h"],
-)
-
-cc_binary(
-    name = "hello-world",
-    srcs = ["hello-world.cc"],
-    deps = [
-        ":hello-greet",
-    ],
-)
-```
-
-With this `BUILD` file, Bazel first builds the `hello-greet` library (using
-Bazel's built-in [`cc_library`
-rule](https://bazel.build/reference/be/c-cpp#cc_library)), then the
-`hello-world` binary. The `deps` attribute in the `hello-world` target tells
-Bazel that the `hello-greet` library is required to build the `hello-world`
-binary.
-
-Before you can build this new version of the project, you need to change
-directories, switching to the `cpp-tutorial/stage2` directory by running:
-
-```posix-terminal
-cd ../stage2
-```
-
-Now you can build the new binary using the following familiar command:
-
-```posix-terminal
-bazel build //main:hello-world
-```
-
-Once again, Bazel produces something that looks like this:
-
-```none
-INFO: Found 1 target...
-Target //main:hello-world up-to-date:
-  bazel-bin/main/hello-world
-INFO: Elapsed time: 2.399s, Critical Path: 0.30s
-```
-
-Now you can test your freshly built binary, which returns another "`Hello
-world`":
-
-```posix-terminal
-bazel-bin/main/hello-world
-```
-
-If you now modify `hello-greet.cc` and rebuild the project, Bazel only
-recompiles that file.
-
-Looking at the dependency graph, you can see that `hello-world` depends on an
-extra input named `hello-greet`:
-
-![Dependency graph for `hello-world` displays dependency changes after
-modification to the file.](/docs/images/cpp-tutorial-stage2.png "Dependency
-graph for `hello-world` displays dependency changes after modification to the
-file.")
-
-### Summary: stage 2
-
-You've now built the project with two targets. The `hello-world` target builds
-one source file and depends on one other target (`//main:hello-greet`), which
-builds two additional source files. In the next section, take it a step further
-and add another package.
-
-## Stage 3: multiple packages
-
-This next stage adds another layer of complexity and builds a project with
-multiple packages. Take a look at the structure and contents of the
-`cpp-tutorial/stage3` directory:
-
-```none
-└──stage3
-   ├── main
-   │   ├── BUILD
-   │   ├── hello-world.cc
-   │   ├── hello-greet.cc
-   │   └── hello-greet.h
-   ├── lib
-   │   ├── BUILD
-   │   ├── hello-time.cc
-   │   └── hello-time.h
-   └── MODULE.bazel
-```
-
-You can see that now there are two sub-directories, and each contains a `BUILD`
-file. Therefore, to Bazel, the workspace now contains two packages: `lib` and
-`main`.
-
-Take a look at the `lib/BUILD` file:
-
-```bazel
-cc_library(
-    name = "hello-time",
-    srcs = ["hello-time.cc"],
-    hdrs = ["hello-time.h"],
-    visibility = ["//main:__pkg__"],
-)
-```
-
-And at the `main/BUILD` file:
-
-```bazel
-cc_library(
-    name = "hello-greet",
-    srcs = ["hello-greet.cc"],
-    hdrs = ["hello-greet.h"],
-)
-
-cc_binary(
-    name = "hello-world",
-    srcs = ["hello-world.cc"],
-    deps = [
-        ":hello-greet",
-        "//lib:hello-time",
-    ],
-)
-```
-
-The `hello-world` target in the main package depends on the `hello-time` target
-in the `lib` package (hence the target label `//lib:hello-time`) - Bazel knows
-this through the `deps` attribute.
You can see this reflected in the dependency -graph: - -![Dependency graph for `hello-world` displays how the target in the main package -depends on the target in the `lib` -package.](/docs/images/cpp-tutorial-stage3.png "Dependency graph for -`hello-world` displays how the target in the main package depends on the target -in the `lib` package.") - -For the build to succeed, you make the `//lib:hello-time` target in `lib/BUILD` -explicitly visible to targets in `main/BUILD` using the visibility attribute. -This is because by default targets are only visible to other targets in the same -`BUILD` file. Bazel uses target visibility to prevent issues such as libraries -containing implementation details leaking into public APIs. - -Now build this final version of the project. Switch to the `cpp-tutorial/stage3` -directory by running: - -```posix-terminal -cd ../stage3 -``` - -Once again, run the following command: - -```posix-terminal -bazel build //main:hello-world -``` - -Bazel produces something that looks like this: - -```none -INFO: Found 1 target... -Target //main:hello-world up-to-date: - bazel-bin/main/hello-world -INFO: Elapsed time: 0.167s, Critical Path: 0.00s -``` - -Now test the last binary of this tutorial for a final `Hello world` message: - -```posix-terminal -bazel-bin/main/hello-world -``` - -### Summary: stage 3 - -You've now built the project as two packages with three targets and understand -the dependencies between them, which equips you to go forth and build future -projects with Bazel. In the next section, take a look at how to continue your -Bazel journey. - -## Next steps - -You've now completed your first basic build with Bazel, but this is just the -start. Here are some more resources to continue learning with Bazel: - -* To keep focusing on C++, read about common [C++ build use - cases](https://bazel.build/tutorials/cpp-use-cases). -* To get started with building other applications with Bazel, see the - tutorials for [Java](https://bazel.build/start/java), [Android - application](https://bazel.build/start/android-app), or [iOS - application](https://bazel.build/start/ios-app). -* To learn more about working with local and remote repositories, read about - [external dependencies](https://bazel.build/docs/external). -* To learn more about Bazel's other rules, see this [reference - guide](https://bazel.build/rules). - -Happy building! diff --git a/8.0.1/start/ios-app.mdx b/8.0.1/start/ios-app.mdx deleted file mode 100644 index 0b860ab..0000000 --- a/8.0.1/start/ios-app.mdx +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: 'Bazel Tutorial: Build an iOS App' ---- - - -This tutorial has been moved into the [bazelbuild/rules_apple](https://github.com/bazelbuild/rules_apple/blob/master/doc/tutorials/ios-app.md) repository. diff --git a/8.0.1/start/java.mdx b/8.0.1/start/java.mdx deleted file mode 100644 index b892917..0000000 --- a/8.0.1/start/java.mdx +++ /dev/null @@ -1,436 +0,0 @@ ---- -title: 'Bazel Tutorial: Build a Java Project' ---- - - - -This tutorial covers the basics of building Java applications with -Bazel. You will set up your workspace and build a simple Java project that -illustrates key Bazel concepts, such as targets and `BUILD` files. - -Estimated completion time: 30 minutes. 
-
-
-## What you'll learn
-
-In this tutorial you learn how to:
-
-* Build a target
-* Visualize the project's dependencies
-* Split the project into multiple targets and packages
-* Control target visibility across packages
-* Reference targets through labels
-* Deploy a target
-
-## Before you begin
-
-### Install Bazel
-
-To prepare for the tutorial, first [Install Bazel](/install) if
-you don't have it installed already.
-
-### Install the JDK
-
-1. Install the Java JDK (version 11 is preferred, but versions 8 through 15 are supported).
-
-2. Set the JAVA\_HOME environment variable to point to the JDK.
-   * On Linux/macOS:
-
-        export JAVA_HOME="$(dirname $(dirname $(realpath $(which javac))))"
-   * On Windows:
-     1. Open Control Panel.
-     2. Go to "System and Security" > "System" > "Advanced System Settings" > "Advanced" tab > "Environment Variables...".
-     3. Under the "User variables" list (the one on the top), click "New...".
-     4. In the "Variable name" field, enter `JAVA_HOME`.
-     5. Click "Browse Directory...".
-     6. Navigate to the JDK directory (for example `C:\Program Files\Java\jdk1.8.0_152`).
-     7. Click "OK" on all dialog windows.
-
-### Get the sample project
-
-Retrieve the sample project from Bazel's GitHub repository:
-
-```posix-terminal
-git clone https://github.com/bazelbuild/examples
-```
-
-The sample project for this tutorial is in the `examples/java-tutorial`
-directory and is structured as follows:
-
-```
-java-tutorial
-├── BUILD
-├── src
-│   └── main
-│       └── java
-│           └── com
-│               └── example
-│                   ├── cmdline
-│                   │   ├── BUILD
-│                   │   └── Runner.java
-│                   ├── Greeting.java
-│                   └── ProjectRunner.java
-└── MODULE.bazel
-```
-
-## Build with Bazel
-
-### Set up the workspace
-
-Before you can build a project, you need to set up its workspace. A workspace is
-a directory that holds your project's source files and Bazel's build outputs. It
-also contains files that Bazel recognizes as special:
-
-* The `MODULE.bazel` file, which identifies the directory and its contents as a
-  Bazel workspace and lives at the root of the project's directory structure.
-
-* One or more `BUILD` files, which tell Bazel how to build different parts of
-  the project. (A directory within the workspace that contains a `BUILD` file
-  is a *package*. You will learn about packages later in this tutorial.)
-
-To designate a directory as a Bazel workspace, create an empty file named
-`MODULE.bazel` in that directory.
-
-When Bazel builds the project, all inputs and dependencies must be in the same
-workspace. Files residing in different workspaces are independent of one
-another unless linked, which is beyond the scope of this tutorial.
-
-### Understand the BUILD file
-
-A `BUILD` file contains several different types of instructions for Bazel.
-The most important type is the *build rule*, which tells Bazel how to build the
-desired outputs, such as executable binaries or libraries. Each instance
-of a build rule in the `BUILD` file is called a *target* and points to a
-specific set of source files and dependencies. A target can also point to other
-targets.
-
-Take a look at the `java-tutorial/BUILD` file:
-
-```python
-java_binary(
-    name = "ProjectRunner",
-    srcs = glob(["src/main/java/com/example/*.java"]),
-)
-```
-
-In our example, the `ProjectRunner` target instantiates Bazel's built-in
-[`java_binary` rule](/reference/be/java#java_binary). The rule tells Bazel to
-build a `.jar` file and a wrapper shell script (both named after the target).
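-
-For the `ProjectRunner` target above, that means two outputs appear under
-`bazel-bin` once you run the build described in the next section (a sketch of
-checking for them afterwards):
-
-```posix-terminal
-ls bazel-bin/ProjectRunner bazel-bin/ProjectRunner.jar
-```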
-
-
-The attributes in the target explicitly state its dependencies and options.
-While the `name` attribute is mandatory, many are optional. For example, in the
-`ProjectRunner` rule target, `name` is the name of the target, `srcs` specifies
-the source files that Bazel uses to build the target, and `main_class` specifies
-the class that contains the main method. (You may have noticed that our example
-uses [glob](/reference/be/functions#glob) to pass a set of source files to Bazel
-instead of listing them one by one.)
-
-### Build the project
-
-To build your sample project, navigate to the `java-tutorial` directory
-and run:
-
-```posix-terminal
-bazel build //:ProjectRunner
-```
-In the target label, the `//` part is the location of the `BUILD` file
-relative to the root of the workspace (in this case, the root itself),
-and `ProjectRunner` is the target name in the `BUILD` file. (You will
-learn about target labels in more detail at the end of this tutorial.)
-
-Bazel produces output similar to the following:
-
-```bash
-   INFO: Found 1 target...
-   Target //:ProjectRunner up-to-date:
-      bazel-bin/ProjectRunner.jar
-      bazel-bin/ProjectRunner
-   INFO: Elapsed time: 1.021s, Critical Path: 0.83s
-```
-
-Congratulations, you just built your first Bazel target! Bazel places build
-outputs in the `bazel-bin` directory at the root of the workspace. Browse
-through its contents to get an idea of Bazel's output structure.
-
-Now test your freshly built binary:
-
-```posix-terminal
-bazel-bin/ProjectRunner
-```
-
-### Review the dependency graph
-
-Bazel requires build dependencies to be explicitly declared in `BUILD` files.
-Bazel uses those statements to create the project's dependency graph, which
-enables accurate incremental builds.
-
-To visualize the sample project's dependencies, you can generate a text
-representation of the dependency graph by running this command at the
-workspace root:
-
-```posix-terminal
-bazel query --notool_deps --noimplicit_deps "deps(//:ProjectRunner)" --output graph
-```
-
-The above command tells Bazel to look for all dependencies for the target
-`//:ProjectRunner` (excluding host and implicit dependencies) and format the
-output as a graph.
-
-Then, paste the text into [GraphViz](http://www.webgraphviz.com/).
-
-As you can see, the project has a single target that builds two source files
-with no additional dependencies:
-
-![Dependency graph of the target 'ProjectRunner'](/docs/images/tutorial_java_01.svg)
-
-Now that you have set up your workspace, built your project, and examined its
-dependencies, you can add some complexity.
-
-## Refine your Bazel build
-
-While a single target is sufficient for small projects, you may want to split
-larger projects into multiple targets and packages to allow for fast incremental
-builds (that is, only rebuild what's changed) and to speed up your builds by
-building multiple parts of a project at once.
-
-### Specify multiple build targets
-
-You can split the sample project build into two targets. Replace the contents of
-the `java-tutorial/BUILD` file with the following:
-
-```python
-java_binary(
-    name = "ProjectRunner",
-    srcs = ["src/main/java/com/example/ProjectRunner.java"],
-    main_class = "com.example.ProjectRunner",
-    deps = [":greeter"],
-)
-
-java_library(
-    name = "greeter",
-    srcs = ["src/main/java/com/example/Greeting.java"],
-)
-```
-
-With this configuration, Bazel first builds the `greeter` library, then the
-`ProjectRunner` binary.
The `deps` attribute in `java_binary` tells Bazel that
-the `greeter` library is required to build the `ProjectRunner` binary.
-
-To build this new version of the project, run the following command:
-
-```posix-terminal
-bazel build //:ProjectRunner
-```
-
-Bazel produces output similar to the following:
-
-```
-INFO: Found 1 target...
-Target //:ProjectRunner up-to-date:
-  bazel-bin/ProjectRunner.jar
-  bazel-bin/ProjectRunner
-INFO: Elapsed time: 2.454s, Critical Path: 1.58s
-```
-
-Now test your freshly built binary:
-
-```posix-terminal
-bazel-bin/ProjectRunner
-```
-
-If you now modify `ProjectRunner.java` and rebuild the project, Bazel only
-recompiles that file.
-
-Looking at the dependency graph, you can see that `ProjectRunner` depends on the
-same inputs as it did before, but the structure of the build is different:
-
-![Dependency graph of the target 'ProjectRunner' after adding a dependency](/docs/images/tutorial_java_02.svg)
-
-You've now built the project with two targets. The `ProjectRunner` target builds
-one source file and depends on one other target (`:greeter`), which builds
-one additional source file.
-
-### Use multiple packages
-
-Let’s now split the project into multiple packages. If you take a look at the
-`src/main/java/com/example/cmdline` directory, you can see that it also contains
-a `BUILD` file, plus some source files. Therefore, to Bazel, the workspace now
-contains two packages, `//src/main/java/com/example/cmdline` and `//` (since
-there is a `BUILD` file at the root of the workspace).
-
-Take a look at the `src/main/java/com/example/cmdline/BUILD` file:
-
-```python
-java_binary(
-    name = "runner",
-    srcs = ["Runner.java"],
-    main_class = "com.example.cmdline.Runner",
-    deps = ["//:greeter"],
-)
-```
-
-The `runner` target depends on the `greeter` target in the `//` package (hence
-the target label `//:greeter`) - Bazel knows this through the `deps` attribute.
-Take a look at the dependency graph:
-
-![Dependency graph of the target 'runner'](/docs/images/tutorial_java_03.svg)
-
-However, for the build to succeed, you must explicitly give the `runner` target
-in `//src/main/java/com/example/cmdline/BUILD` visibility to targets in
-`//BUILD` using the `visibility` attribute. This is because by default targets
-are only visible to other targets in the same `BUILD` file. (Bazel uses target
-visibility to prevent issues such as libraries containing implementation details
-leaking into public APIs.)
-
-To do this, add the `visibility` attribute to the `greeter` target in
-`java-tutorial/BUILD` as shown below:
-
-```python
-java_library(
-    name = "greeter",
-    srcs = ["src/main/java/com/example/Greeting.java"],
-    visibility = ["//src/main/java/com/example/cmdline:__pkg__"],
-)
-```
-
-Now you can build the new package by running the following command at the root
-of the workspace:
-
-```posix-terminal
-bazel build //src/main/java/com/example/cmdline:runner
-```
-
-Bazel produces output similar to the following:
-
-```
-INFO: Found 1 target...
-Target //src/main/java/com/example/cmdline:runner up-to-date:
-  bazel-bin/src/main/java/com/example/cmdline/runner.jar
-  bazel-bin/src/main/java/com/example/cmdline/runner
-INFO: Elapsed time: 1.576s, Critical Path: 0.81s
-```
-
-Now test your freshly built binary:
-
-```posix-terminal
-./bazel-bin/src/main/java/com/example/cmdline/runner
-```
-
-You've now modified the project to build as two packages, each containing one
-target, and understand the dependencies between them.
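-
-To double-check the cross-package dependency, you can reuse the query from
-earlier, pointed at the new target (a sketch; run it at the workspace root):
-
-```posix-terminal
-bazel query --notool_deps --noimplicit_deps "deps(//src/main/java/com/example/cmdline:runner)" --output graph
-```
-
-The resulting graph should show `runner` depending on `//:greeter`, matching
-the figure above.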
- - -## Use labels to reference targets - -In `BUILD` files and at the command line, Bazel uses target labels to reference -targets - for example, `//:ProjectRunner` or -`//src/main/java/com/example/cmdline:runner`. Their syntax is as follows: - -``` -//path/to/package:target-name -``` - -If the target is a rule target, then `path/to/package` is the path to the -directory containing the `BUILD` file, and `target-name` is what you named the -target in the `BUILD` file (the `name` attribute). If the target is a file -target, then `path/to/package` is the path to the root of the package, and -`target-name` is the name of the target file, including its full path. - -When referencing targets at the repository root, the package path is empty, -just use `//:target-name`. When referencing targets within the same `BUILD` -file, you can even skip the `//` workspace root identifier and just use -`:target-name`. - -For example, for targets in the `java-tutorial/BUILD` file, you did not have to -specify a package path, since the workspace root is itself a package (`//`), and -your two target labels were simply `//:ProjectRunner` and `//:greeter`. - -However, for targets in the `//src/main/java/com/example/cmdline/BUILD` file you -had to specify the full package path of `//src/main/java/com/example/cmdline` -and your target label was `//src/main/java/com/example/cmdline:runner`. - -## Package a Java target for deployment - -Let’s now package a Java target for deployment by building the binary with all -of its runtime dependencies. This lets you run the binary outside of your -development environment. - -As you remember, the [java_binary](/reference/be/java#java_binary) build rule -produces a `.jar` and a wrapper shell script. Take a look at the contents of -`runner.jar` using this command: - -```posix-terminal -jar tf bazel-bin/src/main/java/com/example/cmdline/runner.jar -``` - -The contents are: - -``` -META-INF/ -META-INF/MANIFEST.MF -com/ -com/example/ -com/example/cmdline/ -com/example/cmdline/Runner.class -``` -As you can see, `runner.jar` contains `Runner.class`, but not its dependency, -`Greeting.class`. The `runner` script that Bazel generates adds `greeter.jar` -to the classpath, so if you leave it like this, it will run locally, but it -won't run standalone on another machine. Fortunately, the `java_binary` rule -allows you to build a self-contained, deployable binary. To build it, append -`_deploy.jar` to the target name: - -```posix-terminal -bazel build //src/main/java/com/example/cmdline:runner_deploy.jar -``` - -Bazel produces output similar to the following: - -``` -INFO: Found 1 target... -Target //src/main/java/com/example/cmdline:runner_deploy.jar up-to-date: - bazel-bin/src/main/java/com/example/cmdline/runner_deploy.jar -INFO: Elapsed time: 1.700s, Critical Path: 0.23s -``` -You have just built `runner_deploy.jar`, which you can run standalone away from -your development environment since it contains the required runtime -dependencies. 
Take a look at the contents of this standalone JAR using the
-same command as before:
-
-```posix-terminal
-jar tf bazel-bin/src/main/java/com/example/cmdline/runner_deploy.jar
-```
-
-The contents include all of the necessary classes to run:
-
-```
-META-INF/
-META-INF/MANIFEST.MF
-build-data.properties
-com/
-com/example/
-com/example/cmdline/
-com/example/cmdline/Runner.class
-com/example/Greeting.class
-```
-
-## Further reading
-
-For more details, see:
-
-* [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external) for
-   rules to manage transitive Maven dependencies.
-
-* [External Dependencies](/docs/external) to learn more about working with
-   local and remote repositories.
-
-* The [other rules](/rules) to learn more about Bazel.
-
-* The [C++ build tutorial](/start/cpp) to get started with building
-   C++ projects with Bazel.
-
-* The [Android application tutorial](/start/android-app) and
-   [iOS application tutorial](/start/ios-app) to get started with
-   building mobile applications for Android and iOS with Bazel.
-
-Happy building! diff --git a/8.0.1/tutorials/cpp-dependency.mdx b/8.0.1/tutorials/cpp-dependency.mdx deleted file mode 100644 index 194cc73..0000000 --- a/8.0.1/tutorials/cpp-dependency.mdx +++ /dev/null @@ -1,50 +0,0 @@ ---
-title: 'Review the dependency graph'
----
-
-
-
-A successful build has all of its dependencies explicitly stated in the `BUILD`
-file. Bazel uses those statements to create the project's dependency graph,
-which enables accurate incremental builds.
-
-To visualize the sample project's dependencies, you can generate a text
-representation of the dependency graph by running this command at the
-workspace root:
-
-```
-bazel query --notool_deps --noimplicit_deps "deps(//main:hello-world)" \
-  --output graph
-```
-
-The above command tells Bazel to look for all dependencies for the target
-`//main:hello-world` (excluding host and implicit dependencies) and format the
-output as a graph.
-
-Then, paste the text into [GraphViz](http://www.webgraphviz.com/).
-
-On Ubuntu, you can view the graph locally by installing GraphViz and the xdot
-Dot Viewer:
-
-```
-sudo apt update && sudo apt install graphviz xdot
-```
-
-Then you can generate and view the graph by piping the text output above
-straight to xdot:
-
-```
-xdot <(bazel query --notool_deps --noimplicit_deps "deps(//main:hello-world)" \
-  --output graph)
-```
-
-As you can see, the first stage of the sample project has a single target
-that builds a single source file with no additional dependencies:
-
-![Dependency graph for 'hello-world'](/docs/images/cpp-tutorial-stage1.png "Dependency graph")
-
-**Figure 1.** Dependency graph for `hello-world` displays a single target with a single
-source file.
-
-After you set up your workspace, build your project, and examine its
-dependencies, you can add some complexity. diff --git a/8.0.1/tutorials/cpp-labels.mdx b/8.0.1/tutorials/cpp-labels.mdx deleted file mode 100644 index 78d0dbc..0000000 --- a/8.0.1/tutorials/cpp-labels.mdx +++ /dev/null @@ -1,27 +0,0 @@ ---
-title: 'Use labels to reference targets'
----
-
-
-
-In `BUILD` files and at the command line, Bazel uses *labels* to reference
-targets - for example, `//main:hello-world` or `//lib:hello-time`.
Their syntax is:
-
-```
-//path/to/package:target-name
-```
-
-If the target is a rule target, then `path/to/package` is the path from the
-workspace root (the directory containing the `MODULE.bazel` file) to the directory
-containing the `BUILD` file, and `target-name` is what you named the target
-in the `BUILD` file (the `name` attribute). If the target is a file target,
-then `path/to/package` is the path to the root of the package, and
-`target-name` is the name of the target file, including its full
-path relative to the root of the package (the directory containing the
-package's `BUILD` file).
-
-When referencing targets at the repository root, the package path is empty,
-just use `//:target-name`. When referencing targets within the same `BUILD`
-file, you can even skip the `//` workspace root identifier and just use
-`:target-name`. diff --git a/8.0.1/tutorials/cpp-use-cases.mdx b/8.0.1/tutorials/cpp-use-cases.mdx deleted file mode 100644 index 6695cce..0000000 --- a/8.0.1/tutorials/cpp-use-cases.mdx +++ /dev/null @@ -1,180 +0,0 @@ ---
-title: 'Common C++ Build Use Cases'
----
-
-
-
-Here you will find some of the most common use cases for building C++ projects
-with Bazel. If you have not done so already, get started with building C++
-projects with Bazel by completing the tutorial
-[Introduction to Bazel: Build a C++ Project](/start/cpp).
-
-For information on `cc_library` and `hdrs` header files, see
-[`cc_library`](/reference/be/c-cpp#cc_library).
-
-## Including multiple files in a target
-
-You can include multiple files in a single target with
-[glob](/reference/be/functions#glob).
-For example:
-
-```python
-cc_library(
-    name = "build-all-the-files",
-    srcs = glob(["*.cc"]),
-    hdrs = glob(["*.h"]),
-)
-```
-
-With this target, Bazel will build all the `.cc` and `.h` files it finds in the
-same directory as the `BUILD` file that contains this target (excluding
-subdirectories).
-
-## Using transitive includes
-
-If a file includes a header, then any rule with that file as a source (that is,
-having that file in the `srcs`, `hdrs`, or `textual_hdrs` attribute) should
-depend on the included header's library rule. Conversely, only direct
-dependencies need to be specified as dependencies. For example, suppose
-`sandwich.h` includes `bread.h` and `bread.h` includes `flour.h`. `sandwich.h`
-doesn't include `flour.h` (who wants flour in their sandwich?), so the `BUILD`
-file would look like this:
-
-```python
-cc_library(
-    name = "sandwich",
-    srcs = ["sandwich.cc"],
-    hdrs = ["sandwich.h"],
-    deps = [":bread"],
-)
-
-cc_library(
-    name = "bread",
-    srcs = ["bread.cc"],
-    hdrs = ["bread.h"],
-    deps = [":flour"],
-)
-
-cc_library(
-    name = "flour",
-    srcs = ["flour.cc"],
-    hdrs = ["flour.h"],
-)
-```
-
-Here, the `sandwich` library depends on the `bread` library, which depends
-on the `flour` library.
-
-## Adding include paths
-
-Sometimes you cannot (or do not want to) root include paths at the workspace
-root. Existing libraries might already have an include directory that doesn't
-match its path in your workspace. For example, suppose you have the following
-directory structure:
-
-```
-└── my-project
-    ├── legacy
-    │   └── some_lib
-    │       ├── BUILD
-    │       ├── include
-    │       │   └── some_lib.h
-    │       └── some_lib.cc
-    └── MODULE.bazel
-```
-
-Bazel will expect `some_lib.h` to be included as
-`legacy/some_lib/include/some_lib.h`, but suppose `some_lib.cc` includes
-`"some_lib.h"`.
To make that include path valid,
-`legacy/some_lib/BUILD` will need to specify that the `some_lib/include`
-directory is an include directory:
-
-```python
-cc_library(
-    name = "some_lib",
-    srcs = ["some_lib.cc"],
-    hdrs = ["include/some_lib.h"],
-    copts = ["-Ilegacy/some_lib/include"],
-)
-```
-
-This is especially useful for external dependencies, as their header files
-must otherwise be included with a `/` prefix.
-
-## Include external libraries
-
-Suppose you are using [Google Test](https://github.com/google/googletest).
-You can add a dependency on it in the `MODULE.bazel` file to
-download Google Test and make it available in your repository:
-
-```python
-bazel_dep(name = "googletest", version = "1.15.2")
-```
-
-## Writing and running C++ tests
-
-For example, you could create a test `./test/hello-test.cc`, such as:
-
-```cpp
-#include "gtest/gtest.h"
-#include "main/hello-greet.h"
-
-TEST(HelloTest, GetGreet) {
-  EXPECT_EQ(get_greet("Bazel"), "Hello Bazel");
-}
-```
-
-Then create a `./test/BUILD` file for your tests:
-
-```python
-cc_test(
-    name = "hello-test",
-    srcs = ["hello-test.cc"],
-    deps = [
-        "@googletest//:gtest_main",
-        "//main:hello-greet",
-    ],
-)
-```
-
-To make `hello-greet` visible to `hello-test`, you must add
-`"//test:__pkg__",` to the `visibility` attribute in `./main/BUILD`.
-
-Now you can use `bazel test` to run the test.
-
-```
-bazel test test:hello-test
-```
-
-This produces the following output:
-
-```
-INFO: Found 1 test target...
-Target //test:hello-test up-to-date:
-  bazel-bin/test/hello-test
-INFO: Elapsed time: 4.497s, Critical Path: 2.53s
-//test:hello-test PASSED in 0.3s
-
-Executed 1 out of 1 tests: 1 test passes.
-```
-
-
-## Adding dependencies on precompiled libraries
-
-If you want to use a library of which you only have a compiled version (for
-example, headers and a `.so` file), wrap it in a `cc_library` rule:
-
-```python
-cc_library(
-    name = "mylib",
-    srcs = ["mylib.so"],
-    hdrs = ["mylib.h"],
-)
-```
-
-This way, other C++ targets in your workspace can depend on this rule. diff --git a/8.0.1/versions/index.mdx b/8.0.1/versions/index.mdx deleted file mode 100644 index 4290e57..0000000 --- a/8.0.1/versions/index.mdx +++ /dev/null @@ -1,15 +0,0 @@ ---
-title: 'Documentation Versions'
----
-
-
-
-The default documentation on this website represents the latest version at HEAD.
-Each major and minor supported release will have a snapshot of the narrative and
-reference documentation that follows the lifecycle of Bazel's version support.
-
-To see documentation for stable Bazel versions, use the "Versioned docs"
-drop-down.
-
-To see documentation for older Bazel versions prior to Feb 2022, go to
-[docs.bazel.build](https://docs.bazel.build/). diff --git a/8.1.1/about/faq.mdx b/8.1.1/about/faq.mdx deleted file mode 100644 index dd5be8a..0000000 --- a/8.1.1/about/faq.mdx +++ /dev/null @@ -1,209 +0,0 @@ ---
-title: 'FAQ'
----
-
-
-
-If you have questions or need support, see [Getting Help](/help).
-
-## What is Bazel?
-
-Bazel is a tool that automates software builds and tests. Supported build tasks include running compilers and linkers to produce executable programs and libraries, and assembling deployable packages for Android, iOS and other target environments. Bazel is similar to other tools like Make, Ant, Gradle, Buck, Pants and Maven.
-
-## What is special about Bazel?
-
-Bazel was designed to fit the way software is developed at Google.
It has the following features:
-
-* Multi-language support: Bazel supports [many languages](/reference/be/overview), and can be extended to support arbitrary programming languages.
-* High-level build language: Projects are described in the `BUILD` language, a concise text format that describes a project as sets of small interconnected libraries, binaries and tests. In contrast, with tools like Make, you have to describe individual files and compiler invocations.
-* Multi-platform support: The same tool and the same `BUILD` files can be used to build software for different architectures, and even different platforms. At Google, we use Bazel to build everything from server applications running on systems in our data centers to client apps running on mobile phones.
-* Reproducibility: In `BUILD` files, each library, test and binary must specify its direct dependencies completely. Bazel uses this dependency information to know what must be rebuilt when you make changes to a source file, and which tasks can run in parallel. This means that all builds are incremental and will always produce the same result.
-* Scalable: Bazel can handle large builds; at Google, it is common for a server binary to have 100k source files, and builds where no files were changed take about 200ms.
-
-## Why doesn’t Google use...?
-
-* Make, Ninja: These tools give very exact control over what commands get invoked to build files, but it’s up to the user to write rules that are correct.
-  * Users interact with Bazel on a higher level. For example, Bazel has built-in rules for “Java test”, “C++ binary”, and notions such as “target platform” and “host platform”. These rules have been battle-tested to be foolproof.
-* Ant and Maven: Ant and Maven are primarily geared toward Java, while Bazel handles multiple languages. Bazel encourages subdividing codebases into smaller reusable units, and can rebuild only the ones that need rebuilding. This speeds up development when working with larger codebases.
-* Gradle: Bazel configuration files are much more structured than Gradle’s, letting Bazel understand exactly what each action does. This allows for more parallelism and better reproducibility.
-* Pants, Buck: Both tools were created and developed by ex-Googlers at Twitter and Foursquare, and at Facebook, respectively. They have been modeled after Bazel, but their feature sets are different, so they aren’t viable alternatives for us.
-
-## Where did Bazel come from?
-
-Bazel is a flavor of the tool that Google uses to build its server software internally. It has expanded to build other software as well, like mobile apps (iOS, Android) that connect to our servers.
-
-## Did you rewrite your internal tool as open-source? Is it a fork?
-
-Bazel shares most of its code with the internal tool and its rules are used for millions of builds every day.
-
-## Why did Google build Bazel?
-
-A long time ago, Google built its software using large, generated Makefiles. These led to slow and unreliable builds, which began to interfere with our developers’ productivity and the company’s agility. Bazel was a way to solve these problems.
-
-## Does Bazel require a build cluster?
-
-Bazel runs build operations locally by default. However, Bazel can also connect to a build cluster for even faster builds and tests. See our documentation on [remote execution and caching](/remote/rbe) and [remote caching](/remote/caching) for further details.
-
-## How does the Google development process work?
- -For our server code base, we use the following development workflow: - -* All our server code is in a single, gigantic version control system. -* Everybody builds their software with Bazel. -* Different teams own different parts of the source tree, and make their components available as `BUILD` targets. -* Branching is primarily used for managing releases, so everybody develops their software at the head revision. - -Bazel is a cornerstone of this philosophy: since Bazel requires all dependencies to be fully specified, we can predict which programs and tests are affected by a change, and vet them before submission. - -More background on the development process at Google can be found on the [eng tools blog](http://google-engtools.blogspot.com/). - -## Why did you open up Bazel? - -Building software should be fun and easy. Slow and unpredictable builds take the fun out of programming. - -## Why would I want to use Bazel? - -* Bazel may give you faster build times because it can recompile only the files that need to be recompiled. Similarly, it can skip re-running tests that it knows haven’t changed. -* Bazel produces deterministic results. This eliminates skew between incremental and clean builds, laptop and CI system, etc. -* Bazel can build different client and server apps with the same tool from the same workspace. For example, you can change a client/server protocol in a single commit, and test that the updated mobile app works with the updated server, building both with the same tool, reaping all the aforementioned benefits of Bazel. - -## Can I see examples? - -Yes; see a [simple example](https://github.com/bazelbuild/bazel/blob/master/examples/cpp/BUILD) -or read the [Bazel source code](https://github.com/bazelbuild/bazel/blob/master/src/BUILD) for a more complex example. - - -## What is Bazel best at? - -Bazel shines at building and testing projects with the following properties: - -* Projects with a large codebase -* Projects written in (multiple) compiled languages -* Projects that deploy on multiple platforms -* Projects that have extensive tests - -## Where can I run Bazel? - -Bazel runs on Linux, macOS (OS X), and Windows. - -Porting to other UNIX platforms should be relatively easy, as long as a JDK is available for the platform. - -## What should I not use Bazel for? - -* Bazel tries to be smart about caching. This means that it is not good for running build operations whose outputs should not be cached. For example, the following steps should not be run from Bazel: - * A compilation step that fetches data from the internet. - * A test step that connects to the QA instance of your site. - * A deployment step that changes your site’s cloud configuration. -* If your build consists of a few long, sequential steps, Bazel may not be able to help much. You’ll get more speed by breaking long steps into smaller, discrete targets that Bazel can run in parallel. - -## How stable is Bazel’s feature set? - -The core features (C++, Java, and shell rules) have extensive use inside Google, so they are thoroughly tested and have very little churn. Similarly, we test new versions of Bazel across hundreds of thousands of targets every day to find regressions, and we release new versions multiple times every month. - -In short, except for features marked as experimental, Bazel should Just Work. Changes to non-experimental rules will be backward compatible. A more detailed list of feature support statuses can be found in our [support document](/contribute/support). 
-
-
-## How stable is Bazel as a binary?
-
-Inside Google, we make sure that Bazel crashes are very rare. This should also hold for our open source codebase.
-
-## How can I start using Bazel?
-
-See [Getting Started](/start/).
-
-## Doesn’t Docker solve the reproducibility problems?
-
-With Docker you can easily create sandboxes with fixed OS releases, for example, Ubuntu 12.04, Fedora 21. This solves the problem of reproducibility for the system environment – that is, “which version of /usr/bin/c++ do I need?”
-
-Docker does not address reproducibility with regard to changes in the source code. Running Make with an imperfectly written Makefile inside a Docker container can still yield unpredictable results.
-
-Inside Google, we check tools into source control for reproducibility. In this way, we can vet changes to tools (“upgrade GCC to 4.6.1”) with the same mechanism as changes to base libraries (“fix bounds check in OpenSSL”).
-
-## Can I build binaries for deployment on Docker?
-
-With Bazel, you can build standalone, statically linked binaries in C/C++, and self-contained jar files for Java. These run with few dependencies on normal UNIX systems, and as such should be simple to install inside a Docker container.
-
-Bazel has conventions for structuring more complex programs, for example, a Java program that consumes a set of data files, or runs another program as subprocess. It is possible to package up such environments as standalone archives, so they can be deployed on different systems, including Docker images.
-
-## Can I build Docker images with Bazel?
-
-Yes, you can use our [Docker rules](https://github.com/bazelbuild/rules_docker) to build reproducible Docker images.
-
-## Will Bazel make my builds reproducible automatically?
-
-For Java and C++ binaries, yes, assuming you do not change the toolchain. If you have build steps that involve custom recipes (for example, executing binaries through a shell script inside a rule), you will need to take some extra care:
-
-* Do not use dependencies that were not declared. Sandboxed execution (`--spawn_strategy=sandboxed`, only on Linux) can help find undeclared dependencies.
-* Avoid storing timestamps and user-IDs in generated files. ZIP files and other archives are especially prone to this.
-* Avoid connecting to the network. Sandboxed execution can help here too.
-* Avoid processes that use random numbers; in particular, dictionary traversal is randomized in many programming languages.
-
-## Do you have binary releases?
-
-Yes, you can find the latest [release binaries](https://github.com/bazelbuild/bazel/releases/latest) and review our [release policy](/release/).
-
-## I use Eclipse/IntelliJ/Xcode. How does Bazel interoperate with IDEs?
-
-For IntelliJ, check out the [IntelliJ with Bazel plugin](https://ij.bazel.build/).
-
-For Xcode, check out [Tulsi](http://tulsi.bazel.build/).
-
-For Eclipse, check out the [E4B plugin](https://github.com/bazelbuild/e4b).
-
-For other IDEs, check out the [blog post](https://blog.bazel.build/2016/06/10/ide-support.html) on how these plugins work.
-
-## I use Jenkins/CircleCI/TravisCI. How does Bazel interoperate with CI systems?
-
-Bazel returns a non-zero exit code if the build or test invocation fails, and this should be enough for basic CI integration. Since Bazel does not need clean builds for correctness, the CI system should not be configured to clean before starting a build/test run.
-
-Further details on exit codes are in the [User Manual](/docs/user-manual).
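-
-As a sketch, a CI step can be as simple as the following; the non-zero exit
-code on failure is what fails the pipeline, and no `bazel clean` is needed
-beforehand:
-
-```posix-terminal
-bazel test //...
-```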
-
-
-## What future features can we expect in Bazel?
-
-See our [Roadmaps](/about/roadmap).
-
-## Can I use Bazel for my INSERT LANGUAGE HERE project?
-
-Bazel is extensible. Anyone can add support for new languages. Many languages are supported: see the [build encyclopedia](/reference/be/overview) for a list of recommendations and [awesomebazel.com](https://awesomebazel.com/) for a more comprehensive list.
-
-If you would like to develop extensions or learn how they work, see the documentation for [extending Bazel](/extending/concepts).
-
-## Can I contribute to the Bazel code base?
-
-See our [contribution guidelines](/contribute/).
-
-## Why isn’t all development done in the open?
-
-We still have to refactor the interfaces between the public code in Bazel and our internal extensions frequently. This makes it hard to do much development in the open.
-
-## Are you done open sourcing Bazel?
-
-Open sourcing Bazel is a work-in-progress. In particular, we’re still working on open sourcing:
-
-* Many of our unit and integration tests (which should make contributing patches easier).
-* Full IDE integration.
-
-Beyond code, we’d like to eventually have all code reviews, bug tracking, and design decisions happen publicly, with the Bazel community involved. We are not there yet, so some changes will simply appear in the Bazel repository without clear explanation. Despite this lack of transparency, we want to support external developers and collaborate. Thus, we are opening up the code, even though some of the development is still happening internal to Google. Please let us know if anything seems unclear or unjustified as we transition to an open model.
-
-## Are there parts of Bazel that will never be open sourced?
-
-Yes, some of the code base either integrates with Google-specific technology or is code we have been looking for an excuse to get rid of (or some combination of the two). These parts of the code base are not available on GitHub and probably never will be.
-
-## How do I contact the team?
-
-We are reachable at bazel-discuss@googlegroups.com.
-
-## Where do I report bugs?
-
-Open an issue [on GitHub](https://github.com/bazelbuild/bazel/issues).
-
-## What’s up with the word “Blaze” in the codebase?
-
-This is an internal name for the tool. Please refer to Blaze as Bazel.
-
-## Why do other Google projects (Android, Chrome) use other build tools?
-
-Until the first (Alpha) release, Bazel was not available externally, so open source projects such as Chromium and Android could not use it. In addition, the original lack of Windows support was a problem for building Windows applications, such as Chrome. Since the project has matured and become more stable, the [Android Open Source Project](https://source.android.com/) is in the process of migrating to Bazel.
-
-## How do you pronounce “Bazel”?
-
-The same way as “basil” (the herb) in US English: “BAY-zel”. It rhymes with “hazel”. IPA: /ˈbeɪzˌəl/ diff --git a/8.1.1/about/intro.mdx b/8.1.1/about/intro.mdx deleted file mode 100644 index a531ac2..0000000 --- a/8.1.1/about/intro.mdx +++ /dev/null @@ -1,111 +0,0 @@ ---
-title: 'Intro to Bazel'
----
-
-
-
-Bazel is an open-source build and test tool similar to Make, Maven, and Gradle.
-It uses a human-readable, high-level build language. Bazel supports projects in
-multiple languages and builds outputs for multiple platforms. Bazel supports
-large codebases across multiple repositories, and large numbers of users.
-
-
-## Benefits
-
-Bazel offers the following advantages:
-
-* **High-level build language.** Bazel uses an abstract, human-readable
-    language to describe the build properties of your project at a high
-    semantic level. Unlike other tools, Bazel operates on the *concepts*
-    of libraries, binaries, scripts, and data sets, shielding you from the
-    complexity of writing individual calls to tools such as compilers and
-    linkers.
-
-* **Bazel is fast and reliable.** Bazel caches all previously done work and
-    tracks changes to both file content and build commands. This way, Bazel
-    knows when something needs to be rebuilt, and rebuilds only that. To further
-    speed up your builds, you can set up your project to build in a highly
-    parallel and incremental fashion.
-
-* **Bazel is multi-platform.** Bazel runs on Linux, macOS, and Windows. Bazel
-    can build binaries and deployable packages for multiple platforms, including
-    desktop, server, and mobile, from the same project.
-
-* **Bazel scales.** Bazel maintains agility while handling builds with 100k+
-    source files. It works with multiple repositories and user bases in the tens
-    of thousands.
-
-* **Bazel is extensible.** Many [languages](/rules) are
-    supported, and you can extend Bazel to support any other language or
-    framework.
-
-## Using Bazel
-
-To build or test a project with Bazel, you typically do the following:
-
-1.  **Set up Bazel.** Download and [install Bazel](/install).
-
-2.  **Set up a project [workspace](/concepts/build-ref#workspaces)**, which is a
-    directory where Bazel looks for build inputs and `BUILD` files, and where it
-    stores build outputs.
-
-3.  **Write a `BUILD` file**, which tells Bazel what to build and how to
-    build it.
-
-    You write your `BUILD` file by declaring build targets using
-    [Starlark](/rules/language), a domain-specific language. (See example
-    [here](https://github.com/bazelbuild/bazel/blob/master/examples/cpp/BUILD).)
-
-    A build target specifies a set of input artifacts that Bazel will build plus
-    their dependencies, the build rule Bazel will use to build it, and options
-    that configure the build rule.
-
-    A build rule specifies the build tools Bazel will use, such as compilers and
-    linkers, and their configurations. Bazel ships with a number of build rules
-    covering the most common artifact types in the supported languages on
-    supported platforms.
-
-4.  **Run Bazel** from the [command line](/reference/command-line-reference). Bazel
-    places your outputs within the workspace.
-
-In addition to building, you can also use Bazel to run
-[tests](/reference/test-encyclopedia) and [query](/query/guide) the build
-to trace dependencies in your code.
-
-## Bazel build process
-
-When running a build or a test, Bazel does the following:
-
-1.  **Loads** the `BUILD` files relevant to the target.
-
-2.  **Analyzes** the inputs and their
-    [dependencies](/concepts/dependencies), applies the specified build
-    rules, and produces an [action](/extending/concepts#evaluation-model)
-    graph.
-
-3.  **Executes** the build actions on the inputs until the final build outputs
-    are produced.
-
-Since all previous build work is cached, Bazel can identify and reuse cached
-artifacts and only rebuild or retest what's changed. To further enforce
-correctness, you can set up Bazel to run builds and tests
-[hermetically](/basics/hermeticity) through sandboxing, minimizing skew
-and maximizing [reproducibility](/run/build#correct-incremental-rebuilds).
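-
-For example, here is a minimal sketch of requesting sandboxed execution
-explicitly with the `--spawn_strategy=sandboxed` flag (sandboxing is not
-available on every platform):
-
-```posix-terminal
-bazel test --spawn_strategy=sandboxed //...
-```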
-
-
-### Action graph
-
-The action graph represents the build artifacts, the relationships between them,
-and the build actions that Bazel will perform. Thanks to this graph, Bazel can
-[track](/run/build#build-consistency) changes to
-file content as well as changes to actions, such as build or test commands, and
-know what build work has previously been done. The graph also enables you to
-easily [trace dependencies](/query/guide) in your code.
-
-## Getting started tutorials
-
-To get started with Bazel, see [Getting Started](/start/) or jump
-directly to the Bazel tutorials:
-
-* [Tutorial: Build a C++ Project](/start/cpp)
-* [Tutorial: Build a Java Project](/start/java)
-* [Tutorial: Build an Android Application](/start/android-app)
-* [Tutorial: Build an iOS Application](/start/ios-app) diff --git a/8.1.1/about/roadmap.mdx b/8.1.1/about/roadmap.mdx deleted file mode 100644 index 2e18b78..0000000 --- a/8.1.1/about/roadmap.mdx +++ /dev/null @@ -1,147 +0,0 @@ ---
-title: 'Bazel roadmap'
----
-
-
-
-## Overview
-
-As the Bazel project continues to evolve in response to your needs, we want to
-share our 2024 update.
-
-This roadmap describes current initiatives and predictions for the future of
-Bazel development, giving you visibility into current priorities and ongoing
-projects.
-
-## Bazel 8.0 Release
-
-We plan to bring Bazel 8.0 [long term support
-(LTS)](https://bazel.build/release/versioning) to you in late 2024.
-The following features are planned to be implemented.
-
-### Bzlmod: external dependency management system
-
-[Bzlmod](https://bazel.build/docs/bzlmod) automatically resolves transitive
-dependencies, allowing projects to scale while staying fast and
-resource-efficient.
-
-With Bazel 8, we will disable WORKSPACE support by default (it will still be
-possible to enable it using `--enable_workspace`); with Bazel 9 WORKSPACE
-support will be removed. Starting with Bazel 7.1, you can set
-`--noenable_workspace` to opt into the new behavior.
-
-Bazel 8.0 will contain a number of enhancements to
-[Bazel's external dependency management](https://docs.google.com/document/d/1moQfNcEIttsk6vYanNKIy3ZuK53hQUFq1b1r0rmsYVg/edit#heading=h.lgyp7ubwxmjc)
-functionality, including:
-
-*   The new flag `--enable_workspace` can be set to `false` to completely
-    disable WORKSPACE functionality.
-*   New directory watching API (see
-    [#21435](https://github.com/bazelbuild/bazel/pull/21435), shipped in Bazel
-    7.1).
-*   Improved scheme for generating canonical repository names for better
-    cacheability of actions across dependency version updates.
-    ([#21316](https://github.com/bazelbuild/bazel/pull/21316), shipped in Bazel
-    7.1)
-*   An improved shared repository cache (see
-    [#12227](https://github.com/bazelbuild/bazel/issues/12227)).
-*   Vendor and offline mode support — allows users to run builds with
-    pre-downloaded dependencies (see
-    [#19563](https://github.com/bazelbuild/bazel/issues/19563)).
-*   Reduced merge conflicts in lock files
-    ([#20396](https://github.com/bazelbuild/bazel/issues/20369)).
-*   Segmented MODULE.bazel
-    ([#17880](https://github.com/bazelbuild/bazel/issues/17880))
-*   Allow overriding module extension generated repository
-    ([#19301](https://github.com/bazelbuild/bazel/issues/19301))
-*   Improved documentation (e.g.
-    [#18030](https://github.com/bazelbuild/bazel/issues/18030),
-    [#15821](https://github.com/bazelbuild/bazel/issues/15821)) and a migration
-    guide and migration tooling.
-
-
-### Remote execution improvements
-
-* Add support for asynchronous execution, speeding up remote execution by
-  increasing parallelism with the `--jobs` flag.
-* Make it easier to debug cache misses with a new compact execution log, which
-  reduces the log's size by 100x and significantly lowers its runtime overhead
-  (see [#18643](https://github.com/bazelbuild/bazel/issues/18643)).
-* Implement garbage collection for the disk cache (see
-  [#5139](https://github.com/bazelbuild/bazel/issues/5139)).
-* Implement remote output service to allow lazy downloading of arbitrary build
-  outputs (see
-  [#20933](https://github.com/bazelbuild/bazel/discussions/20933)).
-
-### Migration of Android, C++, Java, Python, and Proto rules
-
-Complete migration of Android, C++, Java, and Python rulesets to dedicated
-repositories and decoupling them from the Bazel releases. This effort allows
-Bazel users and rule authors to
-
-* Update rules independently of Bazel.
-* Update and customize rules as needed.
-
-The new location of the rulesets is going to be `bazelbuild/rules_android`,
-`rules_cc`, `rules_java`, `rules_python` and `google/protobuf`. `rules_proto` is
-going to be deprecated.
-
-Bazel 8 will provide a temporary migration flag that will automatically use the
-rulesets that were previously part of the binary from their repositories. All
-the users of those rulesets are expected to eventually depend on their
-repositories and load them similarly to other rulesets that were never part of
-Bazel.
-
-Bazel 8 will also improve on the existing extending rules and subrule APIs and
-mark them as non-experimental.
-
-### Starlark improvements
-
-* Symbolic Macros are a new way of writing macros that is friendlier to
-  `BUILD` users, macro authors, and tooling. Compared to legacy macros, which
-  Bazel has only limited insight into, symbolic macros help users avoid common
-  pitfalls and enforce best practices.
-* Package finalizers are a proposed feature for adding first-class support for
-  custom package validation logic. They are intended to help us deprecate
-  `native.existing_rules()`.
-
-### Configurability
-
-* Output path mapping continues to stabilize: promising better remote cache
-  performance and build speed for rule designers who use transitions.
-* Automatically set build flags suitable for a given `--platforms`.
-* Define project-supported flag combinations and automatically build targets
-  with default flags without having to set bazelrcs.
-* Don't redo build analysis every time build flags change.
-
-### Project Skyfocus - minimize retained data structures
-
-Bazel holds a lot of state in RAM for fast incremental builds. However,
-developers often change a small subset of the source files (e.g. almost never
-one of the external dependencies). With Skyfocus, Bazel will provide an
-experimental way to drop unnecessary incremental state and reduce Bazel's memory
-footprint, while still providing the same fast incremental build experience.
-
-The initial scope aims to improve the retained heap metric only. Peak heap
-reduction is a possibility, but not included in the initial scope.
-
-### Misc
-
-* Mobile install v3, a simpler and better maintained approach to incrementally
-  deploy Android applications.
-* Garbage collection for repository caches and Bazel's `install_base`.
-* Reduced sandboxing overhead.
-
-### Bazel-JetBrains* IntelliJ IDEA support
-
-Incremental IntelliJ plugin updates to support the latest JetBrains plugin
-release.
-
-*This roadmap is a snapshot of current targets and should not be taken as a guarantee.
-Priorities are subject to change in response to developer and customer -feedback, or new market opportunities.* - -*To be notified of new features — including updates to this roadmap — join the -[Google Group](https://groups.google.com/g/bazel-discuss) community.* - -*Copyright © 2022 JetBrains s.r.o. JetBrains and IntelliJ are registered trademarks of JetBrains s.r.o diff --git a/8.1.1/about/vision.mdx b/8.1.1/about/vision.mdx deleted file mode 100644 index da0ed02..0000000 --- a/8.1.1/about/vision.mdx +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: 'Bazel Vision' ---- - - - -Any software developer can efficiently build, test, and package -any project, of any size or complexity, with tooling that's easy to adopt and -extend. - -* **Engineers can take build fundamentals for granted.** Software developers - focus on the creative process of authoring code because the mechanical - process of build and test is solved. When customizing the build system to - support new languages or unique organizational needs, users focus on the - aspects of extensibility that are unique to their use case, without having - to reinvent the basic plumbing. - -* **Engineers can easily contribute to any project.** A developer who wants to - start working on a new project can simply clone the project and run the - build. There's no need for local configuration - it just works. With - cross-platform remote execution, they can work on any machine anywhere and - fully test their changes against all platforms the project targets. - Engineers can quickly configure the build for a new project or incrementally - migrate an existing build. - -* **Projects can scale to any size codebase, any size team.** Fast, - incremental testing allows teams to fully validate every change before it is - committed. This remains true even as repos grow, projects span multiple - repos, and multiple languages are introduced. Infrastructure does not force - developers to trade test coverage for build speed. - -**We believe Bazel has the potential to fulfill this vision.** - -Bazel was built from the ground up to enable builds that are reproducible (a -given set of inputs will always produce the same outputs) and portable (a build -can be run on any machine without affecting the output). - -These characteristics support safe incrementality (rebuilding only changed -inputs doesn't introduce the risk of corruption) and distributability (build -actions are isolated and can be offloaded). By minimizing the work needed to do -a correct build and parallelizing that work across multiple cores and remote -systems, Bazel can make any build fast. - -Bazel's abstraction layer — instructions specific to languages, platforms, and -toolchains implemented in a simple extensibility language — allows it to be -easily applied to any context. - -## Bazel core competencies - -1. Bazel supports **multi-language, multi-platform** builds and tests. You can - run a single command to build and test your entire source tree, no matter - which combination of languages and platforms you target. -1. Bazel builds are **fast and correct**. Every build and test run is - incremental, on your developers' machines and on CI. -1. Bazel provides a **uniform, extensible language** to define builds for any - language or platform. -1. Bazel allows your builds **to scale** by connecting to remote execution and - caching services. -1. Bazel works across **all major development platforms** (Linux, MacOS, and - Windows). -1. 
We accept that adopting Bazel requires effort, but **gradual adoption** is
-   possible. Bazel interfaces with de facto standard tools for a given
-   language/platform.
-
-## Serving language communities
-
-Software engineering evolves in the context of language communities — typically,
-self-organizing groups of people who use common tools and practices.
-
-To be of use to members of a language community, high-quality Bazel rules must be
-available that integrate with the workflows and conventions of that community.
-
-Bazel is committed to being extensible and open, and to supporting good rulesets
-for any language.
-
-### Requirements of a good ruleset
-
-1. The rules need to support efficient **building and testing** for the
-   language, including code coverage.
-1. The rules need to **interface with a widely-used "package manager"** for the
-   language (such as Maven for Java), and support incremental migration paths
-   from other widely-used build systems.
-1. The rules need to be **extensible and interoperable**, following
-   ["Bazel sandwich"](https://github.com/bazelbuild/bazel-website/blob/master/designs/_posts/2016-08-04-extensibility-for-native-rules.md)
-   principles.
-1. The rules need to be **remote-execution ready**. In practice, this means
-   **configurable using the [toolchains](/extending/toolchains) mechanism**.
-1. The rules (and Bazel) need to interface with a **widely-used IDE** for the
-   language, if there is one.
-1. The rules need to have **thorough, usable documentation,** with introductory
-   material for new users and comprehensive docs for expert users.
-
-Each of these items is essential, and only together do they deliver on Bazel's
-competencies for their particular ecosystem.
-
-They are also, by and large, sufficient: once all are fulfilled, Bazel fully
-delivers its value to members of that language community.
diff --git a/8.1.1/about/why.mdx b/8.1.1/about/why.mdx
deleted file mode 100644
index 97cfa36..0000000
--- a/8.1.1/about/why.mdx
+++ /dev/null
@@ -1,85 +0,0 @@
---
-title: 'Why Bazel?'
---
-
-
-Bazel is a [fast](#fast), [correct](#correct), and [extensible](#extensible)
-build tool with [integrated testing](#integrated-testing) that supports multiple
-[languages](#multi-language), [repositories](#multi-repository), and
-[platforms](#multi-platform) in an industry-leading [ecosystem](#ecosystem).
-
-## Bazel is fast
-
-Bazel knows exactly what input files each build command needs, avoiding
-unnecessary work by re-running only when the set of input files has
-changed between builds.
-It runs build commands with as much parallelism as possible, either within the
-same computer or on [remote build nodes](/remote/rbe). If the structure of the
-build allows for it, it can run thousands of build or test commands at the same
-time.
-
-This is supported by multiple caching layers: in memory, on disk, and on the
-remote build farm, if available. At Google, we routinely achieve cache hit rates
-north of 99%.
-
-## Bazel is correct
-
-Bazel ensures that your binaries are built *only* from your own
-source code. Bazel actions run in individual sandboxes and Bazel tracks
-every input file of the build, only and always re-running build
-commands when it needs to. This keeps your binaries up-to-date so that the
-[same source code always results in the same binary](/basics/hermeticity), bit
-by bit.
-
-Say goodbye to endless `make clean` invocations and to chasing phantom bugs
-that were in fact resolved in source code that never got built.
-
-## Bazel is extensible
-
-Harness the full power of Bazel by writing your own rules and macros to
-customize Bazel for your specific needs across a wide range of projects.
-
-Bazel rules are written in [Starlark](/rules/language), our
-in-house programming language that's a subset of Python. Starlark makes
-rule-writing accessible to most developers, while also creating rules that can
-be used across the ecosystem.
-
-## Integrated testing
-
-Bazel's [integrated test runner](/docs/user-manual#running-tests)
-knows and runs only the tests that need to be re-run, using remote execution
-(if available) to run them in parallel. Detect flakes early by using remote
-execution to quickly run a test thousands of times.
-
-Bazel [provides facilities](/remote/bep) to upload test results to a central
-location, thereby facilitating efficient communication of test outcomes, be it
-on CI or by individual developers.
-
-## Multi-language support
-
-Bazel supports many common programming languages including C++, Java,
-Kotlin, Python, Go, and Rust. You can build multiple binaries (for example,
-backend, web UI, and mobile app) in the same Bazel invocation without being
-constrained to one language's idiomatic build tool.
-
-## Multi-repository support
-
-Bazel can [gather source code from multiple locations](/external/overview): you
-don't need to vendor your dependencies (but you can!), you can instead point
-Bazel to the location of your source code or prebuilt artifacts (e.g. a git
-repository or Maven Central), and it takes care of the rest.
-
-## Multi-platform support
-
-Bazel can simultaneously build projects for multiple platforms including Linux,
-macOS, Windows, and Android. It also provides powerful
-[cross-compilation capabilities](/extending/platforms) to build code for one
-platform while running the build on another.
-
-## Wide ecosystem
-
-[Industry leaders](/community/users) love Bazel, building a large
-community of developers who use and contribute to Bazel. Find tools, services,
-and documentation, including [consulting and SaaS offerings](/community/experts),
-for use with Bazel. Explore extensions like support for additional programming
-languages in our [open source software repositories](/rules).
diff --git a/8.1.1/advanced/performance/build-performance-breakdown.mdx b/8.1.1/advanced/performance/build-performance-breakdown.mdx
deleted file mode 100644
index 477e757..0000000
--- a/8.1.1/advanced/performance/build-performance-breakdown.mdx
+++ /dev/null
@@ -1,235 +0,0 @@
---
-title: 'Breaking down build performance'
---
-
-
-Bazel is complex and does a lot of different things over the course of a build,
-some of which can have an impact on build performance. This page attempts to map
-some of these Bazel concepts to their implications on build performance. While
-not exhaustive, we have included some examples of how to detect build performance
-issues through [extracting metrics](/configure/build-performance-metrics)
-and what you can do to fix them. With this, we hope you can apply these concepts
-when investigating build performance regressions.
-
-### Clean vs Incremental builds
-
-A clean build is one that builds everything from scratch, while an incremental
-build reuses some already completed work.
-
-We suggest looking at clean and incremental builds separately, especially when
-you are collecting / aggregating metrics that are dependent on the state of
-Bazel’s caches (for example,
-[build request size metrics](#deterministic-build-metrics-as-a-proxy-for-build-performance)).
They also represent two different user experiences.
-Compared to starting a clean build from scratch (which takes longer due to a
-cold cache), incremental builds happen far more frequently as developers
-iterate on code, and are typically faster since the cache is usually already
-warm.
-
-You can use the `CumulativeMetrics.num_analyses` field in the BEP to classify
-builds. If `num_analyses <= 1`, it is a clean build; otherwise, we can broadly
-categorize it as likely being an incremental build - the user could have
-switched to different flags or different targets, causing an effectively clean
-build. Any more rigorous definition of incrementality will likely have to come
-in the form of a heuristic, for example looking at the number of packages loaded
-(`PackageMetrics.packages_loaded`).
-
-### Deterministic build metrics as a proxy for build performance
-
-Measuring build performance can be difficult due to the non-deterministic nature
-of certain metrics (for example, Bazel’s CPU time or queue times on a remote
-cluster). As such, it can be useful to use deterministic metrics as a proxy for
-the amount of work done by Bazel, which in turn affects its performance.
-
-The size of a build request can have a significant implication on build
-performance. A larger build could represent more work in analyzing and
-constructing the build graphs. Builds grow organically as development proceeds
-and more dependencies are added or created, and thus become more complex and
-more expensive to build.
-
-We can slice this problem into the various build phases, and use the following
-metrics as proxy metrics for work done at each phase:
-
-1. `PackageMetrics.packages_loaded`: the number of packages successfully loaded.
-   A regression here represents more work that needs to be done to read and parse
-   each additional BUILD file in the loading phase.
-   - This is often due to the addition of dependencies and having to load their
-     transitive closure.
-   - Use [query](/query/quickstart) / [cquery](/query/cquery) to find
-     where new dependencies might have been added.
-
-2. `TargetMetrics.targets_configured`: representing the number of targets and
-   aspects configured in the build. A regression represents more work in
-   constructing and traversing the configured target graph.
-   - This is often due to the addition of dependencies and having to construct
-     the graph of their transitive closure.
-   - Use [cquery](/query/cquery) to find where new
-     dependencies might have been added.
-
-3. `ActionSummary.actions_created`: represents the actions created in the build,
-   and a regression represents more work in constructing the action graph. Note
-   that this also includes unused actions that might not have been executed.
-   - Use [aquery](/query/aquery) for debugging regressions;
-     we suggest starting with
-     [`--output=summary`](/reference/command-line-reference#flag--output)
-     before further drilling down with
-     [`--skyframe_state`](/reference/command-line-reference#flag--skyframe_state).
-
-4. `ActionSummary.actions_executed`: the number of actions executed; a
-   regression directly represents more work in executing these actions.
-   - The [BEP](/remote/bep) writes out the action statistics
-     `ActionData`, which shows the most executed action types.
By default, it
-     collects the top 20 action types, but you can pass the
-     [`--experimental_record_metrics_for_all_mnemonics`](/reference/command-line-reference#flag--experimental_record_metrics_for_all_mnemonics)
-     flag to collect this data for all action types that were executed.
-   - This should help you figure out what kinds of additional actions were
-     executed.
-
-5. `BuildGraphSummary.outputArtifactCount`: the number of artifacts created by
-   the executed actions.
-   - If the number of actions executed did not increase, then it is likely that
-     a rule implementation was changed.
-
-These metrics are all affected by the state of the local cache; hence, you will
-want to ensure that the builds you extract these metrics from are
-**clean builds**.
-
-We have noted that a regression in any of these metrics can be accompanied by
-regressions in wall time, CPU time, and memory usage.
-
-### Usage of local resources
-
-Bazel consumes a variety of resources on your local machine, both for analyzing
-the build graph and driving the execution, and for running local actions. This
-can affect the performance and availability of your machine, both for
-performing the build and for other tasks.
-
-#### Time spent
-
-Perhaps the metric most susceptible to noise (it can vary greatly from build to
-build) is time; in particular, wall time, CPU time, and system time. You can
-use [bazel-bench](https://github.com/bazelbuild/bazel-bench) to get
-a benchmark for these metrics, and with a sufficient number of `--runs`, you can
-increase the statistical significance of your measurement.
-
-- **Wall time** is the real world time elapsed.
-  - If _only_ wall time regresses, we suggest collecting a
-    [JSON trace profile](/advanced/performance/json-trace-profile) and looking
-    for differences. Otherwise, it would likely be more efficient to
-    investigate other regressed metrics as they could have affected the wall
-    time.
-
-- **CPU time** is the time spent by the CPU executing user code.
-  - If the CPU time regresses across two project commits, we suggest collecting
-    a Starlark CPU profile. You should probably also use `--nobuild` to
-    restrict the build to the analysis phase since that is where most of the
-    CPU-heavy work is done.
-
-- **System time** is the time spent by the CPU in the kernel.
-  - If system time regresses, it is mostly correlated with I/O when Bazel reads
-    files from your file system.
-
-#### System-wide load profiling
-
-Using the
-[`--experimental_collect_load_average_in_profiler`](https://github.com/bazelbuild/bazel/blob/6.0.0/src/main/java/com/google/devtools/build/lib/runtime/CommonCommandOptions.java#L306-L312)
-flag introduced in Bazel 6.0, the
-[JSON trace profiler](/advanced/performance/json-trace-profile) collects the
-system load average during the invocation.
-
-![Profile that includes system load average](/docs/images/json-trace-profile-system-load-average.png "Profile that includes system load average")
-
-**Figure 1.** Profile that includes system load average.
-
-A high load during a Bazel invocation can be an indication that Bazel schedules
-too many local actions in parallel for your machine. You might want to look into
-adjusting
-[`--local_cpu_resources`](/reference/command-line-reference#flag--local_cpu_resources)
-and [`--local_ram_resources`](/reference/command-line-reference#flag--local_ram_resources),
-especially in container environments (at least until
-[#16512](https://github.com/bazelbuild/bazel/pull/16512) is merged).
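-
-For example, if Bazel runs in a container that is limited to 4 cores and 8 GB
-of RAM, you could cap local execution explicitly. This is only a sketch; the
-target pattern and resource values are placeholders to adapt to your machine:
-
-```sh
-# Limit local actions to 4 CPU cores and 8192 MB of RAM.
-bazel build //your/pkg:target \
-  --local_cpu_resources=4 \
-  --local_ram_resources=8192
-```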
-
-
-#### Monitoring Bazel memory usage
-
-There are two main sources for Bazel’s memory usage: Bazel `info` and the
-[BEP](/remote/bep).
-
-- `bazel info used-heap-size-after-gc`: The amount of used memory in bytes after
-  a call to `System.gc()`.
-  - [Bazel bench](https://github.com/bazelbuild/bazel-bench)
-    provides benchmarks for this metric as well.
-  - Additionally, there are `peak-heap-size`, `max-heap-size`, `used-heap-size`
-    and `committed-heap-size` (see
-    [documentation](/docs/user-manual#configuration-independent-data)), but they
-    are less relevant.
-
-- [BEP](/remote/bep)’s
-  `MemoryMetrics.peak_post_gc_heap_size`: Size of the peak JVM heap size in
-  bytes post GC (requires setting
-  [`--memory_profile`](/reference/command-line-reference#flag--memory_profile),
-  which attempts to force a full GC).
-
-A regression in memory usage is usually a result of a regression in
-[build request size metrics](#deterministic-build-metrics-as-a-proxy-for-build-performance),
-which are often due to the addition of dependencies or a change in the rule
-implementation.
-
-To analyze Bazel’s memory footprint on a more granular level, we recommend using
-the [built-in memory profiler](/rules/performance#memory-profiling)
-for rules.
-
-#### Memory profiling of persistent workers
-
-While [persistent workers](/remote/persistent) can help to speed up builds
-significantly (especially for interpreted languages), their memory footprint can
-be problematic. Bazel collects metrics on its workers; in particular, the
-`WorkerMetrics.WorkerStats.worker_memory_in_kb` field tells how much memory
-workers use (by mnemonic).
-
-The [JSON trace profiler](/advanced/performance/json-trace-profile) also
-collects persistent worker memory usage during the invocation by passing the
-[`--experimental_collect_worker_data_in_profiler`](https://github.com/bazelbuild/bazel/blob/6.0.0/src/main/java/com/google/devtools/build/lib/runtime/CommonCommandOptions.java#L314-L320)
-flag (new in Bazel 6.0).
-
-![Profile that includes workers memory usage](/docs/images/json-trace-profile-workers-memory-usage.png "Profile that includes workers memory usage")
-
-**Figure 2.** Profile that includes workers memory usage.
-
-Lowering the value of
-[`--worker_max_instances`](/reference/command-line-reference#flag--worker_max_instances)
-(default 4) might help to reduce
-the amount of memory used by persistent workers. We are actively working on
-making Bazel’s resource manager and scheduler smarter so that such fine-tuning
-will be required less often in the future.
-
-### Monitoring network traffic for remote builds
-
-In remote execution, Bazel downloads artifacts that were built as a result of
-executing actions. As such, your network bandwidth can affect the performance
-of your build.
-
-If you are using remote execution for your builds, you might want to consider
-monitoring the network traffic during the invocation using the
-`NetworkMetrics.SystemNetworkStats` proto from the [BEP](/remote/bep)
-(requires passing `--experimental_collect_system_network_usage`).
-
-Furthermore, [JSON trace profiles](/advanced/performance/json-trace-profile)
-allow you to view system-wide network usage throughout the course of the build
-by passing the `--experimental_collect_system_network_usage` flag (new in Bazel
-6.0).
-
-![Profile that includes system-wide network usage](/docs/images/json-trace-profile-network-usage.png "Profile that includes system-wide network usage")
-
-**Figure 3.** Profile that includes system-wide network usage.
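-
-To capture these network metrics for later inspection, one hedged sketch (the
-target pattern and output path are placeholders) is to also write the build
-events to a local JSON file:
-
-```sh
-# Write build events, including NetworkMetrics, to a JSON file.
-bazel build //your/pkg:target \
-  --experimental_collect_system_network_usage \
-  --build_event_json_file=/tmp/bep.json
-```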
-
-A high but rather flat network usage when using remote execution might indicate
-that the network is the bottleneck in your build; if you are not using it
-already, consider turning on Build without the Bytes by passing
-[`--remote_download_minimal`](/reference/command-line-reference#flag--remote_download_minimal).
-This will speed up your builds by avoiding the download of unnecessary
-intermediate artifacts.
-
-Another option is to configure a local
-[disk cache](/reference/command-line-reference#flag--disk_cache) to save on
-download bandwidth.
diff --git a/8.1.1/advanced/performance/build-performance-metrics.mdx b/8.1.1/advanced/performance/build-performance-metrics.mdx
deleted file mode 100644
index 8391ea8..0000000
--- a/8.1.1/advanced/performance/build-performance-metrics.mdx
+++ /dev/null
@@ -1,97 +0,0 @@
---
-title: 'Extracting build performance metrics'
---
-
-
-Probably every Bazel user has experienced builds that were slow or slower than
-anticipated. Improving the performance of individual builds has particular value
-for targets with significant impact, such as:
-
-1. Core developer targets that are frequently iterated on and (re)built.
-
-2. Common libraries widely depended upon by other targets.
-
-3. A representative target from a class of targets (e.g. custom rules);
-   diagnosing and fixing issues in one build might help to resolve issues at a
-   larger scale.
-
-An important step to improving the performance of builds is to understand where
-resources are spent. This page lists different metrics you can collect.
-[Breaking down build performance](/configure/build-performance-breakdown) showcases
-how you can use these metrics to detect and fix build performance issues.
-
-There are a few main ways to extract metrics from your Bazel builds, namely:
-
-## Build Event Protocol (BEP)
-
-Bazel outputs a variety of protocol buffers
-[`build_event_stream.proto`](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto)
-through the [Build Event Protocol (BEP)](/remote/bep), which
-can be aggregated by a backend specified by you. Depending on your use cases,
-you might decide to aggregate the metrics in various ways, but here we will go
-over some concepts and proto fields that are generally useful to consider.
-
-## Bazel’s query / cquery / aquery commands
-
-Bazel provides three different query modes ([query](/query/quickstart),
-[cquery](/query/cquery) and [aquery](/query/aquery)) that allow users
-to query the target graph, configured target graph and action graph
-respectively. The query language provides a
-[suite of functions](/query/language#functions) usable across the different
-query modes, allowing you to customize your queries according to your needs.
-
-## JSON Trace Profiles
-
-For every build-like Bazel invocation, Bazel writes a trace profile in JSON
-format. The [JSON trace profile](/advanced/performance/json-trace-profile) can
-be very useful to quickly understand what Bazel spent time on during the
-invocation.
-
-## Execution Log
-
-The [execution log](/remote/cache-remote) can help you to troubleshoot and fix
-missing remote cache hits due to machine and environment differences or
-non-deterministic actions.
If you pass the flag
-[`--experimental_execution_log_spawn_metrics`](/reference/command-line-reference#flag--experimental_execution_log_spawn_metrics)
-(available since Bazel 5.2), it will also contain detailed spawn metrics, both
-for locally and remotely executed actions. You can use these metrics, for
-example, to make comparisons between local and remote machine performance or to
-find out which part of the spawn execution is consistently slower than expected
-(for example, due to queuing).
-
-## Execution Graph Log
-
-While the JSON trace profile contains the critical path information, sometimes
-you need additional information on the dependency graph of the executed actions.
-Starting with Bazel 6.0, you can pass the flags
-`--experimental_execution_graph_log` and
-`--experimental_execution_graph_log_dep_type=all` to write out a log about the
-executed actions and their inter-dependencies.
-
-This information can be used to understand the drag that is added by a node on
-the critical path. The drag is the amount of time that can potentially be saved
-by removing a particular node from the execution graph.
-
-The data helps you predict the impact of changes to the build and action graph
-before you actually make them.
-
-## Benchmarking with bazel-bench
-
-[Bazel bench](https://github.com/bazelbuild/bazel-bench) is a
-benchmarking tool for Git projects to benchmark build performance in the
-following cases:
-
-* **Project benchmark:** Benchmarking two Git commits against each other at a
-  single Bazel version. Used to detect regressions in your build (often through
-  the addition of dependencies).
-
-* **Bazel benchmark:** Benchmarking two versions of Bazel against each other at
-  a single Git commit. Used to detect regressions within Bazel itself (if you
-  happen to maintain / fork Bazel).
-
-Benchmarks monitor wall time, CPU time, system time, and Bazel’s retained
-heap size.
-
-It is also recommended to run Bazel bench on dedicated, physical machines that
-are not running other processes so as to reduce sources of variability.
diff --git a/8.1.1/advanced/performance/iteration-speed.mdx b/8.1.1/advanced/performance/iteration-speed.mdx
deleted file mode 100644
index 2bbf839..0000000
--- a/8.1.1/advanced/performance/iteration-speed.mdx
+++ /dev/null
@@ -1,93 +0,0 @@
---
-title: 'Optimize Iteration Speed'
---
-
-
-This page describes how to optimize Bazel's build performance when running Bazel
-repeatedly.
-
-## Bazel's Runtime State
-
-A Bazel invocation involves several interacting parts.
-
-* The `bazel` command line interface (CLI) is the user-facing front-end tool
-  and receives commands from the user.
-
-* The CLI tool starts a [*Bazel server*](https://bazel.build/run/client-server)
-  for each distinct [output base](https://bazel.build/remote/output-directories).
-  The Bazel server is generally persistent, but will shut down after some idle
-  time so as not to waste resources.
-
-* The Bazel server performs the loading and analysis steps for a given command
-  (`build`, `run`, `cquery`, etc.), in which it constructs the necessary parts
-  of the build graph in memory. The resulting data structures are retained in
-  the Bazel server as part of the *analysis cache*.
-
-* The Bazel server can also perform the action execution, or it can send
-  actions off for remote execution if it is set up to do so. The results of
-  action executions are also cached, namely in the *action cache* (or
-  *execution cache*, which may be either local or remote, and may be shared
-  among Bazel servers).
-
-* The result of the Bazel invocation is made available in the output tree.
-
-## Running Bazel Iteratively
-
-In a typical developer workflow, it is common to build (or run) a piece of code
-repeatedly, often at a very high frequency (e.g. to resolve some compilation
-error or investigate a failing test). In this situation, it is important that
-repeated invocations of `bazel` have as little overhead as possible relative to
-the underlying, repeated action (e.g. invoking a compiler, or executing a test).
-
-With this in mind, we take another look at Bazel's runtime state:
-
-The analysis cache is a critical piece of data. A significant amount of time can
-be spent just on the loading and analysis phases of a cold run (i.e. a run just
-after the Bazel server was started or when the analysis cache was discarded).
-For a single, successful cold build (e.g. for a production release) this cost is
-bearable, but for repeatedly building the same target it is important that this
-cost be amortized and not repeated on each invocation.
-
-The analysis cache is rather volatile. First off, it is part of the in-process
-state of the Bazel server, so losing the server loses the cache. But the cache
-is also *invalidated* very easily: for example, many `bazel` command line flags
-cause the cache to be discarded. This is because many flags affect the build
-graph (e.g. because of
-[configurable attributes](https://bazel.build/configure/attributes)). Some flag
-changes can also cause the Bazel server to be restarted (e.g. changing
-[startup options](https://bazel.build/docs/user-manual#startup-options)).
-
-A good execution cache is also valuable for build performance. An execution
-cache can be kept locally
-[on disk](https://bazel.build/remote/caching#disk-cache), or
-[remotely](https://bazel.build/remote/caching). The cache can be shared among
-Bazel servers, and indeed among developers.
-
-## Avoid discarding the analysis cache
-
-Bazel will print a warning if either the analysis cache was discarded or the
-server was restarted. Either of these should be avoided during iterative use:
-
-* Be mindful of changing `bazel` flags in the middle of an iterative
-  workflow. For example, mixing a `bazel build -c opt` with a `bazel cquery`
-  causes each command to discard the analysis cache of the other. In general,
-  try to use a fixed set of flags for the duration of a particular workflow.
-
-* Losing the Bazel server loses the analysis cache. The Bazel server has a
-  [configurable](https://bazel.build/docs/user-manual#max-idle-secs) idle
-  time, after which it shuts down. You can configure this time via your
-  bazelrc file to suit your needs. The server is also restarted when startup
-  flags change, so, again, avoid changing those flags if possible.
-
-* Beware that the Bazel server is killed if you press
-  Ctrl-C repeatedly while Bazel is running. It is tempting to try to save time
-  by interrupting a running build that is no longer needed, but only press
-  Ctrl-C once to request a graceful end of the current invocation.
-
-* If you want to use multiple sets of flags from the same workspace, you can
-  use multiple, distinct output bases, switched with the `--output_base`
-  flag. Each output base gets its own Bazel server. A sketch (paths and target
-  labels below are placeholders):
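-
-  ```sh
-  # One output base per flag set, so neither build invalidates the other's
-  # analysis cache.
-  bazel --output_base=$HOME/.cache/bazel/fastbuild build //pkg:target
-  bazel --output_base=$HOME/.cache/bazel/opt build -c opt //pkg:target
-  ```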
-
-To make discarding the analysis cache an error rather than a warning, you can
-use the `--noallow_analysis_cache_discard` flag (introduced in Bazel 6.4.0).
diff --git a/8.1.1/advanced/performance/json-trace-profile.mdx b/8.1.1/advanced/performance/json-trace-profile.mdx
deleted file mode 100644
index 56e278c..0000000
--- a/8.1.1/advanced/performance/json-trace-profile.mdx
+++ /dev/null
@@ -1,200 +0,0 @@
---
-title: 'JSON Trace Profile'
---
-
-
-The JSON trace profile can be very useful to quickly understand what Bazel spent
-time on during the invocation.
-
-By default, for all build-like commands and query, Bazel writes a profile into
-the output base named `command-$INVOCATION_ID.profile.gz`, where
-`$INVOCATION_ID` is the invocation identifier of the command. Bazel also creates
-a symlink called `command.profile.gz` in the output base that points to the
-profile of the latest command. You can configure whether a profile is written
-with the
-[`--generate_json_trace_profile`](/reference/command-line-reference#flag--generate_json_trace_profile)
-flag, and the location it is written to with the
-[`--profile`](/docs/user-manual#profile) flag. Locations ending with `.gz` are
-compressed with GZIP. Bazel keeps the last 5 profiles, configurable by
-[`--profiles_to_retain`](/reference/command-line-reference#flag--profiles_to_retain),
-in the output base by default for post-build analysis. Explicitly passing a
-profile path with `--profile` disables automatic garbage collection.
-
-## Tools
-
-You can load this profile into `chrome://tracing` or analyze and
-post-process it with other tools.
-
-### `chrome://tracing`
-
-To visualize the profile, open `chrome://tracing` in a Chrome browser tab,
-click "Load" and pick the (potentially compressed) profile file. For more
-detailed results, click the boxes in the lower left corner.
-
-Example profile:
-
-![Example profile](/docs/images/json-trace-profile.png "Example profile")
-
-**Figure 1.** Example profile.
-
-You can use these keyboard controls to navigate:

-* Press `1` for "select" mode. In this mode, you can select
-  particular boxes to inspect the event details (see lower left corner).
-  Select multiple events to get a summary and aggregated statistics.
-* Press `2` for "pan" mode. Then drag the mouse to move the view. You
-  can also use `a`/`d` to move left/right.
-* Press `3` for "zoom" mode. Then drag the mouse to zoom. You can
-  also use `w`/`s` to zoom in/out.
-* Press `4` for "timing" mode where you can measure the distance
-  between two events.
-* Press `?` to learn about all controls.
-
-### `bazel analyze-profile`
-
-The Bazel subcommand [`analyze-profile`](/docs/user-manual#analyze-profile)
-consumes a profile format and prints cumulative statistics for
-different task types for each build phase and an analysis of the critical path.
-
-For example, the commands
-
-```
-$ bazel build --profile=/tmp/profile.gz //path/to:target
-...
-$ bazel analyze-profile /tmp/profile.gz
-```
-
-may yield output of this form:
-
-```
-INFO: Profile created on Tue Jun 16 08:59:40 CEST 2020, build ID: 0589419c-738b-4676-a374-18f7bbc7ac23, output base: /home/johndoe/.cache/bazel/_bazel_johndoe/d8eb7a85967b22409442664d380222c0
-
-=== PHASE SUMMARY INFORMATION ===
-
-Total launch phase time         1.070 s   12.95%
-Total init phase time           0.299 s    3.62%
-Total loading phase time        0.878 s   10.64%
-Total analysis phase time       1.319 s   15.98%
-Total preparation phase time    0.047 s    0.57%
-Total execution phase time      4.629 s   56.05%
-Total finish phase time         0.014 s    0.18%
-------------------------------------------------
-Total run time                  8.260 s  100.00%
-
-Critical path (4.245 s):
-       Time       Percentage   Description
-    8.85 ms        0.21%   _Ccompiler_Udeps for @local_config_cc// compiler_deps
-    3.839 s       90.44%   action 'Compiling external/com_google_protobuf/src/google/protobuf/compiler/php/php_generator.cc [for host]'
-     270 ms        6.36%   action 'Linking external/com_google_protobuf/protoc [for host]'
-    0.25 ms        0.01%   runfiles for @com_google_protobuf// protoc
-     126 ms        2.97%   action 'ProtoCompile external/com_google_protobuf/python/google/protobuf/compiler/plugin_pb2.py'
-    0.96 ms        0.02%   runfiles for //tools/aquery_differ aquery_differ
-```
-
-### Bazel Invocation Analyzer
-
-The open-source
-[Bazel Invocation Analyzer](https://github.com/EngFlow/bazel_invocation_analyzer)
-consumes a profile format and prints suggestions on how to improve
-the build’s performance. This analysis can be performed using its CLI or on
-[https://analyzer.engflow.com](https://analyzer.engflow.com).
-
-### `jq`
-
-`jq` is like `sed` for JSON data. An example usage of `jq` to extract all
-durations of the sandbox creation step in local action execution:
-
-```
-$ zcat $(../bazel-6.0.0rc1-linux-x86_64 info output_base)/command.profile.gz | jq '.traceEvents | .[] | select(.name == "sandbox.createFileSystem") | .dur'
-6378
-7247
-11850
-13756
-6555
-7445
-8487
-15520
-[...]
-```
-
-## Profile information
-
-The profile contains multiple rows. Usually the bulk of rows represent Bazel
-threads and their corresponding events, but some special rows are also included.
-
-The special rows included depend on the version of Bazel invoked when the
-profile was created, and may be customized by different flags.
-
-Figure 1 shows a profile created with Bazel v5.3.1 and includes these rows:
-
-* `action count`: Displays how many concurrent actions were in flight. Click
-  on it to see the actual value. Should go up to the value of
-  [`--jobs`](/reference/command-line-reference#flag--jobs) in clean
-  builds.
-* `CPU usage (Bazel)`: For each second of the build, displays the amount of
-  CPU that was used by Bazel (a value of 1 equals one core being 100% busy).
-* `Critical Path`: Displays one block for each action on the critical path.
-* `Main Thread`: Bazel’s main thread. Useful to get a high-level picture of
-  what Bazel is doing, for example "Launch Blaze", "evaluateTargetPatterns",
-  and "runAnalysisPhase".
-* `Garbage Collector`: Displays minor and major Garbage Collection (GC)
-  pauses.
-
-## Common performance issues
-
-When analyzing performance profiles, look for:
-
-* Slower than expected analysis phase (`runAnalysisPhase`), especially on
-  incremental builds. This can be a sign of a poor rule implementation, for
-  example one that flattens depsets. Package loading can be slowed by an
-  excessive number of targets, complex macros, or recursive globs.
-* Individual slow actions, especially those on the critical path.
It might be
-  possible to split large actions into multiple smaller actions or reduce the
-  set of (transitive) dependencies to speed them up. Also check for an
-  unusually high non-`PROCESS_TIME` (such as `REMOTE_SETUP` or `FETCH`).
-* Bottlenecks, that is, when a small number of threads are busy while all
-  others are idle, waiting for the result (see around 22s and 29s in Figure 1).
-  Optimizing this will most likely require touching the rule implementations
-  or Bazel itself to introduce more parallelism. This can also happen when
-  there is an unusual amount of GC.
-
-## Profile file format
-
-The top-level object contains metadata (`otherData`) and the actual tracing data
-(`traceEvents`). The metadata contains extra info, for example the invocation ID
-and date of the Bazel invocation.
-
-Example:
-
-```json
-{
-  "otherData": {
-    "build_id": "101bff9a-7243-4c1a-8503-9dc6ae4c3b05",
-    "date": "Wed Oct 26 08:22:35 CEST 2022",
-    "profile_finish_ts": "1677666095162000",
-    "output_base": "/usr/local/google/_bazel_johndoe/573d4be77eaa72b91a3dfaa497bf8cd0"
-  },
-  "traceEvents": [
-    {"name":"thread_name","ph":"M","pid":1,"tid":0,"args":{"name":"Critical Path"}},
-    ...
-    {"cat":"build phase marker","name":"Launch Blaze","ph":"X","ts":-1306000,"dur":1306000,"pid":1,"tid":21},
-    ...
-    {"cat":"package creation","name":"foo","ph":"X","ts":2685358,"dur":784,"pid":1,"tid":246},
-    ...
-    {"name":"thread_name","ph":"M","pid":1,"tid":11,"args":{"name":"Garbage Collector"}},
-    {"cat":"gc notification","name":"minor GC","ph":"X","ts":825986,"dur":11000,"pid":1,"tid":11},
-    ...
-    {"cat":"action processing","name":"Compiling foo/bar.c","ph":"X","ts":54413389,"dur":357594,"pid":1,"args":{"mnemonic":"CppCompile"},"tid":341},
-  ]
-}
-```
-
-Timestamps (`ts`) and durations (`dur`) in the trace events are given in
-microseconds. The category (`cat`) is one of the enum values of `ProfilerTask`.
-Note that some events are merged together if they are very short and close to
-each other; pass
-[`--noslim_profile`](/reference/command-line-reference#flag--slim_profile)
-if you would like to prevent event merging.
-
-See also the
-[Chrome Trace Event Format Specification](https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview).
diff --git a/8.1.1/advanced/performance/memory.mdx b/8.1.1/advanced/performance/memory.mdx
deleted file mode 100644
index 844e691..0000000
--- a/8.1.1/advanced/performance/memory.mdx
+++ /dev/null
@@ -1,97 +0,0 @@
---
-title: 'Optimize Memory'
---
-
-
-This page describes how to limit and reduce the memory Bazel uses.
-
-## Running Bazel with Limited RAM
-
-In certain situations, you may want Bazel to use minimal memory. You can set the
-maximum heap via the startup flag
-[`--host_jvm_args`](/docs/user-manual#host-jvm-args),
-like `--host_jvm_args=-Xmx2g`.
-
-### Trade incremental build speeds for memory
-
-If your builds are too big, Bazel may throw an `OutOfMemoryError` (OOM) when
-it doesn't have enough memory. You can make Bazel use less memory, at the cost
-of slower incremental builds, by passing the following command flags:
-[`--discard_analysis_cache`](/docs/user-manual#discard-analysis-cache),
-[`--nokeep_state_after_build`](/reference/command-line-reference#flag--keep_state_after_build),
-and
-[`--notrack_incremental_state`](/reference/command-line-reference#flag--track_incremental_state).
-
-These flags will minimize the memory that Bazel uses in a build, at the cost of
-making future builds slower than a standard incremental build would be.
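-
-As a minimal sketch (the target label is a placeholder), all three flags can be
-combined in a single invocation:
-
-```sh
-# Build with minimal retained memory; subsequent incremental builds will
-# effectively start over, except for the on-disk action cache.
-bazel build //your/pkg:target \
-  --discard_analysis_cache \
-  --notrack_incremental_state \
-  --nokeep_state_after_build
-```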
-
-You can also pass any one of these flags individually:
-
- * `--discard_analysis_cache` will reduce the memory used during execution (not
-analysis). Incremental builds will not have to redo package loading, but will
-have to redo analysis and execution (although the on-disk action cache can
-prevent most re-execution).
- * `--notrack_incremental_state` will not store any edges in Bazel's internal
-   dependency graph, so that it is unusable for incremental builds. The next build
-   will discard that data, but it is preserved until then, for internal debugging,
-   unless `--nokeep_state_after_build` is specified.
- * `--nokeep_state_after_build` will discard all data after the build, so that
-   incremental builds have to build from scratch (except for the on-disk action
-   cache). Alone, it does not affect the high-water mark of the current build.
-
-### Trade build flexibility for memory with Skyfocus (Experimental)
-
-If you want to make Bazel use less memory *and* retain incremental build speeds,
-you can tell Bazel the working set of files that you will be modifying, and
-Bazel will only keep the state needed to correctly rebuild changes to those
-files incrementally. This feature is called **Skyfocus**.
-
-To use Skyfocus, pass the `--experimental_enable_skyfocus` flag:
-
-```sh
-bazel build //pkg:target --experimental_enable_skyfocus
-```
-
-By default, the working set will be the set of files next to the target being
-built. In the example, all files in `//pkg` will be kept in the working set, and
-changes to files outside of the working set will be disallowed, until you issue
-`bazel clean` or restart the Bazel server.
-
-If you want to specify an exact set of files or directories, use the
-`--experimental_working_set` flag, like so:
-
-```sh
-bazel build //pkg:target --experimental_enable_skyfocus \
-  --experimental_working_set=path/to/another/dir,path/to/tests/dir
-```
-
-You can also pass `--experimental_skyfocus_dump_post_gc_stats` to show how much
-memory was reclaimed.
-
-Putting it all together, you should see something like this:
-
-```none
-$ bazel test //pkg:target //tests/... --experimental_enable_skyfocus --experimental_working_set dir1,dir2,dir3/subdir --experimental_skyfocus_dump_post_gc_stats
-INFO: --experimental_enable_skyfocus is enabled. Blaze will reclaim memory not needed to build the working set. Run 'blaze dump --skyframe=working_set' to show the working set, after this command.
-WARNING: Changes outside of the working set will cause a build error.
-INFO: Analyzed 149 targets (4533 packages loaded, 169438 targets configured).
-INFO: Found 25 targets and 124 test targets...
-INFO: Updated working set successfully.
-INFO: Focusing on 334 roots, 3 leafs... (use --experimental_skyfocus_dump_keys to show them)
-INFO: Heap: 1237MB -> 676MB (-45.31%)
-INFO: Elapsed time: 192.670s ...
-INFO: Build completed successfully, 62303 total actions
-```
-
-For this example, using Skyfocus allowed Bazel to drop 561MB (45%) of memory.
-Incremental builds for changes to files under `dir1`, `dir2`, and `dir3/subdir`
-retain their fast speeds, with the tradeoff that Bazel cannot rebuild changed
-files outside of these directories.
-
-## Memory Profiling
-
-Bazel comes with a built-in memory profiler that can help you check your rule’s
-memory use. Read more about this process in the
-[Memory Profiling section](/rules/performance#memory-profiling) of our
-documentation on how to improve the performance of custom rules.
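-
-As a hedged sketch (profile path and target label are placeholders), a memory
-profile can be captured alongside a regular build for later analysis:
-
-```sh
-# Write Bazel's memory profile during the build.
-bazel build //your/pkg:target --memory_profile=/tmp/memory_profile.gz
-```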
diff --git a/8.1.1/basics/artifact-based-builds.mdx b/8.1.1/basics/artifact-based-builds.mdx
deleted file mode 100644
index 79f3514..0000000
--- a/8.1.1/basics/artifact-based-builds.mdx
+++ /dev/null
@@ -1,279 +0,0 @@
---
-title: 'Artifact-Based Build Systems'
---
-
-
-This page covers artifact-based build systems and the philosophy behind their
-creation. Bazel is an artifact-based build system. While task-based build
-systems are a good step up from build scripts, they give too much power to
-individual engineers by letting them define their own tasks.
-
-Artifact-based build systems have a small number of tasks defined by the system
-that engineers can configure in a limited way. Engineers still tell the system
-**what** to build, but the build system determines **how** to build it. As with
-task-based build systems, artifact-based build systems, such as Bazel, still
-have buildfiles, but the contents of those buildfiles are very different. Rather
-than being an imperative set of commands in a Turing-complete scripting language
-describing how to produce an output, buildfiles in Bazel are a declarative
-manifest describing a set of artifacts to build, their dependencies, and a
-limited set of options that affect how they’re built. When engineers run `bazel`
-on the command line, they specify a set of targets to build (the **what**), and
-Bazel is responsible for configuring, running, and scheduling the compilation
-steps (the **how**). Because the build system now has full control over what
-tools to run when, it can make much stronger guarantees that allow it to be far
-more efficient while still guaranteeing correctness.
-
-## A functional perspective
-
-It’s easy to make an analogy between artifact-based build systems and functional
-programming. Traditional imperative programming languages (such as Java, C, and
-Python) specify lists of statements to be executed one after another, in the
-same way that task-based build systems let programmers define a series of steps
-to execute. Functional programming languages (such as Haskell and ML), in
-contrast, are structured more like a series of mathematical equations. In
-functional languages, the programmer describes a computation to perform, but
-leaves the details of when and exactly how that computation is executed to the
-compiler.
-
-This maps to the idea of declaring a manifest in an artifact-based build system
-and letting the system figure out how to execute the build. Many problems can't
-be easily expressed using functional programming, but the ones that can be
-benefit greatly from it: the language is often able to trivially parallelize
-such programs and make strong guarantees about their correctness that would be
-impossible in an imperative language. The easiest problems to express using
-functional programming are the ones that simply involve transforming one piece
-of data into another using a series of rules or functions. And that’s exactly
-what a build system is: the whole system is effectively a mathematical function
-that takes source files (and tools like the compiler) as inputs and produces
-binaries as outputs. So, it’s not surprising that it works well to base a build
-system around the tenets of functional programming.
-
-## Understanding artifact-based build systems
-
-Google's build system, Blaze, was the first artifact-based build system. Bazel
-is the open-sourced version of Blaze.
-
-Here’s what a buildfile (normally named `BUILD`) looks like in Bazel:
-
-```python
-java_binary(
-    name = "MyBinary",
-    srcs = ["MyBinary.java"],
-    deps = [
-        ":mylib",
-    ],
-)
-java_library(
-    name = "mylib",
-    srcs = ["MyLibrary.java", "MyHelper.java"],
-    visibility = ["//java/com/example/myproduct:__subpackages__"],
-    deps = [
-        "//java/com/example/common",
-        "//java/com/example/myproduct/otherlib",
-    ],
-)
-```
-
-In Bazel, `BUILD` files define targets—the two types of targets here are
-`java_binary` and `java_library`. Every target corresponds to an artifact that
-can be created by the system: binary targets produce binaries that can be
-executed directly, and library targets produce libraries that can be used by
-binaries or other libraries. Every target has:
-
-* `name`: how the target is referenced on the command line and by other
-  targets
-* `srcs`: the source files to be compiled to create the artifact for the target
-* `deps`: other targets that must be built before this target and linked into
-  it
-
-Dependencies can either be within the same package (such as `MyBinary`’s
-dependency on `:mylib`) or on a different package in the same source hierarchy
-(such as `mylib`’s dependency on `//java/com/example/common`).
-
-As with task-based build systems, you perform builds using Bazel’s command-line
-tool. To build the `MyBinary` target, you run `bazel build :MyBinary`. After
-entering that command for the first time in a clean repository, Bazel:
-
-1. Parses every `BUILD` file in the workspace to create a graph of dependencies
-   among artifacts.
-1. Uses the graph to determine the transitive dependencies of `MyBinary`; that
-   is, every target that `MyBinary` depends on and every target that those
-   targets depend on, recursively.
-1. Builds each of those dependencies, in order. Bazel starts by building each
-   target that has no other dependencies and keeps track of which dependencies
-   still need to be built for each target. As soon as all of a target’s
-   dependencies are built, Bazel starts building that target. This process
-   continues until every one of `MyBinary`’s transitive dependencies has been
-   built.
-1. Builds `MyBinary` to produce a final executable binary that links in all of
-   the dependencies that were built in step 3.
-
-Fundamentally, it might not seem like what’s happening here is that much
-different than what happened when using a task-based build system. Indeed, the
-end result is the same binary, and the process for producing it involved
-analyzing a bunch of steps to find dependencies among them, and then running
-those steps in order. But there are critical differences. The first one appears
-in step 3: because Bazel knows that each target only produces a Java library, it
-knows that all it has to do is run the Java compiler rather than an arbitrary
-user-defined script, so it knows that it’s safe to run these steps in parallel.
-This can produce an order of magnitude performance improvement over building
-targets one at a time on a multicore machine, and is only possible because the
-artifact-based approach leaves the build system in charge of its own execution
-strategy so that it can make stronger guarantees about parallelism.
-
-The benefits extend beyond parallelism, though. The next thing that this
-approach gives us becomes apparent when the developer types `bazel
-build :MyBinary` a second time without making any changes: Bazel exits in less
-than a second with a message saying that the target is up to date.
This is
-possible due to the functional programming paradigm we talked about
-earlier—Bazel knows that each target is the result only of running a Java
-compiler, and it knows that the output from the Java compiler depends only on
-its inputs, so as long as the inputs haven’t changed, the output can be reused.
-And this analysis works at every level; if `MyBinary.java` changes, Bazel knows
-to rebuild `MyBinary` but reuse `mylib`. If a source file for
-`//java/com/example/common` changes, Bazel knows to rebuild that library,
-`mylib`, and `MyBinary`, but reuse `//java/com/example/myproduct/otherlib`.
-Because Bazel knows about the properties of the tools it runs at every step,
-it’s able to rebuild only the minimum set of artifacts each time while
-guaranteeing that it won’t produce stale builds.
-
-Reframing the build process in terms of artifacts rather than tasks is subtle
-but powerful. By reducing the flexibility exposed to the programmer, the build
-system can know more about what is being done at every step of the build. It can
-use this knowledge to make the build far more efficient by parallelizing build
-processes and reusing their outputs. But this is really just the first step, and
-these building blocks of parallelism and reuse form the basis for a distributed
-and highly scalable build system.
-
-## Other nifty Bazel tricks
-
-Artifact-based build systems fundamentally solve the problems with parallelism
-and reuse that are inherent in task-based build systems. But there are still a
-few problems that came up earlier that we haven’t addressed. Bazel has clever
-ways of solving each of these, and we should discuss them before moving on.
-
-### Tools as dependencies
-
-One problem we ran into earlier was that builds depended on the tools installed
-on our machine, and reproducing builds across systems could be difficult due to
-different tool versions or locations. The problem becomes even more difficult
-when your project uses languages that require different tools based on which
-platform they’re being built on or compiled for (such as Windows versus Linux),
-and each of those platforms requires a slightly different set of tools to do the
-same job.
-
-Bazel solves the first part of this problem by treating tools as dependencies to
-each target. Every `java_library` in the workspace implicitly depends on a Java
-compiler, which defaults to a well-known compiler. Whenever Bazel builds a
-`java_library`, it checks to make sure that the specified compiler is available
-at a known location. Just like any other dependency, if the Java compiler
-changes, every artifact that depends on it is rebuilt.
-
-Bazel solves the second part of the problem, platform independence, by setting
-[build configurations](/run/build#build-config-cross-compilation). Rather than
-targets depending directly on their tools, they depend on types of configurations:
-
-* **Host configuration**: building tools that run during the build
-* **Target configuration**: building the binary you ultimately requested
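-
-For example, cross-compiling selects a target configuration that differs from
-the host; a sketch, where the platform label is hypothetical:
-
-```sh
-# Build tools run on the host machine, while MyBinary is built for the
-# platform selected by --platforms (the target configuration).
-bazel build //some/pkg:MyBinary --platforms=//platforms:linux_arm64
-```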
- -To define a rule in Bazel, the rule author declares the inputs that the rule -requires (in the form of attributes passed in the `BUILD` file) and the fixed -set of outputs that the rule produces. The author also defines the actions that -will be generated by that rule. Each action declares its inputs and outputs, -runs a particular executable or writes a particular string to a file, and can be -connected to other actions via its inputs and outputs. This means that actions -are the lowest-level composable unit in the build system—an action can do -whatever it wants so long as it uses only its declared inputs and outputs, and -Bazel takes care of scheduling actions and caching their results as appropriate. - -The system isn’t foolproof given that there’s no way to stop an action developer -from doing something like introducing a nondeterministic process as part of -their action. But this doesn’t happen very often in practice, and pushing the -possibilities for abuse all the way down to the action level greatly decreases -opportunities for errors. Rules supporting many common languages and tools are -widely available online, and most projects will never need to define their own -rules. Even for those that do, rule definitions only need to be defined in one -central place in the repository, meaning most engineers will be able to use -those rules without ever having to worry about their implementation. - -### Isolating the environment - -Actions sound like they might run into the same problems as tasks in other -systems—isn’t it still possible to write actions that both write to the same -file and end up conflicting with one another? Actually, Bazel makes these -conflicts impossible by using _[sandboxing](/docs/sandboxing)_. On supported -systems, every action is isolated from every other action via a filesystem -sandbox. Effectively, each action can see only a restricted view of the -filesystem that includes the inputs it has declared and any outputs it has -produced. This is enforced by systems such as LXC on Linux, the same technology -behind Docker. This means that it’s impossible for actions to conflict with one -another because they are unable to read any files they don’t declare, and any -files that they write but don’t declare will be thrown away when the action -finishes. Bazel also uses sandboxes to restrict actions from communicating via -the network. - -### Making external dependencies deterministic - -There’s still one problem remaining: build systems often need to download -dependencies (whether tools or libraries) from external sources rather than -directly building them. This can be seen in the example via the -`@com_google_common_guava_guava//jar` dependency, which downloads a `JAR` file -from Maven. - -Depending on files outside of the current workspace is risky. Those files could -change at any time, potentially requiring the build system to constantly check -whether they’re fresh. If a remote file changes without a corresponding change -in the workspace source code, it can also lead to unreproducible builds—a build -might work one day and fail the next for no obvious reason due to an unnoticed -dependency change. Finally, an external dependency can introduce a huge security -risk when it is owned by a third party: if an attacker is able to infiltrate -that third-party server, they can replace the dependency file with something of -their own design, potentially giving them full control over your build -environment and its output. 
- -The fundamental problem is that we want the build system to be aware of these -files without having to check them into source control. Updating a dependency -should be a conscious choice, but that choice should be made once in a central -place rather than managed by individual engineers or automatically by the -system. This is because even with a “Live at Head” model, we still want builds -to be deterministic, which implies that if you check out a commit from last -week, you should see your dependencies as they were then rather than as they are -now. - -Bazel and some other build systems address this problem by requiring a -workspacewide manifest file that lists a _cryptographic hash_ for every external -dependency in the workspace. The hash is a concise way to uniquely represent the -file without checking the entire file into source control. Whenever a new -external dependency is referenced from a workspace, that dependency’s hash is -added to the manifest, either manually or automatically. When Bazel runs a -build, it checks the actual hash of its cached dependency against the expected -hash defined in the manifest and redownloads the file only if the hash differs. - -If the artifact we download has a different hash than the one declared in the -manifest, the build will fail unless the hash in the manifest is updated. This -can be done automatically, but that change must be approved and checked into -source control before the build will accept the new dependency. This means that -there’s always a record of when a dependency was updated, and an external -dependency can’t change without a corresponding change in the workspace source. -It also means that, when checking out an older version of the source code, the -build is guaranteed to use the same dependencies that it was using at the point -when that version was checked in (or else it will fail if those dependencies are -no longer available). - -Of course, it can still be a problem if a remote server becomes unavailable or -starts serving corrupt data—this can cause all of your builds to begin failing -if you don’t have another copy of that dependency available. To avoid this -problem, we recommend that, for any nontrivial project, you mirror all of its -dependencies onto servers or services that you trust and control. Otherwise you -will always be at the mercy of a third party for your build system’s -availability, even if the checked-in hashes guarantee its security. diff --git a/8.1.1/basics/build-systems.mdx b/8.1.1/basics/build-systems.mdx deleted file mode 100644 index b3c6338..0000000 --- a/8.1.1/basics/build-systems.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: 'Why a Build System?' ---- - - - -This page discusses what build systems are, what they do, why you should use a -build system, and why compilers and build scripts aren't the best choice as your -organization starts to scale. It's intended for developers who don't have much -experience with a build system. - -## What is a build system? - -Fundamentally, all build systems have a straightforward purpose: they transform -the source code written by engineers into executable binaries that can be read -by machines. Build systems aren't just for human-authored code; they also allow -machines to create builds automatically, whether for testing or for releases to -production. In an organization with thousands of engineers, it's common that -most builds are triggered automatically rather than directly by engineers. - -### Can't I just use a compiler? 
- -The need for a build system might not be immediately obvious. Most engineers -don't use a build system while learning to code: most start by invoking tools -like `gcc` or `javac` directly from the command line, or the equivalent in an -integrated development environment (IDE). As long as all the source code is in -the same directory, a command like this works fine: - -```posix-terminal -javac *.java -``` - -This instructs the Java compiler to take every Java source file in the current -directory and turn it into a binary class file. In the simplest case, this is -all you need. - -However, as soon as code expands, the complications begin. `javac` is smart -enough to look in subdirectories of the current directory to find code to -import. But it has no way of finding code stored in _other parts_ of the -filesystem (perhaps a library shared by several projects). It also only knows -how to build Java code. Large systems often involve different pieces written in -a variety of programming languages with webs of dependencies among those pieces, -meaning no compiler for a single language can possibly build the entire system. - -Once you're dealing with code from multiple languages or multiple compilation -units, building code is no longer a one-step process. Now you must evaluate what -your code depends on and build those pieces in the proper order, possibly using -a different set of tools for each piece. If any dependencies change, you must -repeat this process to avoid depending on stale binaries. For a codebase of even -moderate size, this process quickly becomes tedious and error-prone. - -The compiler also doesn’t know anything about how to handle external -dependencies, such as third-party `JAR` files in Java. Without a build system, -you could manage this by downloading the dependency from the internet, sticking -it in a `lib` folder on the hard drive, and configuring the compiler to read -libraries from that directory. Over time, it's difficult to maintain the -updates, versions, and source of these external dependencies. - -### What about shell scripts? - -Suppose that your hobby project starts out simple enough that you can build it -using just a compiler, but you begin running into some of the problems described -previously. Maybe you still don’t think you need a build system and can automate -away the tedious parts using some simple shell scripts that take care of -building things in the correct order. This helps out for a while, but pretty -soon you start running into even more problems: - -* It becomes tedious. As your system grows more complex, you begin spending - almost as much time working on your build scripts as on real code. Debugging - shell scripts is painful, with more and more hacks being layered on top of - one another. - -* It’s slow. To make sure you weren’t accidentally relying on stale libraries, - you have your build script build every dependency in order every time you - run it. You think about adding some logic to detect which parts need to be - rebuilt, but that sounds awfully complex and error prone for a script. Or - you think about specifying which parts need to be rebuilt each time, but - then you’re back to square one. - -* Good news: it’s time for a release! Better go figure out all the arguments - you need to pass to the jar command to make your final build. And remember - how to upload it and push it out to the central repository. And build and - push the documentation updates, and send out a notification to users. 
Hmm, - maybe this calls for another script... - -* Disaster! Your hard drive crashes, and now you need to recreate your entire - system. You were smart enough to keep all of your source files in version - control, but what about those libraries you downloaded? Can you find them - all again and make sure they were the same version as when you first - downloaded them? Your scripts probably depended on particular tools being - installed in particular places—can you restore that same environment so that - the scripts work again? What about all those environment variables you set a - long time ago to get the compiler working just right and then forgot about? - -* Despite the problems, your project is successful enough that you’re able to - begin hiring more engineers. Now you realize that it doesn’t take a disaster - for the previous problems to arise—you need to go through the same painful - bootstrapping process every time a new developer joins your team. And - despite your best efforts, there are still small differences in each - person’s system. Frequently, what works on one person’s machine doesn’t work - on another’s, and each time it takes a few hours of debugging tool paths or - library versions to figure out where the difference is. - -* You decide that you need to automate your build system. In theory, this is - as simple as getting a new computer and setting it up to run your build - script every night using cron. You still need to go through the painful - setup process, but now you don’t have the benefit of a human brain being - able to detect and resolve minor problems. Now, every morning when you get - in, you see that last night’s build failed because yesterday a developer - made a change that worked on their system but didn’t work on the automated - build system. Each time it’s a simple fix, but it happens so often that you - end up spending a lot of time each day discovering and applying these simple - fixes. - -* Builds become slower and slower as the project grows. One day, while waiting - for a build to complete, you gaze mournfully at the idle desktop of your - coworker, who is on vacation, and wish there were a way to take advantage of - all that wasted computational power. - -You’ve run into a classic problem of scale. For a single developer working on at -most a couple hundred lines of code for at most a week or two (which might have -been the entire experience thus far of a junior developer who just graduated -university), a compiler is all you need. Scripts can maybe take you a little bit -farther. But as soon as you need to coordinate across multiple developers and -their machines, even a perfect build script isn’t enough because it becomes very -difficult to account for the minor differences in those machines. At this point, -this simple approach breaks down and it’s time to invest in a real build system. diff --git a/8.1.1/basics/dependencies.mdx b/8.1.1/basics/dependencies.mdx deleted file mode 100644 index 1d3bf8f..0000000 --- a/8.1.1/basics/dependencies.mdx +++ /dev/null @@ -1,301 +0,0 @@ ---- -title: 'Dependency Management' ---- - - - -In looking through the previous pages, one theme repeats over and over: managing -your own code is fairly straightforward, but managing its dependencies is much -more difficult. 
There are all sorts of dependencies: sometimes there’s a -dependency on a task (such as “push the documentation before I mark a release as -complete”), and sometimes there’s a dependency on an artifact (such as “I need -to have the latest version of the computer vision library to build my code”). -Sometimes, you have internal dependencies on another part of your codebase, and -sometimes you have external dependencies on code or data owned by another team -(either in your organization or a third party). But in any case, the idea of “I -need that before I can have this” is something that recurs repeatedly in the -design of build systems, and managing dependencies is perhaps the most -fundamental job of a build system. - -## Dealing with Modules and Dependencies - -Projects that use artifact-based build systems like Bazel are broken into a set -of modules, with modules expressing dependencies on one another via `BUILD` -files. Proper organization of these modules and dependencies can have a huge -effect on both the performance of the build system and how much work it takes to -maintain. - -## Using Fine-Grained Modules and the 1:1:1 Rule - -The first question that comes up when structuring an artifact-based build is -deciding how much functionality an individual module should encompass. In Bazel, -a _module_ is represented by a target specifying a buildable unit like a -`java_library` or a `go_binary`. At one extreme, the entire project could be -contained in a single module by putting one `BUILD` file at the root and -recursively globbing together all of that project’s source files. At the other -extreme, nearly every source file could be made into its own module, effectively -requiring each file to list in a `BUILD` file every other file it depends on. - -Most projects fall somewhere between these extremes, and the choice involves a -trade-off between performance and maintainability. Using a single module for the -entire project might mean that you never need to touch the `BUILD` file except -when adding an external dependency, but it means that the build system must -always build the entire project all at once. This means that it won’t be able to -parallelize or distribute parts of the build, nor will it be able to cache parts -that it’s already built. One-module-per-file is the opposite: the build system -has the maximum flexibility in caching and scheduling steps of the build, but -engineers need to expend more effort maintaining lists of dependencies whenever -they change which files reference which. - -Though the exact granularity varies by language (and often even within -language), Google tends to favor significantly smaller modules than one might -typically write in a task-based build system. A typical production binary at -Google often depends on tens of thousands of targets, and even a moderate-sized -team can own several hundred targets within its codebase. For languages like -Java that have a strong built-in notion of packaging, each directory usually -contains a single package, target, and `BUILD` file (Pants, another build system -based on Bazel, calls this the 1:1:1 rule). Languages with weaker packaging -conventions frequently define multiple targets per `BUILD` file. - -The benefits of smaller build targets really begin to show at scale because they -lead to faster distributed builds and a less frequent need to rebuild targets. 
-The advantages become even more compelling after testing enters the picture, as -finer-grained targets mean that the build system can be much smarter about -running only a limited subset of tests that could be affected by any given -change. Because Google believes in the systemic benefits of using smaller -targets, we’ve made some strides in mitigating the downside by investing in -tooling to automatically manage `BUILD` files to avoid burdening developers. - -Some of these tools, such as `buildifier` and `buildozer`, are available with -Bazel in the [`buildtools` -directory](https://github.com/bazelbuild/buildtools). - -## Minimizing Module Visibility - -Bazel and other build systems allow each target to specify a visibility — a -property that determines which other targets may depend on it. A private target -can only be referenced within its own `BUILD` file. A target may grant broader -visibility to the targets of an explicitly defined list of `BUILD` files, or, in -the case of public visibility, to every target in the workspace. - -As with most programming languages, it is usually best to minimize visibility as -much as possible. Generally, teams at Google will make targets public only if -those targets represent widely used libraries available to any team at Google. -Teams that require others to coordinate with them before using their code will -maintain an allowlist of customer targets as their target’s visibility. Each -team’s internal implementation targets will be restricted to only directories -owned by the team, and most `BUILD` files will have only one target that isn’t -private. - -## Managing Dependencies - -Modules need to be able to refer to one another. The downside of breaking a -codebase into fine-grained modules is that you need to manage the dependencies -among those modules (though tools can help automate this). Expressing these -dependencies usually ends up being the bulk of the content in a `BUILD` file. - -### Internal dependencies - -In a large project broken into fine-grained modules, most dependencies are -likely to be internal; that is, on another target defined and built in the same -source repository. Internal dependencies differ from external dependencies in -that they are built from source rather than downloaded as a prebuilt artifact -while running the build. This also means that there’s no notion of “version” for -internal dependencies—a target and all of its internal dependencies are always -built at the same commit/revision in the repository. One issue that should be -handled carefully with regard to internal dependencies is how to treat -transitive dependencies (Figure 1). Suppose target A depends on target B, which -depends on a common library target C. Should target A be able to use classes -defined in target C? - -[![Transitive -dependencies](/images/transitive-dependencies.png)](/images/transitive-dependencies.png) - -**Figure 1**. Transitive dependencies - -As far as the underlying tools are concerned, there’s no problem with this; both -B and C will be linked into target A when it is built, so any symbols defined in -C are known to A. Bazel allowed this for many years, but as Google grew, we -began to see problems. Suppose that B was refactored such that it no longer -needed to depend on C. If B’s dependency on C was then removed, A and any other -target that used C via a dependency on B would break. Effectively, a target’s -dependencies became part of its public contract and could never be safely -changed. 
This meant that dependencies accumulated over time and builds at Google
started to slow down.

Google eventually solved this issue by introducing a “strict transitive
dependency mode” in Bazel. In this mode, Bazel detects whether a target tries
to reference a symbol without depending on it directly and, if so, fails with
an error and a shell command that can be used to automatically insert the
dependency. Rolling this change out across Google’s entire codebase and
refactoring every one of our millions of build targets to explicitly list
their dependencies was a multiyear effort, but it was well worth it. Our
builds are now much faster given that targets have fewer unnecessary
dependencies, and engineers are empowered to remove dependencies they don’t
need without worrying about breaking targets that depend on them.

As usual, enforcing strict transitive dependencies involved a trade-off. It
made build files more verbose, as frequently used libraries now need to be
listed explicitly in many places rather than pulled in incidentally, and
engineers needed to spend more effort adding dependencies to `BUILD` files.
We’ve since developed tools that reduce this toil by automatically detecting
many missing dependencies and adding them to `BUILD` files without any
developer intervention. But even without such tools, we’ve found the
trade-off to be well worth it as the codebase scales: explicitly adding a
dependency to a `BUILD` file is a one-time cost, but dealing with implicit
transitive dependencies can cause ongoing problems as long as the build
target exists. Bazel [enforces strict transitive
dependencies](https://blog.bazel.build/2017/06/28/sjd-unused_deps.html)
on Java code by default.

### External dependencies

If a dependency isn’t internal, it must be external. External dependencies are
those on artifacts that are built and stored outside of the build system. The
dependency is imported directly from an artifact repository (typically accessed
over the internet) and used as-is rather than being built from source. One of
the biggest differences between external and internal dependencies is that
external dependencies have versions, and those versions exist independently of
the project’s source code.

### Automatic versus manual dependency management

Build systems can allow the versions of external dependencies to be managed
either manually or automatically. When managed manually, the buildfile
explicitly lists the version it wants to download from the artifact repository,
often using a [semantic version string](https://semver.org/) such
as `1.1.4`. When managed automatically, the source file specifies a range of
acceptable versions, and the build system always downloads the latest one. For
example, Gradle allows a dependency version to be declared as “1.+” to specify
that any minor or patch version of a dependency is acceptable so long as the
major version is 1.

Automatically managed dependencies can be convenient for small projects, but
they’re usually a recipe for disaster on projects of nontrivial size or that are
being worked on by more than one engineer. The problem with automatically
managed dependencies is that you have no control over when the version is
updated. There’s no way to guarantee that external parties won’t make breaking
updates (even when they claim to use semantic versioning), so a build that
worked one day might be broken the next with no easy way to detect what changed
or to roll it back to a working state.
Even if the build doesn’t break, there
can be subtle behavior or performance changes that are impossible to track down.

In contrast, because manually managed dependencies require a change in source
control, they can be easily discovered and rolled back, and it’s possible to
check out an older version of the repository to build with older dependencies.
Bazel requires that versions of all dependencies be specified manually. At even
moderate scales, the overhead of manual version management is well worth it for
the stability it provides.

### The One-Version Rule

Different versions of a library are usually represented by different artifacts,
so in theory there’s no reason that different versions of the same external
dependency couldn’t both be declared in the build system under different names.
That way, each target could choose which version of the dependency it wanted to
use. This causes a lot of problems in practice, so Google enforces a strict
[One-Version
Rule](https://opensource.google/docs/thirdparty/oneversion/) for
all third-party dependencies in our codebase.

The biggest problem with allowing multiple versions is the diamond dependency
issue. Suppose that target A depends on target B and on v1 of an external
library. If target B is later refactored to add a dependency on v2 of the same
external library, target A will break because it now depends implicitly on two
different versions of the same library. Effectively, it’s never safe to add a
new dependency from a target to any third-party library with multiple versions,
because any of that target’s users could already be depending on a different
version. Following the One-Version Rule makes this conflict impossible—if a
target adds a dependency on a third-party library, any existing dependencies
will already be on that same version, so they can happily coexist.

### Transitive external dependencies

Dealing with the transitive dependencies of an external dependency can be
particularly difficult. Many artifact repositories, such as Maven Central,
allow artifacts to specify dependencies on particular versions of other
artifacts in the repository. Build tools like Maven or Gradle often recursively
download each transitive dependency by default, meaning that adding a single
dependency in your project could potentially cause dozens of artifacts to be
downloaded in total.

This is very convenient: when adding a dependency on a new library, it would be
a big pain to have to track down each of that library’s transitive dependencies
and add them all manually. But there’s also a huge downside: because different
libraries can depend on different versions of the same third-party library, this
strategy necessarily violates the One-Version Rule and leads to the diamond
dependency problem. If your target depends on two external libraries that use
different versions of the same dependency, there’s no telling which one you’ll
get. This also means that updating an external dependency could cause seemingly
unrelated failures throughout the codebase if the new version begins pulling in
conflicting versions of some of its dependencies.

Bazel did not originally download transitive dependencies automatically;
instead, it required all transitive dependencies to be listed in a `WORKSPACE`
file, which led to a lot of pain when managing external dependencies. Bazel
has since added support for automatic transitive external dependency management
in the form of the `MODULE.bazel` file.
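For example, a minimal `MODULE.bazel` file might look like the following
sketch; the module names and versions here are illustrative, not
recommendations:

```python
# MODULE.bazel: declare only direct dependencies. Bazel resolves the
# transitive closure of modules from a registry.
module(name = "my_project", version = "1.0")

bazel_dep(name = "rules_java", version = "7.1.0")
bazel_dep(name = "guava", version = "33.0.0")
```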
See [external dependency -overview](/external/overview) for more details. - -Yet again, the choice here is one between convenience and scalability. Small -projects might prefer not having to worry about managing transitive dependencies -themselves and might be able to get away with using automatic transitive -dependencies. This strategy becomes less and less appealing as the organization -and codebase grows, and conflicts and unexpected results become more and more -frequent. At larger scales, the cost of manually managing dependencies is much -less than the cost of dealing with issues caused by automatic dependency -management. - -### Caching build results using external dependencies - -External dependencies are most often provided by third parties that release -stable versions of libraries, perhaps without providing source code. Some -organizations might also choose to make some of their own code available as -artifacts, allowing other pieces of code to depend on them as third-party rather -than internal dependencies. This can theoretically speed up builds if artifacts -are slow to build but quick to download. - -However, this also introduces a lot of overhead and complexity: someone needs to -be responsible for building each of those artifacts and uploading them to the -artifact repository, and clients need to ensure that they stay up to date with -the latest version. Debugging also becomes much more difficult because different -parts of the system will have been built from different points in the -repository, and there is no longer a consistent view of the source tree. - -A better way to solve the problem of artifacts taking a long time to build is to -use a build system that supports remote caching, as described earlier. Such a -build system saves the resulting artifacts from every build to a location that -is shared across engineers, so if a developer depends on an artifact that was -recently built by someone else, the build system automatically downloads it -instead of building it. This provides all of the performance benefits of -depending directly on artifacts while still ensuring that builds are as -consistent as if they were always built from the same source. This is the -strategy used internally by Google, and Bazel can be configured to use a remote -cache. - -### Security and reliability of external dependencies - -Depending on artifacts from third-party sources is inherently risky. There’s an -availability risk if the third-party source (such as an artifact repository) -goes down, because your entire build might grind to a halt if it’s unable to -download an external dependency. There’s also a security risk: if the -third-party system is compromised by an attacker, the attacker could replace the -referenced artifact with one of their own design, allowing them to inject -arbitrary code into your build. Both problems can be mitigated by mirroring any -artifacts you depend on onto servers you control and blocking your build system -from accessing third-party artifact repositories like Maven Central. The -trade-off is that these mirrors take effort and resources to maintain, so the -choice of whether to use them often depends on the scale of the project. The -security issue can also be completely prevented with little overhead by -requiring the hash of each third-party artifact to be specified in the source -repository, causing the build to fail if the artifact is tampered with. Another -alternative that completely sidesteps the issue is to vendor your project’s -dependencies. 
When a project vendors its dependencies, it checks them into -source control alongside the project’s source code, either as source or as -binaries. This effectively means that all of the project’s external dependencies -are converted to internal dependencies. Google uses this approach internally, -checking every third-party library referenced throughout Google into a -`third_party` directory at the root of Google’s source tree. However, this works -at Google only because Google’s source control system is custom built to handle -an extremely large monorepo, so vendoring might not be an option for all -organizations. diff --git a/8.1.1/basics/distributed-builds.mdx b/8.1.1/basics/distributed-builds.mdx deleted file mode 100644 index c32f44f..0000000 --- a/8.1.1/basics/distributed-builds.mdx +++ /dev/null @@ -1,137 +0,0 @@ ---- -title: 'Distributed Builds' ---- - - - -When you have a large codebase, chains of dependencies can become very deep. -Even simple binaries can often depend on tens of thousands of build targets. At -this scale, it’s simply impossible to complete a build in a reasonable amount -of time on a single machine: no build system can get around the fundamental -laws of physics imposed on a machine’s hardware. The only way to make this work -is with a build system that supports distributed builds wherein the units of -work being done by the system are spread across an arbitrary and scalable -number of machines. Assuming we’ve broken the system’s work into small enough -units (more on this later), this would allow us to complete any build of any -size as quickly as we’re willing to pay for. This scalability is the holy grail -we’ve been working toward by defining an artifact-based build system. - -## Remote caching - -The simplest type of distributed build is one that only leverages _remote -caching_, which is shown in Figure 1. - -[![Distributed build with remote caching](/images/distributed-build-remote-cache.png)](/images/distributed-build-remote-cache.png) - -**Figure 1**. A distributed build showing remote caching - -Every system that performs builds, including both developer workstations and -continuous integration systems, shares a reference to a common remote cache -service. This service might be a fast and local short-term storage system like -Redis or a cloud service like Google Cloud Storage. Whenever a user needs to -build an artifact, whether directly or as a dependency, the system first checks -with the remote cache to see if that artifact already exists there. If so, it -can download the artifact instead of building it. If not, the system builds the -artifact itself and uploads the result back to the cache. This means that -low-level dependencies that don’t change very often can be built once and shared -across users rather than having to be rebuilt by each user. At Google, many -artifacts are served from a cache rather than built from scratch, vastly -reducing the cost of running our build system. - -For a remote caching system to work, the build system must guarantee that builds -are completely reproducible. That is, for any build target, it must be possible -to determine the set of inputs to that target such that the same set of inputs -will produce exactly the same output on any machine. This is the only way to -ensure that the results of downloading an artifact are the same as the results -of building it oneself. 
Note that this requires that each artifact in the cache
be keyed on both its target and a hash of its inputs—that way, different
engineers could make different modifications to the same target at the same
time, and the remote cache would store all of the resulting artifacts and serve
them appropriately without conflict.

Of course, for there to be any benefit from a remote cache, downloading an
artifact needs to be faster than building it. This is not always the case,
especially if the cache server is far from the machine doing the build.
Google’s network and build system are carefully tuned to be able to quickly
share build results.

## Remote execution

Remote caching isn’t a true distributed build. If the cache is lost or if you
make a low-level change that requires everything to be rebuilt, you still need
to perform the entire build locally on your machine. The true goal is to support
remote execution, in which the actual work of doing the build can be spread
across any number of workers. Figure 2 depicts a remote execution system.

[![Remote execution system](/images/remote-execution-system.png)](/images/remote-execution-system.png)

**Figure 2**. A remote execution system

The build tool running on each user’s machine (where users are either human
engineers or automated build systems) sends requests to a central build master.
The build master breaks the requests into their component actions and schedules
the execution of those actions over a scalable pool of workers. Each worker
performs the actions asked of it with the inputs specified by the user and
writes out the resulting artifacts. These artifacts are shared across the other
machines executing actions that require them until the final output can be
produced and sent to the user.

The trickiest part of implementing such a system is managing the communication
between the workers, the master, and the user’s local machine. Workers might
depend on intermediate artifacts produced by other workers, and the final output
needs to be sent back to the user’s local machine. To do this, we can build on
top of the distributed cache described previously by having each worker write
its results to and read its dependencies from the cache. The master blocks
workers from proceeding until everything they depend on has finished, at which
point they can read their inputs from the cache. The final product is also
cached, allowing the local machine to download it. Note that we also need a
separate means of exporting the local changes in the user’s source tree so that
workers can apply those changes before building.

For this to work, all of the parts of the artifact-based build systems described
earlier need to come together. Build environments must be completely
self-describing so that we can spin up workers without human intervention. Build
processes themselves must be completely self-contained because each step might
be executed on a different machine. Outputs must be completely deterministic so
that each worker can trust the results it receives from other workers. Such
guarantees are extremely difficult for a task-based system to provide, which
makes it nigh-impossible to build a reliable remote execution system on top of
one.

## Distributed builds at Google

Since 2008, Google has been using a distributed build system that employs both
remote caching and remote execution, which is illustrated in Figure 3.
- -[![High-level build system](/images/high-level-build-system.png)](/images/high-level-build-system.png) - -**Figure 3**. Google’s distributed build system - -Google’s remote cache is called ObjFS. It consists of a backend that stores -build outputs in Bigtables distributed throughout our fleet of production -machines and a frontend FUSE daemon named objfsd that runs on each developer’s -machine. The FUSE daemon allows engineers to browse build outputs as if they -were normal files stored on the workstation, but with the file content -downloaded on-demand only for the few files that are directly requested by the -user. Serving file contents on-demand greatly reduces both network and disk -usage, and the system is able to build twice as fast compared to when we stored -all build output on the developer’s local disk. - -Google’s remote execution system is called Forge. A Forge client in Blaze -(Bazel's internal equivalent) called -the Distributor sends requests for each action to a job running in our -datacenters called the Scheduler. The Scheduler maintains a cache of action -results, allowing it to return a response immediately if the action has already -been created by any other user of the system. If not, it places the action into -a queue. A large pool of Executor jobs continually read actions from this queue, -execute them, and store the results directly in the ObjFS Bigtables. These -results are available to the executors for future actions, or to be downloaded -by the end user via objfsd. - -The end result is a system that scales to efficiently support all builds -performed at Google. And the scale of Google’s builds is truly massive: Google -runs millions of builds executing millions of test cases and producing petabytes -of build outputs from billions of lines of source code every day. Not only does -such a system let our engineers build complex codebases quickly, it also allows -us to implement a huge number of automated tools and systems that rely on our -build. diff --git a/8.1.1/basics/hermeticity.mdx b/8.1.1/basics/hermeticity.mdx deleted file mode 100644 index 282aad8..0000000 --- a/8.1.1/basics/hermeticity.mdx +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: 'Hermeticity' ---- - - - -This page covers hermeticity, the benefits of using hermetic builds, and -strategies for identifying non-hermetic behavior in your builds. - -## Overview - -When given the same input source code and product configuration, a hermetic -build system always returns the same output by isolating the build from changes -to the host system. - -In order to isolate the build, hermetic builds are insensitive to libraries and -other software installed on the local or remote host machine. They depend on -specific versions of build tools, such as compilers, and dependencies, such as -libraries. This makes the build process self-contained as it doesn't rely on -services external to the build environment. - -The two important aspects of hermeticity are: - -* **Isolation**: Hermetic build systems treat tools as source code. They - download copies of tools and manage their storage and use inside managed file - trees. This creates isolation between the host machine and local user, - including installed versions of languages. -* **Source identity**: Hermetic build systems try to ensure the sameness of - inputs. Code repositories, such as Git, identify sets of code mutations with a - unique hash code. Hermetic build systems use this hash to identify changes to - the build's input. 
## Benefits

The major benefits of hermetic builds are:

* **Speed**: The output of an action can be cached, and the action need not be
  run again unless inputs change.
* **Parallel execution**: For a given set of inputs and outputs, the build
  system can construct a graph of all actions to calculate efficient and
  parallel execution. The build system loads the rules, calculates an action
  graph, and hashes inputs to look up results in the cache.
* **Multiple builds**: You can run multiple hermetic builds on the same
  machine, each build using different tools and versions.
* **Reproducibility**: Hermetic builds are good for troubleshooting because you
  know the exact conditions that produced the build.

## Identifying non-hermeticity

If you are preparing to switch to Bazel, migration is easier if you improve
your existing builds' hermeticity in advance. Some common sources of
non-hermeticity in builds are:

* Arbitrary processing in `.mk` files
* Actions or tooling that create files non-deterministically, usually involving
  build IDs or timestamps (see the sketch after the troubleshooting steps
  below)
* System binaries that differ across hosts (such as `/usr/bin` binaries, absolute
  paths, system C++ compilers for native C++ rules autoconfiguration)
* Writing to the source tree during the build. This prevents the same source
  tree from being used for another target. The first build writes to the source
  tree, fixing the source tree for target A. Then trying to build target B may
  fail.

## Troubleshooting non-hermetic builds

Starting with local execution, issues that affect local cache hits reveal
non-hermetic actions.

* Ensure null sequential builds: If you run `make` and get a successful build,
  running the build again should not rebuild any targets. If you run each build
  step twice or on different systems and a hash of the file contents differs,
  the build is not reproducible.
* Run steps to
  [debug local cache hits](/remote/cache-remote#troubleshooting-cache-hits)
  from a variety of potential client machines to ensure that you catch any
  cases of client environment leaking into the actions.
* Execute a build within a Docker container that contains nothing but the
  checked-out source tree and an explicit list of host tools. Build breakages
  and error messages will catch implicit system dependencies.
* Discover and fix hermeticity problems using
  [remote execution rules](/remote/rules#overview).
* Enable strict [sandboxing](/docs/sandboxing)
  at the per-action level, since actions in a build can be stateful and affect
  the build or the output.
* [Workspace rules](/remote/workspace)
  allow developers to add dependencies to external workspaces, but they are
  rich enough to allow arbitrary processing to happen in the process. You can
  get a log of some potentially non-hermetic actions in Bazel workspace rules by
  adding the flag
  `--experimental_workspace_rules_log_file=PATH` to
  your Bazel command.

Note: Make your build fully hermetic when mixing remote and local execution,
using Bazel’s “dynamic strategy” functionality. Running Bazel inside the remote
Docker container will enable the build to execute the same in both environments.
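As a concrete sketch of the timestamp problem called out above, the first
`genrule` below is non-hermetic while the second is hermetic (target and file
names are illustrative):

```python
# Non-hermetic: the output embeds the time of the build, so every run
# produces a different file and the action cannot be cached or reproduced.
genrule(
    name = "build_info_stamped",
    outs = ["build_info_stamped.txt"],
    cmd = "echo \"built at $$(date)\" > $@",
)

# Hermetic: the output depends only on a declared input, so identical
# inputs always produce identical output that caches cleanly.
genrule(
    name = "build_info",
    srcs = ["version.txt"],
    outs = ["build_info.txt"],
    cmd = "cp $(location version.txt) $@",
)
```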
- -## Hermeticity with Bazel - -For more information about how other projects have had success using hermetic -builds with Bazel, see these BazelCon talks: - -* [Building Real-time Systems with Bazel](https://www.youtube.com/watch?v=t_3bckhV_YI) (SpaceX) -* [Bazel Remote Execution and Remote Caching](https://www.youtube.com/watch?v=_bPyEbAyC0s) (Uber and TwoSigma) -* [Faster Builds With Remote Execution and Caching](https://www.youtube.com/watch?v=MyuJRUwT5LI) -* [Fusing Bazel: Faster Incremental Builds](https://www.youtube.com/watch?v=rQd9Zd1ONOw) -* [Remote Execution vs Local Execution](https://www.youtube.com/watch?v=C8wHmIln--g) -* [Improving the Usability of Remote Caching](https://www.youtube.com/watch?v=u5m7V3ZRHLA) (IBM) -* [Building Self Driving Cars with Bazel](https://www.youtube.com/watch?v=Gh4SJuYUoQI&list=PLxNYxgaZ8Rsf-7g43Z8LyXct9ax6egdSj&index=4&t=0s) (BMW) -* [Building Self Driving Cars with Bazel + Q&A](https://www.youtube.com/watch?v=fjfFe98LTm8&list=PLxNYxgaZ8Rsf-7g43Z8LyXct9ax6egdSj&index=29) (GM Cruise) diff --git a/8.1.1/basics/index.mdx b/8.1.1/basics/index.mdx deleted file mode 100644 index f3c833f..0000000 --- a/8.1.1/basics/index.mdx +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: 'Build Basics' ---- - - - -A build system is one of the most important parts of an engineering organization -because each developer interacts with it potentially dozens or hundreds of times -per day. A fully featured build system is necessary to enable developer -productivity as an organization scales. For individual developers, it's -straightforward to just compile your code and so a build system might seem -excessive. But at a larger scale, having a build system helps with managing -shared dependencies, such as relying on another part of the code base, or an -external resource, such as a library. Build systems help to make sure that you -have everything you need to build your code before it starts building. Build -systems also increase velocity when they're set up to help engineers share -resources and results. - -This section covers some history and basics of building and build systems, -including design decisions that went into making Bazel. If you're -familiar with artifact-based build systems, such as Bazel, Buck, and Pants, you -can skip this section, but it's a helpful overview to understand why -artifact-based build systems are excellent at enabling scale. - -Note: Much of this section's content comes from the _Build Systems and -Build Philosophy_ chapter of the -[_Software Engineering at Google_ book](https://abseil.io/resources/swe-book/html/ch18.html). -Thank you to the original author, Erik Kuefler, for allowing its reuse and -modification here! - -* **[Why a Build System?](/basics/build-systems)** - - If you haven't used a build system before, start here. This page covers why - you should use a build system, and why compilers and build scripts aren't - the best choice once your organization starts to scale beyond a few - developers. - -* **[Task-Based Build Systems](/basics/task-based-builds)** - - This page discusses task-based build systems (such as Make, Maven, and - Gradle) and some of their challenges. - -* **[Artifact-Based Build Systems](/basics/artifact-based-builds)** - - This page discusses artifact-based build systems in response to the pain - points of task-based build systems. - -* **[Distributed Builds](/basics/distributed-builds)** - - This page covers distributed builds, or builds that are executed outside of - your local machine. 
  This requires more robust infrastructure to share
  resources and build results (and is where the true wizardry happens!)

* **[Dependency Management](/basics/dependencies)**

  This page covers some complications of dependencies at a large scale and
  strategies to counteract those complications.
diff --git a/8.1.1/basics/task-based-builds.mdx b/8.1.1/basics/task-based-builds.mdx
deleted file mode 100644
index 9dd3f8c..0000000
--- a/8.1.1/basics/task-based-builds.mdx
+++ /dev/null
@@ -1,216 +0,0 @@
---
title: 'Task-Based Build Systems'
---

This page covers task-based build systems, how they work, and some of the
complications that can occur with task-based systems. After shell scripts,
task-based build systems are the next logical evolution of building.

## Understanding task-based build systems

In a task-based build system, the fundamental unit of work is the task. Each
task is a script that can execute any sort of logic, and tasks specify other
tasks as dependencies that must run before them. Most major build systems in use
today, such as Ant, Maven, Gradle, Grunt, and Rake, are task based. Instead of
shell scripts, most modern build systems require engineers to create build files
that describe how to perform the build.

Take this example from the
[Ant manual](https://ant.apache.org/manual/using.html):

```xml
<project name="MyProject" default="dist" basedir=".">
   <description>
     simple example build file
   </description>
   <!-- set global properties for this build -->
   <property name="src" location="src"/>
   <property name="build" location="build"/>
   <property name="dist" location="dist"/>

   <target name="init">
     <!-- Create the time stamp -->
     <tstamp/>
     <!-- Create the build directory structure used by compile -->
     <mkdir dir="${build}"/>
   </target>
   <target name="compile" depends="init"
       description="compile the source">
     <!-- Compile the Java code from ${src} into ${build} -->
     <javac srcdir="${src}" destdir="${build}"/>
   </target>
   <target name="dist" depends="compile"
       description="generate the distribution">
     <!-- Create the distribution directory -->
     <mkdir dir="${dist}/lib"/>
     <!-- Put everything in ${build} into the MyProject-${DSTAMP}.jar file -->
     <jar jarfile="${dist}/lib/MyProject-${DSTAMP}.jar" basedir="${build}"/>
   </target>
   <target name="clean"
       description="clean up">
     <!-- Delete the ${build} and ${dist} directory trees -->
     <delete dir="${build}"/>
     <delete dir="${dist}"/>
   </target>
</project>
```

The buildfile is written in XML and defines some simple metadata about the build
along with a list of tasks (the `<target>` tags in the XML). (Ant uses the word
_target_ to represent a _task_, and it uses the word _task_ to refer to
_commands_.) Each task executes a list of possible commands defined by Ant,
which here include creating and deleting directories, running `javac`, and
creating a JAR file. This set of commands can be extended by user-provided
plug-ins to cover any sort of logic. Each task can also define the tasks it
depends on via the `depends` attribute. These dependencies form an acyclic
graph, as seen in Figure 1.

[![Acyclic graph showing dependencies](/images/task-dependencies.png)](/images/task-dependencies.png)

Figure 1. An acyclic graph showing dependencies

Users perform builds by providing tasks to Ant’s command-line tool. For example,
when a user types `ant dist`, Ant takes the following steps:

1. Loads a file named `build.xml` in the current directory and parses it to
   create the graph structure shown in Figure 1.
1. Looks for the task named `dist` that was provided on the command line and
   discovers that it has a dependency on the task named `compile`.
1. Looks for the task named `compile` and discovers that it has a dependency on
   the task named `init`.
1. Looks for the task named `init` and discovers that it has no dependencies.
1. Executes the commands defined in the `init` task.
1. Executes the commands defined in the `compile` task given that all of that
   task’s dependencies have been run.
1. Executes the commands defined in the `dist` task given that all of that
   task’s dependencies have been run.

In the end, the code executed by Ant when running the `dist` task is equivalent
to the following shell script:

```posix-terminal
./createTimestamp.sh

mkdir build/

javac src/* -d build/

mkdir -p dist/lib/

jar cf dist/lib/MyProject-$(date --iso-8601).jar build/*
```
But we’ve already gained a lot by doing this. We can -create new buildfiles in other directories and link them together. We can easily -add new tasks that depend on existing tasks in arbitrary and complex ways. We -need only pass the name of a single task to the `ant` command-line tool, and it -determines everything that needs to be run. - -Ant is an old piece of software, originally released in 2000. Other tools like -Maven and Gradle have improved on Ant in the intervening years and essentially -replaced it by adding features like automatic management of external -dependencies and a cleaner syntax without any XML. But the nature of these newer -systems remains the same: they allow engineers to write build scripts in a -principled and modular way as tasks and provide tools for executing those tasks -and managing dependencies among them. - -## The dark side of task-based build systems - -Because these tools essentially let engineers define any script as a task, they -are extremely powerful, allowing you to do pretty much anything you can imagine -with them. But that power comes with drawbacks, and task-based build systems can -become difficult to work with as their build scripts grow more complex. The -problem with such systems is that they actually end up giving _too much power to -engineers and not enough power to the system_. Because the system has no idea -what the scripts are doing, performance suffers, as it must be very conservative -in how it schedules and executes build steps. And there’s no way for the system -to confirm that each script is doing what it should, so scripts tend to grow in -complexity and end up being another thing that needs debugging. - -### Difficulty of parallelizing build steps - -Modern development workstations are quite powerful, with multiple cores that are -capable of executing several build steps in parallel. But task-based systems are -often unable to parallelize task execution even when it seems like they should -be able to. Suppose that task A depends on tasks B and C. Because tasks B and C -have no dependency on each other, is it safe to run them at the same time so -that the system can more quickly get to task A? Maybe, if they don’t touch any -of the same resources. But maybe not—perhaps both use the same file to track -their statuses and running them at the same time causes a conflict. There’s no -way in general for the system to know, so either it has to risk these conflicts -(leading to rare but very difficult-to-debug build problems), or it has to -restrict the entire build to running on a single thread in a single process. -This can be a huge waste of a powerful developer machine, and it completely -rules out the possibility of distributing the build across multiple machines. - -### Difficulty performing incremental builds - -A good build system allows engineers to perform reliable incremental builds such -that a small change doesn’t require the entire codebase to be rebuilt from -scratch. This is especially important if the build system is slow and unable to -parallelize build steps for the aforementioned reasons. But unfortunately, -task-based build systems struggle here, too. Because tasks can do anything, -there’s no way in general to check whether they’ve already been done. Many tasks -simply take a set of source files and run a compiler to create a set of -binaries; thus, they don’t need to be rerun if the underlying source files -haven’t changed. 
But without additional information, the system can’t say this -for sure—maybe the task downloads a file that could have changed, or maybe it -writes a timestamp that could be different on each run. To guarantee -correctness, the system typically must rerun every task during each build. Some -build systems try to enable incremental builds by letting engineers specify the -conditions under which a task needs to be rerun. Sometimes this is feasible, but -often it’s a much trickier problem than it appears. For example, in languages -like C++ that allow files to be included directly by other files, it’s -impossible to determine the entire set of files that must be watched for changes -without parsing the input sources. Engineers often end up taking shortcuts, and -these shortcuts can lead to rare and frustrating problems where a task result is -reused even when it shouldn’t be. When this happens frequently, engineers get -into the habit of running clean before every build to get a fresh state, -completely defeating the purpose of having an incremental build in the first -place. Figuring out when a task needs to be rerun is surprisingly subtle, and is -a job better handled by machines than humans. - -### Difficulty maintaining and debugging scripts - -Finally, the build scripts imposed by task-based build systems are often just -difficult to work with. Though they often receive less scrutiny, build scripts -are code just like the system being built, and are easy places for bugs to hide. -Here are some examples of bugs that are very common when working with a -task-based build system: - -* Task A depends on task B to produce a particular file as output. The owner - of task B doesn’t realize that other tasks rely on it, so they change it to - produce output in a different location. This can’t be detected until someone - tries to run task A and finds that it fails. -* Task A depends on task B, which depends on task C, which is producing a - particular file as output that’s needed by task A. The owner of task B - decides that it doesn’t need to depend on task C any more, which causes task - A to fail even though task B doesn’t care about task C at all! -* The developer of a new task accidentally makes an assumption about the - machine running the task, such as the location of a tool or the value of - particular environment variables. The task works on their machine, but fails - whenever another developer tries it. -* A task contains a nondeterministic component, such as downloading a file - from the internet or adding a timestamp to a build. Now, people get - potentially different results each time they run the build, meaning that - engineers won’t always be able to reproduce and fix one another’s failures - or failures that occur on an automated build system. -* Tasks with multiple dependencies can create race conditions. If task A - depends on both task B and task C, and task B and C both modify the same - file, task A gets a different result depending on which one of tasks B and C - finishes first. - -There’s no general-purpose way to solve these performance, correctness, or -maintainability problems within the task-based framework laid out here. So long -as engineers can write arbitrary code that runs during the build, the system -can’t have enough information to always be able to run builds quickly and -correctly. 
To solve the problem, we need to take some power out of the hands of
engineers and put it back in the hands of the system, and to reconceptualize
the role of the system not as running tasks, but as producing artifacts.

This approach led to the creation of artifact-based build systems, like Blaze
and Bazel.
diff --git a/8.1.1/brand/index.mdx b/8.1.1/brand/index.mdx
deleted file mode 100644
index 2a21cd4..0000000
--- a/8.1.1/brand/index.mdx
+++ /dev/null
@@ -1,88 +0,0 @@
---
title: 'Bazel Brand Guidelines'
---

The Bazel trademark and logo ("Bazel Trademarks") are trademarks of Google, and
are treated separately from the copyright or patent license grants contained in
the Apache-licensed Bazel repositories on GitHub. Any use of the Bazel
Trademarks other than those permitted in these guidelines must be approved in
advance.

## Purpose of the Brand Guidelines

These guidelines exist to ensure that the Bazel project can share its technology
under open source licenses while making sure that the "Bazel" brand is protected
as a meaningful source identifier in a way that's consistent with trademark law.
By adhering to these guidelines, you help to promote the freedom to use and
develop high-quality Bazel technology.

## Acceptable Uses

Given the open nature of Bazel, you may use the Bazel trademark to refer to the
project without prior written permission. Examples of these approved references
include the following:

* To refer to the Bazel Project itself;
* To link to bazel.build;
* To refer to unmodified source code or other files shared by the Bazel
  repositories on GitHub;
* In blog posts, news articles, or educational materials about Bazel;
* To accurately identify that your design or implementation is based on, is
  for use with, or is compatible with Bazel technology.

Examples:

* \[Your Product\] for Bazel
* \[Your Product\] is compatible with Bazel
* \[XYZ\] Conference for Bazel Users

## General Guidelines

* The Bazel name may never be used or registered in a manner that would cause
  confusion as to Google's sponsorship, affiliation, or endorsement.
* Don't use the Bazel name as part of your company name, product name, domain
  name, or social media profile.
* Other than as permitted by these guidelines, the Bazel name should not be
  combined with other trademarks, terms, or source identifiers.
* Don't remove, distort, or alter any element of the Bazel Trademarks. That
  includes modifying the Bazel Trademark, for example, through hyphenation,
  combination, or abbreviation. Do not shorten, abbreviate, or create acronyms
  out of the Bazel Trademarks.
* Don't display the word Bazel using any different stylization, color, or font
  from the surrounding text.
* Don't use the term Bazel as a verb or use it in possessive form.
* Don't use the Bazel logo on any website, product UI, or promotional
  materials without prior written permission from
  [product@bazel.build](mailto:product@bazel.build).

## Usage for Events and Community Groups

The Bazel word mark may be used referentially in events, community groups, or
other gatherings related to the Bazel build system, but it may not be used in a
manner that implies official status or endorsement.

Examples of appropriate naming conventions are:

* \[XYZ\] Bazel User Group
* Bazel Community Day at \[XYZ\]
* \[XYZ\] Conference for Bazel Users

where \[XYZ\] represents the location and optionally other wordings.
- -Any naming convention that may imply official status or endorsement requires -review for approval from [product@bazel.build](mailto:product@bazel.build). - -Examples of naming conventions that require prior written permission: - -* BazelCon -* Bazel Conference - -## Contact Us - -Please do not hesitate to contact us at -[product@bazel.build](mailto:product@bazel.build) if you are unsure whether your -intended use of the Bazel Trademarks is in compliance with these guidelines, or -to ask for permission to use the Bazel Trademarks, clearly describing the -intended usage and duration. diff --git a/8.1.1/build/share-variables.mdx b/8.1.1/build/share-variables.mdx deleted file mode 100644 index b248034..0000000 --- a/8.1.1/build/share-variables.mdx +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: 'Sharing Variables' ---- - - - -`BUILD` files are intended to be simple and declarative. They will typically -consist of a series of target declarations. As your code base and your `BUILD` -files get larger, you will probably notice some duplication, such as: - -``` python -cc_library( - name = "foo", - copts = ["-DVERSION=5"], - srcs = ["foo.cc"], -) - -cc_library( - name = "bar", - copts = ["-DVERSION=5"], - srcs = ["bar.cc"], - deps = [":foo"], -) -``` - -Code duplication in `BUILD` files is usually fine. This can make the file more -readable: each declaration can be read and understood without any context. This -is important, not only for humans, but also for external tools. For example, a -tool might be able to read and update `BUILD` files to add missing dependencies. -Code refactoring and code reuse might prevent this kind of automated -modification. - -If it is useful to share values (for example, if values must be kept in sync), -you can introduce a variable: - -``` python -COPTS = ["-DVERSION=5"] - -cc_library( - name = "foo", - copts = COPTS, - srcs = ["foo.cc"], -) - -cc_library( - name = "bar", - copts = COPTS, - srcs = ["bar.cc"], - deps = [":foo"], -) -``` - -Multiple declarations now use the value `COPTS`. By convention, use uppercase -letters to name global constants. - -## Sharing variables across multiple BUILD files - -If you need to share a value across multiple `BUILD` files, you have to put it -in a `.bzl` file. `.bzl` files contain definitions (variables and functions) -that can be used in `BUILD` files. - -In `path/to/variables.bzl`, write: - -``` python -COPTS = ["-DVERSION=5"] -``` - -Then, you can update your `BUILD` files to access the variable: - -``` python -load("//path/to:variables.bzl", "COPTS") - -cc_library( - name = "foo", - copts = COPTS, - srcs = ["foo.cc"], -) - -cc_library( - name = "bar", - copts = COPTS, - srcs = ["bar.cc"], - deps = [":foo"], -) -``` diff --git a/8.1.1/build/style-guide.mdx b/8.1.1/build/style-guide.mdx deleted file mode 100644 index 19a5216..0000000 --- a/8.1.1/build/style-guide.mdx +++ /dev/null @@ -1,232 +0,0 @@ ---- -title: 'BUILD Style Guide' ---- - - - -`BUILD` file formatting follows the same approach as Go, where a standardized -tool takes care of most formatting issues. -[Buildifier](https://github.com/bazelbuild/buildifier) is a tool that parses and -emits the source code in a standard style. Every `BUILD` file is therefore -formatted in the same automated way, which makes formatting a non-issue during -code reviews. It also makes it easier for tools to understand, edit, and -generate `BUILD` files. - -`BUILD` file formatting must match the output of `buildifier`. 
-
-## Formatting example
-
-```python
-# Test code implementing the Foo controller.
-package(default_testonly = True)
-
-py_test(
-    name = "foo_test",
-    srcs = glob(["*.py"]),
-    data = [
-        "//data/production/foo:startfoo",
-        "//foo",
-        "//third_party/java/jdk:jdk-k8",
-    ],
-    flaky = True,
-    deps = [
-        ":check_bar_lib",
-        ":foo_data_check",
-        ":pick_foo_port",
-        "//pyglib",
-        "//testing/pybase",
-    ],
-)
-```
-
-## File structure
-
-**Recommendation**: Use the following order (every element is optional):
-
-* Package description (a comment)
-
-* All `load()` statements
-
-* The `package()` function.
-
-* Calls to rules and macros
-
-Buildifier makes a distinction between a standalone comment and a comment
-attached to an element. If a comment is not attached to a specific element, use
-an empty line after it. The distinction is important when doing automated
-changes (for example, to keep or remove a comment when deleting a rule).
-
-```python
-# Standalone comment (such as to make a section in a file)
-
-# Comment for the cc_library below
-cc_library(name = "cc")
-```
-
-## References to targets in the current package
-
-Files should be referred to by their paths relative to the package directory
-(without ever using up-references, such as `..`). Generated files should be
-prefixed with "`:`" to indicate that they are not sources. Source files
-should not be prefixed with `:`. Rules should be prefixed with `:`. For
-example, assuming `x.cc` is a source file:
-
-```python
-cc_library(
-    name = "lib",
-    srcs = ["x.cc"],
-    hdrs = [":gen_header"],
-)
-
-genrule(
-    name = "gen_header",
-    srcs = [],
-    outs = ["x.h"],
-    cmd = "echo 'int x();' > $@",
-)
-```
-
-## Target naming
-
-Target names should be descriptive. If a target contains one source file,
-the target should generally have a name derived from that source (for example, a
-`cc_library` for `chat.cc` could be named `chat`, or a `java_library` for
-`DirectMessage.java` could be named `direct_message`).
-
-The eponymous target for a package (the target with the same name as the
-containing directory) should provide the functionality described by the
-directory name. If there is no such target, do not create an eponymous
-target.
-
-Prefer using the short name when referring to an eponymous target (`//x`
-instead of `//x:x`). If you are in the same package, prefer the local
-reference (`:x` instead of `//x`).
-
-Avoid using "reserved" target names, which have special meaning. This includes
-`all`, `__pkg__`, and `__subpackages__`; these names have special
-semantics and can cause confusion and unexpected behaviors when they are used.
-
-In the absence of a prevailing team convention, these are some non-binding
-recommendations that are broadly used at Google:
-
-* In general, use ["snake_case"](https://en.wikipedia.org/wiki/Snake_case)
-    * For a `java_library` with one `src` this means using a name that is not
-      the same as the filename without the extension
-    * For Java `*_binary` and `*_test` rules, use
-      ["Upper CamelCase"](https://en.wikipedia.org/wiki/Camel_case).
-      This allows for the target name to match one of the `src`s. For
-      `java_test`, this makes it possible for the `test_class` attribute to be
-      inferred from the name of the target.
-* If there are multiple variants of a particular target then add a suffix to
-  disambiguate (such as `:foo_dev`, `:foo_prod` or `:bar_x86`, `:bar_x64`)
-* Suffix test targets with `_test`, `_unittest`, `Test`, or `Tests`
-* Avoid meaningless suffixes like `_lib` or `_library` (unless necessary to
-  avoid conflicts between a `_library` target and its corresponding `_binary`)
-* For proto-related targets:
-    * `proto_library` targets should have names ending in `_proto`
-    * Language-specific `*_proto_library` rules should match the underlying
-      proto but replace `_proto` with a language-specific suffix such as:
-        * **`cc_proto_library`**: `_cc_proto`
-        * **`java_proto_library`**: `_java_proto`
-        * **`java_lite_proto_library`**: `_java_proto_lite`
-
-## Visibility
-
-Visibility should be scoped as tightly as possible, while still allowing access
-by tests and reverse dependencies. Use `__pkg__` and `__subpackages__` as
-appropriate.
-
-Avoid setting package `default_visibility` to `//visibility:public`.
-`//visibility:public` should be individually set only for targets in the
-project's public API. These could be libraries that are designed to be depended
-on by external projects or binaries that could be used by an external project's
-build process.
-
-## Dependencies
-
-Dependencies should be restricted to direct dependencies (dependencies
-needed by the sources listed in the rule). Do not list transitive dependencies.
-
-Package-local dependencies should be listed first and referred to in a way
-compatible with the
-[References to targets in the current package](#targets-current-package)
-section above (not by their absolute package name).
-
-Prefer to list dependencies directly, as a single list. Putting the "common"
-dependencies of several targets into a variable reduces maintainability, makes
-it impossible for tools to change the dependencies of a target, and can lead to
-unused dependencies.
-
-## Globs
-
-Indicate "no targets" with `[]`. Do not use a glob that matches nothing: it
-is more error-prone and less obvious than an empty list.
-
-### Recursive
-
-Do not use recursive globs to match source files (for example,
-`glob(["**/*.java"])`).
-
-Recursive globs make `BUILD` files difficult to reason about because they skip
-subdirectories containing `BUILD` files.
-
-Recursive globs are generally less efficient than having a `BUILD` file per
-directory with a dependency graph defined between them, as this enables better
-remote caching and parallelism.
-
-It is good practice to author a `BUILD` file in each directory and define a
-dependency graph between them.
-
-### Non-recursive
-
-Non-recursive globs are generally acceptable.
-
-## Other conventions
-
- * Use uppercase and underscores to declare constants (such as `GLOBAL_CONSTANT`),
-   use lowercase and underscores to declare variables (such as `my_variable`).
-
- * Labels should never be split, even if they are longer than 79 characters.
-   Labels should be string literals whenever possible. *Rationale*: It makes
-   find and replace easy. It also improves readability.
-
- * The value of the name attribute should be a literal constant string (except
-   in macros). *Rationale*: External tools use the name attribute to refer to a
-   rule. They need to find rules without having to interpret code.
-
- * When setting boolean-type attributes, use boolean values, not integer values.
-   For legacy reasons, rules still convert integers to booleans as needed,
-   but this is discouraged. *Rationale*: `flaky = 1` could be misread as saying
-   "deflake this target by rerunning it once". `flaky = True` unambiguously says
-   "this test is flaky". These conventions are pulled together in the sketch
-   after this list.
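-
-As a minimal sketch, assuming a hypothetical `parser_test.py` in the current
-package, the naming, constant, and boolean conventions above might combine as:
-
-```python
-# Constant: uppercase with underscores.
-PARSER_TEST_SRCS = ["parser_test.py"]
-
-py_test(
-    # Literal, snake_case name.
-    name = "parser_test",
-    srcs = PARSER_TEST_SRCS,
-    # Boolean attribute: True, not 1.
-    flaky = True,
-)
-```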
-
-## Differences with Python style guide
-
-Although compatibility with the
-[Python style guide](https://www.python.org/dev/peps/pep-0008/)
-is a goal, there are a few differences:
-
- * No strict line length limit. Long comments and long strings are often split
-   to 79 columns, but it is not required. It should not be enforced in code
-   reviews or presubmit scripts. *Rationale*: Labels can be long and exceed this
-   limit. It is common for `BUILD` files to be generated or edited by tools,
-   which does not go well with a line length limit.
-
- * Implicit string concatenation is not supported. Use the `+` operator.
-   *Rationale*: `BUILD` files contain many string lists. It is easy to forget a
-   comma, which leads to a completely different result. This has created many bugs
-   in the past. [See also this discussion.](https://lwn.net/Articles/551438/)
-
- * Use spaces around the `=` sign for keyword arguments in rules. *Rationale*:
-   Named arguments are much more frequent than in Python and are always on a
-   separate line. Spaces improve readability. This convention has been around
-   for a long time, and it is not worth modifying all existing `BUILD` files.
-
- * By default, use double quotation marks for strings. *Rationale*: This is not
-   specified in the Python style guide, but it recommends consistency. So we
-   decided to use only double-quoted strings. Many languages use double-quotes
-   for string literals.
-
- * Use a single blank line between two top-level definitions. *Rationale*: The
-   structure of a `BUILD` file is not like a typical Python file. It has only
-   top-level statements. Using a single blank line makes `BUILD` files shorter.
diff --git a/8.1.1/community/recommended-rules.mdx b/8.1.1/community/recommended-rules.mdx
deleted file mode 100644
index 86daa05..0000000
--- a/8.1.1/community/recommended-rules.mdx
+++ /dev/null
@@ -1,54 +0,0 @@
----
-title: 'Recommended Rules'
----
-
-
-
-In the documentation, we provide a list of
-[recommended rules](/rules).
-
-This is a set of high-quality rules, which will provide a good experience to our
-users. We make a distinction between the supported rules and the hundreds of
-rules you can find on the Internet.
-
-## Nomination
-
-If a ruleset meets the requirements below, a rule maintainer can nominate it
-to be part of the _recommended rules_ by filing a
-[GitHub issue](https://github.com/bazelbuild/bazel/).
-
-After a review by the [Bazel core team](/contribute/policy), it
-will be recommended on the Bazel website.
-
-## Requirements for the rule maintainers
-
-* The ruleset provides an important feature, useful to a large number of Bazel
-  users (for example, support for a widely popular language).
-* The ruleset is well maintained. There must be at least two active maintainers.
-* The ruleset is well documented, with examples, and easy to use.
-* The ruleset follows best practices and is performant (see
-  [the performance guide](/rules/performance)).
-* The ruleset has sufficient test coverage.
-* The ruleset is tested on
-  [BuildKite](https://github.com/bazelbuild/continuous-integration/blob/master/buildkite/README.md)
-  with the latest version of Bazel. Tests should always pass (when used as a
-  presubmit check).
-* The ruleset is also tested with the upcoming incompatible changes. Breakages
-  should be fixed within two weeks. Migration issues should be reported to the
-  Bazel team quickly.
-
-## Requirements for Bazel developers
-
-* Recommended rules are frequently tested with Bazel at head (at least once a
-  day).
-* No change in Bazel may break a recommended rule (with the default set of
-  flags). If it happens, the change should be fixed or rolled back.
-
-## Demotion
-
-If there is a concern that a particular ruleset is no longer meeting the
-requirements, a [GitHub issue](https://github.com/bazelbuild/bazel/) should be
-filed.
-
-Rule maintainers will be contacted and need to respond within 2 weeks. Based on
-the outcome, the Bazel core team might decide to demote the ruleset.
diff --git a/8.1.1/community/remote-execution-services.mdx b/8.1.1/community/remote-execution-services.mdx
deleted file mode 100644
index bede2b8..0000000
--- a/8.1.1/community/remote-execution-services.mdx
+++ /dev/null
@@ -1,28 +0,0 @@
----
-title: 'Remote Execution Services'
----
-
-
-
-Use the following services to run Bazel with remote execution:
-
-* Manual
-
-  * Use the [gRPC protocol](https://github.com/bazelbuild/remote-apis)
-    directly to create your own remote execution service.
-
-* Self-service
-
-  * [Buildbarn](https://github.com/buildbarn)
-  * [Buildfarm](https://github.com/bazelbuild/bazel-buildfarm)
-  * [BuildGrid](https://gitlab.com/BuildGrid/buildgrid)
-  * [NativeLink](https://github.com/TraceMachina/nativelink)
-
-* Commercial
-
-  * [Aspect Build](https://www.aspect.build/) – Self-hosted remote cache and remote execution services.
-  * [Bitrise](https://bitrise.io/why/features/mobile-build-caching-for-better-build-test-performance) - Providing the world's leading mobile-first CI/CD and remote build caching platform.
-  * [BuildBuddy](https://www.buildbuddy.io) - Remote build execution,
-    caching, and results UI.
-  * [EngFlow Remote Execution](https://www.engflow.com) - Remote execution
-    and remote caching service with Build and Test UI. Can be self-hosted or hosted.
diff --git a/8.1.1/community/roadmaps-starlark.mdx b/8.1.1/community/roadmaps-starlark.mdx
deleted file mode 100644
index 5ce476d..0000000
--- a/8.1.1/community/roadmaps-starlark.mdx
+++ /dev/null
@@ -1,91 +0,0 @@
----
-title: 'Starlark Roadmap'
----
-
-
-
-*Last verified: 2020-04-21*
-([update history](https://github.com/bazelbuild/bazel-website/commits/master/roadmaps/starlark.md))
-
-*Point of contact:* [laurentlb](https://github.com/laurentlb)
-
-## Goal
-
-Our goal is to make Bazel more extensible. Users should be able to easily
-implement their own rules, and support new languages and tools. We want to
-improve the experience of writing and maintaining those rules.
-
-We focus on two areas:
-
-* Make the language and API simple, yet powerful.
-* Provide better tooling for reading, writing, updating, debugging, and testing the code.
-
-
-## Q2 2020
-
-Build health and Best practices:
-
-* P0. Discourage macros without a name, and ensure the name is a unique
-  string literal. This work is focused on the Google codebase, but may impact
-  tooling available publicly.
-* P0. Make Buildozer commands reliable with regard to selects and variables.
-* P1. Make Buildifier remove duplicates in lists that we don’t sort because of
-  comments.
-* P1. Update Buildifier linter to recommend inlining trivial expressions.
-* P2. Study use cases for native.existing_rule[s]() and propose alternatives.
-* P2. Study use cases for the prelude file and propose alternatives.
-
-Performance:
-
-* P1. Optimize the Starlark interpreter using flat environments and bytecode
-  compilation.
-
-Technical debt reduction:
-
-* P0. Add ability to port native symbols to Starlark underneath @bazel_tools.
-* P1. Delete obsolete flags (some of them are still used at Google, so we need to
-  clean the codebase first): `incompatible_always_check_depset_elements`,
-  `incompatible_disable_deprecated_attr_params`,
-  `incompatible_no_support_tools_in_action_inputs`, `incompatible_new_actions_api`.
-* P1. Ensure the following flags can be flipped in Bazel 4.0:
-  `incompatible_disable_depset_items`, `incompatible_no_implicit_file_export`,
-  `incompatible_run_shell_command_string`,
-  `incompatible_restrict_string_escapes`.
-* P1. Finish lib.syntax work (API cleanup, separation from Bazel).
-* P2. Reduce by 50% the build+test latency of a trivial edit to Bazel’s Java packages.
-
-Community:
-
-* `rules_python` is active and well-maintained by the community.
-* Continuous support for rules_jvm_external (no outstanding pull requests, issue
-  triage, making releases).
-* Maintain Bazel documentation infrastructure: centralize and canonicalize CSS
-  styles across bazel-website, bazel-blog, docs.
-* Bazel docs: add CI tests for e2e doc site build to prevent regressions.
-
-## Q1 2020
-
-Build health and Best practices:
-
-* Allow targets to track their macro call stack, for exporting via `bazel query`
-* Implement `--incompatible_no_implicit_file_export`
-* Remove the deprecated depset APIs (#5817, #10313, #9017).
-* Add a cross file analyzer in Buildifier, implement a check for deprecated
-  functions.
-
-Performance:
-
-* Make Bazel’s own Java-based tests 2x faster.
-* Implement a Starlark CPU profiler.
-
-Technical debt reduction:
-
-* Remove 8 incompatible flags (after flipping them).
-* Finish lib.syntax cleanup work (break dependencies).
-* Starlark optimization: flat environment, bytecode compilation
-* Delete all serialization from analysis phase, if possible
-* Make a plan for simplifying/optimizing lib.packages
-
-Community:
-
-* Publish a Glossary containing definitions for all the Bazel-specific terms
diff --git a/8.1.1/community/sig.mdx b/8.1.1/community/sig.mdx
deleted file mode 100644
index ae5f918..0000000
--- a/8.1.1/community/sig.mdx
+++ /dev/null
@@ -1,158 +0,0 @@
----
-title: 'Bazel Special Interest Groups'
----
-
-
-
-Bazel hosts Special Interest Groups (SIGs) to focus collaboration on particular
-areas and to support communication and coordination between [Bazel owners,
-maintainers, and contributors](/contribute/policy). This policy
-applies to [`bazelbuild`](http://github.com/bazelbuild).
-
-SIGs do their work in public. The ideal scope for a SIG covers a well-defined
-domain, where the majority of participation is from the community. SIGs may
-focus on community maintained repositories in `bazelbuild` (such as language
-rules) or focus on areas of code in the Bazel repository (such as Remote
-Execution).
-
-While not all SIGs will have the same level of energy, breadth of scope, or
-governance models, there should be sufficient evidence that there are community
-members willing to engage and contribute should the interest group be
-established. Before joining, review the group's work, and then get in touch
-with the SIG leader. Membership policies vary on a per-SIG basis.
-
-See the complete list of
-[Bazel SIGs](https://github.com/bazelbuild/community/tree/main/sigs).
-
-### Non-goals: What a SIG is not
-
-SIGs are intended to facilitate collaboration on shared work.
A SIG is -therefore: - -- *Not a support forum:* a mailing list and a SIG is not the same thing -- *Not immediately required:* early on in a project's life, you may not know - if you have shared work or collaborators -- *Not free labor:* energy is required to grow and coordinate the work - collaboratively - -Bazel Owners take a conservative approach to SIG creation—thanks to the ease of -starting projects on GitHub, there are many avenues where collaboration can -happen without the need for a SIG. - -## SIG lifecycle - -This section covers how to create a SIG. - -### Research and consultation - -To propose a new SIG group, first gather evidence for approval, as specified -below. Some possible avenues to consider are: - -- A well-defined problem or set of problems the group would solve -- Consultation with community members who would benefit, assessing both the - benefit and their willingness to commit -- For existing projects, evidence from issues and PRs that contributors care - about the topic -- Potential goals for the group to achieve -- Resource requirements of running the group - -Even if the need for a SIG seems self-evident, the research and consultation is -still important to the success of the group. - -### Create the new group - -The new group should follow the below process for chartering. In particular, it -must demonstrate: - -- A clear purpose and benefit to Bazel (either around a sub-project or - application area) -- Two or more contributors willing to act as group leads, existence of other - contributors, and evidence of demand for the group -- Each group needs to use at least one publicly accessible mailing list. A SIG - may reuse one of the public lists, such as - [bazel-discuss](https://groups.google.com/g/bazel-discuss), ask for a list - for @bazel.build, or create their own list -- Resources the SIG initially requires (usually, mailing list and regular - video call.) -- SIGs can serve documents and files from their directory in - [`bazelbuild/community`](https://github.com/bazelbuild/community) - or from their own repository in the - [`bazelbuild`](https://github.com/bazelbuild) GitHub - organization. SIGs may link to external resources if they choose to organize - their work outside of the `bazelbuild` GitHub organization -- Bazel Owners approve or reject SIG applications and consult other - stakeholders as necessary - -Before entering the formal parts of the process, you should consult with -the Bazel product team, at product@bazel.build. Most SIGs require conversation -and iteration before approval. - -The formal request for the new group is done by submitting a charter as a PR to -[`bazelbuild/community`](https://github.com/bazelbuild/community), -and including the request in the comments on the PR following the template -below. On approval, the PR for the group is merged and the required resources -created. - -### Template Request for New SIG - -To request a new SIG, use the template in the community repo: -[SIG-request-template.md](https://github.com/bazelbuild/community/blob/main/governance/SIG-request-template.md). - -### Chartering - -To establish a group, you need a charter and must follow the Bazel -[code of conduct](https://github.com/bazelbuild/bazel/blob/HEAD/CODE_OF_CONDUCT.md). -Archives of the group will be public. Membership may either be open to all -without approval, or available on request, pending approval of the group -administrator. - -The charter must nominate an administrator. 
As well as an administrator, the -group must include at least one person as lead (these may be the same person), -who serves as point of contact for coordination as required with the Bazel -product team. - -Group creators must post their charter to the group mailing list. The community -repository in the Bazel GitHub organization archives such documents and -policies. As groups evolve their practices and conventions, they should update -their charters within the relevant part of the community repository. - -### Collaboration and inclusion - -While not mandated, the group should choose to make use of collaboration -via scheduled conference calls or chat channels to conduct meetings. Any such -meetings should be advertised on the mailing list, and notes posted to the -mailing list afterwards. Regular meetings help drive accountability and progress -in a SIG. - -Bazel product team members may proactively monitor and encourage the group to -discussion and action as appropriate. - -### Launch a SIG - -Required activities: - -- Notify Bazel general discussion groups - ([bazel-discuss](https://groups.google.com/g/bazel-discuss), - [bazel-dev](https://groups.google.com/g/bazel-dev)). - -Optional activities: - -- Create a blog post for the Bazel blog - -### Health and termination of SIGs - -The Bazel owners make a best effort to ensure the health of SIGs. Bazel owners -occasionally request the SIG lead to report on the SIG's work, to inform the -broader Bazel community of the group's activity. - -If a SIG no longer has a useful purpose or interested community, it may be -archived and cease operation. The Bazel product team reserves the right to -archive such inactive SIGs to maintain the overall health of the project, -though it is a less preferable outcome. A SIG may also opt to disband if -it recognizes it has reached the end of its useful life. - -## Note - -*This content has been adopted from Tensorflow’s -[SIG playbook](https://www.tensorflow.org/community/sig_playbook) -with modifications.* diff --git a/8.1.1/community/update.mdx b/8.1.1/community/update.mdx deleted file mode 100644 index be0e07d..0000000 --- a/8.1.1/community/update.mdx +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: 'Community updates' ---- - - - -Join Bazel developer relations engineers for the monthly community update -livestream, or catch up on past ones. - -Title | Date | Description | Speakers --------- | -------- | -------- | -------- -[Roadmap Introduction](https://www.youtube.com/watch?v=gYrZDl7K9JM) | 5/19/2022 | The inaugural Bazel Community Update, introducing the community to some of Google's Bazel leadership to talk about the general state of the project and its upcoming roadmap | Sven Tiffe, Tony Aiuto, Radhika Advani -[Hands-On with Bzlmod](https://www.youtube.com/watch?v=MuW5XNcFukE) | 6/23/2022 | This month, we're joined by Google engineers Yun Peng and Xudong Yang to talk about Bzlmod, the new dependency system that is expected to go GA later this year. We'll cover the motivation behind the change, the new capabilities it brings to the table, and walk through some examples of it in action. | Yun Peng, Xudong Yang -[Extending Gazelle to generate BUILD files](https://www.youtube.com/watch?v=E1-U7EAfhXw) | 7/21/2022 | This month we're joined by Son Luong Ngoc who will be showing the Gazelle language extension system. We'll briefly touch on how it works under the covers, existing extensions, and how to go about writing your own extensions to ease the migration to Bazel. 
| Son Luong Ngoc -[Using Bazel for JavaScript Projects](https://www.youtube.com/watch?v=RIfYqX0JJYk) | 8/18/2022 | In this update, Alex Eagle joins us to talk about running JavaScript build tooling under Bazel. We'll look at a couple of examples: a Vue.js frontend and Nest backend. We'll cover the migration to newer rules_js provided by Aspect, and study how the tooling allows for fetching third-party dependencies and resolving them in the Node.js runtime. | Alex Eagle -[Like Peanut Butter & Jelly: Integrating Bazel with JetBrains IntelliJ](https://www.youtube.com/watch?v=wMrua-W-LC4) | 9/15/2022 | Bazel is awesome. IntelliJ is awesome. Naturally, they are more awesome together. Bazel IntelliJ plugin gurus Mai Hussien from Google and Justin Kaeser from JetBrains join us this month to give a live demo and walkthrough of the plugin's capabilities. Both new and experienced plugin users are welcome to come with questions. | Mai Hussien, Justin Kaeser -[Bazel at scale for surgical robots](https://www.youtube.com/watch?v=kCs1xa45yjM) | 10/27/2022 | What do you do when CMake CI runs for four hours? Join Guillaume Maudoux of Tweag to learn about how they migrated large, embedded robotic applications to Bazel. Topics include configuring toolchains for cross compilation, improving CI performance, managing third-party dependencies, and creating a positive developer experience — everything needed to ensure that Bazel lives up to “{Fast, Correct} — Choose Two”. | Guillaume Maudoux -[The Ghosts of Bazel Past, Present, and Future](https://www.youtube.com/watch?v=uRjSghJQlsw) | 12/22/2022 | For our special holiday Community Update and last of 2022, I'll be joined by Google's Sven Tiffe and Radhika Advani where we'll be visited by the ghosts of Bazel Past (2022 year in review), Present (Bazel 6.0 release), and Future (what to expect in 2023). | Sven Tiffe, Radhika Advani diff --git a/8.1.1/concepts/build-ref.mdx b/8.1.1/concepts/build-ref.mdx deleted file mode 100644 index e8839d4..0000000 --- a/8.1.1/concepts/build-ref.mdx +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: 'Repositories, workspaces, packages, and targets' ---- - - - -Bazel builds software from source code organized in directory trees called -repositories. A defined set of repositories comprises the workspace. Source -files in repositories are organized in a nested hierarchy of packages, where -each package is a directory that contains a set of related source files and one -`BUILD` file. The `BUILD` file specifies what software outputs can be built from -the source. - -### Repositories - -Source files used in a Bazel build are organized in _repositories_ (often -shortened to _repos_). A repo is a directory tree with a boundary marker file at -its root; such a boundary marker file could be `MODULE.bazel`, `REPO.bazel`, or -in legacy contexts, `WORKSPACE` or `WORKSPACE.bazel`. - -The repo in which the current Bazel command is being run is called the _main -repo_. Other, (external) repos are defined by _repo rules_; see [external -dependencies overview](/external/overview) for more information. - -## Workspace - -A _workspace_ is the environment shared by all Bazel commands run from the same -main repo. It encompasses the main repo and the set of all defined external -repos. - -Note that historically the concepts of "repository" and "workspace" have been -conflated; the term "workspace" has often been used to refer to the main -repository, and sometimes even used as a synonym of "repository". 
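-
-For example, a minimal `MODULE.bazel` at the root of the main repo both marks
-the repo boundary and declares the external repos that make up the workspace.
-(The module name, version, and dependency below are illustrative, not
-prescriptive.)
-
-```python
-# MODULE.bazel -- the boundary marker file at the repo root.
-module(
-    name = "my_project",
-    version = "0.1.0",
-)
-
-# Each bazel_dep defines an external repo visible to this workspace.
-bazel_dep(name = "rules_cc", version = "0.0.9")
-```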
- -## Packages - -The primary unit of code organization in a repository is the _package_. A -package is a collection of related files and a specification of how they can be -used to produce output artifacts. - -A package is defined as a directory containing a -[`BUILD` file](/concepts/build-files) named either `BUILD` or `BUILD.bazel`. A -package includes all files in its directory, plus all subdirectories beneath it, -except those which themselves contain a `BUILD` file. From this definition, no -file or directory may be a part of two different packages. - -For example, in the following directory tree there are two packages, `my/app`, -and the subpackage `my/app/tests`. Note that `my/app/data` is not a package, but -a directory belonging to package `my/app`. - -``` -src/my/app/BUILD -src/my/app/app.cc -src/my/app/data/input.txt -src/my/app/tests/BUILD -src/my/app/tests/test.cc -``` - -## Targets - -A package is a container of _targets_, which are defined in the package's -`BUILD` file. Most targets are one of two principal kinds, _files_ and _rules_. - -Files are further divided into two kinds. _Source files_ are usually written by -the efforts of people, and checked in to the repository. _Generated files_, -sometimes called derived files or output files, are not checked in, but are -generated from source files. - -The second kind of target is declared with a _rule_. Each rule instance -specifies the relationship between a set of input and a set of output files. The -inputs to a rule may be source files, but they also may be the outputs of other -rules. - -Whether the input to a rule is a source file or a generated file is in most -cases immaterial; what matters is only the contents of that file. This fact -makes it easy to replace a complex source file with a generated file produced by -a rule, such as happens when the burden of manually maintaining a highly -structured file becomes too tiresome, and someone writes a program to derive it. -No change is required to the consumers of that file. Conversely, a generated -file may easily be replaced by a source file with only local changes. - -The inputs to a rule may also include _other rules_. The precise meaning of such -relationships is often quite complex and language- or rule-dependent, but -intuitively it is simple: a C++ library rule A might have another C++ library -rule B for an input. The effect of this dependency is that B's header files are -available to A during compilation, B's symbols are available to A during -linking, and B's runtime data is available to A during execution. - -An invariant of all rules is that the files generated by a rule always belong to -the same package as the rule itself; it is not possible to generate files into -another package. It is not uncommon for a rule's inputs to come from another -package, though. - -Package groups are sets of packages whose purpose is to limit accessibility of -certain rules. Package groups are defined by the `package_group` function. They -have three properties: the list of packages they contain, their name, and other -package groups they include. The only allowed ways to refer to them are from the -`visibility` attribute of rules or from the `default_visibility` attribute of -the `package` function; they do not generate or consume files. For more -information, refer to the [`package_group` -documentation](/reference/be/functions#package_group). 
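-
-For example, a hypothetical package group combining all three properties might
-look like this (names and packages are illustrative):
-
-```python
-package_group(
-    name = "mylib_clients",
-    # The list of packages the group contains.
-    packages = ["//serving/..."],
-    # Other package groups whose packages are also included.
-    includes = [":mylib_team"],
-)
-```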
-
-
-  Labels
-
diff --git a/8.1.1/concepts/platforms.mdx b/8.1.1/concepts/platforms.mdx
deleted file mode 100644
index e560ea4..0000000
--- a/8.1.1/concepts/platforms.mdx
+++ /dev/null
@@ -1,429 +0,0 @@
----
-title: 'Migrating to Platforms'
----
-
-
-
-Bazel has sophisticated [support](#background) for modeling
-[platforms][Platforms] and [toolchains][Toolchains] for multi-architecture and
-cross-compiled builds.
-
-This page summarizes the state of this support.
-
-Key Point: Bazel's platform and toolchain APIs are available today. Not all
-languages support them. Use these APIs with your project if you can. Bazel is
-migrating all major languages so eventually all builds will be platform-based.
-
-See also:
-
-* [Platforms][Platforms]
-* [Toolchains][Toolchains]
-* [Background][Background]
-
-## Status
-
-### C++
-
-C++ rules use platforms to select toolchains when
-`--incompatible_enable_cc_toolchain_resolution` is set.
-
-This means you can configure a C++ project with:
-
-```posix-terminal
-bazel build //:my_cpp_project --platforms=//:myplatform
-```
-
-instead of the legacy:
-
-```posix-terminal
-bazel build //:my_cpp_project --cpu=... --crosstool_top=... --compiler=...
-```
-
-This will be enabled by default in Bazel 7.0 ([#7260](https://github.com/bazelbuild/bazel/issues/7260)).
-
-To test your C++ project with platforms, see
-[Migrating Your Project](#migrating-your-project) and
-[Configuring C++ toolchains].
-
-### Java
-
-Java rules use platforms to select toolchains.
-
-This replaces legacy flags `--java_toolchain`, `--host_java_toolchain`,
-`--javabase`, and `--host_javabase`.
-
-See [Java and Bazel](/docs/bazel-and-java) for details.
-
-### Android
-
-Android rules use platforms to select toolchains when
-`--incompatible_enable_android_toolchain_resolution` is set.
-
-This means you can configure an Android project with:
-
-```posix-terminal
-bazel build //:my_android_project --android_platforms=//:my_android_platform
-```
-
-instead of with legacy flags like `--android_crosstool_top`, `--android_cpu`,
-and `--fat_apk_cpu`.
-
-This will be enabled by default in Bazel 7.0 ([#16285](https://github.com/bazelbuild/bazel/issues/16285)).
-
-To test your Android project with platforms, see
-[Migrating Your Project](#migrating-your-project).
-
-### Apple
-
-[Apple rules] do not support platforms and are not yet scheduled
-for support.
-
-You can still use platform APIs with Apple builds (for example, when building
-with a mixture of Apple rules and pure C++) with [platform
-mappings](#platform-mappings).
-
-### Other languages
-
-* [Go rules] fully support platforms.
-* [Rust rules] fully support platforms.
-
-If you own a language rule set, see [Migrating your rule set] for adding
-support.
-
-## Background
-
-*Platforms* and *toolchains* were introduced to standardize how software
-projects target different architectures and cross-compile.
-
-This was
-[inspired][Inspiration]
-by the observation that language maintainers were already doing this in ad
-hoc, incompatible ways. For example, C++ rules used `--cpu` and
-`--crosstool_top` to declare a target CPU and toolchain. Neither of these
-correctly models a "platform". This produced awkward and incorrect builds.
-
-Java, Android, and other languages evolved their own flags for similar purposes,
-none of which interoperated with each other. This made cross-language builds
-confusing and complicated.
-
-Bazel is intended for large, multi-language, multi-platform projects.
This -demands more principled support for these concepts, including a clear -standard API. - -### Need for migration - -Upgrading to the new API requires two efforts: releasing the API and upgrading -rule logic to use it. - -The first is done but the second is ongoing. This consists of ensuring -language-specific platforms and toolchains are defined, language logic reads -toolchains through the new API instead of old flags like `--crosstool_top`, and -`config_setting`s select on the new API instead of old flags. - -This work is straightforward but requires a distinct effort for each language, -plus fair warning for project owners to test against upcoming changes. - -This is why this is an ongoing migration. - -### Goal - -This migration is complete when all projects build with the form: - -```posix-terminal -bazel build //:myproject --platforms=//:myplatform -``` - -This implies: - -1. Your project's rules choose the right toolchains for `//:myplatform`. -1. Your project's dependencies choose the right toolchains for `//:myplatform`. -1. `//:myplatform` references -[common declarations][Common Platform Declarations] -of `CPU`, `OS`, and other generic, language-independent properties -1. All relevant [`select()`s][select()] properly match `//:myplatform`. -1. `//:myplatform` is defined in a clear, accessible place: in your project's -repo if the platform is unique to your project, or some common place all -consuming projects can find it - -Old flags like `--cpu`, `--crosstool_top`, and `--fat_apk_cpu` will be -deprecated and removed as soon as it's safe to do so. - -Ultimately, this will be the *sole* way to configure architectures. - - -## Migrating your project - -If you build with languages that support platforms, your build should already -work with an invocation like: - -```posix-terminal -bazel build //:myproject --platforms=//:myplatform -``` - -See [Status](#status) and your language's documentation for precise details. - -If a language requires a flag to enable platform support, you also need to set -that flag. See [Status](#status) for details. - -For your project to build, you need to check the following: - -1. `//:myplatform` must exist. It's generally the project owner's responsibility - to define platforms because different projects target different machines. - See [Default platforms](#default-platforms). - -1. The toolchains you want to use must exist. If using stock toolchains, the - language owners should include instructions for how to register them. If - writing your own custom toolchains, you need to [register](https://bazel.build/extending/toolchains#registering-building-toolchains) them in your - `MODULE.bazel` file or with [`--extra_toolchains`](https://bazel.build/reference/command-line-reference#flag--extra_toolchains). - -1. `select()`s and [configuration transitions][Starlark transitions] must - resolve properly. See [select()](#select) and [Transitions](#transitions). - -1. If your build mixes languages that do and don't support platforms, you may - need platform mappings to help the legacy languages work with the new API. - See [Platform mappings](#platform-mappings) for details. - -If you still have problems, [reach out](#questions) for support. - -### Default platforms - -Project owners should define explicit -[platforms][Defining Constraints and Platforms] to describe the architectures -they want to build for. These are then triggered with `--platforms`. - -When `--platforms` isn't set, Bazel defaults to a `platform` representing the -local build machine. 
This is auto-generated at `@platforms//host` (aliased as
-`@bazel_tools//tools:host_platform`)
-so there's no need to explicitly define it. It maps the local machine's `OS`
-and `CPU` with `constraint_value`s declared in
-[`@platforms`](https://github.com/bazelbuild/platforms).
-
-### `select()`
-
-Projects can [`select()`][select()] on
-[`constraint_value` targets][constraint_value Rule] but not complete
-platforms. This is intentional so `select()` supports as wide a variety of
-machines as possible. A library with `ARM`-specific sources should support *all*
-`ARM`-powered machines unless there's reason to be more specific.
-
-To select on one or more `constraint_value`s, use:
-
-```python
-config_setting(
-    name = "is_arm",
-    constraint_values = [
-        "@platforms//cpu:arm",
-    ],
-)
-```
-
-This is equivalent to traditionally selecting on `--cpu`:
-
-```python
-config_setting(
-    name = "is_arm",
-    values = {
-        "cpu": "arm",
-    },
-)
-```
-
-More details [here][select() Platforms].
-
-`select`s on `--cpu`, `--crosstool_top`, etc. don't understand `--platforms`.
-When migrating your project to platforms, you must either convert them to
-`constraint_values` or use [platform mappings](#platform-mappings) to support
-both styles during migration.
-
-### Transitions
-
-[Starlark transitions][Starlark transitions] change
-flags down parts of your build graph. If your project uses a transition that
-sets `--cpu`, `--crosstool_top`, or other legacy flags, rules that read
-`--platforms` won't see these changes.
-
-When migrating your project to platforms, you must either convert changes like
-`return { "//command_line_option:cpu": "arm" }` to `return {
-"//command_line_option:platforms": "//:my_arm_platform" }` or use [platform
-mappings](#platform-mappings) to support both styles during migration.
-
-## Migrating your rule set
-
-If you own a rule set and want to support platforms, you need to:
-
-1. Have rule logic resolve toolchains with the toolchain API. See
-   [toolchain API][Toolchains] (`ctx.toolchains`).
-
-1. Optional: define an `--incompatible_enable_platforms_for_my_language` flag so
-   rule logic alternately resolves toolchains through the new API or old flags
-   like `--crosstool_top` during migration testing.
-
-1. Define the relevant properties that make up platform components. See
-   [Common platform properties](#common-platform-properties)
-
-1. Define standard toolchains and make them accessible to users through your
-   rule's registration instructions ([details](https://bazel.build/extending/toolchains#registering-building-toolchains))
-
-1. Ensure [`select()`s](#select) and
-   [configuration transitions](#transitions) support platforms. This is the
-   biggest challenge. It's particularly challenging for multi-language projects
-   (which may fail unless *all* languages can read `--platforms`).
-
-If you need to mix with rules that don't support platforms, you may need
-[platform mappings](#platform-mappings) to bridge the gap.
-
-### Common platform properties
-
-Common, cross-language platform properties like `OS` and `CPU` should be
-declared in [`@platforms`](https://github.com/bazelbuild/platforms).
-This encourages sharing, standardization, and cross-language compatibility.
-
-Properties unique to your rules should be declared in your rule's repo. This
-lets you maintain clear ownership over the specific concepts your rules are
-responsible for.
-
-If your rules use custom-purpose OSes or CPUs, these should be declared in your
-rule's repo vs.
-[`@platforms`](https://github.com/bazelbuild/platforms). - -## Platform mappings - -*Platform mappings* is a temporary API that lets platform-aware logic mix with -legacy logic in the same build. This is a blunt tool that's only intended to -smooth incompatibilities with different migration timeframes. - -Caution: Only use this if necessary, and expect to eventually eliminate it. - -A platform mapping is a map of either a `platform()` to a -corresponding set of legacy flags or the reverse. For example: - -```python -platforms: - # Maps "--platforms=//platforms:ios" to "--ios_multi_cpus=x86_64 --apple_platform_type=ios". - //platforms:ios - --ios_multi_cpus=x86_64 - --apple_platform_type=ios - -flags: - # Maps "--ios_multi_cpus=x86_64 --apple_platform_type=ios" to "--platforms=//platforms:ios". - --ios_multi_cpus=x86_64 - --apple_platform_type=ios - //platforms:ios - - # Maps "--cpu=darwin_x86_64 --apple_platform_type=macos" to "//platform:macos". - --cpu=darwin_x86_64 - --apple_platform_type=macos - //platforms:macos -``` - -Bazel uses this to guarantee all settings, both platform-based and -legacy, are consistently applied throughout the build, including through -[transitions](#transitions). - -By default Bazel reads mappings from the `platform_mappings` file in your -workspace root. You can also set -`--platform_mappings=//:my_custom_mapping`. - -See the [platform mappings design] for details. - -## API review - -A [`platform`][platform Rule] is a collection of -[`constraint_value` targets][constraint_value Rule]: - -```python -platform( - name = "myplatform", - constraint_values = [ - "@platforms//os:linux", - "@platforms//cpu:arm", - ], -) -``` - -A [`constraint_value`][constraint_value Rule] is a machine -property. Values of the same "kind" are grouped under a common -[`constraint_setting`][constraint_setting Rule]: - -```python -constraint_setting(name = "os") -constraint_value( - name = "linux", - constraint_setting = ":os", -) -constraint_value( - name = "mac", - constraint_setting = ":os", -) -``` - -A [`toolchain`][Toolchains] is a [Starlark rule][Starlark rule]. Its -attributes declare a language's tools (like `compiler = -"//mytoolchain:custom_gcc"`). Its [providers][Starlark Provider] pass -this information to rules that need to build with these tools. - -Toolchains declare the `constraint_value`s of machines they can -[target][target_compatible_with Attribute] -(`target_compatible_with = ["@platforms//os:linux"]`) and machines their tools can -[run on][exec_compatible_with Attribute] -(`exec_compatible_with = ["@platforms//os:mac"]`). - -When building `$ bazel build //:myproject --platforms=//:myplatform`, Bazel -automatically selects a toolchain that can run on the build machine and -build binaries for `//:myplatform`. This is known as *toolchain resolution*. - -The set of available toolchains can be registered in the `MODULE.bazel` file -with [`register_toolchains`][register_toolchains Function] or at the -command line with [`--extra_toolchains`][extra_toolchains Flag]. - -For more information see [here][Toolchains]. - -## Questions - -For general support and questions about the migration timeline, contact -[bazel-discuss] or the owners of the appropriate rules. - -For discussions on the design and evolution of the platform/toolchain APIs, -contact [bazel-dev]. 
- -## See also - -* [Configurable Builds - Part 1] -* [Platforms] -* [Toolchains] -* [Bazel Platforms Cookbook] -* [Platforms examples] -* [Example C++ toolchain] - -[Android Rules]: /docs/bazel-and-android -[Apple Rules]: https://github.com/bazelbuild/rules_apple -[Background]: #background -[Bazel platforms Cookbook]: https://docs.google.com/document/d/1UZaVcL08wePB41ATZHcxQV4Pu1YfA1RvvWm8FbZHuW8/ -[bazel-dev]: https://groups.google.com/forum/#!forum/bazel-dev -[bazel-discuss]: https://groups.google.com/forum/#!forum/bazel-discuss -[Common Platform Declarations]: https://github.com/bazelbuild/platforms -[constraint_setting Rule]: /reference/be/platforms-and-toolchains#constraint_setting -[constraint_value Rule]: /reference/be/platforms-and-toolchains#constraint_value -[Configurable Builds - Part 1]: https://blog.bazel.build/2019/02/11/configurable-builds-part-1.html -[Configuring C++ toolchains]: /tutorials/ccp-toolchain-config -[Defining Constraints and Platforms]: /extending/platforms#constraints-platforms -[Example C++ toolchain]: https://github.com/gregestren/snippets/tree/master/custom_cc_toolchain_with_platforms -[exec_compatible_with Attribute]: /reference/be/platforms-and-toolchains#toolchain.exec_compatible_with -[extra_toolchains Flag]: /reference/command-line-reference#flag--extra_toolchains -[Go Rules]: https://github.com/bazelbuild/rules_go -[Inspiration]: https://blog.bazel.build/2019/02/11/configurable-builds-part-1.html -[Migrating your rule set]: #migrating-your-rule-set -[Platforms]: /extending/platforms -[Platforms examples]: https://github.com/hlopko/bazel_platforms_examples -[platform mappings design]: https://docs.google.com/document/d/1Vg_tPgiZbSrvXcJ403vZVAGlsWhH9BUDrAxMOYnO0Ls/edit -[platform Rule]: /reference/be/platforms-and-toolchains#platform -[register_toolchains Function]: /rules/lib/globals/module#register_toolchains -[Rust rules]: https://github.com/bazelbuild/rules_rust -[select()]: /docs/configurable-attributes -[select() Platforms]: /docs/configurable-attributes#platforms -[Starlark provider]: /extending/rules#providers -[Starlark rule]: /extending/rules -[Starlark transitions]: /extending/config#user-defined-transitions -[target_compatible_with Attribute]: /reference/be/platforms-and-toolchains#toolchain.target_compatible_with -[Toolchains]: /extending/toolchains diff --git a/8.1.1/concepts/visibility.mdx b/8.1.1/concepts/visibility.mdx deleted file mode 100644 index 5b1bfd6..0000000 --- a/8.1.1/concepts/visibility.mdx +++ /dev/null @@ -1,591 +0,0 @@ ---- -title: 'Visibility' ---- - - - -This page covers Bazel's two visibility systems: -[target visibility](#target-visibility) and [load visibility](#load-visibility). - -Both types of visibility help other developers distinguish between your -library's public API and its implementation details, and help enforce structure -as your workspace grows. You can also use visibility when deprecating a public -API to allow current users while denying new ones. - -## Target visibility - -**Target visibility** controls who may depend on your target — that is, who may -use your target's label inside an attribute such as `deps`. A target will fail -to build during the [analysis](/reference/glossary#analysis-phase) phase if it -violates the visibility of one of its dependencies. - -Generally, a target `A` is visible to a target `B` if they are in the same -location, or if `A` grants visibility to `B`'s location. 
In the absence of -[symbolic macros](/extending/macros), the term "location" can be simplified -to just "package"; see [below](#symbolic-macros) for more on symbolic macros. - -Visibility is specified by listing allowed packages. Allowing a package does not -necessarily mean that its subpackages are also allowed. For more details on -packages and subpackages, see [Concepts and terminology](/concepts/build-ref). - -For prototyping, you can disable target visibility enforcement by setting the -flag `--check_visibility=false`. This shouldn't be done for production usage in -submitted code. - -The primary way to control visibility is with a rule's -[`visibility`](/reference/be/common-definitions#common.visibility) attribute. -The following subsections describe the attribute's format, how to apply it to -various kinds of targets, and the interaction between the visibility system and -symbolic macros. - -### Visibility specifications - -All rule targets have a `visibility` attribute that takes a list of labels. Each -label has one of the following forms. With the exception of the last form, these -are just syntactic placeholders that don't correspond to any actual target. - -* `"//visibility:public"`: Grants access to all packages. - -* `"//visibility:private"`: Does not grant any additional access; only targets - in this location's package can use this target. - -* `"//foo/bar:__pkg__"`: Grants access to `//foo/bar` (but not its - subpackages). - -* `"//foo/bar:__subpackages__"`: Grants access `//foo/bar` and all of its - direct and indirect subpackages. - -* `"//some_pkg:my_package_group"`: Grants access to all of the packages that - are part of the given [`package_group`](/reference/be/functions#package_group). - - * Package groups use a - [different syntax](/reference/be/functions#package_group.packages) for - specifying packages. Within a package group, the forms - `"//foo/bar:__pkg__"` and `"//foo/bar:__subpackages__"` are respectively - replaced by `"//foo/bar"` and `"//foo/bar/..."`. Likewise, - `"//visibility:public"` and `"//visibility:private"` are just `"public"` - and `"private"`. - -For example, if `//some/package:mytarget` has its `visibility` set to -`[":__subpackages__", "//tests:__pkg__"]`, then it could be used by any target -that is part of the `//some/package/...` source tree, as well as targets -declared in `//tests/BUILD`, but not by targets defined in -`//tests/integration/BUILD`. - -**Best practice:** To make several targets visible to the same set -of packages, use a `package_group` instead of repeating the list in each -target's `visibility` attribute. This increases readability and prevents the -lists from getting out of sync. - -**Best practice:** When granting visibility to another team's project, prefer -`__subpackages__` over `__pkg__` to avoid needless visibility churn as that -project evolves and adds new subpackages. - -Note: The `visibility` attribute may not specify non-`package_group` targets. -Doing so triggers a "Label does not refer to a package group" or "Cycle in -dependency graph" error. - -### Rule target visibility - -A rule target's visibility is determined by taking its `visibility` attribute --- or a suitable default if not given -- and appending the location where the -target was declared. 
For targets not declared in a symbolic macro, if the
-package specifies a [`default_visibility`](/reference/be/functions#package.default_visibility),
-this default is used; for all other packages and for targets declared in a
-symbolic macro, the default is just `["//visibility:private"]`.
-
-```starlark
-# //mypkg/BUILD
-
-package(default_visibility = ["//friend:__pkg__"])
-
-cc_library(
-    name = "t1",
-    ...
-    # No visibility explicitly specified.
-    # Effective visibility is ["//friend:__pkg__", "//mypkg:__pkg__"].
-    # If no default_visibility were given in package(...), the visibility would
-    # instead default to ["//visibility:private"], and the effective visibility
-    # would be ["//mypkg:__pkg__"].
-)
-
-cc_library(
-    name = "t2",
-    ...
-    visibility = [":clients"],
-    # Effective visibility is ["//mypkg:clients", "//mypkg:__pkg__"], which will
-    # expand to ["//another_friend:__subpackages__", "//mypkg:__pkg__"].
-)
-
-cc_library(
-    name = "t3",
-    ...
-    visibility = ["//visibility:private"],
-    # Effective visibility is ["//mypkg:__pkg__"]
-)
-
-package_group(
-    name = "clients",
-    packages = ["//another_friend/..."],
-)
-```
-
-**Best practice:** Avoid setting `default_visibility` to public. It may be
-convenient for prototyping or in small codebases, but the risk of inadvertently
-creating public targets increases as the codebase grows. It's better to be
-explicit about which targets are part of a package's public interface.
-
-### Generated file target visibility
-
-A generated file target has the same visibility as the rule target that
-generates it.
-
-```starlark
-# //mypkg/BUILD
-
-java_binary(
-    name = "foo",
-    ...
-    visibility = ["//friend:__pkg__"],
-)
-```
-
-```starlark
-# //friend/BUILD
-
-some_rule(
-    name = "bar",
-    deps = [
-        # Allowed directly by visibility of foo.
-        "//mypkg:foo",
-        # Also allowed. The java_binary's "_deploy.jar" implicit output file
-        # target has the same visibility as the rule target itself.
-        "//mypkg:foo_deploy.jar",
-    ]
-    ...
-)
-```
-
-### Source file target visibility
-
-Source file targets can either be explicitly declared using
-[`exports_files`](/reference/be/functions#exports_files), or implicitly created
-by referring to their filename in a label attribute of a rule (outside of a
-symbolic macro). As with rule targets, the location of the call to
-`exports_files`, or the BUILD file that referred to the input file, is always
-automatically appended to the file's visibility.
-
-Files declared by `exports_files` can have their visibility set by the
-`visibility` parameter to that function. If this parameter is not given, the
-visibility is public.
-
-Note: `exports_files` may not be used to override the visibility of a generated
-file.
-
-For files that do not appear in a call to `exports_files`, the visibility
-depends on the value of the flag
-[`--incompatible_no_implicit_file_export`](https://github.com/bazelbuild/bazel/issues/10225):
-
-* If the flag is true, the visibility is private.
-
-* Else, the legacy behavior applies: The visibility is the same as the
-  `BUILD` file's `default_visibility`, or private if a default visibility is
-  not specified.
-
-Avoid relying on the legacy behavior. Always write an `exports_files`
-declaration whenever a source file target needs non-private visibility.
-
-**Best practice:** When possible, prefer to expose a rule target rather than a
-source file. For example, instead of calling `exports_files` on a `.java` file,
-wrap the file in a non-private `java_library` target.
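-
-A minimal sketch of that pattern (the file, package, and consumer names here
-are hypothetical):
-
-```starlark
-# //mylib/BUILD
-
-# Rather than exports_files(["Api.java"]), expose a library target.
-java_library(
-    name = "api",
-    srcs = ["Api.java"],
-    visibility = ["//someclient:__pkg__"],
-)
-```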
-Generally, rule targets
-should only directly reference source files that live in the same package.
-
-#### Example
-
-File `//frobber/data/BUILD`:
-
-```starlark
-exports_files(["readme.txt"])
-```
-
-File `//frobber/bin/BUILD`:
-
-```starlark
-cc_binary(
-    name = "my-program",
-    data = ["//frobber/data:readme.txt"],
-)
-```
-
-### Config setting visibility
-
-Historically, Bazel has not enforced visibility for
-[`config_setting`](/reference/be/general#config_setting) targets that are
-referenced in the keys of a [`select()`](/reference/be/functions#select). There
-are two flags to remove this legacy behavior:
-
-* [`--incompatible_enforce_config_setting_visibility`](https://github.com/bazelbuild/bazel/issues/12932)
-  enables visibility checking for these targets. To assist with migration, it
-  also causes any `config_setting` that does not specify a `visibility` to be
-  considered public (regardless of package-level `default_visibility`).
-
-* [`--incompatible_config_setting_private_default_visibility`](https://github.com/bazelbuild/bazel/issues/12933)
-  causes `config_setting`s that do not specify a `visibility` to respect the
-  package's `default_visibility` and to fall back on private visibility, just
-  like any other rule target. It is a no-op if
-  `--incompatible_enforce_config_setting_visibility` is not set.
-
-Avoid relying on the legacy behavior. Any `config_setting` that is intended to
-be used outside the current package should have an explicit `visibility`, if the
-package does not already specify a suitable `default_visibility`.
-
-### Package group target visibility
-
-`package_group` targets do not have a `visibility` attribute. They are always
-publicly visible.
-
-### Visibility of implicit dependencies
-
-Some rules have [implicit dependencies](/extending/rules#private_attributes_and_implicit_dependencies) —
-dependencies that are not spelled out in a `BUILD` file but are inherent to
-every instance of that rule. For example, a `cc_library` rule might create an
-implicit dependency from each of its rule targets to an executable target
-representing a C++ compiler.
-
-The visibility of such an implicit dependency is checked with respect to the
-package containing the `.bzl` file in which the rule (or aspect) is defined. In
-our example, the C++ compiler could be private so long as it lives in the same
-package as the definition of the `cc_library` rule. As a fallback, if the
-implicit dependency is not visible from the definition, it is checked with
-respect to the `cc_library` target.
-
-If you want to restrict the usage of a rule to certain packages, use
-[load visibility](#load-visibility) instead.
-
-### Visibility and symbolic macros
-
-This section describes how the visibility system interacts with
-[symbolic macros](/extending/macros).
-
-#### Locations within symbolic macros
-
-A key detail of the visibility system is how we determine the location of a
-declaration. For targets that are not declared in a symbolic macro, the location
-is just the package where the target lives -- the package of the `BUILD` file.
-But for targets created in a symbolic macro, the location is the package
-containing the `.bzl` file where the macro's definition (the
-`my_macro = macro(...)` statement) appears. When a target is created inside
-multiple nested macros, it is always the innermost symbolic macro's definition
-that is used.
-
-The same system is used to determine what location to check against a given
-dependency's visibility.
If the consuming target was created inside a macro, we
-look at the innermost macro's definition rather than the package the consuming
-target lives in.
-
-This means that all macros whose code is defined in the same package are
-automatically "friends" with one another. Any target directly created by a macro
-defined in `//lib:defs.bzl` can be seen from any other macro defined in `//lib`,
-regardless of what packages the macros are actually instantiated in. Likewise,
-they can see, and can be seen by, targets declared directly in `//lib/BUILD` and
-its legacy macros. Conversely, targets that live in the same package cannot
-necessarily see one another if at least one of them is created by a symbolic
-macro.
-
-Within a symbolic macro's implementation function, the `visibility` parameter
-has the effective value of the macro's `visibility` attribute after appending
-the location where the macro was called. The standard way for a macro to export
-one of its targets to its caller is to forward this value along to the target's
-declaration, as in `some_rule(..., visibility = visibility)`. Targets that omit
-this attribute won't be visible to the caller of the macro unless the caller
-happens to be in the same package as the macro definition. This behavior
-composes, in the sense that a chain of nested calls to submacros may each pass
-`visibility = visibility`, re-exporting the inner macro's exported targets to
-the caller at each level, without exposing any of the macros' implementation
-details.
-
-#### Delegating privileges to a submacro
-
-The visibility model has a special feature to allow a macro to delegate its
-permissions to a submacro. This is important for factoring and composing macros.
-
-Suppose you have a macro `my_macro` that creates a dependency edge using a rule
-`some_library` from another package:
-
-```starlark
-# //macro/defs.bzl
-load("//lib:defs.bzl", "some_library")
-
-def _impl(name, visibility, ...):
-    ...
-    native.genrule(
-        name = name + "_dependency",
-        ...
-    )
-    some_library(
-        name = name + "_consumer",
-        deps = [name + "_dependency"],
-        ...
-    )
-
-my_macro = macro(implementation = _impl, ...)
-```
-
-```starlark
-# //pkg/BUILD
-
-load("//macro:defs.bzl", "my_macro")
-
-my_macro(name = "foo", ...)
-```
-
-The `//pkg:foo_dependency` target has no `visibility` specified, so it is only
-visible within `//macro`, which works fine for the consuming target. Now, what
-happens if the author of `//lib` refactors `some_library` to instead be
-implemented using a macro?
-
-```starlark
-# //lib:defs.bzl
-
-def _impl(name, visibility, deps, ...):
-    some_rule(
-        # Main target, exported.
-        name = name,
-        visibility = visibility,
-        deps = deps,
-        ...)
-
-some_library = macro(implementation = _impl, ...)
-```
-
-With this change, `//pkg:foo_consumer`'s location is now `//lib` rather than
-`//macro`, so its usage of `//pkg:foo_dependency` violates the dependency's
-visibility. The author of `my_macro` can't be expected to pass
-`visibility = ["//lib"]` to the declaration of the dependency just to work
-around this implementation detail.
-
-For this reason, when a dependency of a target is also an attribute value of the
-macro that declared the target, we check the dependency's visibility against the
-location of the macro instead of the location of the consuming target.
- -In this example, to validate whether `//pkg:foo_consumer` can see -`//pkg:foo_dependency`, we see that `//pkg:foo_dependency` was also passed as an -input to the call to `some_library` inside of `my_macro`, and instead check the -dependency's visibility against the location of this call, `//macro`. - -This process can repeat recursively, as long as a target or macro declaration is -inside of another symbolic macro taking the dependency's label in one of its -label-typed attributes. - -Note: Visibility delegation does not work for labels that were not passed into -the macro, such as labels derived by string manipulation. - -## Load visibility - -**Load visibility** controls whether a `.bzl` file may be loaded from other -`BUILD` or `.bzl` files outside the current package. - -In the same way that target visibility protects source code that is encapsulated -by targets, load visibility protects build logic that is encapsulated by `.bzl` -files. For instance, a `BUILD` file author might wish to factor some repetitive -target declarations into a macro in a `.bzl` file. Without the protection of -load visibility, they might find their macro reused by other collaborators in -the same workspace, so that modifying the macro breaks other teams' builds. - -Note that a `.bzl` file may or may not have a corresponding source file target. -If it does, there is no guarantee that the load visibility and the target -visibility coincide. That is, the same `BUILD` file might be able to load the -`.bzl` file but not list it in the `srcs` of a [`filegroup`](/reference/be/general#filegroup), -or vice versa. This can sometimes cause problems for rules that wish to consume -`.bzl` files as source code, such as for documentation generation or testing. - -For prototyping, you may disable load visibility enforcement by setting -`--check_bzl_visibility=false`. As with `--check_visibility=false`, this should -not be done for submitted code. - -Load visibility is available as of Bazel 6.0. - -### Declaring load visibility - -To set the load visibility of a `.bzl` file, call the -[`visibility()`](/rules/lib/globals/bzl#visibility) function from within the file. -The argument to `visibility()` is a list of package specifications, just like -the [`packages`](/reference/be/functions#package_group.packages) attribute of -`package_group`. However, `visibility()` does not accept negative package -specifications. - -The call to `visibility()` must only occur once per file, at the top level (not -inside a function), and ideally immediately following the `load()` statements. - -Unlike target visibility, the default load visibility is always public. Files -that do not call `visibility()` are always loadable from anywhere in the -workspace. It is a good idea to add `visibility("private")` to the top of any -new `.bzl` file that is not specifically intended for use outside the package. - -### Example - -```starlark -# //mylib/internal_defs.bzl - -# Available to subpackages and to mylib's tests. -visibility(["//mylib/...", "//tests/mylib/..."]) - -def helper(...): - ... -``` - -```starlark -# //mylib/rules.bzl - -load(":internal_defs.bzl", "helper") -# Set visibility explicitly, even though public is the default. -# Note the [] can be omitted when there's only one entry. -visibility("public") - -myrule = rule( - ... -) -``` - -```starlark -# //someclient/BUILD - -load("//mylib:rules.bzl", "myrule") # ok -load("//mylib:internal_defs.bzl", "helper") # error - -... 
-```
-
-### Load visibility practices
-
-This section describes tips for managing load visibility declarations.
-
-#### Factoring visibilities
-
-When multiple `.bzl` files should have the same visibility, it can be helpful to
-factor their package specifications into a common list. For example:
-
-```starlark
-# //mylib/internal_defs.bzl
-
-visibility("private")
-
-clients = [
-    "//foo",
-    "//bar/baz/...",
-    ...
-]
-```
-
-```starlark
-# //mylib/feature_A.bzl
-
-load(":internal_defs.bzl", "clients")
-visibility(clients)
-
-...
-```
-
-```starlark
-# //mylib/feature_B.bzl
-
-load(":internal_defs.bzl", "clients")
-visibility(clients)
-
-...
-```
-
-This helps prevent accidental skew between the various `.bzl` files'
-visibilities. It also is more readable when the `clients` list is large.
-
-#### Composing visibilities
-
-Sometimes a `.bzl` file might need to be visible to an allowlist that is
-composed of multiple smaller allowlists. This is analogous to how a
-`package_group` can incorporate other `package_group`s via its
-[`includes`](/reference/be/functions#package_group.includes) attribute.
-
-Suppose you are deprecating a widely used macro. You want it to be visible only
-to existing users and to the packages owned by your own team. You might write:
-
-```starlark
-# //mylib/macros.bzl
-
-load(":internal_defs.bzl", "our_packages")
-load("//some_big_client:defs.bzl", "their_remaining_uses")
-
-# List concatenation. Duplicates are fine.
-visibility(our_packages + their_remaining_uses)
-```
-
-#### Deduplicating with package groups
-
-Unlike target visibility, you cannot define a load visibility in terms of a
-`package_group`. If you want to reuse the same allowlist for both target
-visibility and load visibility, it's best to move the list of package
-specifications into a `.bzl` file, where both kinds of declarations may refer to
-it. Building off the example in [Factoring visibilities](#factoring-visibilities)
-above, you might write:
-
-```starlark
-# //mylib/BUILD
-
-load(":internal_defs.bzl", "clients")
-
-package_group(
-    name = "my_pkg_grp",
-    packages = clients,
-)
-```
-
-This only works if the list does not contain any negative package
-specifications.
-
-#### Protecting individual symbols
-
-Any Starlark symbol whose name begins with an underscore cannot be loaded from
-another file. This makes it easy to create private symbols, but does not allow
-you to share these symbols with a limited set of trusted files. On the other
-hand, load visibility gives you control over what other packages may see your
-`.bzl` file, but does not allow you to prevent any non-underscored symbol from
-being loaded.
-
-Luckily, you can combine these two features to get fine-grained control.
-
-```starlark
-# //mylib/internal_defs.bzl
-
-# Can't be public, because internal_helper shouldn't be exposed to the world.
-visibility("private")
-
-# Can't be underscore-prefixed, because this is
-# needed by other .bzl files in mylib.
-def internal_helper(...):
-    ...
-
-def public_util(...):
-    ...
-```
-
-```starlark
-# //mylib/defs.bzl
-
-load(":internal_defs.bzl", "internal_helper", _public_util="public_util")
-visibility("public")
-
-# internal_helper, as a loaded symbol, is available for use in this file but
-# can't be imported by clients who load this file.
-...
-
-# Re-export public_util from this file by assigning it to a global variable.
-# We needed to import it under a different name ("_public_util") in order for
-# this assignment to be legal.
-public_util = _public_util
-```
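-
-For clients, the net effect is that `public_util` can be loaded from
-`defs.bzl` while `internal_helper` stays sealed off. A sketch, with a
-hypothetical client package:
-
-```starlark
-# //someclient/defs.bzl
-
-load("//mylib:defs.bzl", "public_util")  # ok: defs.bzl is public
-load("//mylib:internal_defs.bzl", "internal_helper")  # error: private .bzl file
-```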
-
-#### bzl-visibility Buildifier lint
-
-There is a [Buildifier lint](https://github.com/bazelbuild/buildtools/blob/master/WARNINGS.md#bzl-visibility)
-that provides a warning if users load a file from a directory named `internal`
-or `private`, when the user's file is not itself underneath the parent of that
-directory. This lint predates the load visibility feature and is unnecessary in
-workspaces where `.bzl` files declare visibilities.
diff --git a/8.1.1/configure/attributes.mdx b/8.1.1/configure/attributes.mdx
deleted file mode 100644
index 7bc3f41..0000000
--- a/8.1.1/configure/attributes.mdx
+++ /dev/null
@@ -1,1097 +0,0 @@
----
-title: 'Configurable Build Attributes'
----
-
-
-
-**_Configurable attributes_**, commonly known as [`select()`](
-/reference/be/functions#select), is a Bazel feature that lets users toggle the values
-of build rule attributes at the command line.
-
-This can be used, for example, for a multiplatform library that automatically
-chooses the appropriate implementation for the architecture, or for a
-feature-configurable binary that can be customized at build time.
-
-## Example
-
-```python
-# myapp/BUILD
-
-cc_binary(
-    name = "mybinary",
-    srcs = ["main.cc"],
-    deps = select({
-        ":arm_build": [":arm_lib"],
-        ":x86_debug_build": [":x86_dev_lib"],
-        "//conditions:default": [":generic_lib"],
-    }),
-)
-
-config_setting(
-    name = "arm_build",
-    values = {"cpu": "arm"},
-)
-
-config_setting(
-    name = "x86_debug_build",
-    values = {
-        "cpu": "x86",
-        "compilation_mode": "dbg",
-    },
-)
-```
-
-This declares a `cc_binary` that "chooses" its deps based on the flags at the
-command line. Specifically, `deps` becomes:
-
-Command | deps =
-------- | ------
-`bazel build //myapp:mybinary --cpu=arm` | `[":arm_lib"]`
-`bazel build //myapp:mybinary -c dbg --cpu=x86` | `[":x86_dev_lib"]`
-`bazel build //myapp:mybinary --cpu=ppc` | `[":generic_lib"]`
-`bazel build //myapp:mybinary -c dbg --cpu=ppc` | `[":generic_lib"]`
-
-`select()` serves as a placeholder for a value that will be chosen based on
-*configuration conditions*, which are labels referencing [`config_setting`](/reference/be/general#config_setting)
-targets. By using `select()` in a configurable attribute, the attribute
-effectively adopts different values when different conditions hold.
-
-Matches must be unambiguous: if multiple conditions match, then either:
-* They all resolve to the same value. For example, when running on Linux x86,
-  `{"@platforms//os:linux": "Hello", "@platforms//cpu:x86_64": "Hello"}` is
-  unambiguous because both branches resolve to "Hello".
-* One's `values` is a strict superset of all others'. For example, `values = {"cpu": "x86", "compilation_mode": "dbg"}`
-  is an unambiguous specialization of `values = {"cpu": "x86"}`.
-
-The built-in condition [`//conditions:default`](#default-condition) automatically matches when
-nothing else does.
-
-While this example uses `deps`, `select()` works just as well on `srcs`,
-`resources`, `cmd`, and most other attributes. Only a small number of attributes
-are *non-configurable*, and these are clearly annotated. For example,
-`config_setting`'s own
-[`values`](/reference/be/general#config_setting.values) attribute is non-configurable.
-
-## `select()` and dependencies
-
-Certain attributes change the build parameters for all transitive dependencies
-under a target. For example, `genrule`'s `tools` changes `--cpu` to the CPU of
-the machine running Bazel (which, thanks to cross-compilation, may be different
-from the CPU the target is built for). This is known as a
-[configuration transition](/reference/glossary#transition).
-
-Given
-
-```python
-# myapp/BUILD
-
-config_setting(
-    name = "arm_cpu",
-    values = {"cpu": "arm"},
-)
-
-config_setting(
-    name = "x86_cpu",
-    values = {"cpu": "x86"},
-)
-
-genrule(
-    name = "my_genrule",
-    srcs = select({
-        ":arm_cpu": ["g_arm.src"],
-        ":x86_cpu": ["g_x86.src"],
-    }),
-    tools = select({
-        ":arm_cpu": [":tool1"],
-        ":x86_cpu": [":tool2"],
-    }),
-)
-
-cc_binary(
-    name = "tool1",
-    srcs = select({
-        ":arm_cpu": ["armtool.cc"],
-        ":x86_cpu": ["x86tool.cc"],
-    }),
-)
-```
-
-running
-
-```sh
-$ bazel build //myapp:my_genrule --cpu=arm
-```
-
-on an `x86` developer machine binds the build to `g_arm.src`, `tool1`, and
-`x86tool.cc`. Both of the `select`s attached to `my_genrule` use `my_genrule`'s
-build parameters, which include `--cpu=arm`. The `tools` attribute changes
-`--cpu` to `x86` for `tool1` and its transitive dependencies. The `select` on
-`tool1` uses `tool1`'s build parameters, which include `--cpu=x86`.
-
-## Configuration conditions
-
-Each key in a configurable attribute is a label reference to a
-[`config_setting`](/reference/be/general#config_setting) or
-[`constraint_value`](/reference/be/platforms-and-toolchains#constraint_value).
-
-`config_setting` is just a collection of
-expected command line flag settings. By encapsulating these in a target, it's
-easy to maintain "standard" conditions users can reference from multiple places.
-
-`constraint_value` provides support for [multi-platform behavior](#platforms).
-
-### Built-in flags
-
-Flags like `--cpu` are built into Bazel: the build tool natively understands
-them for all builds in all projects.
These are specified with -[`config_setting`](/reference/be/general#config_setting)'s -[`values`](/reference/be/general#config_setting.values) attribute: - -```python -config_setting( - name = "meaningful_condition_name", - values = { - "flag1": "value1", - "flag2": "value2", - ... - }, -) -``` - -`flagN` is a flag name (without `--`, so `"cpu"` instead of `"--cpu"`). `valueN` -is the expected value for that flag. `:meaningful_condition_name` matches if -*every* entry in `values` matches. Order is irrelevant. - -`valueN` is parsed as if it was set on the command line. This means: - -* `values = { "compilation_mode": "opt" }` matches `bazel build -c opt` -* `values = { "force_pic": "true" }` matches `bazel build --force_pic=1` -* `values = { "force_pic": "0" }` matches `bazel build --noforce_pic` - -`config_setting` only supports flags that affect target behavior. For example, -[`--show_progress`](/docs/user-manual#show-progress) isn't allowed because -it only affects how Bazel reports progress to the user. Targets can't use that -flag to construct their results. The exact set of supported flags isn't -documented. In practice, most flags that "make sense" work. - -### Custom flags - -You can model your own project-specific flags with -[Starlark build settings][BuildSettings]. Unlike built-in flags, these are -defined as build targets, so Bazel references them with target labels. - -These are triggered with [`config_setting`](/reference/be/general#config_setting)'s -[`flag_values`](/reference/be/general#config_setting.flag_values) -attribute: - -```python -config_setting( - name = "meaningful_condition_name", - flag_values = { - "//myflags:flag1": "value1", - "//myflags:flag2": "value2", - ... - }, -) -``` - -Behavior is the same as for [built-in flags](#built-in-flags). See [here](https://github.com/bazelbuild/examples/tree/HEAD/configurations/select_on_build_setting) -for a working example. - -[`--define`](/reference/command-line-reference#flag--define) -is an alternative legacy syntax for custom flags (for example -`--define foo=bar`). This can be expressed either in the -[values](/reference/be/general#config_setting.values) attribute -(`values = {"define": "foo=bar"}`) or the -[define_values](/reference/be/general#config_setting.define_values) attribute -(`define_values = {"foo": "bar"}`). `--define` is only supported for backwards -compatibility. Prefer Starlark build settings whenever possible. - -`values`, `flag_values`, and `define_values` evaluate independently. The -`config_setting` matches if all values across all of them match. - -## The default condition - -The built-in condition `//conditions:default` matches when no other condition -matches. - -Because of the "exactly one match" rule, a configurable attribute with no match -and no default condition emits a `"no matching conditions"` error. This can -protect against silent failures from unexpected settings: - -```python -# myapp/BUILD - -config_setting( - name = "x86_cpu", - values = {"cpu": "x86"}, -) - -cc_library( - name = "x86_only_lib", - srcs = select({ - ":x86_cpu": ["lib.cc"], - }), -) -``` - -```sh -$ bazel build //myapp:x86_only_lib --cpu=arm -ERROR: Configurable attribute "srcs" doesn't match this configuration (would -a default condition help?). -Conditions checked: - //myapp:x86_cpu -``` - -For even clearer errors, you can set custom messages with `select()`'s -[`no_match_error`](#custom-error-messages) attribute. 
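-
-For instance, adding a default branch makes the failing build above succeed; a
-minimal sketch, where the fallback source file is illustrative:
-
-```python
-cc_library(
-    name = "x86_only_lib",
-    srcs = select({
-        ":x86_cpu": ["lib.cc"],
-        # Matches any configuration not covered above, such as --cpu=arm.
-        "//conditions:default": ["generic_lib.cc"],
-    }),
-)
-```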
-
-## Platforms
-
-While the ability to specify multiple flags on the command line provides
-flexibility, it can also be burdensome to individually set each one every time
-you want to build a target.
-[Platforms](/extending/platforms)
-let you consolidate these into simple bundles.
-
-```python
-# myapp/BUILD
-
-sh_binary(
-    name = "my_rocks",
-    srcs = select({
-        ":basalt": ["pyroxene.sh"],
-        ":marble": ["calcite.sh"],
-        "//conditions:default": ["feldspar.sh"],
-    }),
-)
-
-config_setting(
-    name = "basalt",
-    constraint_values = [
-        ":black",
-        ":igneous",
-    ],
-)
-
-config_setting(
-    name = "marble",
-    constraint_values = [
-        ":white",
-        ":metamorphic",
-    ],
-)
-
-# constraint_setting acts as an enum type, and constraint_value as an enum value.
-constraint_setting(name = "color")
-constraint_value(name = "black", constraint_setting = "color")
-constraint_value(name = "white", constraint_setting = "color")
-constraint_setting(name = "texture")
-constraint_value(name = "smooth", constraint_setting = "texture")
-constraint_setting(name = "type")
-constraint_value(name = "igneous", constraint_setting = "type")
-constraint_value(name = "metamorphic", constraint_setting = "type")
-
-platform(
-    name = "basalt_platform",
-    constraint_values = [
-        ":black",
-        ":igneous",
-    ],
-)
-
-platform(
-    name = "marble_platform",
-    constraint_values = [
-        ":white",
-        ":smooth",
-        ":metamorphic",
-    ],
-)
-```
-
-The platform can be specified on the command line. It activates the
-`config_setting`s that contain a subset of the platform's `constraint_values`,
-allowing those `config_setting`s to match in `select()` expressions.
-
-For example, in order to set the `srcs` attribute of `my_rocks` to `calcite.sh`,
-you can simply run
-
-```sh
-bazel build //myapp:my_rocks --platforms=//myapp:marble_platform
-```
-
-Without platforms, this might look something like
-
-```sh
-bazel build //myapp:my_rocks --define color=white --define texture=smooth --define type=metamorphic
-```
-
-`select()` can also directly read `constraint_value`s:
-
-```python
-constraint_setting(name = "type")
-constraint_value(name = "igneous", constraint_setting = "type")
-constraint_value(name = "metamorphic", constraint_setting = "type")
-sh_binary(
-    name = "my_rocks",
-    srcs = select({
-        ":igneous": ["igneous.sh"],
-        ":metamorphic": ["metamorphic.sh"],
-    }),
-)
-```
-
-This saves the need for boilerplate `config_setting`s when you only need to
-check against single values.
-
-Platforms are still under development. See the
-[documentation](/concepts/platforms) for details.
-
-## Combining `select()`s
-
-`select` can appear multiple times in the same attribute:
-
-```python
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"] +
-           select({
-               ":armeabi_mode": ["armeabi_src.sh"],
-               ":x86_mode": ["x86_src.sh"],
-           }) +
-           select({
-               ":opt_mode": ["opt_extras.sh"],
-               ":dbg_mode": ["dbg_extras.sh"],
-           }),
-)
-```
-
-Note: Some restrictions apply to what can be combined in `select` values:
- - Duplicate labels can appear in different paths of the same `select`.
- - Duplicate labels can *not* appear within the same path of a `select`.
- - Duplicate labels can *not* appear across multiple combined `select`s (no matter what path).
-
-`select` cannot appear inside another `select`. If you need to nest `select`s
-and your attribute takes other targets as values, use an intermediate target:
-
-```python
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"],
-    deps = select({
-        ":armeabi_mode": [":armeabi_lib"],
-        ...
- }), -) - -sh_library( - name = "armeabi_lib", - srcs = select({ - ":opt_mode": ["armeabi_with_opt.sh"], - ... - }), -) -``` - -If you need a `select` to match when multiple conditions match, consider [AND -chaining](#and-chaining). - -## OR chaining - -Consider the following: - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1": [":standard_lib"], - ":config2": [":standard_lib"], - ":config3": [":standard_lib"], - ":config4": [":special_lib"], - }), -) -``` - -Most conditions evaluate to the same dep. But this syntax is hard to read and -maintain. It would be nice to not have to repeat `[":standard_lib"]` multiple -times. - -One option is to predefine the value as a BUILD variable: - -```python -STANDARD_DEP = [":standard_lib"] - -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1": STANDARD_DEP, - ":config2": STANDARD_DEP, - ":config3": STANDARD_DEP, - ":config4": [":special_lib"], - }), -) -``` - -This makes it easier to manage the dependency. But it still causes unnecessary -duplication. - -For more direct support, use one of the following: - -### `selects.with_or` - -The -[with_or](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectswith_or) -macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md) -module supports `OR`ing conditions directly inside a `select`: - -```python -load("@bazel_skylib//lib:selects.bzl", "selects") -``` - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = selects.with_or({ - (":config1", ":config2", ":config3"): [":standard_lib"], - ":config4": [":special_lib"], - }), -) -``` - -### `selects.config_setting_group` - - -The -[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group) -macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md) -module supports `OR`ing multiple `config_setting`s: - -```python -load("@bazel_skylib//lib:selects.bzl", "selects") -``` - - -```python -config_setting( - name = "config1", - values = {"cpu": "arm"}, -) -config_setting( - name = "config2", - values = {"compilation_mode": "dbg"}, -) -selects.config_setting_group( - name = "config1_or_2", - match_any = [":config1", ":config2"], -) -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1_or_2": [":standard_lib"], - "//conditions:default": [":other_lib"], - }), -) -``` - -Unlike `selects.with_or`, different targets can share `:config1_or_2` across -different attributes. - -It's an error for multiple conditions to match unless one is an unambiguous -"specialization" of the others or they all resolve to the same value. See [here](#configurable-build-example) for details. 
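-
-For instance, the following pair can match simultaneously yet stays
-unambiguous; a minimal sketch with hypothetical names:
-
-```python
-config_setting(
-    name = "x86",
-    values = {"cpu": "x86"},
-)
-
-# A strict superset of :x86's values, so it is an unambiguous specialization:
-# whenever both conditions match, this branch is chosen.
-config_setting(
-    name = "x86_dbg",
-    values = {
-        "cpu": "x86",
-        "compilation_mode": "dbg",
-    },
-)
-```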
- -## AND chaining - -If you need a `select` branch to match when multiple conditions match, use the -[Skylib](https://github.com/bazelbuild/bazel-skylib) macro -[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group): - -```python -config_setting( - name = "config1", - values = {"cpu": "arm"}, -) -config_setting( - name = "config2", - values = {"compilation_mode": "dbg"}, -) -selects.config_setting_group( - name = "config1_and_2", - match_all = [":config1", ":config2"], -) -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1_and_2": [":standard_lib"], - "//conditions:default": [":other_lib"], - }), -) -``` - -Unlike OR chaining, existing `config_setting`s can't be directly `AND`ed -inside a `select`. You have to explicitly wrap them in a `config_setting_group`. - -## Custom error messages - -By default, when no condition matches, the target the `select()` is attached to -fails with the error: - -```sh -ERROR: Configurable attribute "deps" doesn't match this configuration (would -a default condition help?). -Conditions checked: - //tools/cc_target_os:darwin - //tools/cc_target_os:android -``` - -This can be customized with the [`no_match_error`](/reference/be/functions#select) -attribute: - -```python -cc_library( - name = "my_lib", - deps = select( - { - "//tools/cc_target_os:android": [":android_deps"], - "//tools/cc_target_os:windows": [":windows_deps"], - }, - no_match_error = "Please build with an Android or Windows toolchain", - ), -) -``` - -```sh -$ bazel build //myapp:my_lib -ERROR: Configurable attribute "deps" doesn't match this configuration: Please -build with an Android or Windows toolchain -``` - -## Rules compatibility - -Rule implementations receive the *resolved values* of configurable -attributes. For example, given: - -```python -# myapp/BUILD - -some_rule( - name = "my_target", - some_attr = select({ - ":foo_mode": [":foo"], - ":bar_mode": [":bar"], - }), -) -``` - -```sh -$ bazel build //myapp/my_target --define mode=foo -``` - -Rule implementation code sees `ctx.attr.some_attr` as `[":foo"]`. - -Macros can accept `select()` clauses and pass them through to native -rules. But *they cannot directly manipulate them*. For example, there's no way -for a macro to convert - -```python -select({"foo": "val"}, ...) -``` - -to - -```python -select({"foo": "val_with_suffix"}, ...) -``` - -This is for two reasons. - -First, macros that need to know which path a `select` will choose *cannot work* -because macros are evaluated in Bazel's [loading phase](/run/build#loading), -which occurs before flag values are known. -This is a core Bazel design restriction that's unlikely to change any time soon. - -Second, macros that just need to iterate over *all* `select` paths, while -technically feasible, lack a coherent UI. Further design is necessary to change -this. - -## Bazel query and cquery - -Bazel [`query`](/query/guide) operates over Bazel's -[loading phase](/reference/glossary#loading-phase). -This means it doesn't know what command line flags a target uses since those -flags aren't evaluated until later in the build (in the -[analysis phase](/reference/glossary#analysis-phase)). -So it can't determine which `select()` branches are chosen. - -Bazel [`cquery`](/query/cquery) operates after Bazel's analysis phase, so it has -all this information and can accurately resolve `select()`s. 
- -Consider: - -```python -load("@bazel_skylib//rules:common_settings.bzl", "string_flag") -``` -```python -# myapp/BUILD - -string_flag( - name = "dog_type", - build_setting_default = "cat" -) - -cc_library( - name = "my_lib", - deps = select({ - ":long": [":foo_dep"], - ":short": [":bar_dep"], - }), -) - -config_setting( - name = "long", - flag_values = {":dog_type": "dachshund"}, -) - -config_setting( - name = "short", - flag_values = {":dog_type": "pug"}, -) -``` - -`query` overapproximates `:my_lib`'s dependencies: - -```sh -$ bazel query 'deps(//myapp:my_lib)' -//myapp:my_lib -//myapp:foo_dep -//myapp:bar_dep -``` - -while `cquery` shows its exact dependencies: - -```sh -$ bazel cquery 'deps(//myapp:my_lib)' --//myapp:dog_type=pug -//myapp:my_lib -//myapp:bar_dep -``` - -## FAQ - -### Why doesn't select() work in macros? - -select() *does* work in rules! See [Rules compatibility](#rules-compatibility) for -details. - -The key issue this question usually means is that select() doesn't work in -*macros*. These are different than *rules*. See the -documentation on [rules](/extending/rules) and [macros](/extending/macros) -to understand the difference. -Here's an end-to-end example: - -Define a rule and macro: - -```python -# myapp/defs.bzl - -# Rule implementation: when an attribute is read, all select()s have already -# been resolved. So it looks like a plain old attribute just like any other. -def _impl(ctx): - name = ctx.attr.name - allcaps = ctx.attr.my_config_string.upper() # This works fine on all values. - print("My name is " + name + " with custom message: " + allcaps) - -# Rule declaration: -my_custom_bazel_rule = rule( - implementation = _impl, - attrs = {"my_config_string": attr.string()}, -) - -# Macro declaration: -def my_custom_bazel_macro(name, my_config_string): - allcaps = my_config_string.upper() # This line won't work with select(s). - print("My name is " + name + " with custom message: " + allcaps) -``` - -Instantiate the rule and macro: - -```python -# myapp/BUILD - -load("//myapp:defs.bzl", "my_custom_bazel_rule") -load("//myapp:defs.bzl", "my_custom_bazel_macro") - -my_custom_bazel_rule( - name = "happy_rule", - my_config_string = select({ - "//third_party/bazel_platforms/cpu:x86_32": "first string", - "//third_party/bazel_platforms/cpu:ppc": "second string", - }), -) - -my_custom_bazel_macro( - name = "happy_macro", - my_config_string = "fixed string", -) - -my_custom_bazel_macro( - name = "sad_macro", - my_config_string = select({ - "//third_party/bazel_platforms/cpu:x86_32": "first string", - "//third_party/bazel_platforms/cpu:ppc": "other string", - }), -) -``` - -Building fails because `sad_macro` can't process the `select()`: - -```sh -$ bazel build //myapp:all -ERROR: /myworkspace/myapp/BUILD:17:1: Traceback - (most recent call last): -File "/myworkspace/myapp/BUILD", line 17 -my_custom_bazel_macro(name = "sad_macro", my_config_stri..."})) -File "/myworkspace/myapp/defs.bzl", line 4, in - my_custom_bazel_macro -my_config_string.upper() -type 'select' has no method upper(). -ERROR: error loading package 'myapp': Package 'myapp' contains errors. -``` - -Building succeeds when you comment out `sad_macro`: - -```sh -# Comment out sad_macro so it doesn't mess up the build. -$ bazel build //myapp:all -DEBUG: /myworkspace/myapp/defs.bzl:5:3: My name is happy_macro with custom message: FIXED STRING. -DEBUG: /myworkspace/myapp/hi.bzl:15:3: My name is happy_rule with custom message: FIRST STRING. 
-```
-
-This is impossible to change because *by definition* macros are evaluated before
-Bazel reads the build's command line flags. That means there isn't enough
-information to evaluate select()s.
-
-Macros can, however, pass `select()`s as opaque blobs to rules:
-
-```python
-# myapp/defs.bzl
-
-def my_custom_bazel_macro(name, my_config_string):
-    print("Invoking macro " + name)
-    my_custom_bazel_rule(
-        name = name + "_as_target",
-        my_config_string = my_config_string,
-    )
-```
-
-```sh
-$ bazel build //myapp:sad_macro_less_sad
-DEBUG: /myworkspace/myapp/defs.bzl:23:3: Invoking macro sad_macro_less_sad.
-DEBUG: /myworkspace/myapp/defs.bzl:15:3: My name is sad_macro_less_sad with custom message: FIRST STRING.
-```
-
-### Why does select() always return true?
-
-Because *macros* (but not rules) by definition
-[can't evaluate `select()`s](#faq-select-macro), any attempt to do so
-usually produces an error:
-
-```sh
-ERROR: /myworkspace/myapp/BUILD:17:1: Traceback
-  (most recent call last):
-File "/myworkspace/myapp/BUILD", line 17
-my_custom_bazel_macro(name = "sad_macro", my_config_stri..."}))
-File "/myworkspace/myapp/defs.bzl", line 4, in
-  my_custom_bazel_macro
-my_config_string.upper()
-type 'select' has no method upper().
-```
-
-Booleans are a special case that fail silently, so you should be particularly
-vigilant with them:
-
-```sh
-$ cat myapp/defs.bzl
-def my_boolean_macro(boolval):
-  print("TRUE" if boolval else "FALSE")
-
-$ cat myapp/BUILD
-load("//myapp:defs.bzl", "my_boolean_macro")
-my_boolean_macro(
-    boolval = select({
-        "//third_party/bazel_platforms/cpu:x86_32": True,
-        "//third_party/bazel_platforms/cpu:ppc": False,
-    }),
-)
-
-$ bazel build //myapp:all --cpu=x86
-DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE.
-$ bazel build //myapp:all --cpu=ppc
-DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE.
-```
-
-This happens because macros don't understand the contents of `select()`.
-So what they're really evaluating is the `select()` object itself. According to
-[Pythonic](https://docs.python.org/release/2.5.2/lib/truth.html) design
-standards, all objects aside from a very small number of exceptions
-automatically return true.
-
-### Can I read select() like a dict?
-
-Macros [can't](#faq-select-macro) evaluate `select()`s because macros evaluate before
-Bazel knows what the build's command line parameters are. Can they at least read
-the `select()`'s dictionary to, for example, add a suffix to each value?
-
-Conceptually this is possible, but it isn't yet a Bazel feature.
-What you *can* do today is prepare a straight dictionary, then feed it into a
-`select()`:
-
-```sh
-$ cat myapp/defs.bzl
-def selecty_genrule(name, select_cmd):
-    for key in select_cmd.keys():
-        select_cmd[key] += " WITH SUFFIX"
-    native.genrule(
-        name = name,
-        outs = [name + ".out"],
-        srcs = [],
-        cmd = "echo " + select(select_cmd | {"//conditions:default": "default"})
-              + " > $@"
-    )

-$ cat myapp/BUILD
-load("//myapp:defs.bzl", "selecty_genrule")
-selecty_genrule(
-    name = "selecty",
-    select_cmd = {
-        "//third_party/bazel_platforms/cpu:x86_32": "x86 mode",
-    },
-)
-
-$ bazel build //myapp:selecty --cpu=x86 && cat bazel-genfiles/myapp/selecty.out
-x86 mode WITH SUFFIX
-```
-
-If you'd like to support both `select()` and native types, you can do this:
-
-```sh
-$ cat myapp/defs.bzl
-def selecty_genrule(name, select_cmd):
-    cmd_suffix = ""
-    if type(select_cmd) == "string":
-        cmd_suffix = select_cmd + " WITH SUFFIX"
-    elif type(select_cmd) == "dict":
-        for key in select_cmd.keys():
-            select_cmd[key] += " WITH SUFFIX"
-        cmd_suffix = select(select_cmd | {"//conditions:default": "default"})
-
-    native.genrule(
-        name = name,
-        outs = [name + ".out"],
-        srcs = [],
-        cmd = "echo " + cmd_suffix + "> $@",
-    )
-```
-
-### Why doesn't select() work with bind()?
-
-First of all, do not use `bind()`. It is deprecated in favor of `alias()`.
-
-The technical answer is that [`bind()`](/reference/be/workspace#bind) is a repo
-rule, not a BUILD rule.
-
-Repo rules do not have a specific configuration, and aren't evaluated in
-the same way as BUILD rules. Therefore, a `select()` in a `bind()` can't
-actually evaluate to any specific branch.
-
-Instead, you should use [`alias()`](/reference/be/general#alias), with a `select()` in
-the `actual` attribute, to perform this type of run-time determination. This
-works correctly, since `alias()` is a BUILD rule, and is evaluated with a
-specific configuration.
-
-```sh
-$ cat WORKSPACE
-workspace(name = "myapp")
-bind(name = "openssl", actual = "//:ssl")
-http_archive(name = "alternative", ...)
-http_archive(name = "boringssl", ...)
-
-$ cat BUILD
-config_setting(
-    name = "alt_ssl",
-    define_values = {
-        "ssl_library": "alternative",
-    },
-)
-
-alias(
-    name = "ssl",
-    actual = select({
-        "//:alt_ssl": "@alternative//:ssl",
-        "//conditions:default": "@boringssl//:ssl",
-    }),
-)
-```
-
-With this setup, you can pass `--define ssl_library=alternative`, and any target
-that depends on either `//:ssl` or `//external:ssl` will see the alternative
-located at `@alternative//:ssl`.
-
-But really, stop using `bind()`.
-
-### Why doesn't my select() choose what I expect?
-
-If `//myapp:foo` has a `select()` that doesn't choose the condition you expect,
-use [cquery](/query/cquery) and `bazel config` to debug:
-
-If `//myapp:foo` is the top-level target you're building, run:
-
-```sh
-$ bazel cquery //myapp:foo
-//myapp:foo (12e23b9a2b534a)
-```
-
-If you're building some other target `//bar` that depends on
-//myapp:foo somewhere in its subgraph, run:
-
-```sh
-$ bazel cquery 'somepath(//bar, //myapp:foo)'
-//bar:bar (3ag3193fee94a2)
-//bar:intermediate_dep (12e23b9a2b534a)
-//myapp:foo (12e23b9a2b534a)
-```
-
-The `(12e23b9a2b534a)` next to `//myapp:foo` is a *hash* of the
-configuration that resolves `//myapp:foo`'s `select()`. You can inspect its
-values with `bazel config`:
-
-```sh
-$ bazel config 12e23b9a2b534a
-BuildConfigurationValue 12e23b9a2b534a
-Fragment com.google.devtools.build.lib.analysis.config.CoreOptions {
-  cpu: darwin
-  compilation_mode: fastbuild
-  ...
-}
-Fragment com.google.devtools.build.lib.rules.cpp.CppOptions {
-  linkopt: [-Dfoo=bar]
-  ...
-}
-...
-```
-
-Then compare this output against the settings expected by each `config_setting`.
-
-`//myapp:foo` may exist in different configurations in the same build. See the
-[cquery docs](/query/cquery) for guidance on using `somepath` to get the right
-one.
-
-Caution: To prevent restarting the Bazel server, invoke `bazel config` with the
-same command line flags as the `bazel cquery`. The `config` command relies on
-the configuration nodes from the still-running server of the previous command.
-
-### Why doesn't `select()` work with platforms?
-
-Bazel doesn't support configurable attributes checking whether a given platform
-is the target platform because the semantics are unclear.
-
-For example:
-
-```py
-platform(
-    name = "x86_linux_platform",
-    constraint_values = [
-        "@platforms//cpu:x86",
-        "@platforms//os:linux",
-    ],
-)
-
-cc_library(
-    name = "lib",
-    srcs = [...],
-    linkopts = select({
-        ":x86_linux_platform": ["--enable_x86_optimizations"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-In this `BUILD` file, which `select()` should be used if the target platform has both the
-`@platforms//cpu:x86` and `@platforms//os:linux` constraints, but is **not** the
-`:x86_linux_platform` defined here? The author of the `BUILD` file and the user
-who defined the separate platform may have different ideas.
-
-#### What should I do instead?
-
-Instead, define a `config_setting` that matches **any** platform with
-these constraints:
-
-```py
-config_setting(
-    name = "is_x86_linux",
-    constraint_values = [
-        "@platforms//cpu:x86",
-        "@platforms//os:linux",
-    ],
-)
-
-cc_library(
-    name = "lib",
-    srcs = [...],
-    linkopts = select({
-        ":is_x86_linux": ["--enable_x86_optimizations"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-This process defines specific semantics, making it clearer to users what
-platforms meet the desired conditions.
-
-#### What if I really, really want to `select` on the platform?
-
-If your build requirements specifically require checking the platform, you
-can flip the value of the `--platforms` flag in a `config_setting`:
-
-```py
-config_setting(
-    name = "is_specific_x86_linux_platform",
-    values = {
-        "platforms": "//package:x86_linux_platform",
-    },
-)
-
-cc_library(
-    name = "lib",
-    srcs = [...],
-    linkopts = select({
-        ":is_specific_x86_linux_platform": ["--enable_x86_optimizations"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-The Bazel team doesn't endorse doing this; it overly constrains your build and
-confuses users when the expected condition does not match.
-
-[BuildSettings]: /extending/config#user-defined-build-settings
diff --git a/8.1.1/configure/best-practices.mdx b/8.1.1/configure/best-practices.mdx
deleted file mode 100644
index abef72e..0000000
--- a/8.1.1/configure/best-practices.mdx
+++ /dev/null
@@ -1,88 +0,0 @@
----
-title: 'Best Practices'
----
-
-
-
-This page assumes you are familiar with Bazel and provides guidelines and
-advice on structuring your projects to take full advantage of Bazel's features.
-
-The overall goals are:
-
-- To use fine-grained dependencies to allow parallelism and incrementality.
-- To keep dependencies well-encapsulated.
-- To make code well-structured and testable.
-- To create a build configuration that is easy to understand and maintain.
-
-These guidelines are not requirements: few projects will be able to adhere to
-all of them.
As the man page for lint says, "A special reward will be presented
-to the first person to produce a real program that produces no errors with
-strict checking." However, incorporating as many of these principles as possible
-should make a project more readable, less error-prone, and faster to build.
-
-This page uses the requirement levels described in
-[this RFC](https://www.ietf.org/rfc/rfc2119.txt).
-
-## Running builds and tests
-
-A project should always be able to run `bazel build //...` and
-`bazel test //...` successfully on its stable branch. Targets that are necessary
-but do not build under certain circumstances (such as targets that require
-specific build flags, don't build on a certain platform, or require license
-agreements) should be tagged as specifically as possible (for example,
-"`requires-osx`"). This tagging allows targets to be filtered at a more
-fine-grained level than the "manual" tag and allows someone inspecting the
-`BUILD` file to understand what a target's restrictions are.
-
-## Third-party dependencies
-
-You may declare third-party dependencies in either of two ways:
-
-* Declare them as remote repositories in the `MODULE.bazel` file.
-* Put them in a directory called `third_party/` under your workspace directory.
-
-## Depending on binaries
-
-Everything should be built from source whenever possible. Generally this means
-that, instead of depending on a library `some-library.so`, you'd create a
-`BUILD` file and build `some-library.so` from its sources, then depend on that
-target.
-
-Always building from source ensures that a build is not using a library that
-was built with incompatible flags or a different architecture. There are also
-some features like coverage, static analysis, or dynamic analysis that only
-work on the source.
-
-## Versioning
-
-Prefer building all code from head whenever possible. When versions must be
-used, avoid including the version in the target name (for example, `//guava`,
-not `//guava-20.0`). This naming makes the library easier to update (only one
-target needs to be updated). It's also more resilient to diamond dependency
-issues: if one library depends on `guava-19.0` and one depends on `guava-20.0`,
-you could end up with a library that tries to depend on two different versions.
-If you created a misleading alias to point both targets to one `guava` library,
-then the `BUILD` files are misleading.
-
-## Using the `.bazelrc` file
-
-For project-specific options, use the configuration file
-`<workspace>/.bazelrc` (see [bazelrc format](/run/bazelrc)).
-
-If you want to support per-user options for your project that you **do not**
-want to check into source control, include the line:
-
-```
-try-import %workspace%/user.bazelrc
-```
-(or any other file name) in your `<workspace>/.bazelrc`
-and add `user.bazelrc` to your `.gitignore`.
-
-## Packages
-
-Every directory that contains buildable files should be a package. If a `BUILD`
-file refers to files in subdirectories (such as `srcs = ["a/b/C.java"]`) it's
-a sign that a `BUILD` file should be added to that subdirectory. The longer
-this structure exists, the more likely circular dependencies will be
-inadvertently created, a target's scope will creep, and an increasing number
-of reverse dependencies will have to be updated.
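-
-A minimal sketch of the refactoring this implies, with hypothetical package and
-file names:
-
-```
-# Before: //myproject/BUILD reaches into a subdirectory.
-#   java_library(
-#       name = "lib",
-#       srcs = ["a/b/C.java"],
-#   )
-
-# After: add //myproject/a/b/BUILD so the subdirectory owns its sources...
-java_library(
-    name = "b",
-    srcs = ["C.java"],
-    visibility = ["//myproject:__pkg__"],
-)
-
-# ...and //myproject/BUILD depends on the new package instead.
-java_library(
-    name = "lib",
-    srcs = ["Lib.java"],
-    deps = ["//myproject/a/b"],
-)
-```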
diff --git a/8.1.1/configure/coverage.mdx b/8.1.1/configure/coverage.mdx
deleted file mode 100644
index 9a50db0..0000000
--- a/8.1.1/configure/coverage.mdx
+++ /dev/null
@@ -1,130 +0,0 @@
----
-title: 'Code coverage with Bazel'
---- 
-
-
-
-Bazel features a `coverage` sub-command to produce code coverage
-reports for repositories that can be tested with `bazel coverage`. Due
-to the idiosyncrasies of the various language ecosystems, it is not
-always trivial to make this work for a given project.
-
-This page documents the general process for creating and viewing
-coverage reports, and also features some language-specific notes for
-languages whose configuration is well-known. It is best read by first
-reading [the general section](#creating-a-coverage-report), and then
-reading about the requirements for a specific language. Note also the
-[remote execution section](#remote-execution), which requires some
-additional considerations.
-
-While a lot of customization is possible, this document focuses on
-producing and consuming [`lcov`][lcov] reports, which is currently the
-most well-supported route.
-
-## Creating a coverage report
-
-### Preparation
-
-The basic workflow for creating coverage reports requires the
-following:
-
-- A basic repository with test targets
-- A toolchain with the language-specific code coverage tools installed
-- A correct "instrumentation" configuration
-
-The former two are language-specific and mostly straightforward;
-however, the latter can be more difficult for complex projects.
-
-"Instrumentation" in this case refers to the coverage tools that are
-used for a specific target. Bazel allows turning this on for a
-specific subset of files using the
-[`--instrumentation_filter`](/reference/command-line-reference#flag--instrumentation_filter)
-flag, which specifies a filter for targets that are tested with the
-instrumentation enabled. To enable instrumentation for tests, the
-[`--instrument_test_targets`](/reference/command-line-reference#flag--instrument_test_targets)
-flag is required.
-
-By default, Bazel tries to match the target package(s), and prints the
-relevant filter as an `INFO` message.
-
-### Running coverage
-
-To produce a coverage report, use [`bazel coverage
---combined_report=lcov
-[target]`](/reference/command-line-reference#coverage). This runs the
-tests for the target, generating coverage reports in the lcov format
-for each file.
-
-Once finished, Bazel runs an action that collects all the produced
-coverage files, and merges them into one, which is then finally
-created under `$(bazel info
-output_path)/_coverage/_coverage_report.dat`.
-
-Coverage reports are also produced if tests fail, though note that
-this does not extend to the failed tests - only passing tests are
-reported.
-
-### Viewing coverage
-
-The coverage report is only output in the non-human-readable `lcov`
-format. From this, we can use the `genhtml` utility (part of [the lcov
-project][lcov]) to produce a report that can be viewed in a web
-browser:
-
-```console
-genhtml --branch-coverage --output genhtml "$(bazel info output_path)/_coverage/_coverage_report.dat"
-```
-
-Note that `genhtml` reads the source code as well, to annotate missing
-coverage in these files. For this to work, it is expected that
-`genhtml` is executed in the root of the Bazel project.
-
-To view the result, simply open the `index.html` file produced in the
-`genhtml` directory in any web browser.
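-
-Putting the pieces together, an end-to-end run might look like the
-following sketch, where the filter value is illustrative and should
-match your own packages:
-
-```console
-# Run the tests with coverage instrumentation enabled for //mylib/...
-# and merge the per-test lcov files into a single report.
-bazel coverage --combined_report=lcov \
-    --instrumentation_filter="//mylib[/:]" //mylib/...
-
-# Render the merged report as browsable HTML in ./genhtml.
-genhtml --branch-coverage --output genhtml \
-    "$(bazel info output_path)/_coverage/_coverage_report.dat"
-```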
-
-For further help and information around the `genhtml` tool, or the
-`lcov` coverage format, see [the lcov project][lcov].
-
-## Remote execution
-
-Running with remote test execution currently has a few caveats:
-
-- The report combination action cannot yet run remotely. This is
-  because Bazel does not consider the coverage output files as part of
-  its graph (see [this issue][remote_report_issue]), and can therefore
-  not correctly treat them as inputs to the combination action. To
-  work around this, use `--strategy=CoverageReport=local`.
-
-  Note: It may be necessary to specify something like
-  `--strategy=CoverageReport=local,remote` instead, if Bazel is set
-  up to try `local,remote`, due to how Bazel resolves strategies.
-- `--remote_download_minimal` and similar flags cannot be used
-  either, as a consequence of the former.
-- Bazel will currently fail to create coverage information if tests
-  have been cached previously. To work around this,
-  `--nocache_test_results` can be set specifically for coverage runs,
-  although this of course incurs a heavy cost in terms of test times.
-- `--experimental_split_coverage_postprocessing` and
-  `--experimental_fetch_all_coverage_outputs`
-
-  Usually coverage is run as part of the test action, and so we don't
-  get all coverage back as outputs of the remote execution by default.
-  These flags override the default and obtain the coverage data. See
-  [this issue][split_coverage_issue] for more details.
-
-## Language-specific configuration
-
-### Java
-
-Java should work out-of-the-box with the default configuration. The
-[bazel toolchains][bazel_toolchains] contain everything necessary for
-remote execution as well, including JUnit.
-
-### Python
-
-See the [`rules_python` coverage docs](https://github.com/bazelbuild/rules_python/blob/main/docs/sphinx/coverage.md)
-for additional steps needed to enable coverage support in Python.
-
-[lcov]: https://github.com/linux-test-project/lcov
-[bazel_toolchains]: https://github.com/bazelbuild/bazel-toolchains
-[remote_report_issue]: https://github.com/bazelbuild/bazel/issues/4685
-[split_coverage_issue]: https://github.com/bazelbuild/bazel/issues/4685
diff --git a/8.1.1/contribute/breaking-changes.mdx b/8.1.1/contribute/breaking-changes.mdx
deleted file mode 100644
index 5dda1b9..0000000
--- a/8.1.1/contribute/breaking-changes.mdx
+++ /dev/null
@@ -1,147 +0,0 @@
----
-title: 'Guide for rolling out breaking changes'
----
-
-
-
-It is inevitable that we will make breaking changes to Bazel. We will have to
-change our designs and fix the things that do not quite work. However, we need
-to make sure that the community and the Bazel ecosystem can follow along. To
-that end, the Bazel project has adopted a
-[backward compatibility policy](/release/backward-compatibility).
-This document describes the process for Bazel contributors to make a breaking
-change in Bazel to adhere to this policy.
-
-1. Follow the [design document policy](/contribute/design-documents).
-
-1. [File a GitHub issue.](#github-issue)
-
-1. [Implement the change.](#implementation)
-
-1. [Update labels.](#labels)
-
-1. [Update repositories.](#update-repos)
-
-1. [Flip the incompatible flag.](#flip-flag)
-
-## GitHub issue
-
-[File a GitHub issue](https://github.com/bazelbuild/bazel/issues)
-in the Bazel repository.
-[See example.](https://github.com/bazelbuild/bazel/issues/6611)
-
-We recommend that:
-
-* The title starts with the name of the flag (the flag name will start with
-  `incompatible_`).
-
-* You add the label
-  [`incompatible-change`](https://github.com/bazelbuild/bazel/labels/incompatible-change).
-
-* The description contains a description of the change and a link to relevant
-  design documents.
-
-* The description contains a migration recipe, to explain to users how they
-  should update their code. Ideally, when the change is mechanical, include a
-  link to a migration tool.
-
-* The description includes an example of the error message users will get if
-  they don't migrate. This will make the GitHub issue more discoverable from
-  search engines. Make sure that the error message is helpful and actionable.
-  When possible, the error message should include the name of the incompatible
-  flag.
-
-For the migration tool, consider contributing to
-[Buildifier](https://github.com/bazelbuild/buildtools/blob/master/buildifier/README.md).
-It is able to apply automated fixes to `BUILD`, `WORKSPACE`, and `.bzl` files.
-It may also report warnings.
-
-## Implementation
-
-Create a new flag in Bazel. The default value must be false. The help text
-should contain the URL of the GitHub issue. As the flag name starts with
-`incompatible_`, it needs metadata tags:
-
-```java
-      metadataTags = {
-          OptionMetadataTag.INCOMPATIBLE_CHANGE,
-      },
-```
-
-In the commit description, add a brief summary of the flag.
-Also add [`RELNOTES:`](release-notes.md) in the following form:
-`RELNOTES: --incompatible_name_of_flag has been added. See #xyz for details`
-
-The commit should also update the relevant documentation, so that there is no
-window of commits in which the code is inconsistent with the docs. Since our
-documentation is versioned, changes to the docs will not be inadvertently
-released prematurely.
-
-## Labels
-
-Once the commit is merged and the incompatible change is ready to be adopted, add the label
-[`migration-ready`](https://github.com/bazelbuild/bazel/labels/migration-ready)
-to the GitHub issue.
-
-If a problem is found with the flag and users are not expected to migrate yet:
-remove the `migration-ready` label.
-
-If you plan to flip the flag in the next major release, add the label
-`breaking-change-X.0` to the issue.
-
-## Updating repositories
-
-Bazel CI tests a list of important projects at
-[Bazel@HEAD + Downstream](https://buildkite.com/bazel/bazel-at-head-plus-downstream). Most of them are often
-dependencies of other Bazel projects; therefore, it's important to migrate them to unblock the migration for the broader community. To monitor the migration status of those projects, you can use the [`bazelisk-plus-incompatible-flags` pipeline](https://buildkite.com/bazel/bazelisk-plus-incompatible-flags).
-Check how this pipeline works [here](https://github.com/bazelbuild/continuous-integration/tree/master/buildkite#checking-incompatible-changes-status-for-downstream-projects).
-
-Our dev support team monitors the [`migration-ready`](https://github.com/bazelbuild/bazel/labels/migration-ready) label. Once you add this label to the GitHub issue, they will handle the following:
-
-1. Create a comment in the GitHub issue to track the list of failures and downstream projects that need to be migrated ([see example](https://github.com/bazelbuild/bazel/issues/17032#issuecomment-1353077469))
-
-1. File GitHub issues to notify the owners of every downstream project broken by your incompatible change ([see example](https://github.com/bazelbuild/intellij/issues/4208))
-1. Follow up to make sure all issues are addressed before the target release date
-
-Migrating projects in the downstream pipeline is NOT entirely the responsibility of the incompatible change author, but you can do the following to accelerate the migration and make life easier for both Bazel users and the Bazel Green Team.
-
-1. Send PRs to fix downstream projects.
-
-1. Reach out to the Bazel community for help on migration (e.g. [Bazel Rules Authors SIG](https://bazel-contrib.github.io/SIG-rules-authors/)).
-
-## Flipping the flag
-
-Before flipping the default value of the flag to true, please make sure that:
-
-* Core repositories in the ecosystem are migrated.
-
-  On the [`bazelisk-plus-incompatible-flags` pipeline](https://buildkite.com/bazel/bazelisk-plus-incompatible-flags),
-  the flag should appear under `The following flags didn't break any passing Bazel team owned/co-owned projects`.
-
-* All issues in the checklist are marked as fixed/closed.
-
-* User concerns and questions have been resolved.
-
-When the flag is ready to flip in Bazel, but blocked on internal migration at Google, please consider setting the flag value to false in the internal `blazerc` file to unblock the flag flip. By doing this, we can ensure Bazel users depend on the new behavior by default as early as possible.
-
-When changing the flag default to true, please:
-
-* Use `RELNOTES[INC]` in the commit description, with the
-  following format:
-  `RELNOTES[INC]: --incompatible_name_of_flag is flipped to true. See #xyz for
-  details`
-  You can include additional information in the rest of the commit description.
-* Use `Fixes #xyz` in the description, so that the GitHub issue gets closed
-  when the commit is merged.
-* Review and update documentation if needed.
-* File a new issue `#abc` to track the removal of the flag.
-
-## Removing the flag
-
-After the flag is flipped at HEAD, it should be removed from Bazel eventually.
-When you plan to remove the incompatible flag:
-
-* Consider leaving more time for users to migrate if it's a major incompatible change.
-  Ideally, the flag should be available in at least one major release.
-* For the commit that removes the flag, use `Fixes #abc` in the description
-  so that the GitHub issue gets closed when the commit is merged.
diff --git a/8.1.1/contribute/codebase.mdx b/8.1.1/contribute/codebase.mdx
deleted file mode 100644
index 8a13611..0000000
--- a/8.1.1/contribute/codebase.mdx
+++ /dev/null
@@ -1,1670 +0,0 @@
----
-title: 'The Bazel codebase'
----
-
-
-
-This document is a description of the codebase and how Bazel is structured. It
-is intended for people who want to contribute to Bazel, not for end-users.
-
-## Introduction
-
-The codebase of Bazel is large (~350 KLOC of production code and ~260 KLOC of
-test code) and no one is familiar with the whole landscape: everyone knows their
-particular valley very well, but few know what lies over the hills in every
-direction.
-
-In order for people midway upon the journey not to find themselves within a
-forest dark with the straightforward pathway being lost, this document tries to
-give an overview of the codebase so that it's easier to get started with
-working on it.
-
-The public version of the source code of Bazel lives on GitHub at
-[github.com/bazelbuild/bazel](http://github.com/bazelbuild/bazel). This is not
-the "source of truth"; it's derived from a Google-internal source tree that
-contains additional functionality that is not useful outside Google. The
-long-term goal is to make GitHub the source of truth.
- -Contributions are accepted through the regular GitHub pull request mechanism, -and manually imported by a Googler into the internal source tree, then -re-exported back out to GitHub. - -## Client/server architecture - -The bulk of Bazel resides in a server process that stays in RAM between builds. -This allows Bazel to maintain state between builds. - -This is why the Bazel command line has two kinds of options: startup and -command. In a command line like this: - -``` - bazel --host_jvm_args=-Xmx8G build -c opt //foo:bar -``` - -Some options (`--host_jvm_args=`) are before the name of the command to be run -and some are after (`-c opt`); the former kind is called a "startup option" and -affects the server process as a whole, whereas the latter kind, the "command -option", only affects a single command. - -Each server instance has a single associated workspace (collection of source -trees known as "repositories") and each workspace usually has a single active -server instance. This can be circumvented by specifying a custom output base -(see the "Directory layout" section for more information). - -Bazel is distributed as a single ELF executable that is also a valid .zip file. -When you type `bazel`, the above ELF executable implemented in C++ (the -"client") gets control. It sets up an appropriate server process using the -following steps: - -1. Checks whether it has already extracted itself. If not, it does that. This - is where the implementation of the server comes from. -2. Checks whether there is an active server instance that works: it is running, - it has the right startup options and uses the right workspace directory. It - finds the running server by looking at the directory `$OUTPUT_BASE/server` - where there is a lock file with the port the server is listening on. -3. If needed, kills the old server process -4. If needed, starts up a new server process - -After a suitable server process is ready, the command that needs to be run is -communicated to it over a gRPC interface, then the output of Bazel is piped back -to the terminal. Only one command can be running at the same time. This is -implemented using an elaborate locking mechanism with parts in C++ and parts in -Java. There is some infrastructure for running multiple commands in parallel, -since the inability to run `bazel version` in parallel with another command -is somewhat embarrassing. The main blocker is the life cycle of `BlazeModule`s -and some state in `BlazeRuntime`. - -At the end of a command, the Bazel server transmits the exit code the client -should return. An interesting wrinkle is the implementation of `bazel run`: the -job of this command is to run something Bazel just built, but it can't do that -from the server process because it doesn't have a terminal. So instead it tells -the client what binary it should `exec()` and with what arguments. - -When one presses Ctrl-C, the client translates it to a Cancel call on the gRPC -connection, which tries to terminate the command as soon as possible. After the -third Ctrl-C, the client sends a SIGKILL to the server instead. - -The source code of the client is under `src/main/cpp` and the protocol used to -communicate with the server is in `src/main/protobuf/command_server.proto` . - -The main entry point of the server is `BlazeRuntime.main()` and the gRPC calls -from the client are handled by `GrpcServerImpl.run()`. - -## Directory layout - -Bazel creates a somewhat complicated set of directories during a build. 
A full description is available in [Output directory layout](/remote/output-directories).

The "main repo" is the source tree Bazel is run in. It usually corresponds to something you checked out from source control. The root of this directory is known as the "workspace root".

Bazel puts all of its data under the "output user root". This is usually `$HOME/.cache/bazel/_bazel_${USER}`, but can be overridden using the `--output_user_root` startup option.

The "install base" is where Bazel is extracted to. This is done automatically and each Bazel version gets a subdirectory based on its checksum under the install base. It's at `$OUTPUT_USER_ROOT/install` by default and can be changed using the `--install_base` command line option.

The "output base" is the place where the Bazel instance attached to a specific workspace writes to. Each output base has at most one Bazel server instance running at any time. It's usually at `$OUTPUT_USER_ROOT/<checksum of the path name of the workspace>`. It can be changed using the `--output_base` startup option, which is, among other things, useful for getting around the limitation that only one Bazel instance can be running in any workspace at any given time.

The output directory contains, among other things:

* The fetched external repositories at `$OUTPUT_BASE/external`.
* The exec root, a directory that contains symlinks to all the source code for the current build. It's located at `$OUTPUT_BASE/execroot`. During the build, the working directory is `$EXECROOT/<name of the main repository>`. We are planning to change this to `$EXECROOT`, although it's a long-term plan because it's a very incompatible change.
* Files built during the build.

## The process of executing a command

Once the Bazel server gets control and is informed about a command it needs to execute, the following sequence of events happens:

1. `BlazeCommandDispatcher` is informed about the new request. It decides whether the command needs a workspace to run in (almost every command except for ones that don't have anything to do with source code, such as version or help) and whether another command is running.

2. The right command is found. Each command must implement the interface `BlazeCommand` and must have the `@Command` annotation (this is a bit of an antipattern; it would be nice if all the metadata a command needs were described by methods on `BlazeCommand`).

3. The command line options are parsed. Each command has different command line options, which are described in the `@Command` annotation.

4. An event bus is created. The event bus is a stream for events that happen during the build. Some of these are exported to outside of Bazel under the aegis of the Build Event Protocol in order to tell the world how the build goes.

5. The command gets control. The most interesting commands are those that run a build: build, test, run, coverage and so on; this functionality is implemented by `BuildTool`.

6. The set of target patterns on the command line is parsed and wildcards like `//pkg:all` and `//pkg/...` are resolved. This is implemented in `AnalysisPhaseRunner.evaluateTargetPatterns()` and reified in Skyframe as `TargetPatternPhaseValue`.

7. The loading/analysis phase is run to produce the action graph (a directed acyclic graph of commands that need to be executed for the build).

8. The execution phase is run. This means that every action required to build the top-level targets that are requested is run.
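To make the directory layout described earlier in this section easier to picture, here is a rough sketch of a typical output user root. This is only an illustration; the user name ("alice"), the workspace name and the abbreviated checksums are made up, and exact paths vary between installations and Bazel versions:

```
$HOME/.cache/bazel/_bazel_alice/   # output user root for user "alice"
  install/a1b2c3.../               # install base, one per Bazel version
  d4e5f6.../                       # output base, one per workspace
    server/                        # lock file and the server's port
    external/                      # fetched external repositories
    execroot/my_workspace/         # exec root: symlinked source tree and
                                   # the working directory during a build
```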
- -## Command line options - -The command line options for a Bazel invocation are described in an -`OptionsParsingResult` object, which in turn contains a map from "option -classes" to the values of the options. An "option class" is a subclass of -`OptionsBase` and groups command line options together that are related to each -other. For example: - -1. Options related to a programming language (`CppOptions` or `JavaOptions`). - These should be a subclass of `FragmentOptions` and are eventually wrapped - into a `BuildOptions` object. -2. Options related to the way Bazel executes actions (`ExecutionOptions`) - -These options are designed to be consumed in the analysis phase and (either -through `RuleContext.getFragment()` in Java or `ctx.fragments` in Starlark). -Some of them (for example, whether to do C++ include scanning or not) are read -in the execution phase, but that always requires explicit plumbing since -`BuildConfiguration` is not available then. For more information, see the -section "Configurations". - -**WARNING:** We like to pretend that `OptionsBase` instances are immutable and -use them that way (such as a part of `SkyKeys`). This is not the case and -modifying them is a really good way to break Bazel in subtle ways that are hard -to debug. Unfortunately, making them actually immutable is a large endeavor. -(Modifying a `FragmentOptions` immediately after construction before anyone else -gets a chance to keep a reference to it and before `equals()` or `hashCode()` is -called on it is okay.) - -Bazel learns about option classes in the following ways: - -1. Some are hard-wired into Bazel (`CommonCommandOptions`) -2. From the `@Command` annotation on each Bazel command -3. From `ConfiguredRuleClassProvider` (these are command line options related - to individual programming languages) -4. Starlark rules can also define their own options (see - [here](/extending/config)) - -Each option (excluding Starlark-defined options) is a member variable of a -`FragmentOptions` subclass that has the `@Option` annotation, which specifies -the name and the type of the command line option along with some help text. - -The Java type of the value of a command line option is usually something simple -(a string, an integer, a Boolean, a label, etc.). However, we also support -options of more complicated types; in this case, the job of converting from the -command line string to the data type falls to an implementation of -`com.google.devtools.common.options.Converter`. - -## The source tree, as seen by Bazel - -Bazel is in the business of building software, which happens by reading and -interpreting the source code. The totality of the source code Bazel operates on -is called "the workspace" and it is structured into repositories, packages and -rules. - -### Repositories - -A "repository" is a source tree on which a developer works; it usually -represents a single project. Bazel's ancestor, Blaze, operated on a monorepo, -that is, a single source tree that contains all source code used to run the build. -Bazel, in contrast, supports projects whose source code spans multiple -repositories. The repository from which Bazel is invoked is called the "main -repository", the others are called "external repositories". - -A repository is marked by a repo boundary file (`MODULE.bazel`, `REPO.bazel`, or -in legacy contexts, `WORKSPACE` or `WORKSPACE.bazel`) in its root directory. The -main repo is the source tree where you're invoking Bazel from. 
External repos are defined in various ways; see [external dependencies overview](/external/overview) for more information.

Code of external repositories is symlinked or downloaded under `$OUTPUT_BASE/external`.

When running the build, the whole source tree needs to be pieced together; this is done by `SymlinkForest`, which symlinks every package in the main repository to `$EXECROOT` and every external repository to either `$EXECROOT/external` or `$EXECROOT/..`.

### Packages

Every repository is composed of packages: collections of related files and a specification of their dependencies. These are specified by a file called `BUILD` or `BUILD.bazel`. If both exist, Bazel prefers `BUILD.bazel`; the reason why `BUILD` files are still accepted is that Bazel's ancestor, Blaze, used this file name. However, `BUILD` turned out to be a commonly used path segment, especially on Windows, where file names are case-insensitive.

Packages are independent of each other: changes to the `BUILD` file of a package cannot cause other packages to change. The addition or removal of `BUILD` files _can_ change other packages, since recursive globs stop at package boundaries and thus the presence of a `BUILD` file stops the recursion.

The evaluation of a `BUILD` file is called "package loading". It's implemented in the class `PackageFactory`, works by calling the Starlark interpreter and requires knowledge of the set of available rule classes. The result of package loading is a `Package` object. It's mostly a map from a string (the name of a target) to the target itself.

A large chunk of complexity during package loading is globbing: Bazel does not require every source file to be explicitly listed and instead can run globs (such as `glob(["**/*.java"])`). Unlike the shell, it supports recursive globs that descend into subdirectories (but not into subpackages). This requires access to the file system and since that can be slow, we implement all sorts of tricks to make it run in parallel and as efficiently as possible.

Globbing is implemented in the following classes:

* `LegacyGlobber`, a fast and blissfully Skyframe-unaware globber
* `SkyframeHybridGlobber`, a version that uses Skyframe and falls back to the legacy globber in order to avoid "Skyframe restarts" (described below)

The `Package` class itself contains some members that are exclusively used to parse the "external" package (related to external dependencies) and which do not make sense for real packages. This is a design flaw because objects describing regular packages should not contain fields that describe something else. These include:

* The repository mappings
* The registered toolchains
* The registered execution platforms

Ideally, there would be more separation between parsing the "external" package and parsing regular packages so that `Package` does not need to cater for the needs of both. This is unfortunately difficult to do because the two are intertwined quite deeply.

### Labels, Targets, and Rules

Packages are composed of targets, which have the following types:

1. **Files:** things that are either the input or the output of the build. In Bazel parlance, we call them _artifacts_ (discussed elsewhere). Not all files created during the build are targets; it's common for an output of Bazel not to have an associated label.
2. **Rules:** these describe steps to derive their outputs from their inputs.
They - are generally associated with a programming language (such as `cc_library`, - `java_library` or `py_library`), but there are some language-agnostic ones - (such as `genrule` or `filegroup`) -3. **Package groups:** discussed in the [Visibility](#visibility) section. - -The name of a target is called a _Label_. The syntax of labels is -`@repo//pac/kage:name`, where `repo` is the name of the repository the Label is -in, `pac/kage` is the directory its `BUILD` file is in and `name` is the path of -the file (if the label refers to a source file) relative to the directory of the -package. When referring to a target on the command line, some parts of the label -can be omitted: - -1. If the repository is omitted, the label is taken to be in the main - repository. -2. If the package part is omitted (such as `name` or `:name`), the label is taken - to be in the package of the current working directory (relative paths - containing uplevel references (..) are not allowed) - -A kind of a rule (such as "C++ library") is called a "rule class". Rule classes may -be implemented either in Starlark (the `rule()` function) or in Java (so called -"native rules", type `RuleClass`). In the long term, every language-specific -rule will be implemented in Starlark, but some legacy rule families (such as Java -or C++) are still in Java for the time being. - -Starlark rule classes need to be imported at the beginning of `BUILD` files -using the `load()` statement, whereas Java rule classes are "innately" known by -Bazel, by virtue of being registered with the `ConfiguredRuleClassProvider`. - -Rule classes contain information such as: - -1. Its attributes (such as `srcs`, `deps`): their types, default values, - constraints, etc. -2. The configuration transitions and aspects attached to each attribute, if any -3. The implementation of the rule -4. The transitive info providers the rule "usually" creates - -**Terminology note:** In the codebase, we often use "Rule" to mean the target -created by a rule class. But in Starlark and in user-facing documentation, -"Rule" should be used exclusively to refer to the rule class itself; the target -is just a "target". Also note that despite `RuleClass` having "class" in its -name, there is no Java inheritance relationship between a rule class and targets -of that type. - -## Skyframe - -The evaluation framework underlying Bazel is called Skyframe. Its model is that -everything that needs to be built during a build is organized into a directed -acyclic graph with edges pointing from any pieces of data to its dependencies, -that is, other pieces of data that need to be known to construct it. - -The nodes in the graph are called `SkyValue`s and their names are called -`SkyKey`s. Both are deeply immutable; only immutable objects should be -reachable from them. This invariant almost always holds, and in case it doesn't -(such as for the individual options classes `BuildOptions`, which is a member of -`BuildConfigurationValue` and its `SkyKey`) we try really hard not to change -them or to change them in only ways that are not observable from the outside. -From this it follows that everything that is computed within Skyframe (such as -configured targets) must also be immutable. - -The most convenient way to observe the Skyframe graph is to run `bazel dump ---skyframe=deps`, which dumps the graph, one `SkyValue` per line. It's best -to do it for tiny builds, since it can get pretty large. - -Skyframe lives in the `com.google.devtools.build.skyframe` package. 
The similarly-named package `com.google.devtools.build.lib.skyframe` contains the implementation of Bazel on top of Skyframe. More information about Skyframe is available [here](/reference/skyframe).

To evaluate a given `SkyKey` into a `SkyValue`, Skyframe will invoke the `SkyFunction` corresponding to the type of the key. During the function's evaluation, it may request other dependencies from Skyframe by calling the various overloads of `SkyFunction.Environment.getValue()`. This has the side-effect of registering those dependencies into Skyframe's internal graph, so that Skyframe will know to re-evaluate the function when any of its dependencies change. In other words, Skyframe's caching and incremental computation work at the granularity of `SkyFunction`s and `SkyValue`s.

Whenever a `SkyFunction` requests a dependency that is unavailable, `getValue()` will return null. The function should then yield control back to Skyframe by itself returning null. At some later point, Skyframe will evaluate the unavailable dependency, then restart the function from the beginning — only this time the `getValue()` call will succeed with a non-null result.

A consequence of this is that any computation performed inside the `SkyFunction` prior to the restart must be repeated. But this does not include work done to evaluate dependency `SkyValues`, which are cached. Therefore, we commonly work around this issue by:

1. Declaring dependencies in batches (by using `getValuesAndExceptions()`) to limit the number of restarts.
2. Breaking up a `SkyValue` into separate pieces computed by different `SkyFunction`s, so that they can be computed and cached independently. This should be done strategically, since it has the potential to increase memory usage.
3. Storing state between restarts, either using `SkyFunction.Environment.getState()`, or keeping an ad hoc static cache "behind the back of Skyframe". With complex SkyFunctions, state management between restarts can get tricky, so [`StateMachine`s](/contribute/statemachine-guide) were introduced for a structured approach to logical concurrency, including hooks to suspend and resume hierarchical computations within a `SkyFunction`. Example: [`DependencyResolver#computeDependencies`][statemachine_example] uses a `StateMachine` with `getState()` to compute the potentially huge set of direct dependencies of a configured target, which otherwise can result in expensive restarts.

[statemachine_example]: https://developers.google.com/devsite/reference/markdown/links#reference_links

Fundamentally, Bazel needs these kinds of workarounds because hundreds of thousands of in-flight Skyframe nodes are common, and Java's support for lightweight threads [does not outperform][virtual_threads] the `StateMachine` implementation as of 2023.

[virtual_threads]: /contribute/statemachine-guide#epilogue_eventually_removing_callbacks

## Starlark

Starlark is the domain-specific language people use to configure and extend Bazel. It's conceived as a restricted subset of Python that has far fewer types, more restrictions on control flow, and most importantly, strong immutability guarantees to enable concurrent reads. It is not Turing-complete, which discourages some (but not all) users from trying to accomplish general programming tasks within the language.

Starlark is implemented in the `net.starlark.java` package. It also has an independent Go implementation [here](https://github.com/google/starlark-go).
The Java -implementation used in Bazel is currently an interpreter. - -Starlark is used in several contexts, including: - -1. **`BUILD` files.** This is where new build targets are defined. Starlark - code running in this context only has access to the contents of the `BUILD` - file itself and `.bzl` files loaded by it. -2. **The `MODULE.bazel` file.** This is where external dependencies are - defined. Starlark code running in this context only has very limited access - to a few predefined directives. -3. **`.bzl` files.** This is where new build rules, repo rules, module - extensions are defined. Starlark code here can define new functions and load - from other `.bzl` files. - -The dialects available for `BUILD` and `.bzl` files are slightly different -because they express different things. A list of differences is available -[here](/rules/language#differences-between-build-and-bzl-files). - -More information about Starlark is available [here](/rules/language). - -## The loading/analysis phase - -The loading/analysis phase is where Bazel determines what actions are needed to -build a particular rule. Its basic unit is a "configured target", which is, -quite sensibly, a (target, configuration) pair. - -It's called the "loading/analysis phase" because it can be split into two -distinct parts, which used to be serialized, but they can now overlap in time: - -1. Loading packages, that is, turning `BUILD` files into the `Package` objects - that represent them -2. Analyzing configured targets, that is, running the implementation of the - rules to produce the action graph - -Each configured target in the transitive closure of the configured targets -requested on the command line must be analyzed bottom-up; that is, leaf nodes -first, then up to the ones on the command line. The inputs to the analysis of -a single configured target are: - -1. **The configuration.** ("how" to build that rule; for example, the target - platform but also things like command line options the user wants to be - passed to the C++ compiler) -2. **The direct dependencies.** Their transitive info providers are available - to the rule being analyzed. They are called like that because they provide a - "roll-up" of the information in the transitive closure of the configured - target, such as all the .jar files on the classpath or all the .o files that - need to be linked into a C++ binary) -3. **The target itself**. This is the result of loading the package the target - is in. For rules, this includes its attributes, which is usually what - matters. -4. **The implementation of the configured target.** For rules, this can either - be in Starlark or in Java. All non-rule configured targets are implemented - in Java. - -The output of analyzing a configured target is: - -1. The transitive info providers that configured targets that depend on it can - access -2. The artifacts it can create and the actions that produce them. - -The API offered to Java rules is `RuleContext`, which is the equivalent of the -`ctx` argument of Starlark rules. Its API is more powerful, but at the same -time, it's easier to do Bad Things™, for example to write code whose time or -space complexity is quadratic (or worse), to make the Bazel server crash with a -Java exception or to violate invariants (such as by inadvertently modifying an -`Options` instance or by making a configured target mutable) - -The algorithm that determines the direct dependencies of a configured target -lives in `DependencyResolver.dependentNodeMap()`. 
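As a minimal sketch of these inputs and outputs from the Starlark side (the rule name, attribute and file names here are invented for illustration), here is a rule whose implementation reads its attributes, registers one action, and returns a provider:

```starlark
# Sketch: the analysis phase runs this function once per configured target.
def _concat_impl(ctx):
    # An artifact this configured target can create...
    out = ctx.actions.declare_file(ctx.label.name + ".txt")

    # ...and the action that produces it, which becomes a node in the
    # action graph built during analysis.
    ctx.actions.run_shell(
        inputs = ctx.files.srcs,
        outputs = [out],
        command = "cat %s > %s" % (
            " ".join([f.path for f in ctx.files.srcs]),
            out.path,
        ),
    )

    # DefaultInfo is roughly the Starlark counterpart of filesToBuild
    # plus runfiles, as described later in this document.
    return [DefaultInfo(files = depset([out]))]

concat = rule(
    implementation = _concat_impl,
    attrs = {"srcs": attr.label_list(allow_files = True)},
)
```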
### Configurations

Configurations are the "how" of building a target: for what platform, with what command line options, etc.

The same target can be built for multiple configurations in the same build. This is useful, for example, when the same code is used for a tool that's run during the build and for the target code and we are cross-compiling, or when we are building a fat Android app (one that contains native code for multiple CPU architectures).

Conceptually, the configuration is a `BuildOptions` instance. However, in practice, `BuildOptions` is wrapped by `BuildConfiguration`, which provides additional sundry pieces of functionality. It propagates from the top of the dependency graph to the bottom. If it changes, the build needs to be re-analyzed.

This results in anomalies like having to re-analyze the whole build if, for example, the number of requested test runs changes, even though that only affects test targets (we have plans to "trim" configurations so that this is not the case, but it's not ready yet).

When a rule implementation needs part of the configuration, it needs to declare it in its definition using `RuleClass.Builder.requiresConfigurationFragments()`. This is both to avoid mistakes (such as Python rules using the Java fragment) and to facilitate configuration trimming so that, for example, if Python options change, C++ targets don't need to be re-analyzed.

The configuration of a rule is not necessarily the same as that of its "parent" rule. The process of changing the configuration in a dependency edge is called a "configuration transition". It can happen in two places:

1. On a dependency edge. These transitions are specified in `Attribute.Builder.cfg()` and are functions from a `Rule` (where the transition happens) and a `BuildOptions` (the original configuration) to one or more `BuildOptions` (the output configuration).
2. On any incoming edge to a configured target. These are specified in `RuleClass.Builder.cfg()`.

The relevant classes are `TransitionFactory` and `ConfigurationTransition`.

Configuration transitions are used, for example:

1. To declare that a particular dependency is used during the build and it should thus be built in the execution architecture
2. To declare that a particular dependency must be built for multiple architectures (such as for native code in fat Android APKs)

If a configuration transition results in multiple configurations, it's called a _split transition_.

Configuration transitions can also be implemented in Starlark (documentation [here](/extending/config)).

### Transitive info providers

Transitive info providers are a way (and the _only_ way) for configured targets to learn things about other configured targets that they depend on, and the only way to tell things about themselves to other configured targets that depend on them. The reason why "transitive" is in their name is that this is usually some sort of roll-up of the transitive closure of a configured target.

There is generally a 1:1 correspondence between Java transitive info providers and Starlark ones (the exception is `DefaultInfo`, which is an amalgamation of `FileProvider`, `FilesToRunProvider` and `RunfilesProvider`, because that API was deemed to be more Starlark-ish than a direct transliteration of the Java one). Their key is one of the following things:

1. A Java Class object. This is only available for providers that are not accessible from Starlark.
These providers are a subclass of - `TransitiveInfoProvider`. -2. A string. This is legacy and heavily discouraged since it's susceptible to - name clashes. Such transitive info providers are direct subclasses of - `build.lib.packages.Info` . -3. A provider symbol. This can be created from Starlark using the `provider()` - function and is the recommended way to create new providers. The symbol is - represented by a `Provider.Key` instance in Java. - -New providers implemented in Java should be implemented using `BuiltinProvider`. -`NativeProvider` is deprecated (we haven't had time to remove it yet) and -`TransitiveInfoProvider` subclasses cannot be accessed from Starlark. - -### Configured targets - -Configured targets are implemented as `RuleConfiguredTargetFactory`. There is a -subclass for each rule class implemented in Java. Starlark configured targets -are created through `StarlarkRuleConfiguredTargetUtil.buildRule()` . - -Configured target factories should use `RuleConfiguredTargetBuilder` to -construct their return value. It consists of the following things: - -1. Their `filesToBuild`, the hazy concept of "the set of files this rule - represents." These are the files that get built when the configured target - is on the command line or in the srcs of a genrule. -2. Their runfiles, regular and data. -3. Their output groups. These are various "other sets of files" the rule can - build. They can be accessed using the output\_group attribute of the - filegroup rule in BUILD and using the `OutputGroupInfo` provider in Java. - -### Runfiles - -Some binaries need data files to run. A prominent example is tests that need -input files. This is represented in Bazel by the concept of "runfiles". A -"runfiles tree" is a directory tree of the data files for a particular binary. -It is created in the file system as a symlink tree with individual symlinks -pointing to the files in the source or output trees. - -A set of runfiles is represented as a `Runfiles` instance. It is conceptually a -map from the path of a file in the runfiles tree to the `Artifact` instance that -represents it. It's a little more complicated than a single `Map` for two -reasons: - -* Most of the time, the runfiles path of a file is the same as its execpath. - We use this to save some RAM. -* There are various legacy kinds of entries in runfiles trees, which also need - to be represented. - -Runfiles are collected using `RunfilesProvider`: an instance of this class -represents the runfiles a configured target (such as a library) and its transitive -closure needs and they are gathered like a nested set (in fact, they are -implemented using nested sets under the cover): each target unions the runfiles -of its dependencies, adds some of its own, then sends the resulting set upwards -in the dependency graph. A `RunfilesProvider` instance contains two `Runfiles` -instances, one for when the rule is depended on through the "data" attribute and -one for every other kind of incoming dependency. This is because a target -sometimes presents different runfiles when depended on through a data attribute -than otherwise. This is undesired legacy behavior that we haven't gotten around -removing yet. - -Runfiles of binaries are represented as an instance of `RunfilesSupport`. This -is different from `Runfiles` because `RunfilesSupport` has the capability of -actually being built (unlike `Runfiles`, which is just a mapping). 
This -necessitates the following additional components: - -* **The input runfiles manifest.** This is a serialized description of the - runfiles tree. It is used as a proxy for the contents of the runfiles tree - and Bazel assumes that the runfiles tree changes if and only if the contents - of the manifest change. -* **The output runfiles manifest.** This is used by runtime libraries that - handle runfiles trees, notably on Windows, which sometimes doesn't support - symbolic links. -* **The runfiles middleman.** In order for a runfiles tree to exist, one needs - to build the symlink tree and the artifact the symlinks point to. In order - to decrease the number of dependency edges, the runfiles middleman can be - used to represent all these. -* **Command line arguments** for running the binary whose runfiles the - `RunfilesSupport` object represents. - -### Aspects - -Aspects are a way to "propagate computation down the dependency graph". They are -described for users of Bazel -[here](/extending/aspects). A good -motivating example is protocol buffers: a `proto_library` rule should not know -about any particular language, but building the implementation of a protocol -buffer message (the "basic unit" of protocol buffers) in any programming -language should be coupled to the `proto_library` rule so that if two targets in -the same language depend on the same protocol buffer, it gets built only once. - -Just like configured targets, they are represented in Skyframe as a `SkyValue` -and the way they are constructed is very similar to how configured targets are -built: they have a factory class called `ConfiguredAspectFactory` that has -access to a `RuleContext`, but unlike configured target factories, it also knows -about the configured target it is attached to and its providers. - -The set of aspects propagated down the dependency graph is specified for each -attribute using the `Attribute.Builder.aspects()` function. There are a few -confusingly-named classes that participate in the process: - -1. `AspectClass` is the implementation of the aspect. It can be either in Java - (in which case it's a subclass) or in Starlark (in which case it's an - instance of `StarlarkAspectClass`). It's analogous to - `RuleConfiguredTargetFactory`. -2. `AspectDefinition` is the definition of the aspect; it includes the - providers it requires, the providers it provides and contains a reference to - its implementation, such as the appropriate `AspectClass` instance. It's - analogous to `RuleClass`. -3. `AspectParameters` is a way to parametrize an aspect that is propagated down - the dependency graph. It's currently a string to string map. A good example - of why it's useful is protocol buffers: if a language has multiple APIs, the - information as to which API the protocol buffers should be built for should - be propagated down the dependency graph. -4. `Aspect` represents all the data that's needed to compute an aspect that - propagates down the dependency graph. It consists of the aspect class, its - definition and its parameters. -5. `RuleAspect` is the function that determines which aspects a particular rule - should propagate. It's a `Rule` -> `Aspect` function. - -A somewhat unexpected complication is that aspects can attach to other aspects; -for example, an aspect collecting the classpath for a Java IDE will probably -want to know about all the .jar files on the classpath, but some of them are -protocol buffers. 
In that case, the IDE aspect will want to attach to the (`proto_library` rule + Java proto aspect) pair.

The complexity of aspects on aspects is captured in the class `AspectCollection`.

### Platforms and toolchains

Bazel supports multi-platform builds, that is, builds where there may be multiple architectures where build actions run and multiple architectures for which code is built. These architectures are referred to as _platforms_ in Bazel parlance (full documentation [here](/extending/platforms)).

A platform is described by a key-value mapping from _constraint settings_ (such as the concept of "CPU architecture") to _constraint values_ (such as a particular CPU like x86\_64). We have a "dictionary" of the most commonly used constraint settings and values in the `@platforms` repository.

The concept of _toolchain_ comes from the fact that depending on what platforms the build is running on and what platforms are targeted, one may need to use different compilers; for example, a particular C++ toolchain may run on a specific OS and be able to target some other OSes. Bazel must determine the C++ compiler that is used based on the execution and target platforms that are set (documentation for toolchains [here](/extending/toolchains)).

In order to do this, toolchains are annotated with the set of execution and target platform constraints they support. To make this possible, the definition of a toolchain is split into two parts:

1. A `toolchain()` rule that describes the set of execution and target constraints a toolchain supports and tells what kind of toolchain it is, such as C++ or Java (the latter is represented by the `toolchain_type()` rule)
2. A language-specific rule that describes the actual toolchain (such as `cc_toolchain()`)

This is done in this way because we need to know the constraints for every toolchain in order to do toolchain resolution, and language-specific `*_toolchain()` rules contain much more information than that, so they take more time to load.

Execution platforms are specified in one of the following ways:

1. In the MODULE.bazel file using the `register_execution_platforms()` function
2. On the command line using the `--extra_execution_platforms` command line option

The set of available execution platforms is computed in `RegisteredExecutionPlatformsFunction`.

The target platform for a configured target is determined by `PlatformOptions.computeTargetPlatform()`. It's a list of platforms because we eventually want to support multiple target platforms, but it's not implemented yet.

The set of toolchains to be used for a configured target is determined by `ToolchainResolutionFunction`. It is a function of:

* The set of registered toolchains (in the MODULE.bazel file and the configuration)
* The desired execution and target platforms (in the configuration)
* The set of toolchain types that are required by the configured target (in `UnloadedToolchainContextKey`)
* The set of execution platform constraints of the configured target (the `exec_compatible_with` attribute) and the configuration (`--experimental_add_exec_constraints_to_targets`), in `UnloadedToolchainContextKey`

Its result is an `UnloadedToolchainContext`, which is essentially a map from toolchain type (represented as a `ToolchainTypeInfo` instance) to the label of the selected toolchain. It's called "unloaded" because it does not contain the toolchains themselves, only their labels.
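As a sketch of the two-part definition described above (with `my_compiler_toolchain` standing in for a hypothetical language-specific toolchain rule), the `BUILD` declarations look roughly like this:

```starlark
toolchain_type(name = "toolchain_type")

# The "payload": expensive to load, so resolution never touches it directly.
my_compiler_toolchain(
    name = "linux_x86_impl",
    compiler = ":compiler_binary",  # hypothetical attribute
)

# The constraint-only wrapper that toolchain resolution operates on.
toolchain(
    name = "linux_x86",
    exec_compatible_with = [
        "@platforms//os:linux",
        "@platforms//cpu:x86_64",
    ],
    target_compatible_with = ["@platforms//os:linux"],
    toolchain = ":linux_x86_impl",
    toolchain_type = ":toolchain_type",
)
```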
- -Then the toolchains are actually loaded using `ResolvedToolchainContext.load()` -and used by the implementation of the configured target that requested them. - -We also have a legacy system that relies on there being one single "host" -configuration and target configurations being represented by various -configuration flags, such as `--cpu` . We are gradually transitioning to the above -system. In order to handle cases where people rely on the legacy configuration -values, we have implemented -[platform mappings](https://docs.google.com/document/d/1Vg_tPgiZbSrvXcJ403vZVAGlsWhH9BUDrAxMOYnO0Ls) -to translate between the legacy flags and the new-style platform constraints. -Their code is in `PlatformMappingFunction` and uses a non-Starlark "little -language". - -### Constraints - -Sometimes one wants to designate a target as being compatible with only a few -platforms. Bazel has (unfortunately) multiple mechanisms to achieve this end: - -* Rule-specific constraints -* `environment_group()` / `environment()` -* Platform constraints - -Rule-specific constraints are mostly used within Google for Java rules; they are -on their way out and they are not available in Bazel, but the source code may -contain references to it. The attribute that governs this is called -`constraints=` . - -#### environment_group() and environment() - -These rules are a legacy mechanism and are not widely used. - -All build rules can declare which "environments" they can be built for, where an -"environment" is an instance of the `environment()` rule. - -There are various ways supported environments can be specified for a rule: - -1. Through the `restricted_to=` attribute. This is the most direct form of - specification; it declares the exact set of environments the rule supports. -2. Through the `compatible_with=` attribute. This declares environments a rule - supports in addition to "standard" environments that are supported by - default. -3. Through the package-level attributes `default_restricted_to=` and - `default_compatible_with=`. -4. Through default specifications in `environment_group()` rules. Every - environment belongs to a group of thematically related peers (such as "CPU - architectures", "JDK versions" or "mobile operating systems"). The - definition of an environment group includes which of these environments - should be supported by "default" if not otherwise specified by the - `restricted_to=` / `environment()` attributes. A rule with no such - attributes inherits all defaults. -5. Through a rule class default. This overrides global defaults for all - instances of the given rule class. This can be used, for example, to make - all `*_test` rules testable without each instance having to explicitly - declare this capability. - -`environment()` is implemented as a regular rule whereas `environment_group()` -is both a subclass of `Target` but not `Rule` (`EnvironmentGroup`) and a -function that is available by default from Starlark -(`StarlarkLibrary.environmentGroup()`) which eventually creates an eponymous -target. This is to avoid a cyclic dependency that would arise because each -environment needs to declare the environment group it belongs to and each -environment group needs to declare its default environments. - -A build can be restricted to a certain environment with the -`--target_environment` command line option. - -The implementation of the constraint check is in -`RuleContextConstraintSemantics` and `TopLevelConstraintSemantics`. 
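For illustration only, a sketch of what this legacy mechanism looks like in a `BUILD` file (the names are invented, and as noted above, this machinery is rarely the right choice today):

```starlark
environment(name = "jdk8")
environment(name = "jdk11")

environment_group(
    name = "jdk_versions",
    environments = [":jdk8", ":jdk11"],
    defaults = [":jdk11"],  # assumed by rules that don't say otherwise
)

java_library(
    name = "needs_jdk8",
    srcs = ["A.java"],
    restricted_to = [":jdk8"],  # the exact set of supported environments
)
```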
- -#### Platform constraints - -The current "official" way to describe what platforms a target is compatible -with is by using the same constraints used to describe toolchains and platforms. -It was implemented in pull request -[#10945](https://github.com/bazelbuild/bazel/pull/10945). - -### Visibility - -If you work on a large codebase with a lot of developers (like at Google), you -want to take care to prevent everyone else from arbitrarily depending on your -code. Otherwise, as per [Hyrum's law](https://www.hyrumslaw.com/), -people _will_ come to rely on behaviors that you considered to be implementation -details. - -Bazel supports this by the mechanism called _visibility_: you can limit which -targets can depend on a particular target using the -[visibility](/reference/be/common-definitions#common-attributes) attribute. This -attribute is a little special because, although it holds a list of labels, these -labels may encode a pattern over package names rather than a pointer to any -particular target. (Yes, this is a design flaw.) - -This is implemented in the following places: - -* The `RuleVisibility` interface represents a visibility declaration. It can - be either a constant (fully public or fully private) or a list of labels. -* Labels can refer to either package groups (predefined list of packages), to - packages directly (`//pkg:__pkg__`) or subtrees of packages - (`//pkg:__subpackages__`). This is different from the command line syntax, - which uses `//pkg:*` or `//pkg/...`. -* Package groups are implemented as their own target (`PackageGroup`) and - configured target (`PackageGroupConfiguredTarget`). We could probably - replace these with simple rules if we wanted to. Their logic is implemented - with the help of: `PackageSpecification`, which corresponds to a - single pattern like `//pkg/...`; `PackageGroupContents`, which corresponds - to a single `package_group`'s `packages` attribute; and - `PackageSpecificationProvider`, which aggregates over a `package_group` and - its transitive `includes`. -* The conversion from visibility label lists to dependencies is done in - `DependencyResolver.visitTargetVisibility` and a few other miscellaneous - places. -* The actual check is done in - `CommonPrerequisiteValidator.validateDirectPrerequisiteVisibility()` - -### Nested sets - -Oftentimes, a configured target aggregates a set of files from its dependencies, -adds its own, and wraps the aggregate set into a transitive info provider so -that configured targets that depend on it can do the same. Examples: - -* The C++ header files used for a build -* The object files that represent the transitive closure of a `cc_library` -* The set of .jar files that need to be on the classpath for a Java rule to - compile or run -* The set of Python files in the transitive closure of a Python rule - -If we did this the naive way by using, for example, `List` or `Set`, we'd end up with -quadratic memory usage: if there is a chain of N rules and each rule adds a -file, we'd have 1+2+...+N collection members. - -In order to get around this problem, we came up with the concept of a -`NestedSet`. It's a data structure that is composed of other `NestedSet` -instances and some members of its own, thereby forming a directed acyclic graph -of sets. They are immutable and their members can be iterated over. We define -multiple iteration order (`NestedSet.Order`): preorder, postorder, topological -(a node always comes after its ancestors) and "don't care, but it should be the -same each time". 
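As the next sentence notes, Starlark exposes this structure as `depset`; a short sketch of the roll-up pattern just described, with `FileCollectorInfo` as a hypothetical provider:

```starlark
FileCollectorInfo = provider(fields = ["files"])

def _collect_impl(ctx):
    # One new node per rule instead of copying the whole transitive closure:
    rolled_up = depset(
        direct = ctx.files.srcs,
        transitive = [dep[FileCollectorInfo].files for dep in ctx.attr.deps],
    )
    return [FileCollectorInfo(files = rolled_up)]
```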
The same data structure is called `depset` in Starlark.

### Artifacts and Actions

The actual build consists of a set of commands that need to be run to produce the output the user wants. The commands are represented as instances of the class `Action` and the files are represented as instances of the class `Artifact`. They are arranged in a bipartite, directed, acyclic graph called the "action graph".

Artifacts come in two kinds: source artifacts (ones that are available before Bazel starts executing) and derived artifacts (ones that need to be built). Derived artifacts can themselves be of multiple kinds:

1. **Regular artifacts.** These are checked for up-to-dateness by computing their checksum, with mtime as a shortcut; we don't checksum the file if its ctime hasn't changed.
2. **Unresolved symlink artifacts.** These are checked for up-to-dateness by calling readlink(). Unlike regular artifacts, these can be dangling symlinks. Usually used in cases where one then packs up some files into an archive of some sort.
3. **Tree artifacts.** These are not single files, but directory trees. They are checked for up-to-dateness by checking the set of files in them and their contents. They are represented as a `TreeArtifact`.
4. **Constant metadata artifacts.** Changes to these artifacts don't trigger a rebuild. This is used exclusively for build stamp information: we don't want to do a rebuild just because the current time changed.

There is no fundamental reason why source artifacts cannot be tree artifacts or unresolved symlink artifacts, it's just that we haven't implemented it yet (we should, though -- referencing a source directory in a `BUILD` file is one of the few known long-standing incorrectness issues with Bazel; we have an implementation that kind of works, enabled by the `BAZEL_TRACK_SOURCE_DIRECTORIES=1` JVM property).

A notable kind of `Artifact` is the middleman. Middlemen are indicated by `Artifact` instances that are the outputs of `MiddlemanAction`. They are used for one special case:

* Runfiles middlemen are used to ensure the presence of a runfiles tree so that one does not separately need to depend on the output manifest and every single artifact referenced by the runfiles tree.

Actions are best understood as a command that needs to be run, the environment it needs and the set of outputs it produces. The following things are the main components of the description of an action:

* The command line that needs to be run
* The input artifacts it needs
* The environment variables that need to be set
* Annotations that describe the environment (such as platform) it needs to run in

There are also a few other special cases, like writing a file whose content is known to Bazel. They are a subclass of `AbstractAction`. Most of the actions are a `SpawnAction` or a `StarlarkAction` (they are much the same and should arguably not be separate classes), although Java and C++ have their own action types (`JavaCompileAction`, `CppCompileAction` and `CppLinkAction`).

We eventually want to move everything to `SpawnAction`; `JavaCompileAction` is pretty close, but C++ is a bit of a special case due to .d file parsing and include scanning.

The action graph is mostly "embedded" into the Skyframe graph: conceptually, the execution of an action is represented as an invocation of `ActionExecutionFunction`.
The mapping from an action graph dependency edge to a -Skyframe dependency edge is described in -`ActionExecutionFunction.getInputDeps()` and `Artifact.key()` and has a few -optimizations in order to keep the number of Skyframe edges low: - -* Derived artifacts do not have their own `SkyValue`s. Instead, - `Artifact.getGeneratingActionKey()` is used to find out the key for the - action that generates it -* Nested sets have their own Skyframe key. - -### Shared actions - -Some actions are generated by multiple configured targets; Starlark rules are -more limited since they are only allowed to put their derived actions into a -directory determined by their configuration and their package (but even so, -rules in the same package can conflict), but rules implemented in Java can put -derived artifacts anywhere. - -This is considered to be a misfeature, but getting rid of it is really hard -because it produces significant savings in execution time when, for example, a -source file needs to be processed somehow and that file is referenced by -multiple rules (handwave-handwave). This comes at the cost of some RAM: each -instance of a shared action needs to be stored in memory separately. - -If two actions generate the same output file, they must be exactly the same: -have the same inputs, the same outputs and run the same command line. This -equivalence relation is implemented in `Actions.canBeShared()` and it is -verified between the analysis and execution phases by looking at every Action. -This is implemented in `SkyframeActionExecutor.findAndStoreArtifactConflicts()` -and is one of the few places in Bazel that requires a "global" view of the -build. - -## The execution phase - -This is when Bazel actually starts running build actions, such as commands that -produce outputs. - -The first thing Bazel does after the analysis phase is to determine what -Artifacts need to be built. The logic for this is encoded in -`TopLevelArtifactHelper`; roughly speaking, it's the `filesToBuild` of the -configured targets on the command line and the contents of a special output -group for the explicit purpose of expressing "if this target is on the command -line, build these artifacts". - -The next step is creating the execution root. Since Bazel has the option to read -source packages from different locations in the file system (`--package_path`), -it needs to provide locally executed actions with a full source tree. This is -handled by the class `SymlinkForest` and works by taking note of every target -used in the analysis phase and building up a single directory tree that symlinks -every package with a used target from its actual location. An alternative would -be to pass the correct paths to commands (taking `--package_path` into account). -This is undesirable because: - -* It changes action command lines when a package is moved from a package path - entry to another (used to be a common occurrence) -* It results in different command lines if an action is run remotely than if - it's run locally -* It requires a command line transformation specific to the tool in use - (consider the difference between such as Java classpaths and C++ include paths) -* Changing the command line of an action invalidates its action cache entry -* `--package_path` is slowly and steadily being deprecated - -Then, Bazel starts traversing the action graph (the bipartite, directed graph -composed of actions and their input and output artifacts) and running actions. 
The execution of each action is represented by an instance of the `SkyValue` class `ActionExecutionValue`.

Since running an action is expensive, we have a few layers of caching that can be hit behind Skyframe:

* `ActionExecutionFunction.stateMap` contains data to make Skyframe restarts of `ActionExecutionFunction` cheap
* The local action cache contains data about the state of the file system
* Remote execution systems usually also contain their own cache

### The local action cache

This cache is another layer that sits behind Skyframe; even if an action is re-executed in Skyframe, it can still be a hit in the local action cache. It represents the state of the local file system and it's serialized to disk, which means that when one starts up a new Bazel server, one can get local action cache hits even though the Skyframe graph is empty.

This cache is checked for hits using the method `ActionCacheChecker.getTokenIfNeedToExecute()`.

Contrary to its name, it's a map from the path of a derived artifact to the action that emitted it. The action is described as:

1. The set of its input and output files and their checksums
2. Its "action key", which is usually the command line that was executed, but in general, represents everything that's not captured by the checksum of the input files (such as for `FileWriteAction`, it's the checksum of the data that's written)

There is also a highly experimental "top-down action cache" that is still under development, which uses transitive hashes to avoid going to the cache as many times.

### Input discovery and input pruning

Some actions are more complicated than just having a set of inputs. Changes to the set of inputs of an action come in two forms:

* An action may discover new inputs before its execution or decide that some of its inputs are not actually necessary. The canonical example is C++, where it's better to make an educated guess about what header files a C++ file uses from its transitive closure so that we don't need to send every file to remote executors; therefore, we have an option not to register every header file as an "input", but scan the source file for transitively included headers and only mark those header files as inputs that are mentioned in `#include` statements (we overestimate so that we don't need to implement a full C preprocessor). This option is currently hard-wired to "false" in Bazel and is only used at Google.
* An action may realize that some files were not used during its execution. In C++, this is called ".d files": the compiler tells which header files were used after the fact, and in order to avoid the embarrassment of having worse incrementality than Make, Bazel makes use of this fact. This offers a better estimate than the include scanner because it relies on the compiler.

These are implemented using methods on Action:

1. `Action.discoverInputs()` is called. It should return a nested set of Artifacts that are determined to be required. These must be source artifacts so that there are no dependency edges in the action graph that don't have an equivalent in the configured target graph.
2. The action is executed by calling `Action.execute()`.
3. At the end of `Action.execute()`, the action can call `Action.updateInputs()` to tell Bazel that not all of its inputs were needed. This can result in incorrect incremental builds if a used input is reported as unused.
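Starlark rules can use the second mechanism through the `unused_inputs_list=` parameter mentioned below. A rough sketch, where `//tools:pruning_tool` is a hypothetical executable that copies its first source and writes the paths of the inputs it did not read into the list file:

```starlark
def _prune_impl(ctx):
    out = ctx.actions.declare_file(ctx.label.name + ".out")
    unused = ctx.actions.declare_file(ctx.label.name + ".unused")
    ctx.actions.run(
        executable = ctx.executable._tool,
        arguments = [out.path, unused.path] + [f.path for f in ctx.files.srcs],
        inputs = ctx.files.srcs,
        outputs = [out, unused],
        # The tool writes the inputs it did not read into `unused`; Bazel
        # then drops those files from the action's input set when checking
        # for changes on later incremental builds.
        unused_inputs_list = unused,
    )
    return [DefaultInfo(files = depset([out]))]

prune_example = rule(
    implementation = _prune_impl,
    attrs = {
        "srcs": attr.label_list(allow_files = True),
        # Hypothetical pruning-aware tool.
        "_tool": attr.label(
            default = "//tools:pruning_tool",
            executable = True,
            cfg = "exec",
        ),
    },
)
```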
- -When an action cache returns a hit on a fresh Action instance (such as created -after a server restart), Bazel calls `updateInputs()` itself so that the set of -inputs reflects the result of input discovery and pruning done before. - -Starlark actions can make use of the facility to declare some inputs as unused -using the `unused_inputs_list=` argument of -`ctx.actions.run()`. - -### Various ways to run actions: Strategies/ActionContexts - -Some actions can be run in different ways. For example, a command line can be -executed locally, locally but in various kinds of sandboxes, or remotely. The -concept that embodies this is called an `ActionContext` (or `Strategy`, since we -successfully went only halfway with a rename...) - -The life cycle of an action context is as follows: - -1. When the execution phase is started, `BlazeModule` instances are asked what - action contexts they have. This happens in the constructor of - `ExecutionTool`. Action context types are identified by a Java `Class` - instance that refers to a sub-interface of `ActionContext` and which - interface the action context must implement. -2. The appropriate action context is selected from the available ones and is - forwarded to `ActionExecutionContext` and `BlazeExecutor` . -3. Actions request contexts using `ActionExecutionContext.getContext()` and - `BlazeExecutor.getStrategy()` (there should really be only one way to do - it…) - -Strategies are free to call other strategies to do their jobs; this is used, for -example, in the dynamic strategy that starts actions both locally and remotely, -then uses whichever finishes first. - -One notable strategy is the one that implements persistent worker processes -(`WorkerSpawnStrategy`). The idea is that some tools have a long startup time -and should therefore be reused between actions instead of starting one anew for -every action (This does represent a potential correctness issue, since Bazel -relies on the promise of the worker process that it doesn't carry observable -state between individual requests) - -If the tool changes, the worker process needs to be restarted. Whether a worker -can be reused is determined by computing a checksum for the tool used using -`WorkerFilesHash`. It relies on knowing which inputs of the action represent -part of the tool and which represent inputs; this is determined by the creator -of the Action: `Spawn.getToolFiles()` and the runfiles of the `Spawn` are -counted as parts of the tool. - -More information about strategies (or action contexts!): - -* Information about various strategies for running actions is available - [here](https://jmmv.dev/2019/12/bazel-strategies.html). -* Information about the dynamic strategy, one where we run an action both - locally and remotely to see whichever finishes first is available - [here](https://jmmv.dev/series.html#Bazel%20dynamic%20execution). -* Information about the intricacies of executing actions locally is available - [here](https://jmmv.dev/2019/11/bazel-process-wrapper.html). - -### The local resource manager - -Bazel _can_ run many actions in parallel. The number of local actions that -_should_ be run in parallel differs from action to action: the more resources an -action requires, the less instances should be running at the same time to avoid -overloading the local machine. - -This is implemented in the class `ResourceManager`: each action has to be -annotated with an estimate of the local resources it requires in the form of a -`ResourceSet` instance (CPU and RAM). 
-Then, when action contexts do something that requires local resources, they
-call `ResourceManager.acquireResources()` and are blocked until the required
-resources are available.
-
-A more detailed description of local resource management is available
-[here](https://jmmv.dev/2019/12/bazel-local-resources.html).
-
-### The structure of the output directory
-
-Each action requires a separate place in the output directory where it places
-its outputs. The location of derived artifacts is usually as follows:
-
-```
-$EXECROOT/bazel-out/<configuration>/bin/<package>/<artifact name>
-```
-
-How is the name of the directory that is associated with a particular
-configuration determined? There are two conflicting desirable properties:
-
-1. If two configurations can occur in the same build, they should have
-   different directories so that both can have their own version of the same
-   action; otherwise, if the two configurations disagree about, for example,
-   the command line of an action producing the same output file, Bazel
-   doesn't know which action to choose (an "action conflict")
-2. If two configurations represent "roughly" the same thing, they should have
-   the same name so that actions executed in one can be reused for the other
-   if the command lines match: for example, changes to the command line
-   options to the Java compiler should not result in C++ compile actions
-   being re-run.
-
-So far, we have not come up with a principled way of solving this problem,
-which has similarities to the problem of configuration trimming. A longer
-discussion of options is available
-[here](https://docs.google.com/document/d/1fZI7wHoaS-vJvZy9SBxaHPitIzXE_nL9v4sS4mErrG4/edit).
-The main problematic areas are Starlark rules (whose authors usually aren't
-intimately familiar with Bazel) and aspects, which add another dimension to
-the space of things that can produce the "same" output file.
-
-The current approach is that the path segment for the configuration is
-`<CPU>-<compilation mode>` with various suffixes added so that configuration
-transitions implemented in Java don't result in action conflicts. In
-addition, a checksum of the set of Starlark configuration transitions is
-added so that users can't cause action conflicts. It is far from perfect.
-This is implemented in `OutputDirectories.buildMnemonic()` and relies on each
-configuration fragment adding its own part to the name of the output
-directory.
-
-## Tests
-
-Bazel has rich support for running tests. It supports:
-
-* Running tests remotely (if a remote execution backend is available)
-* Running tests multiple times in parallel (for deflaking or gathering timing
-  data)
-* Sharding tests (splitting test cases in the same test over multiple
-  processes for speed)
-* Re-running flaky tests
-* Grouping tests into test suites
-
-Tests are regular configured targets that have a `TestProvider`, which
-describes how the test should be run:
-
-* The artifacts whose building results in the test being run. This is a
-  "cache status" file that contains a serialized `TestResultData` message
-* The number of times the test should be run
-* The number of shards the test should be split into
-* Some parameters about how the test should be run (such as the test timeout)
-
-### Determining which tests to run
-
-Determining which tests are run is an elaborate process.
-
-First, during target pattern parsing, test suites are recursively expanded.
-The expansion is implemented in `TestsForTargetPatternFunction`. A somewhat
-surprising wrinkle is that if a test suite declares no tests, it refers to
-_every_ test in its package. This is implemented in `Package.beforeBuild()`
-by adding an implicit attribute called `$implicit_tests` to test suite rules.
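-
-For example, in a hypothetical package with two tests, the following
-`test_suite` ends up containing both of them:
-
-```python
-# BUILD file of the hypothetical package.
-cc_test(name = "fast_test", srcs = ["fast_test.cc"])
-cc_test(name = "slow_test", srcs = ["slow_test.cc"])
-
-# Declares no tests, so it implicitly refers to every test in the
-# package, that is, both :fast_test and :slow_test.
-test_suite(name = "all_tests")
-```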
-
-Then, tests are filtered for size, tags, timeout and language according to
-the command line options. This is implemented in `TestFilter` and is called
-from `TargetPatternPhaseFunction.determineTests()` during target parsing, and
-the result is put into `TargetPatternPhaseValue.getTestsToRunLabels()`. The
-reason why rule attributes which can be filtered for are not configurable is
-that this happens before the analysis phase and, therefore, the configuration
-is not available.
-
-This is then processed further in `BuildView.createResult()`: targets whose
-analysis failed are filtered out and tests are split into exclusive and
-non-exclusive tests. It's then put into `AnalysisResult`, which is how
-`ExecutionTool` knows which tests to run.
-
-In order to lend some transparency to this elaborate process, the `tests()`
-query operator (implemented in `TestsFunction`) is available to tell which
-tests are run when a particular target is specified on the command line. It's
-unfortunately a reimplementation, so it probably deviates from the above in
-multiple subtle ways.
-
-### Running tests
-
-The way the tests are run is by requesting cache status artifacts. This then
-results in the execution of a `TestRunnerAction`, which eventually calls the
-`TestActionContext` chosen by the `--test_strategy` command line option that
-runs the test in the requested way.
-
-Tests are run according to an elaborate protocol that uses environment
-variables to tell tests what's expected from them. A detailed description of
-what Bazel expects from tests and what tests can expect from Bazel is
-available [here](/reference/test-encyclopedia). At the simplest, an exit code
-of 0 means success, anything else means failure.
-
-In addition to the cache status file, each test process emits a number of
-other files. They are put in the "test log directory", which is the
-subdirectory called `testlogs` of the output directory of the target
-configuration:
-
-* `test.xml`, a JUnit-style XML file detailing the individual test cases in
-  the test shard
-* `test.log`, the console output of the test. stdout and stderr are not
-  separated.
-* `test.outputs`, the "undeclared outputs directory"; this is used by tests
-  that want to output files in addition to what they print to the terminal.
-
-There are two things that can happen during test execution that cannot happen
-when building regular targets: exclusive test execution and output streaming.
-
-Some tests need to be executed in exclusive mode, for example, not in
-parallel with other tests. This can be elicited either by adding
-`tags=["exclusive"]` to the test rule or by running the test with
-`--test_strategy=exclusive`. Each exclusive test is run by a separate
-Skyframe invocation requesting the execution of the test after the "main"
-build. This is implemented in `SkyframeExecutor.runExclusiveTest()`.
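-
-For example, a test that must not overlap with any other test can be marked
-like this in its BUILD file (a sketch with a hypothetical target):
-
-```python
-sh_test(
-    name = "database_integration_test",
-    srcs = ["database_integration_test.sh"],
-    # Never run this test in parallel with other tests.
-    tags = ["exclusive"],
-)
-```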
-
-Unlike regular actions, whose terminal output is dumped when the action
-finishes, the user can request the output of tests to be streamed so that
-they get informed about the progress of a long-running test. This is
-specified by the `--test_output=streamed` command line option and implies
-exclusive test execution so that outputs of different tests are not
-interspersed.
-
-This is implemented in the aptly-named `StreamedTestOutput` class and works
-by polling changes to the `test.log` file of the test in question and dumping
-new bytes to the terminal where Bazel runs.
-
-Results of the executed tests are available on the event bus by observing
-various events (such as `TestAttempt`, `TestResult` or
-`TestingCompleteEvent`). They are dumped to the Build Event Protocol and they
-are emitted to the console by `AggregatingTestListener`.
-
-### Coverage collection
-
-Coverage is reported by the tests in LCOV format in the files
-`bazel-testlogs/$PACKAGE/$TARGET/coverage.dat`.
-
-To collect coverage, each test execution is wrapped in a script called
-`collect_coverage.sh`.
-
-This script sets up the environment of the test to enable coverage collection
-and to determine where the coverage files are written by the coverage
-runtime(s). It then runs the test. A test may itself run multiple
-subprocesses and consist of parts written in multiple different programming
-languages (with separate coverage collection runtimes). The wrapper script is
-responsible for converting the resulting files to LCOV format if necessary
-and for merging them into a single file.
-
-The interposition of `collect_coverage.sh` is done by the test strategies and
-requires `collect_coverage.sh` to be on the inputs of the test. This is
-accomplished by the implicit attribute `:coverage_support`, which is resolved
-to the value of the configuration flag `--coverage_support` (see
-`TestConfiguration.TestOptions.coverageSupport`).
-
-Some languages do offline instrumentation, meaning that the coverage
-instrumentation is added at compile time (such as C++), and others do online
-instrumentation, meaning that coverage instrumentation is added at execution
-time.
-
-Another core concept is _baseline coverage_. This is the coverage of a
-library, binary, or test if no code in it was run. The problem it solves is
-that if you want to compute the test coverage for a binary, it is not enough
-to merge the coverage of all of the tests because there may be code in the
-binary that is not linked into any test. Therefore, what we do is to emit a
-coverage file for every binary which contains only the files we collect
-coverage for, with no covered lines. The baseline coverage file for a target
-is at `bazel-testlogs/$PACKAGE/$TARGET/baseline_coverage.dat`. It is also
-generated for binaries and libraries in addition to tests if you pass the
-`--nobuild_tests_only` flag to Bazel.
-
-Baseline coverage is currently broken.
-
-We track two groups of files for coverage collection for each rule: the set
-of instrumented files and the set of instrumentation metadata files.
-
-The set of instrumented files is just that, a set of files to instrument. For
-online coverage runtimes, this can be used at runtime to decide which files
-to instrument. It is also used to implement baseline coverage.
-
-The set of instrumentation metadata files is the set of extra files a test
-needs to generate the LCOV files Bazel requires from it. In practice, this
-consists of runtime-specific files; for example, gcc emits .gcno files during
-compilation. These are added to the set of inputs of test actions if coverage
-mode is enabled.
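-
-In Starlark rules, the set of instrumented files is declared through
-`coverage_common.instrumented_files_info()`; a minimal sketch (the rule and
-attribute names are examples):
-
-```python
-def _my_rule_impl(ctx):
-    # ... create the actions of the rule ...
-    return [coverage_common.instrumented_files_info(
-        ctx,
-        # Attributes containing this rule's own sources to instrument.
-        source_attributes = ["srcs"],
-        # Attributes whose dependencies' instrumented files also count.
-        dependency_attributes = ["deps"],
-    )]
-```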
-
-Whether or not coverage is being collected is stored in the
-`BuildConfiguration`. This is handy because it is an easy way to change the
-test action and the action graph depending on this bit, but it also means
-that if this bit is flipped, all targets need to be re-analyzed (some
-languages, such as C++, require different compiler options to emit code that
-can collect coverage, which mitigates this issue somewhat, since then a
-re-analysis is needed anyway).
-
-The coverage support files are depended on through labels in an implicit
-dependency so that they can be overridden by the invocation policy, which
-allows them to differ between the different versions of Bazel. Ideally, these
-differences would be removed, and we would standardize on one of them.
-
-We also generate a "coverage report" which merges the coverage collected for
-every test in a Bazel invocation. This is handled by
-`CoverageReportActionFactory` and is called from `BuildView.createResult()`.
-It gets access to the tools it needs by looking at the
-`:coverage_report_generator` attribute of the first test that is executed.
-
-## The query engine
-
-Bazel has a [little language](/query/guide) used to ask it various things
-about various graphs. The following query kinds are provided:
-
-* `bazel query` is used to investigate the target graph
-* `bazel cquery` is used to investigate the configured target graph
-* `bazel aquery` is used to investigate the action graph
-
-Each of these is implemented by subclassing `AbstractBlazeQueryEnvironment`.
-Additional query functions can be added by subclassing `QueryFunction`. In
-order to allow streaming query results, instead of collecting them to some
-data structure, a `query2.engine.Callback` is passed to `QueryFunction`,
-which calls it for results it wants to return.
-
-The result of a query can be emitted in various ways: labels, labels and rule
-classes, XML, protobuf and so on. These are implemented as subclasses of
-`OutputFormatter`.
-
-A subtle requirement of some query output formats (proto, definitely) is that
-Bazel needs to emit _all_ the information that package loading provides so
-that one can diff the output and determine whether a particular target has
-changed. As a consequence, attribute values need to be serializable, which is
-why there are only so few attribute types and why no attributes have complex
-Starlark values. The usual workaround is to use a label, and attach the
-complex information to the rule with that label. It's not a very satisfying
-workaround and it would be very nice to lift this requirement.
-
-## The module system
-
-Bazel can be extended by adding modules to it. Each module must subclass
-`BlazeModule` (the name is a relic of the history of Bazel when it used to be
-called Blaze) and gets information about various events during the execution
-of a command.
-
-They are mostly used to implement various pieces of "non-core" functionality
-that only some versions of Bazel (such as the one we use at Google) need:
-
-* Interfaces to remote execution systems
-* New commands
-
-The set of extension points `BlazeModule` offers is somewhat haphazard. Don't
-use it as an example of good design principles.
-
-## The event bus
-
-The main way `BlazeModule`s communicate with the rest of Bazel is by an event
-bus (`EventBus`): a new instance is created for every build, various parts of
-Bazel can post events to it and modules can register listeners for the events
-they are interested in.
-For example, the following things are represented as events:
-
-* The list of build targets to be built has been determined
-  (`TargetParsingCompleteEvent`)
-* The top-level configurations have been determined
-  (`BuildConfigurationEvent`)
-* A target was built, successfully or not (`TargetCompleteEvent`)
-* A test was run (`TestAttempt`, `TestSummary`)
-
-Some of these events are represented outside of Bazel in the
-[Build Event Protocol](/remote/bep) (they are `BuildEvent`s). This allows not
-only `BlazeModule`s, but also things outside the Bazel process to observe the
-build. They are accessible either as a file that contains protocol messages,
-or Bazel can connect to a server (called the Build Event Service) to stream
-events.
-
-This is implemented in the `build.lib.buildeventservice` and
-`build.lib.buildeventstream` Java packages.
-
-## External repositories
-
-Note: The information in this section is out of date, as code in this area
-has undergone extensive change in the past couple of years. Please refer to
-[external dependencies overview](/external/overview) for more up-to-date
-information.
-
-Whereas Bazel was originally designed to be used in a monorepo (a single
-source tree containing everything one needs to build), Bazel lives in a world
-where this is not necessarily true. "External repositories" are an
-abstraction used to bridge these two worlds: they represent code that is
-necessary for the build but is not in the main source tree.
-
-### The WORKSPACE file
-
-The set of external repositories is determined by parsing the WORKSPACE
-file. For example, a declaration like this:
-
-```
-local_repository(name = "foo", path = "/foo/bar")
-```
-
-results in the repository called `@foo` being available. Where this gets
-complicated is that one can define new repository rules in Starlark files,
-which can then be used to load new Starlark code, which can be used to define
-new repository rules and so on…
-
-To handle this case, the parsing of the WORKSPACE file (in
-`WorkspaceFileFunction`) is split up into chunks delineated by `load()`
-statements. The chunk index is indicated by `WorkspaceFileKey.getIndex()` and
-computing `WorkspaceFileFunction` until index X means evaluating it until the
-Xth `load()` statement.
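-
-As an illustration, a minimal Starlark repository rule might look like this
-(a sketch; the rule name, attributes and BUILD content are placeholders):
-
-```python
-def _my_archive_impl(repository_ctx):
-    # Download and unpack the archive into the repository directory.
-    repository_ctx.download_and_extract(
-        url = repository_ctx.attr.url,
-        sha256 = repository_ctx.attr.sha256,
-    )
-    # The repository needs a BUILD file to be usable.
-    repository_ctx.file(
-        "BUILD",
-        'filegroup(name = "all", srcs = glob(["**"]))',
-    )
-
-my_archive = repository_rule(
-    implementation = _my_archive_impl,
-    attrs = {
-        "url": attr.string(mandatory = True),
-        "sha256": attr.string(),
-    },
-)
-```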
-
-### Fetching repositories
-
-Before the code of the repository is available to Bazel, it needs to be
-_fetched_. This results in Bazel creating a directory under
-`$OUTPUT_BASE/external/<repository name>`.
-
-Fetching the repository happens in the following steps:
-
-1. `PackageLookupFunction` realizes that it needs a repository and creates a
-   `RepositoryName` as a `SkyKey`, which invokes `RepositoryLoaderFunction`
-2. `RepositoryLoaderFunction` forwards the request to
-   `RepositoryDelegatorFunction` for unclear reasons (the code says it's to
-   avoid re-downloading things in case of Skyframe restarts, but it's not a
-   very solid reasoning)
-3. `RepositoryDelegatorFunction` finds out the repository rule it's asked to
-   fetch by iterating over the chunks of the WORKSPACE file until the
-   requested repository is found
-4. The appropriate `RepositoryFunction` is found that implements the
-   repository fetching; it's either the Starlark implementation of the
-   repository or a hard-coded map for repositories that are implemented in
-   Java.
-
-There are various layers of caching since fetching a repository can be very
-expensive:
-
-1. There is a cache for downloaded files that is keyed by their checksum
-   (`RepositoryCache`). This requires the checksum to be available in the
-   WORKSPACE file, but that's good for hermeticity anyway. This is shared by
-   every Bazel server instance on the same workstation, regardless of which
-   workspace or output base they are running in.
-2. A "marker file" is written for each repository under
-   `$OUTPUT_BASE/external` that contains a checksum of the rule that was used
-   to fetch it. If the Bazel server restarts but the checksum does not
-   change, it's not re-fetched. This is implemented in
-   `RepositoryDelegatorFunction.DigestWriter`.
-3. The `--distdir` command line option designates another cache that is used
-   to look up artifacts to be downloaded. This is useful in enterprise
-   settings where Bazel should not fetch random things from the Internet.
-   This is implemented by `DownloadManager`.
-
-Once a repository is downloaded, the artifacts in it are treated as source
-artifacts. This poses a problem because Bazel usually checks for
-up-to-dateness of source artifacts by calling stat() on them, and these
-artifacts are also invalidated when the definition of the repository they are
-in changes. Thus, `FileStateValue`s for an artifact in an external repository
-need to depend on their external repository. This is handled by
-`ExternalFilesHelper`.
-
-### Repository mappings
-
-It can happen that multiple repositories want to depend on the same
-repository, but in different versions (this is an instance of the "diamond
-dependency problem"). For example, if two binaries in separate repositories
-in the build want to depend on Guava, they will presumably both refer to
-Guava with labels starting with `@guava//` and expect that to mean different
-versions of it.
-
-Therefore, Bazel allows one to re-map external repository labels so that the
-string `@guava//` can refer to one Guava repository (such as `@guava1//`) in
-the repository of one binary and another Guava repository (such as
-`@guava2//`) in the repository of the other.
-
-Alternatively, this can also be used to **join** diamonds. If a repository
-depends on `@guava1//`, and another depends on `@guava2//`, repository
-mapping allows one to re-map both repositories to use a canonical `@guava//`
-repository.
-
-The mapping is specified in the WORKSPACE file as the `repo_mapping`
-attribute of individual repository definitions (see the sketch after this
-list). It then appears in Skyframe as a member of `WorkspaceFileValue`, where
-it is plumbed to:
-
-* `Package.Builder.repositoryMapping`, which is used to transform
-  label-valued attributes of rules in the package by
-  `RuleClass.populateRuleAttributeValues()`
-* `Package.repositoryMapping`, which is used in the analysis phase (for
-  resolving things like `$(location)` which are not parsed in the loading
-  phase)
-* `BzlLoadFunction` for resolving labels in load() statements
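-
-For example, the following WORKSPACE sketch (with hypothetical repository
-names and paths) makes `@guava//` refer to two different repositories
-depending on which repository the reference appears in:
-
-```python
-local_repository(
-    name = "binary1",
-    path = "../binary1",
-    # Inside @binary1, labels starting with @guava// refer to @guava1//.
-    repo_mapping = {"@guava": "@guava1"},
-)
-
-local_repository(
-    name = "binary2",
-    path = "../binary2",
-    # Inside @binary2, labels starting with @guava// refer to @guava2//.
-    repo_mapping = {"@guava": "@guava2"},
-)
-```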
-
-## JNI bits
-
-The server of Bazel is _mostly_ written in Java. The exception is the parts
-that Java cannot do by itself or couldn't do by itself when we implemented
-it. This is mostly limited to interaction with the file system, process
-control and various other low-level things.
-
-The C++ code lives under `src/main/native` and the Java classes with native
-methods are:
-
-* `NativePosixFiles` and `NativePosixFileSystem`
-* `ProcessUtils`
-* `WindowsFileOperations` and `WindowsFileProcesses`
-* `com.google.devtools.build.lib.platform`
-
-## Console output
-
-Emitting console output seems like a simple thing, but the confluence of
-running multiple processes (sometimes remotely), fine-grained caching, the
-desire to have a nice and colorful terminal output and having a long-running
-server makes it non-trivial.
-
-Right after the RPC call comes in from the client, two `RpcOutputStream`
-instances are created (for stdout and stderr) that forward the data printed
-into them to the client. These are then wrapped in an `OutErr` (an
-(stdout, stderr) pair). Anything that needs to be printed on the console goes
-through these streams. Then these streams are handed over to
-`BlazeCommandDispatcher.execExclusively()`.
-
-Output is by default printed with ANSI escape sequences. When these are not
-desired (`--color=no`), they are stripped by an `AnsiStrippingOutputStream`.
-In addition, `System.out` and `System.err` are redirected to these output
-streams. This is so that debugging information can be printed using
-`System.err.println()` and still end up in the terminal output of the client
-(which is different from that of the server). Care is taken that if a process
-produces binary output (such as `bazel query --output=proto`), no munging of
-stdout takes place.
-
-Short messages (errors, warnings and the like) are expressed through the
-`EventHandler` interface. Notably, these are different from what one posts to
-the `EventBus` (this is confusing). Each `Event` has an `EventKind` (error,
-warning, info, and a few others) and they may have a `Location` (the place in
-the source code that caused the event to happen).
-
-Some `EventHandler` implementations store the events they received. This is
-used to replay information to the UI caused by various kinds of cached
-processing, for example, the warnings emitted by a cached configured target.
-
-Some `EventHandler`s also allow posting events that eventually find their way
-to the event bus (regular `Event`s do _not_ appear there). These are
-implementations of `ExtendedEventHandler` and their main use is to replay
-cached `EventBus` events. These `EventBus` events all implement `Postable`,
-but not everything that is posted to `EventBus` necessarily implements this
-interface; only those that are cached by an `ExtendedEventHandler` (it would
-be nice if everything did, and most things do, but it's not enforced).
-
-Terminal output is _mostly_ emitted through `UiEventHandler`, which is
-responsible for all the fancy output formatting and progress reporting Bazel
-does. It has two inputs:
-
-* The event bus
-* The event stream piped into it through `Reporter`
-
-The only direct connection the command execution machinery (that is, the rest
-of Bazel) has to the RPC stream to the client is through
-`Reporter.getOutErr()`, which allows direct access to these streams. It's
-only used when a command needs to dump large amounts of possibly binary data
-(such as `bazel query`).
-
-## Profiling Bazel
-
-Bazel is fast. Bazel is also slow, because builds tend to grow until just the
-edge of what's bearable. For this reason, Bazel includes a profiler which can
-be used to profile builds and Bazel itself. It's implemented in a class
-that's aptly named `Profiler`.
-It's turned on by default, although it records only abridged data so that its
-overhead is tolerable; the command line option `--record_full_profiler_data`
-makes it record everything it can.
-
-It emits a profile in the Chrome profiler format; it's best viewed in Chrome.
-Its data model is that of task stacks: one can start tasks and end tasks and
-they are supposed to be neatly nested within each other. Each Java thread
-gets its own task stack. **TODO:** How does this work with actions and
-continuation-passing style?
-
-The profiler is started and stopped in `BlazeRuntime.initProfiler()` and
-`BlazeRuntime.afterCommand()` respectively and attempts to be live for as
-long as possible so that we can profile everything. To add something to the
-profile, call `Profiler.instance().profile()`. It returns a `Closeable`,
-whose closure represents the end of the task. It's best used with
-try-with-resources statements.
-
-We also do rudimentary memory profiling in `MemoryProfiler`. It's also always
-on and it mostly records maximum heap sizes and GC behavior.
-
-## Testing Bazel
-
-Bazel has two main kinds of tests: ones that observe Bazel as a "black box"
-and ones that only run the analysis phase. We call the former "integration
-tests" and the latter "unit tests", although they are more like integration
-tests that are, well, less integrated. We also have some actual unit tests,
-where they are necessary.
-
-Of integration tests, we have two kinds:
-
-1. Ones implemented using a very elaborate bash test framework under
-   `src/test/shell`
-2. Ones implemented in Java. These are implemented as subclasses of
-   `BuildIntegrationTestCase`
-
-`BuildIntegrationTestCase` is the preferred integration testing framework as
-it is well-equipped for most testing scenarios. As it is a Java framework, it
-provides debuggability and seamless integration with many common development
-tools. There are many examples of `BuildIntegrationTestCase` classes in the
-Bazel repository.
-
-Analysis tests are implemented as subclasses of `BuildViewTestCase`. There is
-a scratch file system you can use to write `BUILD` files, then various helper
-methods can request configured targets, change the configuration and assert
-various things about the result of the analysis.
diff --git a/8.1.1/contribute/design-documents.mdx b/8.1.1/contribute/design-documents.mdx
deleted file mode 100644
index 1fe70b9..0000000
--- a/8.1.1/contribute/design-documents.mdx
+++ /dev/null
@@ -1,254 +0,0 @@
----
-title: 'Design Documents'
----
-
-
-If you're planning to add, change, or remove a user-facing feature, or make a
-*significant architectural change* to Bazel, you **must** write a design
-document and have it reviewed before you can submit the change.
-
-Here are some examples of significant changes:
-
-* Addition or deletion of native build rules
-* Breaking changes to native rules
-* Changes to native build rule semantics that affect the behavior of more
-  than a single rule
-* Changes to Bazel's rule definition API
-* Changes to the APIs that Bazel uses to connect to other systems
-* Changes to the Starlark language, semantics, or APIs
-* Changes that could have a pervasive effect on Bazel performance or memory
-  usage (for better or for worse)
-* Changes to widely used internal APIs
-* Changes to flags and the command-line interface.
-
-## Reasons for design reviews
-
-When you write a design document, you can coordinate with other Bazel
-developers and seek guidance from Bazel's core team.
-For example, when a proposal adds, removes, or modifies any function or
-object available in BUILD, MODULE.bazel, or bzl files, add the
-[Starlark team](maintainers-guide.md) as reviewers. Design documents are
-reviewed before submission because:
-
-* Bazel is a very complex system; seemingly innocuous local changes can have
-  significant global consequences.
-* The team gets many feature requests from users; such requests need to be
-  evaluated not only for technical feasibility but also for importance with
-  regard to other feature requests.
-* Bazel features are frequently implemented by people outside the core team;
-  such contributors have widely varying levels of Bazel expertise.
-* The Bazel team itself has varying levels of expertise; no single team
-  member has a complete understanding of every corner of Bazel.
-* Changes to Bazel must account for backward compatibility and avoid breaking
-  changes.
-
-Bazel's design review policy helps to maximize the likelihood that:
-
-* all feature requests get a baseline level of scrutiny.
-* the right people will weigh in on designs before we've invested in an
-  implementation that may not work.
-
-To help you get started, take a look at the design documents in the
-[Bazel Proposals Repository](https://github.com/bazelbuild/proposals).
-Designs are works in progress, so implementation details can change over time
-and with feedback. The published design documents capture the initial design,
-and *not* the ongoing changes as designs are implemented. Always go to the
-documentation for descriptions of current Bazel functionality.
-
-## Contributor Workflow
-
-As a contributor, you can write a design document, send pull requests and
-request reviewers for your proposal.
-
-### Write the design document
-
-All design documents must have a header that includes:
-
-* author
-* date of last major change
-* list of reviewers, including one (and only one)
-  [lead reviewer](#lead-reviewer)
-* current status (_draft_, _in review_, _approved_, _rejected_,
-  _being implemented_, _implemented_)
-* link to discussion thread (_to be added after the announcement_)
-
-The document can be written either [as a world-readable Google Doc](#gdocs)
-or [using Markdown](#markdown). Read below for a
-[Markdown / Google Docs comparison](#markdown-versus-gdocs).
-
-Proposals that have a user-visible impact must have a section documenting the
-impact on backward compatibility (and a rollout plan if needed).
-
-### Create a Pull Request
-
-Share your design doc by creating a pull request (PR) to add the document to
-[the design index](https://github.com/bazelbuild/proposals). Add
-your markdown file or a document link to your PR.
-
-When possible, [choose a lead reviewer](#lead-reviewer) and cc other
-reviewers. If you don't choose a lead reviewer, a Bazel maintainer will
-assign one to your PR.
-
-After you create your PR, reviewers can make preliminary comments during the
-code review. For example, the lead reviewer can suggest extra reviewers, or
-point out missing information. The lead reviewer approves the PR when they
-believe the review process can start. This doesn't mean the proposal is
-perfect or will be approved; it means that the proposal contains enough
-information to start the discussion.
-
-### Announce the new proposal
-
-Send an announcement to
-[bazel-dev](https://groups.google.com/forum/#!forum/bazel-dev) when
-the PR is submitted.
-
-You may copy other groups (for example,
-[bazel-discuss](https://groups.google.com/forum/#!forum/bazel-discuss))
-to get feedback from Bazel end-users.
-
-### Iterate with reviewers
-
-Anyone interested can comment on your proposal. Try to answer questions,
-clarify the proposal, and address concerns.
-
-Discussion should happen on the announcement thread. If the proposal is in a
-Google Doc, comments may be used instead. (Note that anonymous comments are
-allowed.)
-
-### Update the status
-
-Create a new PR to update the status of the proposal when iteration is
-complete. Send the PR to the same lead reviewer and cc the other reviewers.
-
-To officially accept the proposal, the lead reviewer approves the PR after
-ensuring that the other reviewers agree with the decision.
-
-There must be at least 1 week between the first announcement and the approval
-of a proposal. This ensures that users have enough time to read the document
-and share their concerns.
-
-Implementation can begin before the proposal is accepted, for example as a
-proof-of-concept or an experiment. However, you cannot submit the change
-before the review is complete.
-
-### Choosing a lead reviewer
-
-A lead reviewer should be a domain expert who is:
-
-* Knowledgeable of the relevant subsystems
-* Objective and capable of providing constructive feedback
-* Available for the entire review period to lead the process
-
-Consider checking the contacts for various [team
-labels](/contribute/maintainers-guide#team-labels).
-
-## Markdown vs Google Docs
-
-Decide what works best for you, since both are accepted.
-
-Benefits of using Google Docs:
-
-* Effective for brainstorming, since it is easy to get started with.
-* Collaborative editing.
-* Quick iteration.
-* Easy way to suggest edits.
-
-Benefits of using Markdown files:
-
-* Clean URLs for linking.
-* Explicit record of revisions.
-* No forgetting to set up access rights before publicizing a link.
-* Easily searchable with search engines.
-* Future-proof: Plain text is not at the mercy of any specific tool
-  and doesn't require an Internet connection.
-* It is possible to update them even if the author is not around anymore.
-* They can be processed automatically (update/detect dead links, fetch
-  list of authors, etc.).
-
-You can choose to first iterate on a Google Doc, and then convert it to
-Markdown for posterity.
-
-### Using Google Docs
-
-For consistency, use the [Bazel design doc template](
-https://docs.google.com/document/d/1cE5zrjrR40RXNg64XtRFewSv6FrLV6slGkkqxBumS1w/edit).
-It includes the necessary header and creates visual
-consistency with other Bazel related documents. To do that, click on **File**
-> **Make a copy** or click this link to [make a copy of the design doc
-template](https://docs.google.com/document/d/1cE5zrjrR40RXNg64XtRFewSv6FrLV6slGkkqxBumS1w/copy).
-
-To make your document readable to the world, click on
-**Share** > **Advanced** > **Change…**, and
-choose "On - Anyone with the link". If you allow comments on the document,
-anyone can comment anonymously, even without a Google account.
-
-### Using Markdown
-
-Documents are stored on GitHub and use the
-[GitHub flavor of Markdown](https://guides.github.com/features/mastering-markdown/)
-([Specification](https://github.github.com/gfm/)).
-
-Create a PR to update an existing document. Significant changes should be
-reviewed by the document reviewers. Trivial changes (such as typos,
-formatting) can be approved by anyone.
-
-## Reviewer workflow
-
-A reviewer comments, reviews and approves design documents.
-
-### General reviewer responsibilities
-
-You're responsible for reviewing design documents, asking for additional
-information if needed, and approving a design that passes the review process.
-
-#### When you receive a new proposal
-
-1. Take a quick look at the document.
-1. Comment if critical information is missing, or if the design doesn't fit
-   with the goals of the project.
-1. Suggest additional reviewers.
-1. Approve the PR when it is ready for review.
-
-#### During the review process
-
-1. Engage in a dialogue with the design author about issues that are
-   problematic or require clarification.
-1. If appropriate, invite comments from non-reviewers who should be aware of
-   the design.
-1. Decide which comments must be addressed by the author as a prerequisite to
-   approval.
-1. Write "LGTM" (_Looks Good To Me_) in the discussion thread when you are
-   happy with the current state of the proposal.
-
-Follow this process for all design review requests. Do not approve designs
-affecting Bazel if they are not in the
-[design index](https://github.com/bazelbuild/proposals).
-
-### Lead reviewer responsibilities
-
-You're responsible for making the go / no-go decision on implementation
-of a pending design. If you're not able to do this, you should identify a
-suitable delegate (reassign the PR to the delegate), or reassign the bug to a
-Bazel manager for further disposition.
-
-#### During the review process
-
-1. Ensure that the comment and design iteration process moves forward
-   constructively.
-1. Prior to approval, ensure that concerns from other reviewers have been
-   resolved.
-
-#### After approval by all reviewers
-
-1. Make sure there has been at least 1 week since the announcement on the
-   mailing list.
-1. Make sure the PR updates the status.
-1. Approve the PR sent by the proposal author.
-
-#### Rejecting designs
-
-1. Make sure the PR author sends a PR, or send them one.
-1. Make sure the PR updates the status of the document.
-1. Add a comment to the document explaining why the design can't be approved
-   in its current state, and outlining next steps, if any (such as "revisit
-   invalid assumptions and resubmit").
diff --git a/8.1.1/contribute/docs-style-guide.mdx b/8.1.1/contribute/docs-style-guide.mdx
deleted file mode 100644
index f50c9eb..0000000
--- a/8.1.1/contribute/docs-style-guide.mdx
+++ /dev/null
@@ -1,217 +0,0 @@
----
-title: 'Bazel docs style guide'
----
-
-
-Thank you for contributing to Bazel's documentation. This serves as a quick
-documentation style guide to get you started. For any style questions not
-answered by this guide, follow the
-[Google developer documentation style guide](https://developers.google.com/style).
-
-## Defining principles
-
-Bazel docs should uphold these principles:
-
-- **Concise.** Use as few words as possible.
-- **Clear.** Use plain language. Write without jargon for a fifth-grade
-  reading level.
-- **Consistent.** Use the same words or phrases for repeated concepts
-  throughout the docs.
-- **Correct.** Write in a way where the content stays correct for as long as
-  possible by avoiding time-based information and promises for the future.
-
-## Writing
-
-This section contains basic writing tips.
-
-### Headings
-
-- Page-level headings start at H2. (H1 headings are used as page titles.)
-- Make headings as short as is sensible. This way, they fit in the TOC
-  without wrapping.
-
-  - Yes: Permissions
-  - No: A brief note on permissions
-
-- Use sentence case for headings
-
-  - Yes: Set up your workspace
-  - No: Set Up Your Workspace
-
-- Try to make headings task-based or actionable. If headings are conceptual,
-  it may be based around understanding, but write to what the user does.
-
-  - Yes: Preserving graph order
-  - No: On the preservation of graph order
-
-### Names
-
-- Capitalize proper nouns, such as Bazel and Starlark.
-
-  - Yes: At the end of the build, Bazel prints the requested targets.
-  - No: At the end of the build, bazel prints the requested targets.
-
-- Keep it consistent. Don't introduce new names for existing concepts. Where
-  applicable, use the term defined in the
-  [Glossary](/reference/glossary).
-
-  - For example, if you're writing about issuing commands on a
-    terminal, don't use both terminal and command line on the page.
-
-### Page scope
-
-- Each page should have one purpose and that should be defined at the
-  beginning. This helps readers find what they need quicker.
-
-  - Yes: This page covers how to install Bazel on Windows.
-  - No: (No introductory sentence.)
-
-- At the end of the page, tell the reader what to do next. For pages where
-  there is no clear action, you can include links to similar concepts,
-  examples, or other avenues for exploration.
-
-### Subject
-
-In Bazel documentation, the audience should primarily be users—the people
-using Bazel to build their software.
-
-- Address your reader as "you". (If for some reason you can't use "you",
-  use gender-neutral language, such as "they".)
-  - Yes: To build Java code using Bazel,
-    you must install a JDK.
-  - **MAYBE:** For users to build Java code with Bazel, they must install a
-    JDK.
-  - No: For a user to build Java code with
-    Bazel, he or she must install a JDK.
-
-- If your audience is NOT general Bazel users, define the audience at the
-  beginning of the page or in the section. Other audiences can include
-  maintainers, contributors, migrators, or other roles.
-- Avoid "we". In user docs, there is no author; just tell people what's
-  possible.
-  - Yes: As Bazel evolves, you should update your code base to maintain
-    compatibility.
-  - No: Bazel is evolving, and we will make changes to Bazel that at
-    times will be incompatible and require some changes from Bazel users.
-
-### Temporal
-
-Where possible, avoid terms that orient things in time, such as referencing
-specific dates (Q2 2022) or saying "now", "currently", or "soon." These go
-stale quickly and could be incorrect if it's a future projection. Instead,
-specify a version level, such as "Bazel X.x and higher supports
-\<feature\>", or link to a GitHub issue.
-
-- Yes: Bazel 0.10.0 or later supports
-  remote caching.
-- No: Bazel will soon support remote
-  caching, likely in October 2017.
-
-### Tense
-
-- Use present tense. Avoid past or future tense unless absolutely necessary
-  for clarity.
-  - Yes: Bazel issues an error when it
-    finds dependencies that don't conform to this rule.
-  - No: If Bazel finds a dependency that
-    does not conform to this rule, Bazel will issue an error.
-
-- Where possible, use active voice (where a subject acts upon an object) not
-  passive voice (where an object is acted upon by a subject). Generally,
-  active voice makes sentences clearer because it shows who is responsible.
-  If using active voice detracts from clarity, use passive voice.
-  - Yes: Bazel initiates X and uses the
-    output to build Y.
-  - No: X is initiated by Bazel and then
-    afterward Y will be built with the output.
-
-### Tone
-
-Write with a business-friendly tone.
-
-- Avoid colloquial language. It's harder to translate phrases that are
-  specific to English.
-  - Yes: Good rulesets
-  - No: So what is a good ruleset?
-
-- Avoid overly formal language. Write as though you're explaining the
-  concept to someone who is curious about tech, but doesn't know the details.
-
-## Formatting
-
-### File type
-
-For readability, wrap lines at 80 characters. Long links or code snippets
-may be longer, but should start on a new line.
-
-Note: Where possible, use Markdown instead of HTML in your files. Follow the
-[GitHub Markdown Syntax Guide](https://guides.github.com/features/mastering-markdown/#syntax)
-for recommended Markdown style.
-
-### Links
-
-- Use descriptive link text instead of "here" or "below". This practice
-  makes it easier to scan a doc and is better for screen readers.
-  - Yes: For more details, see [Installing Bazel].
-  - No: For more details, see [here].
-
-- End the sentence with the link, if possible.
-  - Yes: For more details, see [link].
-  - No: See [link] for more information.
-
-### Lists
-
-- Use an ordered list to describe how to accomplish a task with steps
-- Use an unordered list to list things that aren't task based. (There should
-  still be an order of sorts, such as alphabetical, importance, etc.)
-- Write with parallel structure. For example:
-  1. Make all the list items sentences.
-  1. Start with verbs that are the same tense.
-  1. Use an ordered list if there are steps to follow.
-
-### Placeholders
-
-- Use angle brackets to denote a variable that users should change.
-  In Markdown, escape the angle brackets with a back slash: `\<example\>`.
-  - Yes: `bazel help <command>`: Prints
-    help and options for `<command>`
-  - No: bazel help _command_: Prints help
-    and options for "command"
-
-- Especially for complicated code samples, use placeholders that make sense
-  in context.
-
-### Table of contents
-
-Use the auto-generated TOC supported by the site. Don't add a manual TOC.
-
-## Code
-
-Code samples are developers' best friends. You probably know how to write
-these already, but here are a few tips.
-
-If you're referencing a small snippet of code, you can embed it in a
-sentence. If you want the reader to use the code, such as copying a command,
-use a code block.
-
-### Code blocks
-
-- Keep it short. Eliminate all redundant or unnecessary text from a code
-  sample.
-- In Markdown, specify the type of code block by adding the sample's
-  language:
-
-```shell
-...
-```
-
-- Separate commands and output into different code blocks.
-
-### Inline code formatting
-
-- Use code style for filenames, directories, paths, and small bits of code.
-- Use inline code styling instead of _italics_, "quotes," or **bolding**.
-  - Yes: `bazel help <command>`: Prints
-    help and options for `<command>`
-  - No: bazel help _command_: Prints help
-    and options for "command"
diff --git a/8.1.1/contribute/docs.mdx b/8.1.1/contribute/docs.mdx
deleted file mode 100644
index cc240cc..0000000
--- a/8.1.1/contribute/docs.mdx
+++ /dev/null
@@ -1,43 +0,0 @@
----
-title: 'Contribute to Bazel documentation'
----
-
-
-Thank you for contributing to Bazel's documentation! There are a few ways to
-help create better docs for our community.
-
-## Documentation types
-
-This site includes a few types of content.
-
- - *Narrative documentation*, which is written by technical writers and
-   engineers. Most of this site is narrative documentation that covers
-   conceptual and task-based guides.
- - *Reference documentation*, which is documentation generated from code
-   comments. You can't make changes to the reference doc pages directly, but
-   instead need to change their source.
-
-## Documentation infrastructure
-
-Bazel documentation is served from Google and the source files are mirrored
-in Bazel's GitHub repository. You can make changes to the source files in
-GitHub. If approved, you can merge the changes and a Bazel maintainer will
-update the website source to publish your updates.
-
-## Small changes
-
-You can approach small changes, such as fixing errors or typos, in a couple
-of ways.
-
- - **Pull request**. You can create a pull request in GitHub with the
-   [web-based editor](https://docs.github.com/repositories/working-with-files/managing-files/editing-files) or on a branch.
- - **Bug**. You can file a bug with details and suggested changes and the
-   Bazel documentation owners will make the update.
-
-## Large changes
-
-If you want to make substantial changes to existing documentation or propose
-new documentation, you can either create a pull request or start with a
-Google doc and contact the Bazel Owners to collaborate.
diff --git a/8.1.1/contribute/index.mdx b/8.1.1/contribute/index.mdx
deleted file mode 100644
index ee66772..0000000
--- a/8.1.1/contribute/index.mdx
+++ /dev/null
@@ -1,82 +0,0 @@
----
-title: 'Contributing to Bazel'
----
-
-
-There are many ways to help the Bazel project and ecosystem.
-
-## Provide feedback
-
-As you use Bazel, you may find things that can be improved.
-You can help by [reporting issues](http://github.com/bazelbuild/bazel/issues)
-when:
-
- - Bazel crashes or you encounter a bug that can [only be resolved using
-   `bazel clean`](/run/build#correct-incremental-rebuilds).
- - The documentation is incomplete or unclear. You can also report issues
-   from the page you are viewing by using the "Create issue"
-   link at the top right corner of the page.
- - An error message could be improved.
-
-## Participate in the community
-
-You can engage with the Bazel community by:
-
- - Answering questions [on Stack Overflow](
-   https://stackoverflow.com/questions/tagged/bazel).
- - Helping other users [on Slack](https://slack.bazel.build).
- - Improving documentation or [contributing examples](
-   https://github.com/bazelbuild/examples).
- - Sharing your experience or your tips, for example, on a blog or social
-   media.
-
-## Contribute code
-
-Bazel is a large project and making a change to the Bazel source code
-can be difficult.
-
-You can contribute to the Bazel ecosystem by:
-
- - Helping rules maintainers by contributing pull requests.
- - Creating new rules and open-sourcing them.
- - Contributing to Bazel-related tools, for example, migration tools.
- - Improving Bazel integration with other IDEs and tools.
-
-Before making a change, [create a GitHub
-issue](http://github.com/bazelbuild/bazel/issues)
-or email [bazel-discuss@](mailto:bazel-discuss@googlegroups.com).
-
-The most helpful contributions fix bugs or add features (as opposed
-to stylistic, refactoring, or "cleanup" changes). Your change should
-include tests and documentation, keeping in mind backward-compatibility,
-portability, and the impact on memory usage and performance.
-
-To learn about how to submit a change, see the
-[patch acceptance process](/contribute/patch-acceptance).
-
-## Bazel's code description
-
-Bazel has a large codebase with code in multiple locations. See the
-[codebase guide](/contribute/codebase) for more details.
-
-Bazel is organized as follows:
-
-* Client code is in `src/main/cpp` and provides the command-line interface.
-* Protocol buffers are in `src/main/protobuf`.
-* Server code is in `src/main/java` and `src/test/java`.
-  * Core code, which is mostly composed of [Skyframe](/reference/skyframe)
-    and some utilities.
-  * Built-in rules are in `com.google.devtools.build.lib.rules` and in
-    `com.google.devtools.build.lib.bazel.rules`. You might want to read about
-    the [Challenges of Writing Rules](/rules/challenges) first.
-* Java native interfaces are in `src/main/native`.
-* Various tooling for language support is described in the list in the
-  [compiling Bazel](/install/compile-source) section.
-
-
-### Searching Bazel's source code
-
-To quickly search through Bazel's source code, use
-[Bazel Code Search](https://source.bazel.build/). You can navigate Bazel's
-repositories, branches, and files. You can also view history, diffs, and
-blame information. To learn more, see the
-[Bazel Code Search User Guide](/contribute/search).
diff --git a/8.1.1/contribute/maintainers-guide.mdx b/8.1.1/contribute/maintainers-guide.mdx
deleted file mode 100644
index d5edf45..0000000
--- a/8.1.1/contribute/maintainers-guide.mdx
+++ /dev/null
@@ -1,213 +0,0 @@
----
-title: 'Guide for Bazel Maintainers'
----
-
-
-This is a guide for the maintainers of the Bazel open source project.
-
-If you are looking to contribute to Bazel, please read [Contributing to
-Bazel](/contribute) instead.
-
-The objectives of this page are to:
-
-1. Serve as the maintainers' source of truth for the project’s contribution
-   process.
-1. Set expectations between the community contributors and the project
-   maintainers.
-
-Bazel's [core group of contributors](/contribute/policy) has dedicated
-subteams to manage aspects of the open source project. These are:
-
-* **Release Process**: Manage Bazel's release process.
-* **Green Team**: Grow a healthy ecosystem of rules and tools.
-* **Developer Experience Gardeners**: Encourage external contributions,
-  review issues and pull requests, and make our development workflow more
-  open.
-
-## Releases
-
-* [Release Playbook](https://github.com/bazelbuild/continuous-integration/blob/master/docs/release-playbook.md)
-* [Testing local changes with downstream projects](https://github.com/bazelbuild/continuous-integration/blob/master/docs/downstream-testing.md)
-
-## Continuous Integration
-
-Read the Green team's guide to Bazel's CI infrastructure on the
-[bazelbuild/continuous-integration](https://github.com/bazelbuild/continuous-integration/blob/master/buildkite/README.md)
-repository.
-
-## Lifecycle of an Issue
-
-1. A user creates an issue by choosing one of the
-   [issue templates](https://github.com/bazelbuild/bazel/issues/new/choose)
-   and it enters the pool of [unreviewed open
-   issues](https://github.com/bazelbuild/bazel/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+-label%3Auntriaged+-label%3Ap2+-label%3Ap1+-label%3Ap3+-label%3Ap4+-label%3Ateam-Starlark+-label%3Ateam-Rules-CPP+-label%3Ateam-Rules-Java+-label%3Ateam-XProduct+-label%3Ateam-Android+-label%3Ateam-Apple+-label%3Ateam-Configurability++-label%3Ateam-Performance+-label%3Ateam-Rules-Server+-label%3Ateam-Core+-label%3Ateam-Rules-Python+-label%3Ateam-Remote-Exec+-label%3Ateam-Local-Exec+-label%3Ateam-Bazel).
-1. A member on the Developer Experience (DevEx) subteam rotation reviews the
-   issue.
-    1. If the issue is **not a bug** or a **feature request**, the DevEx
-       member will usually close the issue and redirect the user to
-       [StackOverflow](https://stackoverflow.com/questions/tagged/bazel) and
-       [bazel-discuss](https://groups.google.com/forum/#!forum/bazel-discuss)
-       for higher visibility on the question.
-    1. If the issue belongs in one of the rules repositories owned by the
-       community, like [rules_apple](https://github.com/bazelbuild/rules_apple),
-       the DevEx member will [transfer this issue](https://docs.github.com/en/free-pro-team@latest/github/managing-your-work-on-github/transferring-an-issue-to-another-repository)
-       to the correct repository.
-    1. If the issue is vague or has missing information, the DevEx member
-       will assign the issue back to the user to request more information
-       before continuing. This usually occurs when the user does not choose
-       the right
-       [issue template](https://github.com/bazelbuild/bazel/issues/new/choose)
-       or provides incomplete information.
-1. After reviewing the issue, the DevEx member decides if the issue requires
-   immediate attention. If it does, they will assign the **P0**
-   [priority](#priority) label and an owner from the list of team leads.
-1. The DevEx member assigns the `untriaged` label and exactly one [team
-   label](#team-labels) for routing.
-1. The DevEx member also assigns exactly one `type:` label, such as
-   `type: bug` or `type: feature request`, according to the type of the
-   issue.
-1. For platform-specific issues, the DevEx member assigns one `platform:`
-   label, such as `platform:apple` for Mac-specific issues.
-1. If the issue is low priority and can be worked on by a new community
-   contributor, the DevEx member assigns the `good first issue` label.
-At this stage, the issue enters the pool of [untriaged open
-issues](https://github.com/bazelbuild/bazel/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged).
-
-Each Bazel subteam will triage all issues under labels they own, preferably
-on a weekly basis. The subteam will review and evaluate the issue and provide
-a resolution, if possible. If you are an owner of a team label, see [this
-section](#label-own) for more information.
-
-When an issue is resolved, it can be closed.
-
-## Lifecycle of a Pull Request
-
-1. A user creates a pull request.
-1. If you are a member of a Bazel team and are sending a PR against your own
-   area, you are responsible for assigning your team label and finding the
-   best reviewer.
-1. Otherwise, during daily triage, a DevEx member assigns one
-   [team label](#team-labels) and the team's technical lead (TL) for routing.
-    1. The TL may optionally assign someone else to review the PR.
-1. The assigned reviewer reviews the PR and works with the author until it is
-   approved or dropped.
-1. If approved, the reviewer **imports** the PR's commit(s) into Google's
-   internal version control system for further tests. As Bazel is the same
-   build system used internally at Google, we need to test all PR commits
-   against the internal test suite. This is the reason why we do not merge
-   PRs directly.
-1. If the imported commit passes all internal tests, the commit will be
-   squashed and exported back out to GitHub.
-1. When the commit merges into master, GitHub automatically closes the PR.
-
-
-## My team owns a label. What should I do?
-
-Subteams need to triage all issues in the [labels they own](#team-labels),
-preferably on a weekly basis.
-
-### Issues
-
-1. Filter the list of issues by your team label **and** the `untriaged`
-   label.
-1. Review the issue.
-1. Identify a [priority level](#priority) and assign the label.
-    1. The issue may have already been prioritized by the DevEx subteam if
-       it's a P0. Re-prioritize if needed.
-    1. Each issue needs to have exactly one [priority label](#priority). If
-       an issue is either P0 or P1, we assume that it is actively being
-       worked on.
-1. Remove the `untriaged` label.
-
-Note that you need to be in the [bazelbuild
-organization](https://github.com/bazelbuild) to be able to add or remove
-labels.
-
-### Pull Requests
-
-1. Filter the list of pull requests by your team label.
-1. Review open pull requests.
-    1. **Optional**: If you are assigned for the review but are not the right
-       fit for it, re-assign the appropriate reviewer to perform a code
-       review.
-1. Work with the pull request creator to complete a code review.
-1. Approve the PR.
-1. Ensure that all tests pass.
-1. Import the patch to the internal version control system and run the
-   internal presubmits.
-1. Submit the internal patch. If the patch submits and exports successfully,
-   the PR will be closed automatically by GitHub.
-
-## Priority
-
-The following definitions for priority will be used by the maintainers to
-triage issues.
-
-* [**P0**](https://github.com/bazelbuild/bazel/labels/P0) - Major broken
-  functionality that causes a Bazel release (minus release candidates) to be
-  unusable, or a downed service that severely impacts development of the
-  Bazel project. This includes regressions introduced in a new release that
-  block a significant number of users, or an incompatible breaking change
-  that was not compliant to the [Breaking
-  Change](https://docs.google.com/document/d/1q5GGRxKrF_mnwtaPKI487P8OdDRh2nN7jX6U-FXnHL0/edit?pli=1#heading=h.ceof6vpkb3ik)
-  policy. No practical workaround exists.
-* [**P1**](https://github.com/bazelbuild/bazel/labels/P1) - Critical defect
-  or feature which should be addressed in the next release, or a serious
-  issue that impacts many users (including the development of the Bazel
-  project), but a practical workaround exists. Typically does not require
-  immediate action. In high demand and planned in the current quarter's
-  roadmap.
-* [**P2**](https://github.com/bazelbuild/bazel/labels/P2) - Defect or feature
-  that should be addressed but that we are not currently working on. A
-  moderate live issue in a released Bazel version that is inconvenient for a
-  user and needs to be addressed in a future release and/or for which an easy
-  workaround exists.
-* [**P3**](https://github.com/bazelbuild/bazel/labels/P3) - Desirable minor
-  bug fix or enhancement with small impact. Not prioritized into Bazel
-  roadmaps or any imminent release, however community contributions are
-  encouraged.
-* [**P4**](https://github.com/bazelbuild/bazel/labels/P4) - Low priority
-  defect or feature request that is unlikely to get closed. Can also be kept
-  open for a potential re-prioritization if more users are impacted.
-* [**ice-box**](https://github.com/bazelbuild/bazel/issues?q=label%3Aice-box+is%3Aclosed)
-  - Issues that we currently don't have time to deal with, nor the time to
-  accept contributions for. We will close these issues to indicate that
-  nobody is working on them, but will continue to monitor their validity over
-  time and revive them if enough people are impacted and if we happen to have
-  resources to deal with them. As always, feel free to comment or add
-  reactions to these issues even when closed.
-
-## Team labels
-
-* [`team-Android`](https://github.com/bazelbuild/bazel/labels/team-Android): Issues for Android team
-  * Contact: [ahumesky](https://github.com/ahumesky)
-* [`team-Bazel`](https://github.com/bazelbuild/bazel/labels/team-Bazel): General Bazel product/strategy issues
-  * Contact: [meisterT](https://github.com/meisterT)
-* [`team-CLI`](https://github.com/bazelbuild/bazel/labels/team-CLI): Console UI
-  * Contact: [meisterT](https://github.com/meisterT)
-* [`team-Configurability`](https://github.com/bazelbuild/bazel/labels/team-Configurability): Issues for Configurability team. Includes: Core build configuration and transition system. Does *not* include: Changes to new or existing flags
-  * Contact: [gregestren](https://github.com/gregestren)
-* [`team-Core`](https://github.com/bazelbuild/bazel/labels/team-Core): Skyframe, bazel query, BEP, options parsing, bazelrc
-  * Contact: [haxorz](https://github.com/haxorz)
-* [`team-Documentation`](https://github.com/bazelbuild/bazel/labels/team-Documentation): Issues for Documentation team
-* [`team-ExternalDeps`](https://github.com/bazelbuild/bazel/labels/team-ExternalDeps): External dependency handling, Bzlmod, remote repositories, WORKSPACE file
-  * Contact: [meteorcloudy](https://github.com/meteorcloudy)
-* [`team-Loading-API`](https://github.com/bazelbuild/bazel/labels/team-Loading-API): BUILD file and macro processing: labels, package(), visibility, glob
-  * Contact: [brandjon](https://github.com/brandjon)
-* [`team-Local-Exec`](https://github.com/bazelbuild/bazel/labels/team-Local-Exec): Issues for Execution (Local) team
-  * Contact: [meisterT](https://github.com/meisterT)
-* [`team-OSS`](https://github.com/bazelbuild/bazel/labels/team-OSS): Issues for Bazel OSS team: installation, release process, Bazel packaging, website, docs infrastructure
-  * Contact: [meteorcloudy](https://github.com/meteorcloudy)
-* [`team-Performance`](https://github.com/bazelbuild/bazel/labels/team-Performance): Issues for Bazel Performance team
-  * Contact: [meisterT](https://github.com/meisterT)
-* [`team-Remote-Exec`](https://github.com/bazelbuild/bazel/labels/team-Remote-Exec): Issues for Execution (Remote) team
-  * Contact: [coeuvre](https://github.com/coeuvre)
-* [`team-Rules-API`](https://github.com/bazelbuild/bazel/labels/team-Rules-API): API for writing rules/aspects: providers, runfiles, actions, artifacts
-  * Contact: [comius](https://github.com/comius)
-* [`team-Rules-CPP`](https://github.com/bazelbuild/bazel/labels/team-Rules-CPP) / [`team-Rules-ObjC`](https://github.com/bazelbuild/bazel/labels/team-Rules-ObjC): Issues for C++/Objective-C rules, including native Apple rule logic
-  * Contact: [buildbreaker2021](https://github.com/buildbreaker2021)
-* [`team-Rules-Java`](https://github.com/bazelbuild/bazel/labels/team-Rules-Java): Issues for Java rules
-  * Contact: [hvadehra](https://github.com/hvadehra)
-* [`team-Rules-Python`](https://github.com/bazelbuild/bazel/labels/team-Rules-Python): Issues for the native Python rules
-  * Contact: [rickeylev](https://github.com/rickeylev)
-* [`team-Rules-Server`](https://github.com/bazelbuild/bazel/labels/team-Rules-Server): Issues for server-side rules included with Bazel
-  * Contact: [comius](https://github.com/comius)
-* [`team-Starlark-Integration`](https://github.com/bazelbuild/bazel/labels/team-Starlark-Integration): Non-API Bazel + Starlark integration. Includes: how Bazel triggers the Starlark interpreter, Stardoc, builtins injection, character encoding. Does *not* include: BUILD or .bzl language issues.
-  * Contact: [brandjon](https://github.com/brandjon)
-* [`team-Starlark-Interpreter`](https://github.com/bazelbuild/bazel/labels/team-Starlark-Interpreter): Issues for the Starlark interpreter (anything in [java.net.starlark](https://github.com/bazelbuild/bazel/tree/master/src/main/java/net/starlark/java)). BUILD and .bzl API issues (which represent Bazel's *integration* with Starlark) go in `team-Build-Language`.
-  * Contact: [brandjon](https://github.com/brandjon)
-
-For new issues, we have deprecated the `category: *` labels in favor of the
-team labels.
-
-See the full list of labels [here](https://github.com/bazelbuild/bazel/labels).
diff --git a/8.1.1/contribute/naming.mdx b/8.1.1/contribute/naming.mdx
deleted file mode 100644
index 144b08a..0000000
--- a/8.1.1/contribute/naming.mdx
+++ /dev/null
@@ -1,73 +0,0 @@
----
-title: 'Naming a Bazel related project'
----
-
-
-
-First, thank you for contributing to the Bazel ecosystem! Please reach out to
-the Bazel community on the
-[bazel-discuss mailing list](https://groups.google.com/forum/#!forum/bazel-discuss)
-to share your project and its suggested name.
-
-If you are building a Bazel related tool or sharing your Starlark rules,
-we recommend following these guidelines for the name of your project:
-
-## Naming Starlark rules
-
-See [Deploying new Starlark rules](/rules/deploying)
-in the docs.
-
-## Naming other Bazel related tools
-
-This section applies if you are building a tool to enrich the Bazel ecosystem,
-for example, a new IDE plugin or a new build system migrator.
-
-Picking a good name for your tool can be hard. If we’re not careful and use too
-many codenames, the Bazel ecosystem could become very difficult to understand
-for newcomers.
-
-Follow these guidelines for naming Bazel tools:
-
-1. Prefer **not introducing a new brand name**: "*Bazel*" is already a new
-brand for our users; we should avoid confusing them with too many new names.
-
-2. Prefer **using a name that includes "Bazel"**: This helps to express that it
-is a Bazel related tool, and it also helps people find it with a search engine.
-
-3. Prefer **using names that are descriptive about what the tool is doing**:
-Ideally, the name should not need a subtitle for users to have a good first
-guess at what the tool does. Using English words separated by spaces is a good
-way to achieve this.
-
-4. **It is not a requirement to use a floral or food theme**: Bazel evokes
-[basil](https://en.wikipedia.org/wiki/Basil), the plant. You do not need to
-look for a name that is a plant, a food, or that relates to "basil."
-
-5. **If your tool relates to another third party brand, use it only as a
-descriptor**: For example, use "Bazel migrator for CMake" instead of
-"CMake Bazel migrator".
-
-These guidelines also apply to the GitHub repository URL. Reading the
-repository URL should help people understand what the tool does. Of course, the
-repository name can be shorter, and it must use lower-case letters and dashes
-instead of spaces.
-
-Examples of good names:
-
-* *Bazel for Eclipse*: Users will understand that if they want to use Bazel
-  with Eclipse, this is where they should be looking. It uses a third party
-  brand as a descriptor.
-* *Bazel buildfarm*: A "buildfarm" is a
-  [compile farm](https://en.wikipedia.org/wiki/Compile_farm). Users
-  will understand that this project relates to building on servers.
-
-Examples of names to avoid:
-
-* *Ocimum*: The [scientific name of basil](https://en.wikipedia.org/wiki/Ocimum)
-  does not relate enough to the Bazel project.
-* *Bazelizer*: The tool behind this name could do a lot of things; this name is
-  not descriptive enough.
-
-Note that these recommendations are aligned with the
-[guidelines](https://opensource.google.com/docs/releasing/preparing/#name)
-Google uses when open sourcing a project.
diff --git a/8.1.1/contribute/patch-acceptance.mdx b/8.1.1/contribute/patch-acceptance.mdx
deleted file mode 100644
index 87376af..0000000
--- a/8.1.1/contribute/patch-acceptance.mdx
+++ /dev/null
@@ -1,52 +0,0 @@
----
-title: 'Patch Acceptance Process'
----
-
-
-
-This page outlines how contributors can propose and make changes to the Bazel
-code base.
-
-1. Read the [Bazel Contribution policy](/contribute/policy).
-1. Create a [GitHub issue](https://github.com/bazelbuild/bazel/) to
-   discuss your plan and design. Pull requests that change or add behavior
-   need a corresponding issue for tracking.
-1. If you're proposing significant changes, write a
-   [design document](/contribute/design-documents).
-1. Ensure you've signed a [Contributor License
-   Agreement](https://cla.developers.google.com).
-1. Prepare a git commit that implements the feature. Don't forget to add tests
-   and update the documentation. If your change has user-visible effects, please
-   [add release notes](/contribute/release-notes). If it is an incompatible change,
-   read the [guide for rolling out breaking changes](/contribute/breaking-changes).
-1. Create a pull request on
-   [GitHub](https://github.com/bazelbuild/bazel/pulls). If you're new to GitHub,
-   read [about pull
-   requests](https://help.github.com/articles/about-pull-requests/). Note that
-   we restrict permissions to create branches on the main Bazel repository, so
-   you will need to push your commit to [your own fork of the
-   repository](https://help.github.com/articles/working-with-forks/).
-1. A Bazel maintainer should assign you a reviewer within two business days
-   (excluding holidays in the USA and Germany). If you aren't assigned a
-   reviewer in that time, you can request one by emailing
-   [bazel-discuss@googlegroups.com](mailto:bazel-discuss@googlegroups.com).
-1. Work with the reviewer to complete a code review. For each change, create a
-   new commit and push it to make changes to your pull request. If the review
-   takes too long (for instance, if the reviewer is unresponsive), send an email
-   to [bazel-discuss@googlegroups.com](mailto:bazel-discuss@googlegroups.com).
-1. After your review is complete, a Bazel maintainer applies your patch to
-   Google's internal version control system.
-
-   This triggers internal presubmit checks
-   that may suggest more changes. If you haven't expressed a preference, the
-   maintainer submitting your change adds "trivial" changes (such as
-   [linting](https://en.wikipedia.org/wiki/Lint_(software))) that don't affect
-   design. If deeper changes are required or you'd prefer to apply
-   changes directly, you and the reviewer should communicate preferences
-   clearly in review comments.
-
-   After internal submission, the patch is exported as a Git commit,
-   at which point the GitHub pull request is closed. All final changes
-   are attributed to you.
diff --git a/8.1.1/contribute/policy.mdx b/8.1.1/contribute/policy.mdx
deleted file mode 100644
index 1bf0029..0000000
--- a/8.1.1/contribute/policy.mdx
+++ /dev/null
@@ -1,78 +0,0 @@
----
-translation: human
-page_type: lcat
-title: 'Contribution policy'
----
-
-
-
-This page covers Bazel's governance model and contribution policy.
-
-## Governance model
-
-The [Bazel project](https://github.com/bazelbuild) is led and managed by Google
-and has a large community of contributors outside of Google. Some Bazel
-components (such as specific rules repositories under the
-[bazelbuild](https://github.com/bazelbuild) organization) are led,
-maintained, and managed by members of the community. The Google Bazel team
-reviews suggestions to add community-owned repositories (such as rules) to the
-[bazelbuild](https://github.com/bazelbuild) GitHub organization.
-
-### Contributor roles
-
-Here are outlines of the roles in the Bazel project, including their
-responsibilities:
-
-* **Owners**: The Google Bazel team. Owners are responsible for:
-    * Strategy, maintenance, and leadership of the Bazel project.
-    * Building and maintaining Bazel's core functionality.
-    * Appointing Maintainers and approving new repositories.
-* **Maintainers**: The Google Bazel team and designated GitHub users.
-  Maintainers are responsible for:
-    * Building and maintaining the primary functionality of their repository.
-    * Reviewing and approving contributions to areas of the Bazel code base.
-    * Supporting users and contributors with timely and transparent issue
-      management, PR review, and documentation.
-    * Releasing, testing, and collaborating with Bazel Owners.
-* **Contributors**: All users who contribute code or documentation to the
-  Bazel project. Contributors are responsible for:
-    * Creating well-written PRs to contribute to Bazel's codebase and
-      documentation.
-    * Using standard channels, such as GitHub Issues, to propose changes and
-      report issues.
-
-### Becoming a Maintainer
-
-Bazel Owners may appoint Maintainers to lead well-defined areas of code, such
-as rule sets. Contributors with a record of consistent, responsible past
-contributions who are planning major contributions in the future may be
-considered to become Maintainers.
-
-## Contribution policy
-
-The Bazel project accepts contributions from external contributors. Here are
-the contribution policies for Google-managed and Community-managed areas of
-code.
-
-* **Licensing**. All Maintainers and Contributors must sign
-  [Google's Contributor License Agreement](https://cla.developers.google.com/clas).
-* **Contributions**. Owners and Maintainers should make every effort to accept
-  worthwhile contributions. All contributions must be:
-    * Well written and well tested.
-    * Discussed and approved by the Maintainers of the relevant area of code.
-      Discussions and approvals happen on GitHub Issues and in GitHub PRs.
-      Larger contributions require a
-      [design review](/contribute/design-documents).
-    * Added to Bazel's Continuous Integration system if not already present.
-    * Supportable and aligned with Bazel product direction.
-* **Code review**. All changes in all `bazelbuild` repositories require
-  review:
-    * All PRs must be approved by an Owner or Maintainer.
-    * Only Owners and Maintainers can merge PRs.
-* **Compatibility**. Owners may need to reject or request modifications to PRs
-  in the unlikely event that the change requires substantial modifications to
-  internal Google systems.
-* **Documentation**. Where relevant, feature contributions should include
-  documentation updates.
-
-For more details on contributing to Bazel, see our
-[contribution guidelines](/contribute/).
diff --git a/8.1.1/contribute/release-notes.mdx b/8.1.1/contribute/release-notes.mdx
deleted file mode 100644
index 83e1d75..0000000
--- a/8.1.1/contribute/release-notes.mdx
+++ /dev/null
@@ -1,78 +0,0 @@
----
-title: 'Writing release notes'
----
-
-
-
-This document is targeted at Bazel contributors.
-
-Commit descriptions in Bazel include a `RELNOTES:` tag followed by a release
-note. This is used by the Bazel team to track changes in each release and write
-the release announcement.
-
-## Overview
-
-* Is your change a bugfix? In that case, you don't need a release note. Please
-  include a reference to the GitHub issue.
-
-* If the change adds / removes / changes Bazel in a user-visible way, then it
-  may be advantageous to mention it.
-
-If the change is significant, follow the [design document
-policy](/contribute/design-documents) first.
-
-## Guidelines
-
-The release notes will be read by our users, so they should be short (ideally
-one sentence), avoid jargon (Bazel-internal terminology), and focus on what the
-change is about.
-
-* Include a link to the relevant documentation. Almost any release note should
-  contain a link. If the description mentions a flag, a feature, or a command
-  name, users will probably want to know more about it.
-
-* Use backquotes around code, symbols, flags, or any word containing an
-  underscore.
-
-* Do not just copy and paste bug descriptions. They are often cryptic and only
-  make sense to us, leaving the user scratching their head. Release notes are
-  meant to explain what has changed and why in user-understandable language.
-
-* Always use present tense and the format "Bazel now supports Y" or "X now does
-  Z." We don't want our release notes to sound like bug entries. All release
-  note entries should be informative and use a consistent style and language.
-
-* If something has been deprecated or removed, use "X has been deprecated" or
-  "X has been removed." Not "is removed" or "was removed."
-
-* If Bazel now does something differently, use "X now $newBehavior instead of
-  $oldBehavior" in present tense. This lets the user know in detail what to
-  expect when they use the new release.
-
-* If Bazel now supports or no longer supports something, use "Bazel now
-  supports / no longer supports X".
-
-* Explain why something has been removed / deprecated / changed. One sentence
-  is enough, but we want the user to be able to evaluate the impact on their
-  builds.
-
-* Do NOT make any promises about future functionality. Avoid "this flag will be
-  removed" or "this will be changed." It introduces uncertainty. The first
-  thing the user will wonder is "when?", and we don't want them to start
-  worrying about their current builds breaking at some unknown time.
-
-## Process
-
-As part of the [release
-process](https://github.com/bazelbuild/continuous-integration/blob/master/docs/release-playbook.md),
-we collect the `RELNOTES` tags of every commit. We copy everything in a [Google
-Doc](https://docs.google.com/document/d/1wDvulLlj4NAlPZamdlEVFORks3YXJonCjyuQMUQEmB0/edit)
-where we review, edit, and organize the notes.
-
-The release manager sends an email to the
-[bazel-dev](https://groups.google.com/forum/#!forum/bazel-dev) mailing list.
-Bazel contributors are invited to contribute to the document and make sure
-their changes are correctly reflected in the announcement.
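-
-For illustration, here is the shape of a commit description whose note would
-be collected by this process. The change, flag, and link are invented for this
-example:
-
-```
-Remove the --foo_resolver flag.
-
-RELNOTES: `--foo_resolver` has been removed because it no longer had any
-effect. Builds that set it should drop the flag; see /docs/foo for details.
-```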
-
-Later, the announcement will be submitted to the [Bazel
-blog](https://blog.bazel.build/), using the [bazel-blog
-repository](https://github.com/bazelbuild/bazel-blog/tree/master/_posts).
diff --git a/8.1.1/contribute/statemachine-guide.mdx b/8.1.1/contribute/statemachine-guide.mdx
deleted file mode 100644
index e98a96e..0000000
--- a/8.1.1/contribute/statemachine-guide.mdx
+++ /dev/null
@@ -1,1236 +0,0 @@
----
-title: 'A Guide to Skyframe `StateMachine`s'
----
-
-
-
-## Overview
-
-A Skyframe `StateMachine` is a *deconstructed* function-object that resides on
-the heap. It supports flexible evaluation without redundancy[^1] when required
-values are not immediately available but are computed asynchronously. A
-`StateMachine` must not tie up a thread while waiting; instead, it is suspended
-and resumed. The deconstruction thus exposes explicit re-entry points so that
-prior computations can be skipped.
-
-`StateMachine`s can be used to express sequences, branching, and structured
-logical concurrency, and are tailored specifically for Skyframe interaction.
-`StateMachine`s can be composed into larger `StateMachine`s and share
-sub-`StateMachine`s. Concurrency is always hierarchical by construction and
-purely logical. Every concurrent subtask runs in the single shared parent
-SkyFunction thread.
-
-## Introduction
-
-This section briefly motivates and introduces `StateMachine`s, found in the
-[`java.com.google.devtools.build.skyframe.state`](https://github.com/bazelbuild/bazel/tree/master/src/main/java/com/google/devtools/build/skyframe/state)
-package.
-
-### A brief introduction to Skyframe restarts
-
-Skyframe is a framework that performs parallel evaluation of dependency graphs.
-Each node in the graph corresponds to the evaluation of a SkyFunction, with a
-SkyKey specifying its parameters and a SkyValue specifying its result. The
-computational model is such that a SkyFunction may look up SkyValues by SkyKey,
-triggering recursive, parallel evaluation of additional SkyFunctions. When a
-requested SkyValue is not yet ready because some subgraph of computation is
-incomplete, the requesting SkyFunction does not block, which would tie up a
-thread. Instead, it observes a `null` `getValue` response and should return
-`null` instead of a SkyValue, signaling that it is incomplete due to missing
-inputs. Skyframe *restarts* the SkyFunctions when all previously requested
-SkyValues become available.
-
-Before the introduction of `SkyKeyComputeState`, the traditional way of
-handling a restart was to fully rerun the computation. Although this has
-quadratic complexity, functions written this way eventually complete because on
-each rerun, fewer lookups return `null`. With `SkyKeyComputeState` it is
-possible to associate hand-specified checkpoint data with a SkyFunction, saving
-significant recomputation.
-
-`StateMachine`s are objects that live inside `SkyKeyComputeState` and eliminate
-virtually all recomputation when a SkyFunction restarts (assuming that
-`SkyKeyComputeState` does not fall out of cache) by exposing suspend and resume
-execution hooks.
-
-### Stateful computations inside `SkyKeyComputeState`
-
-From an object-oriented design standpoint, it makes sense to consider storing
-computational objects inside `SkyKeyComputeState` instead of pure data values.
-In *Java*, the bare minimum description of a behavior-carrying object is a
-*functional interface*, and it turns out to be sufficient. A `StateMachine` has
-the following, curiously recursive, definition[^2].
-
-```
-@FunctionalInterface
-public interface StateMachine {
-  StateMachine step(Tasks tasks) throws InterruptedException;
-}
-```
-
-The `Tasks` interface is analogous to `SkyFunction.Environment`, but it is
-designed for asynchrony and adds support for logically concurrent subtasks[^3].
-
-The return value of `step` is another `StateMachine`, allowing the
-specification of a sequence of steps, inductively. `step` returns `DONE` when
-the `StateMachine` is done. For example:
-
-```
-class HelloWorld implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    System.out.println("hello");
-    return this::step2;  // The next step is HelloWorld.step2.
-  }
-
-  private StateMachine step2(Tasks tasks) {
-    System.out.println("world");
-    // DONE is a special value, defined in the `StateMachine` interface,
-    // signaling that the computation is done.
-    return DONE;
-  }
-}
-```
-
-describes a `StateMachine` with the following output.
-
-```
-hello
-world
-```
-
-Note that the method reference `this::step2` is also a `StateMachine` due to
-`step2` satisfying `StateMachine`'s functional interface definition. Method
-references are the most common way to specify the next state in a
-`StateMachine`.
-
-![Suspending and resuming](/contribute/images/suspend-resume.svg)
-
-Intuitively, breaking a computation down into `StateMachine` steps, instead of
-a monolithic function, provides the hooks needed to *suspend* and *resume* a
-computation. When `StateMachine.step` returns, there is an explicit
-*suspension* point. The continuation specified by the returned `StateMachine`
-value is an explicit *resume* point. Recomputation can thus be avoided because
-the computation can be picked up exactly where it left off.
-
-### Callbacks, continuations and asynchronous computation
-
-In technical terms, a `StateMachine` serves as a *continuation*, determining
-the subsequent computation to be executed. Instead of blocking, a
-`StateMachine` can voluntarily *suspend* by returning from the `step` function,
-which transfers control back to a [`Driver`](#drivers-and-bridging) instance.
-The `Driver` can then switch to a ready `StateMachine` or relinquish control
-back to Skyframe.
-
-Traditionally, *callbacks* and *continuations* are conflated into one concept.
-However, `StateMachine`s maintain a distinction between the two.
-
-* *Callback* - describes where to store the result of an asynchronous
-  computation.
-* *Continuation* - specifies the next execution state.
-
-Callbacks are required when invoking an asynchronous operation, which means
-that the actual operation doesn't occur immediately upon calling the method, as
-in the case of a SkyValue lookup. Callbacks should be kept as simple as
-possible.
-
-Caution: A common pitfall of callbacks is that the asynchronous computation
-must ensure the callback is called by the end of every reachable path. It's
-possible to overlook some branches, and the compiler doesn't give warnings
-about this.
-
-*Continuations* are the `StateMachine` return values of `StateMachine`s and
-encapsulate the complex execution that follows once all asynchronous
-computations resolve. This structured approach helps to keep the complexity of
-callbacks manageable.
-
-## Tasks
-
-The `Tasks` interface provides `StateMachine`s with an API to look up SkyValues
-by SkyKey and to schedule concurrent subtasks.
-
-```
-interface Tasks {
-  void enqueue(StateMachine subtask);
-
-  void lookUp(SkyKey key, Consumer<SkyValue> sink);
-
-  <E extends Exception>
-  void lookUp(SkyKey key, Class<E> exceptionClass, ValueOrExceptionSink<E> sink);
-
-  // lookUp overloads for 2 and 3 exception types exist, but are elided here.
-}
-```
-
-Tip: When any state uses the `Tasks` interface to perform lookups or create
-subtasks, those lookups and subtasks will complete before the next state
-begins.
-
-Tip: (Corollary) If subtasks are complex `StateMachine`s or recursively create
-subtasks, they all *transitively* complete before the next state begins.
-
-### SkyValue lookups
-
-`StateMachine`s use `Tasks.lookUp` overloads to look up SkyValues. They are
-analogous to `SkyFunction.Environment.getValue` and
-`SkyFunction.Environment.getValueOrThrow` and have similar exception handling
-semantics. The implementation does not immediately perform the lookup, but
-instead batches[^4] as many lookups as possible before doing so. The value
-might not be immediately available, for example, requiring a Skyframe restart,
-so the caller specifies what to do with the resulting value using a callback.
-
-The `StateMachine` processor ([`Driver`s and bridging to
-SkyFunctions](#drivers-and-bridging)) guarantees that the value is available
-before the next state begins. An example follows.
-
-```
-class DoesLookup implements StateMachine, Consumer<SkyValue> {
-  private Value value;
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    tasks.lookUp(new Key(), (Consumer<SkyValue>) this);
-    return this::processValue;
-  }
-
-  // The `lookUp` call in `step` causes this to be called before `processValue`.
-  @Override  // Implementation of Consumer<SkyValue>.
-  public void accept(SkyValue value) {
-    this.value = (Value) value;
-  }
-
-  private StateMachine processValue(Tasks tasks) {
-    System.out.println(value);  // Prints the string representation of `value`.
-    return DONE;
-  }
-}
-```
-
-In the above example, the first step does a lookup for `new Key()`, passing
-`this` as the consumer. That is possible because `DoesLookup` implements
-`Consumer<SkyValue>`.
-
-Tip: When passing `this` as a value sink, it's helpful to readers to upcast it
-to the receiver type to narrow down the purpose of passing `this`. The example
-passes `(Consumer<SkyValue>) this`.
-
-By contract, before the next state `DoesLookup.processValue` begins, all the
-lookups of `DoesLookup.step` are complete. Therefore `value` is available when
-it is accessed in `processValue`.
-
-### Subtasks
-
-`Tasks.enqueue` requests the execution of logically concurrent subtasks.
-Subtasks are also `StateMachine`s and can do anything regular `StateMachine`s
-can do, including recursively creating more subtasks or looking up SkyValues.
-Much like `lookUp`, the state machine driver ensures that all subtasks are
-complete before proceeding to the next step. An example follows.
-
-```
-class Subtasks implements StateMachine {
-  private int i = 0;
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    tasks.enqueue(new Subtask1());
-    tasks.enqueue(new Subtask2());
-    // The next step is Subtasks.processResults. It won't be called until both
-    // Subtask1 and Subtask2 are complete.
-    return this::processResults;
-  }
-
-  private StateMachine processResults(Tasks tasks) {
-    System.out.println(i);  // Prints "3".
-    return DONE;  // Subtasks is done.
-  }
-
-  private class Subtask1 implements StateMachine {
-    @Override
-    public StateMachine step(Tasks tasks) {
-      i += 1;
-      return DONE;  // Subtask1 is done.
-    }
-  }
-
-  private class Subtask2 implements StateMachine {
-    @Override
-    public StateMachine step(Tasks tasks) {
-      i += 2;
-      return DONE;  // Subtask2 is done.
-    }
-  }
-}
-```
-
-Though `Subtask1` and `Subtask2` are logically concurrent, everything runs in a
-single thread, so the "concurrent" update of `i` does not need any
-synchronization.
-
-### Structured concurrency
-
-Since every `lookUp` and `enqueue` must resolve before advancing to the next
-state, concurrency is naturally limited to tree structures. It's possible to
-create hierarchical[^5] concurrency as shown in the following example.
-
-![Structured Concurrency](/contribute/images/structured-concurrency.svg)
-
-It's hard to tell from the *UML* that the concurrency structure forms a tree.
-There's an [alternate view](#concurrency-tree-diagram) that better shows the
-tree structure.
-
-![Unstructured Concurrency](/contribute/images/unstructured-concurrency.svg)
-
-Structured concurrency is much easier to reason about.
-
-## Composition and control flow patterns
-
-This section presents examples for how multiple `StateMachine`s can be composed
-and solutions to certain control flow problems.
-
-### Sequential states
-
-This is the most common and straightforward control flow pattern. An example of
-this is shown in [Stateful computations inside
-`SkyKeyComputeState`](#stateful-computations).
-
-### Branching
-
-Branching states in `StateMachine`s can be achieved by returning different
-values using regular *Java* control flow, as shown in the following example.
-
-```
-class Branch implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    // Returns different state machines, depending on condition.
-    if (shouldUseA()) {
-      return this::performA;
-    }
-    return this::performB;
-  }
-  …
-}
-```
-
-It’s very common for certain branches to return `DONE`, for early completion.
-
-### Advanced sequential composition
-
-Since the `StateMachine` control structure is memoryless, sharing
-`StateMachine` definitions as subtasks can sometimes be awkward. Let *M1* and
-*M2* be `StateMachine` instances that share a `StateMachine`, *S*, with *M1*
-and *M2* being the sequences *<A, S, B>* and *<X, S, Y>* respectively. The
-problem is that *S* doesn’t know whether to continue to *B* or *Y* after it
-completes, and `StateMachine`s don't quite keep a call stack. This section
-reviews some techniques for achieving this.
-
-#### `StateMachine` as terminal sequence element
-
-This doesn’t solve the initial problem posed. It only demonstrates sequential
-composition when the shared `StateMachine` is terminal in the sequence.
-
-```
-// S is the shared state machine.
-class S implements StateMachine { … }
-
-class M1 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performA();
-    return new S();
-  }
-}
-
-class M2 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performX();
-    return new S();
-  }
-}
-```
-
-This works even if *S* is itself a complex state machine.
-
-#### Subtask for sequential composition
-
-Since enqueued subtasks are guaranteed to complete before the next state, it’s
-sometimes possible to slightly abuse[^6] the subtask mechanism.
-
-```
-class M1 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performA();
-    // S starts after `step` returns and by contract must complete before `doB`
-    // begins. It is effectively sequential, inducing the sequence < A, S, B >.
-    tasks.enqueue(new S());
-    return this::doB;
-  }
-
-  private StateMachine doB(Tasks tasks) {
-    performB();
-    return DONE;
-  }
-}
-
-class M2 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performX();
-    // Similarly, this induces the sequence < X, S, Y >.
-    tasks.enqueue(new S());
-    return this::doY;
-  }
-
-  private StateMachine doY(Tasks tasks) {
-    performY();
-    return DONE;
-  }
-}
-```
-
-#### `runAfter` injection
-
-Sometimes, abusing `Tasks.enqueue` is impossible because there are other
-parallel subtasks or `Tasks.lookUp` calls that must be completed before *S*
-executes. In this case, injecting a `runAfter` parameter into *S* can be used
-to inform *S* of what to do next.
-
-```
-class S implements StateMachine {
-  // Specifies what to run after S completes.
-  private final StateMachine runAfter;
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    … // Performs some computations.
-    return this::processResults;
-  }
-
-  @Nullable
-  private StateMachine processResults(Tasks tasks) {
-    … // Does some additional processing.
-
-    // Executes the state machine defined by `runAfter` after S completes.
-    return runAfter;
-  }
-}
-
-class M1 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performA();
-    // Passes `this::doB` as the `runAfter` parameter of S, resulting in the
-    // sequence < A, S, B >.
-    return new S(/* runAfter= */ this::doB);
-  }
-
-  private StateMachine doB(Tasks tasks) {
-    performB();
-    return DONE;
-  }
-}
-
-class M2 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performX();
-    // Passes `this::doY` as the `runAfter` parameter of S, resulting in the
-    // sequence < X, S, Y >.
-    return new S(/* runAfter= */ this::doY);
-  }
-
-  private StateMachine doY(Tasks tasks) {
-    performY();
-    return DONE;
-  }
-}
-```
-
-This approach is cleaner than abusing subtasks. However, applying this too
-liberally, for example, by nesting multiple `StateMachine`s with `runAfter`, is
-the road to [Callback Hell](#callback-hell). It’s better to break up sequential
-`runAfter`s with ordinary sequential states instead.
-
-```
-  return new S(/* runAfter= */ new T(/* runAfter= */ this::nextStep))
-```
-
-can be replaced with the following.
-
-```
-  private StateMachine step1(Tasks tasks) {
-    doStep1();
-    return new S(/* runAfter= */ this::intermediateStep);
-  }
-
-  private StateMachine intermediateStep(Tasks tasks) {
-    return new T(/* runAfter= */ this::nextStep);
-  }
-```
-
-Note: It's possible to pass `DONE` as the `runAfter` parameter when there's
-nothing to run afterwards.
-
-Tip: When using `runAfter`, always annotate the parameter with
-`/* runAfter= */` to let the reader know the meaning at the callsite.
-
-#### *Forbidden* alternative: `runAfterUnlessError`
-
-In an earlier draft, we had considered a `runAfterUnlessError` that would abort
-early on errors. This was motivated by the fact that errors often end up
-getting checked twice, once by the `StateMachine` that has a `runAfter`
-reference and once by the `runAfter` machine itself.
-
-After some deliberation, we decided that uniformity of the code is more
-important than deduplicating the error checking. It would be confusing if the
-`runAfter` mechanism did not work in a consistent manner with the
-`tasks.enqueue` mechanism, which always requires error checking.
-
-Warning: When using `runAfter`, the machine that has the injected `runAfter`
-should invoke it unconditionally at completion, even on error, for consistency.
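-
-As a minimal sketch of what this warning means in practice (the names `MyKey`
-and `MyException` are invented, and reporting the error to the parent is
-elided), the completion step returns `runAfter` whether or not an error was
-recorded:
-
-```
-class S implements StateMachine {
-  private final StateMachine runAfter;
-  @Nullable private MyException error;  // Set by the error handling callback.
-
-  S(StateMachine runAfter) {
-    this.runAfter = runAfter;
-  }
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    tasks.lookUp(new MyKey(), MyException.class,
-        (value, exception) -> this.error = exception);
-    return this::complete;
-  }
-
-  private StateMachine complete(Tasks tasks) {
-    if (error != null) {
-      … // Reports `error` through the parent's callback.
-    }
-    // Returns `runAfter` even on error, mirroring the unconditional
-    // behavior of `tasks.enqueue`.
-    return runAfter;
-  }
-}
-```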
-
-### Direct delegation
-
-Each time there is a formal state transition, the main `Driver` loop advances.
-As per contract, advancing states means that all previously enqueued SkyValue
-lookups and subtasks resolve before the next state executes. Sometimes the
-logic of a delegate `StateMachine` makes a phase advance unnecessary or
-counterproductive. For example, if the first `step` of the delegate performs
-SkyKey lookups that could be parallelized with lookups of the delegating state,
-then a phase advance would make them sequential. It could make more sense to
-perform direct delegation, as shown in the example below.
-
-```
-class Parent implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    tasks.lookUp(new Key1(), this);
-    // Directly delegates to `Delegate`.
-    //
-    // The (valid) alternative:
-    //   return new Delegate(this::afterDelegation);
-    // would cause `Delegate.step` to execute after `step` completes, which
-    // would cause lookups of `Key1` and `Key2` to be sequential instead of
-    // parallel.
-    return new Delegate(this::afterDelegation).step(tasks);
-  }
-
-  private StateMachine afterDelegation(Tasks tasks) {
-    …
-  }
-}
-
-class Delegate implements StateMachine {
-  private final StateMachine runAfter;
-
-  Delegate(StateMachine runAfter) {
-    this.runAfter = runAfter;
-  }
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    tasks.lookUp(new Key2(), this);
-    return …;
-  }
-
-  // Rest of implementation.
-  …
-
-  private StateMachine complete(Tasks tasks) {
-    …
-    return runAfter;
-  }
-}
-```
-
-## Data flow
-
-The focus of the previous discussion has been on managing control flow. This
-section describes the propagation of data values.
-
-### Implementing `Tasks.lookUp` callbacks
-
-There’s an example of implementing a `Tasks.lookUp` callback in [SkyValue
-lookups](#skyvalue-lookups). This section provides rationale and suggests
-approaches for handling multiple SkyValues.
-
-#### `Tasks.lookUp` callbacks
-
-The `Tasks.lookUp` method takes a callback, `sink`, as a parameter.
-
-```
-  void lookUp(SkyKey key, Consumer<SkyValue> sink);
-```
-
-The idiomatic approach would be to use a *Java* lambda to implement this:
-
-```
-  tasks.lookUp(key, value -> myValue = (MyValueClass) value);
-```
-
-with `myValue` being a member variable of the `StateMachine` instance doing the
-lookup. However, the lambda requires an extra memory allocation compared to
-implementing the `Consumer<SkyValue>` interface in the `StateMachine`
-implementation. The lambda is still useful when there are multiple lookups that
-would be ambiguous.
-
-Note: Bikeshed warning. There is a noticeable difference of approximately 1%
-end-to-end CPU usage when implementing callbacks systematically in
-`StateMachine` implementations compared to using lambdas, which makes this
-recommendation debatable. To avoid unnecessary debates, it is advised to leave
-the decision up to the individual implementing the solution.
-
-There are also error handling overloads of `Tasks.lookUp` that are analogous to
-`SkyFunction.Environment.getValueOrThrow`.
-
-```
-  <E extends Exception>
-  void lookUp(SkyKey key, Class<E> exceptionClass, ValueOrExceptionSink<E> sink);
-
-  interface ValueOrExceptionSink<E extends Exception> {
-    void acceptValueOrException(@Nullable SkyValue value, @Nullable E exception);
-  }
-```
-
-An example implementation is shown below.
-
-```
-class PerformLookupWithError implements StateMachine, ValueOrExceptionSink<MyException> {
-  private MyValue value;
-  private MyException error;
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    tasks.lookUp(new MyKey(), MyException.class, (ValueOrExceptionSink<MyException>) this);
-    return this::processResult;
-  }
-
-  @Override
-  public void acceptValueOrException(@Nullable SkyValue value, @Nullable MyException exception) {
-    if (value != null) {
-      this.value = (MyValue) value;
-      return;
-    }
-    if (exception != null) {
-      this.error = exception;
-      return;
-    }
-    throw new IllegalArgumentException("Both parameters were unexpectedly null.");
-  }
-
-  private StateMachine processResult(Tasks tasks) {
-    if (error != null) {
-      // Handles the error.
-      …
-      return DONE;
-    }
-    // Processes `value`, which is non-null.
-    …
-  }
-}
-```
-
-As with lookups without error handling, having the `StateMachine` class
-directly implement the callback saves a memory allocation for the lambda.
-
-[Error handling](#error-handling) provides a bit more detail, but essentially,
-there's not much difference between the propagation of errors and normal
-values.
-
-#### Consuming multiple SkyValues
-
-Multiple SkyValue lookups are often required. An approach that works much of
-the time is to switch on the type of SkyValue. The following is an example that
-has been simplified from prototype production code.
-
-```
-  @Nullable
-  private StateMachine fetchConfigurationAndPackage(Tasks tasks) {
-    var configurationKey = configuredTarget.getConfigurationKey();
-    if (configurationKey != null) {
-      tasks.lookUp(configurationKey, (Consumer<SkyValue>) this);
-    }
-
-    var packageId = configuredTarget.getLabel().getPackageIdentifier();
-    tasks.lookUp(PackageValue.key(packageId), (Consumer<SkyValue>) this);
-
-    return this::constructResult;
-  }
-
-  @Override  // Implementation of `Consumer<SkyValue>`.
-  public void accept(SkyValue value) {
-    if (value instanceof BuildConfigurationValue) {
-      this.configurationValue = (BuildConfigurationValue) value;
-      return;
-    }
-    if (value instanceof PackageValue) {
-      this.pkg = ((PackageValue) value).getPackage();
-      return;
-    }
-    throw new IllegalArgumentException("unexpected value: " + value);
-  }
-```
-
-The `Consumer<SkyValue>` callback implementation can be shared unambiguously
-because the value types are different. When that’s not the case, falling back
-to lambda-based implementations or full inner-class instances that implement
-the appropriate callbacks is viable.
-
-### Propagating values between `StateMachine`s
-
-So far, this document has only explained how to arrange work in a subtask, but
-subtasks also need to report values back to the caller. Since subtasks are
-logically asynchronous, their results are communicated back to the caller
-using a *callback*. To make this work, the subtask defines a sink interface
-that is injected via its constructor.
-
-```
-class BarProducer implements StateMachine {
-  // Callers of BarProducer implement the following interface to accept its
-  // results. Exactly one of the two methods will be called by the time
-  // BarProducer completes.
-  interface ResultSink {
-    void acceptBarValue(Bar value);
-    void acceptBarError(BarException exception);
-  }
-
-  private final ResultSink sink;
-
-  BarProducer(ResultSink sink) {
-    this.sink = sink;
-  }
-
-  … // StateMachine steps that end with this::complete.
-
-  private StateMachine complete(Tasks tasks) {
-    if (hasError()) {
-      sink.acceptBarError(getError());
-      return DONE;
-    }
-    sink.acceptBarValue(getValue());
-    return DONE;
-  }
-}
-```
-
-Tip: It would be tempting to use the more concise signature `void accept(Bar
-value)` rather than the stuttery `void acceptBarValue(Bar value)` above.
-However, `Consumer<Bar>` is a common overload of `void accept(Bar value)`, so
-doing this often leads to violations of the [Overloads: never
-split](https://google.github.io/styleguide/javaguide.html#s3.4.2-ordering-class-contents)
-style-guide rule.
-
-Tip: Using a custom `ResultSink` type instead of a generic one from
-`java.util.function` makes it easy to find implementations in the code base,
-improving readability.
-
-A caller `StateMachine` would then look like the following.
-
-```
-class Caller implements StateMachine, BarProducer.ResultSink {
-  interface ResultSink {
-    void acceptCallerValue(Bar value);
-    void acceptCallerError(BarException error);
-  }
-
-  private final ResultSink sink;
-
-  private Bar value;
-
-  Caller(ResultSink sink) {
-    this.sink = sink;
-  }
-
-  @Override
-  @Nullable
-  public StateMachine step(Tasks tasks) {
-    tasks.enqueue(new BarProducer((BarProducer.ResultSink) this));
-    return this::processResult;
-  }
-
-  @Override
-  public void acceptBarValue(Bar value) {
-    this.value = value;
-  }
-
-  @Override
-  public void acceptBarError(BarException error) {
-    sink.acceptCallerError(error);
-  }
-
-  private StateMachine processResult(Tasks tasks) {
-    // Since all enqueued subtasks resolve before `processResult` starts, one
-    // of the `BarProducer.ResultSink` callbacks must have been called by this
-    // point.
-    if (value == null) {
-      return DONE;  // There was a previously reported error.
-    }
-    var finalResult = computeResult(value);
-    sink.acceptCallerValue(finalResult);
-    return DONE;
-  }
-}
-```
-
-The preceding example demonstrates a few things. `Caller` has to propagate its
-results back and defines its own `Caller.ResultSink`. `Caller` implements the
-`BarProducer.ResultSink` callbacks. Upon resumption, `processResult` checks if
-`value` is null to determine if an error occurred. This is a common behavior
-pattern after accepting output from either a subtask or SkyValue lookup.
-
-Note that the implementation of `acceptBarError` eagerly forwards the result to
-the `Caller.ResultSink`, as required by [Error bubbling](#error-bubbling).
-
-Alternatives for top-level `StateMachine`s are described in [`Driver`s and
-bridging to SkyFunctions](#drivers-and-bridging).
-
-### Error handling
-
-There are a couple of examples of error handling already in [`Tasks.lookUp`
-callbacks](#tasks-lookup-callbacks) and [Propagating values between
-`StateMachines`](#propagating-values). Exceptions, other than
-`InterruptedException`, are not thrown, but instead passed around through
-callbacks as values. Such callbacks often have exclusive-or semantics, with
-exactly one of a value or error being passed.
-
-The next section describes a subtle, but important interaction with Skyframe
-error handling.
-
-#### Error bubbling (--nokeep\_going)
-
-Warning: Errors need to be eagerly propagated all the way back to the
-SkyFunction for error bubbling to function correctly.
-
-During error bubbling, a SkyFunction may be restarted even if not all requested
-SkyValues are available. In such cases, the subsequent state will never be
-reached due to the `Tasks` API contract. However, the `StateMachine` should
-still propagate the exception.
-
-Since propagation must occur regardless of whether the next state is reached,
-the error handling callback must perform this task. For an inner
-`StateMachine`, this is achieved by invoking the parent callback.
-
-At the top-level `StateMachine`, which interfaces with the SkyFunction, this
-can be done by calling the `setException` method of `ValueOrExceptionProducer`.
-`ValueOrExceptionProducer.tryProduceValue` will then throw the exception, even
-if there are missing SkyValues.
-
-If a `Driver` is being utilized directly, it is essential to check for
-propagated errors from the SkyFunction, even if the machine has not finished
-processing.
-
-### Event Handling
-
-For SkyFunctions that need to emit events, a `StoredEventHandler` is injected
-into `SkyKeyComputeState` and further injected into `StateMachine`s that
-require it. Historically, the `StoredEventHandler` was needed because Skyframe
-dropped certain events unless they were replayed, but this has since been
-fixed. `StoredEventHandler` injection is preserved because it simplifies the
-implementation of events emitted from error handling callbacks.
-
-## `Driver`s and bridging to SkyFunctions
-
-A `Driver` is responsible for managing the execution of `StateMachine`s,
-beginning with a specified root `StateMachine`. As `StateMachine`s can
-recursively enqueue subtask `StateMachine`s, a single `Driver` can manage
-numerous subtasks. These subtasks create a tree structure, a result of
-[Structured concurrency](#structured-concurrency). The `Driver` batches
-SkyValue lookups across subtasks for improved efficiency.
-
-There are a number of classes built around the `Driver`, with the following
-API.
-
-```
-public final class Driver {
-  public Driver(StateMachine root);
-  public boolean drive(SkyFunction.Environment env) throws InterruptedException;
-}
-```
-
-`Driver` takes a single root `StateMachine` as a parameter. Calling
-`Driver.drive` executes the `StateMachine` as far as it can go without a
-Skyframe restart. It returns true when the `StateMachine` completes and false
-otherwise, indicating that not all values were available.
-
-`Driver` maintains the concurrent state of the `StateMachine` and is well
-suited for embedding in `SkyKeyComputeState`.
-
-### Directly instantiating `Driver`
-
-`StateMachine` implementations conventionally communicate their results via
-callbacks. It's possible to directly instantiate a `Driver` as shown in the
-following example.
-
-The `Driver` is embedded in the `SkyKeyComputeState` implementation along with
-an implementation of the corresponding `ResultSink` to be defined a bit further
-down. At the top level, the `State` object is an appropriate receiver for the
-result of the computation as it is guaranteed to outlive `Driver`.
-
-```
-class State implements SkyKeyComputeState, ResultProducer.ResultSink {
-  // The `Driver` instance, containing the full tree of all `StateMachine`
-  // states. Responsible for calling `StateMachine.step` implementations when
-  // asynchronous values are available and performing batched SkyFrame lookups.
-  //
-  // Non-null while `result` is being computed.
-  private Driver resultProducer;
-
-  // Variable for storing the result of the `StateMachine`.
-  //
-  // Will be non-null after the computation completes.
-  private ResultType result;
-
-  // Implements `ResultProducer.ResultSink`.
-  //
-  // `ResultProducer` propagates its final value through a callback that is
-  // implemented here.
-  @Override
-  public void acceptResult(ResultType result) {
-    this.result = result;
-  }
-}
-```
-
-The code below sketches the `ResultProducer`.
-
-```
-class ResultProducer implements StateMachine {
-  interface ResultSink {
-    void acceptResult(ResultType value);
-  }
-
-  private final Parameters parameters;
-  private final ResultSink sink;
-
-  … // Other internal state.
-
-  ResultProducer(Parameters parameters, ResultSink sink) {
-    this.parameters = parameters;
-    this.sink = sink;
-  }
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    … // Implementation.
-    return this::complete;
-  }
-
-  private StateMachine complete(Tasks tasks) {
-    sink.acceptResult(getResult());
-    return DONE;
-  }
-}
-```
-
-Then the code for lazily computing the result could look like the following.
-
-```
-@Nullable
-private Result computeResult(State state, SkyFunction.Environment env)
-    throws InterruptedException {
-  if (state.result != null) {
-    return state.result;
-  }
-  if (state.resultProducer == null) {
-    state.resultProducer = new Driver(new ResultProducer(
-        new Parameters(), (ResultProducer.ResultSink) state));
-  }
-  if (state.resultProducer.drive(env)) {
-    // Clears the `Driver` instance as it is no longer needed.
-    state.resultProducer = null;
-  }
-  return state.result;
-}
-```
-
-### Embedding `Driver`
-
-If the `StateMachine` produces a value and raises no exceptions, embedding
-`Driver` is another possible implementation, as shown in the following example.
-
-```
-class ResultProducer implements StateMachine {
-  private final Parameters parameters;
-  private final Driver driver;
-
-  private ResultType result;
-
-  ResultProducer(Parameters parameters) {
-    this.parameters = parameters;
-    this.driver = new Driver(this);
-  }
-
-  @Nullable  // Null when a Skyframe restart is needed.
-  public ResultType tryProduceValue(SkyFunction.Environment env)
-      throws InterruptedException {
-    if (!driver.drive(env)) {
-      return null;
-    }
-    return result;
-  }
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    … // Implementation.
-  }
-}
-```
-
-The SkyFunction may have code that looks like the following (where `State` is
-the function-specific type of `SkyKeyComputeState`).
-
-```
-@Nullable  // Null when a Skyframe restart is needed.
-Result computeResult(SkyFunction.Environment env, State state)
-    throws InterruptedException {
-  if (state.result != null) {
-    return state.result;
-  }
-  if (state.resultProducer == null) {
-    state.resultProducer = new ResultProducer(new Parameters());
-  }
-  var result = state.resultProducer.tryProduceValue(env);
-  if (result == null) {
-    return null;
-  }
-  state.resultProducer = null;
-  return state.result = result;
-}
-```
-
-Embedding `Driver` in the `StateMachine` implementation is a better fit for
-Skyframe's synchronous coding style.
-
-### StateMachines that may produce exceptions
-
-Otherwise, there are `SkyKeyComputeState`-embeddable `ValueOrExceptionProducer`
-and `ValueOrException2Producer` classes that have synchronous APIs to match
-synchronous SkyFunction code.
-
-The `ValueOrExceptionProducer` abstract class includes the following methods.
-
-```
-public abstract class ValueOrExceptionProducer<V, E extends Exception>
-    implements StateMachine {
-  @Nullable
-  public final V tryProduceValue(Environment env)
-      throws InterruptedException, E {
-    … // Implementation.
-  }
-
-  protected final void setValue(V value) { … // Implementation. }
-  protected final void setException(E exception) { … // Implementation. }
-}
-```
-
-It includes an embedded `Driver` instance and closely resembles the
-`ResultProducer` class in [Embedding driver](#embedding-driver) and interfaces
-with the SkyFunction in a similar manner. Instead of defining a `ResultSink`,
-implementations call `setValue` or `setException` when either of those occurs.
-When both occur, the exception takes priority. The `tryProduceValue` method
-bridges the asynchronous callback code to synchronous code and throws an
-exception when one is set.
-
-As previously noted, during error bubbling, it's possible for an error to occur
-even if the machine is not yet done because not all inputs are available. To
-accommodate this, `tryProduceValue` throws any set exceptions, even before the
-machine is done.
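-
-As a rough sketch of the resulting SkyFunction code (the names `MyProducer`,
-`MyValue`, `MyException`, and `MySkyFunctionException` are invented, and the
-compute-state accessor shown is illustrative), the synchronous API is used
-like this:
-
-```
-@Nullable
-@Override
-public SkyValue compute(SkyKey skyKey, Environment env)
-    throws InterruptedException, MySkyFunctionException {
-  // MyProducer extends ValueOrExceptionProducer<MyValue, MyException> and
-  // lives in the SkyKeyComputeState so it survives Skyframe restarts.
-  var producer = env.getState(MyProducer::new);
-  try {
-    var value = producer.tryProduceValue(env);
-    if (value == null) {
-      return null;  // A Skyframe restart is needed.
-    }
-    return value;
-  } catch (MyException e) {
-    // May be thrown even before the machine is done, during error bubbling.
-    throw new MySkyFunctionException(e);
-  }
-}
-```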
-
-## Epilogue: Eventually removing callbacks
-
-`StateMachine`s are a highly efficient but boilerplate-intensive way to perform
-asynchronous computation. Continuations (particularly in the form of
-`Runnable`s passed to `ListenableFuture`) are widespread in certain parts of
-*Bazel* code, but aren't prevalent in analysis SkyFunctions. Analysis is mostly
-CPU bound and there are no efficient asynchronous APIs for disk I/O.
-Eventually, it would be good to optimize away callbacks as they have a learning
-curve and impede readability.
-
-One of the most promising alternatives is *Java* virtual threads. Instead of
-having to write callbacks, everything is replaced with synchronous, blocking
-calls. This is possible because tying up a virtual thread resource, unlike a
-platform thread, is supposed to be cheap. However, even with virtual threads,
-replacing simple synchronous operations with thread creation and
-synchronization primitives is too expensive. We performed a migration from
-`StateMachine`s to *Java* virtual threads and they were orders of magnitude
-slower, leading to almost a 3x increase in end-to-end analysis latency. Since
-virtual threads are still a preview feature, it's possible that this migration
-can be performed at a later date when performance improves.
-
-Another approach to consider is waiting for *Loom* coroutines, if they ever
-become available. The advantage here is that it might be possible to reduce
-synchronization overhead by using cooperative multitasking.
-
-If all else fails, low-level bytecode rewriting could also be a viable
-alternative. With enough optimization, it might be possible to achieve
-performance that approaches hand-written callback code.
-
-## Appendix
-
-### Callback Hell
-
-Callback hell is an infamous problem in asynchronous code that uses callbacks.
-It stems from the fact that the continuation for a subsequent step is nested
-within the previous step. If there are many steps, this nesting can be
-extremely deep. If coupled with control flow, the code becomes unmanageable.
-
-```
-class CallbackHell implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    doA();
-    return t -> {
-      doB();
-      return t1 -> {
-        doC();
-        return DONE;
-      };
-    };
-  }
-}
-```
-
-One of the advantages of nested implementations is that the stack frame of the
-outer step can be preserved. In *Java*, captured lambda variables must be
-effectively final, so using such variables can be cumbersome. Deep nesting is
-avoided by returning method references as continuations instead of lambdas, as
-shown below.
-
-```
-class CallbackHellAvoided implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    doA();
-    return this::step2;
-  }
-
-  private StateMachine step2(Tasks tasks) {
-    doB();
-    return this::step3;
-  }
-
-  private StateMachine step3(Tasks tasks) {
-    doC();
-    return DONE;
-  }
-}
-```
-
-Callback hell may also occur if the [`runAfter` injection](#runafter-injection)
-pattern is used too densely, but this can be avoided by interspersing
-injections with sequential steps.
-
-#### Example: Chained SkyValue lookups
-
-It is often the case that the application logic requires dependent chains of
-SkyValue lookups, for example, if a second SkyKey depends on the first
-SkyValue. Thinking about this naively, this would result in a complex, deeply
-nested callback structure.
-
-```
-private ValueType1 value1;
-private ValueType2 value2;
-
-private StateMachine step1(...) {
-  tasks.lookUp(key1, (Consumer<SkyValue>) this);  // key1 has type KeyType1.
-  return this::step2;
-}
-
-@Override
-public void accept(SkyValue value) {
-  this.value1 = (ValueType1) value;
-}
-
-private StateMachine step2(...) {
-  KeyType2 key2 = computeKey(value1);
-  tasks.lookUp(key2, this::acceptValueType2);
-  return this::step3;
-}
-
-private void acceptValueType2(SkyValue value) {
-  this.value2 = (ValueType2) value;
-}
-```
-
-However, since continuations are specified as method references, the code looks
-procedural across state transitions: `step2` follows `step1`. Note that here, a
-method reference is used to assign `value2`. This makes the ordering of the
-code match the ordering of the computation from top-to-bottom.
-
-### Miscellaneous Tips
-
-#### Readability: Execution Ordering
-
-To improve readability, strive to keep the `StateMachine.step` implementations
-in execution order and callback implementations immediately following where
-they are passed in the code. This isn't always possible where the control flow
-branches. Additional comments might be helpful in such cases.
-
-In [Example: Chained SkyValue lookups](#chained-skyvalue-lookups), an
-intermediate method reference is created to achieve this. This trades a small
-amount of performance for readability, which is likely worthwhile here.
-
-#### Generational Hypothesis
-
-Medium-lived *Java* objects break the generational hypothesis of the *Java*
-garbage collector, which is designed to handle objects that live for a very
-short time or objects that live forever. By definition, objects in
-`SkyKeyComputeState` violate this hypothesis. Such objects, containing the
-constructed tree of all still-running `StateMachine`s, rooted at `Driver`, have
-an intermediate lifespan as they suspend, waiting for asynchronous computations
-to complete.
-
-It seems less bad in JDK19, but when using `StateMachine`s, it's sometimes
-possible to observe an increase in GC time, even with dramatic decreases in
-actual garbage generated. Since `StateMachine`s have an intermediate lifespan,
-they could be promoted to old gen, causing it to fill up more quickly, thus
-necessitating more expensive major or full GCs to clean up.
-
-The initial precaution is to minimize the use of `StateMachine` variables, but
-it is not always feasible, for example, if a value is needed across multiple
-states. Where it is possible, `step`-local stack variables are young generation
-objects and are efficiently GC'd.
-
-For `StateMachine` variables, breaking things down into subtasks and following
-the recommended pattern for [Propagating values between
-`StateMachine`s](#propagating-values) is also helpful. Observe that when
-following the pattern, only child `StateMachine`s have references to parent
-`StateMachine`s and not vice versa. This means that as children complete and
-update the parents using result callbacks, the children naturally fall out of
-scope and become eligible for GC.
-
-Finally, in some cases, a `StateMachine` variable is needed in earlier states
-but not in later states. It can be beneficial to null out references to large
-objects once it is known that they are no longer needed.
-
-#### Naming states
-
-When naming a method, it's usually possible to name it for the behavior that
-happens within that method. It's less clear how to do this in `StateMachine`s
-because there is no stack. For example, suppose method `foo` calls a sub-method
-`bar`. In a `StateMachine`, this could be translated into the state sequence
-`foo`, followed by `bar`. `foo` no longer includes the behavior `bar`. As a
-result, method names for states tend to be narrower in scope, potentially
-reflecting local behavior.
-
-### Concurrency tree diagram
-
-The following is an alternative view of the diagram in [Structured
-concurrency](#structured-concurrency) that better depicts the tree structure.
-The blocks form a small tree.
-
-![Structured Concurrency 3D](/contribute/images/structured-concurrency-3d.svg)
-
-[^1]: In contrast to Skyframe's convention of restarting from the beginning when
-    values are not available.
-[^2]: Note that `step` is permitted to throw `InterruptedException`, but the
-    examples omit this. There are a few low-level methods in *Bazel* code that
-    throw this exception and it propagates up to the `Driver`, to be described
-    later, that runs the `StateMachine`. It's fine to not declare it to be
-    thrown when unneeded.
-[^3]: Concurrent subtasks were motivated by the `ConfiguredTargetFunction` which
-    performs *independent* work for each dependency. Instead of manipulating
-    complex data structures that process all the dependencies at once,
-    introducing inefficiencies, each dependency has its own independent
-    `StateMachine`.
-[^4]: Multiple `tasks.lookUp` calls within a single step are batched together.
-    Additional batching can be created by lookups occurring within concurrent
-    subtasks.
-[^5]: This is conceptually similar to Java's structured concurrency
-    [jeps/428](https://openjdk.org/jeps/428).
-[^6]: Doing this is similar to spawning a thread and joining it to achieve
-    sequential composition.
diff --git a/8.1.1/contribute/windows-chocolatey-maintenance.mdx b/8.1.1/contribute/windows-chocolatey-maintenance.mdx
deleted file mode 100644
index c6aee8f..0000000
--- a/8.1.1/contribute/windows-chocolatey-maintenance.mdx
+++ /dev/null
@@ -1,72 +0,0 @@
----
-title: 'Maintaining Bazel Chocolatey package on Windows'
----
-
-
-
-Note: The Chocolatey package is experimental; please provide feedback
-(`@petemounce` in the issue tracker).
-
-## Prerequisites
-
-You need:
-
-* [chocolatey package manager](https://chocolatey.org) installed
-* (to publish) a chocolatey API key granting you permission to publish the
-  `bazel` package
-  * [@petemounce](https://github.com/petemounce) currently
-    maintains this unofficial package.
-* (to publish) to have set up that API key for the chocolatey source locally
-  via `choco apikey -k <your key> -s https://chocolatey.org/`
-
-## Build
-
-Compile bazel with msys2 shell and `compile.sh`.
-
-```powershell
-pushd scripts/packages/chocolatey
-  ./build.ps1 -version 0.3.2 -mode local
-popd
-```
-
-Should result in `scripts/packages/chocolatey/bazel.<version>.nupkg` being
-created.
-
-The `build.ps1` script supports `mode` values `local`, `rc` and `release`.
-
-## Test
-
-0. Build the package (with `-mode local`)
-
-    * run a webserver (`python -m SimpleHTTPServer` in
-      `scripts/packages/chocolatey` is convenient and starts one on
-      `http://localhost:8000`)
-
-0. Test the install
-
-    The `test.ps1` script should install the package cleanly (and error if it
-    did not install cleanly), then tell you what to do next.
-
-0. Test the uninstall
-
-    ```sh
-    choco uninstall bazel
-    # should remove bazel from the system
-    ```
-
-Chocolatey's moderation process automates checks here as well.
-
-## Release
-
-Modify `tools/parameters.json` for the new release's URI and checksum once the
-release has been published to GitHub releases.
-
-```powershell
-./build.ps1 -version <version> -isRelease
-./test.ps1 -version <version>
-# if the test.ps1 passes
-choco push bazel.x.y.z.nupkg --source https://chocolatey.org/
-```
-
-Chocolatey.org will then run automated checks and respond to the push via email
-to the maintainers.
diff --git a/8.1.1/contribute/windows-scoop-maintenance.mdx b/8.1.1/contribute/windows-scoop-maintenance.mdx
deleted file mode 100644
index 58e2a6c..0000000
--- a/8.1.1/contribute/windows-scoop-maintenance.mdx
+++ /dev/null
@@ -1,55 +0,0 @@
----
-title: 'Maintaining Bazel Scoop package on Windows'
----
-
-
-
-Note: The Scoop package is experimental. To provide feedback, go to
-`@excitoon` in the issue tracker.
-
-## Prerequisites
-
-You need:
-
-* [Scoop package manager](https://scoop.sh/) installed
-* GitHub account in order to publish and create pull requests to
-  [scoopinstaller/scoop-main](https://github.com/scoopinstaller/scoop-main)
-  * [@excitoon](https://github.com/excitoon) currently maintains this
-    unofficial package. Feel free to ask questions by
-    [e-mail](mailto:vladimir.chebotarev@gmail.com) or
-    [Telegram](http://telegram.me/excitoon).
-
-## Release process
-
-Scoop packages are very easy to maintain. Once you have the URL of the released
-Bazel, you need to make the appropriate changes in
-[this file](https://github.com/scoopinstaller/scoop-main/blob/master/bucket/bazel.json):
-
-- update version
-- update dependencies if needed
-- update URL
-- update hash (`sha256` by default)
-
-In your filesystem, `bazel.json` is located in the directory
-`%UserProfile%/scoop/buckets/main/bucket` by default. This directory belongs to
-your clone of the Git repository
-[scoopinstaller/scoop-main](https://github.com/scoopinstaller/scoop-main).
-
-Test the result:
-
-```
-scoop uninstall bazel
-scoop install bazel
-bazel version
-bazel something_else
-```
-
-The first time, make a fork of
-[scoopinstaller/scoop-main](https://github.com/scoopinstaller/scoop-main) and
-specify it as your own remote for `%UserProfile%/scoop/buckets/main`:
-
-```
-git remote add mine FORK_URL
-```
-
-Push your changes to your fork and create a pull request.
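-
-For illustration, after such an edit the fields listed above end up in
-`bucket/bazel.json` looking something like this (the version, URL, and hash
-values here are hypothetical placeholders, not a real release):
-
-```
-{
-    "version": "x.y.z",
-    "url": "https://github.com/bazelbuild/bazel/releases/download/x.y.z/bazel-x.y.z-windows-x86_64.exe",
-    "hash": "0123456789abcdef..."
-}
-```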
diff --git a/8.1.1/docs/android-build-performance.mdx b/8.1.1/docs/android-build-performance.mdx
deleted file mode 100644
index 0d5edc7..0000000
--- a/8.1.1/docs/android-build-performance.mdx
+++ /dev/null
@@ -1,97 +0,0 @@
----
-title: 'Android Build Performance'
----
-
-
-
-This page contains information on optimizing build performance for Android
-apps specifically. For general build performance optimization with Bazel, see
-[Optimizing Performance](/rules/performance).
-
-## Recommended flags
-
-The flags are in the
-[`bazelrc` configuration syntax](/run/bazelrc#bazelrc-syntax-semantics), so
-they can be pasted directly into a `bazelrc` file and invoked with
-`--config=<configuration_name>` on the command line.
-
-**Profiling performance**
-
-Bazel writes a JSON trace profile by default to a file called
-`command.profile.gz` in Bazel's output base.
-See the [JSON Profile documentation](/rules/performance#performance-profiling) for
-how to read and interact with the profile.
-
-**Persistent workers for Android build actions**
-
-A subset of Android build actions has support for
-[persistent workers](https://blog.bazel.build/2015/12/10/java-workers.html).
-
-These actions' mnemonics are:
-
-* DexBuilder
-* Javac
-* Desugar
-* AaptPackage
-* AndroidResourceParser
-* AndroidResourceValidator
-* AndroidResourceCompiler
-* RClassGenerator
-* AndroidResourceLink
-* AndroidAapt2
-* AndroidAssetMerger
-* AndroidResourceMerger
-* AndroidCompiledResourceMerger
-
-Enabling workers can result in better build performance by saving on JVM
-startup costs from invoking each of these tools, but at the cost of increased
-memory usage on the system by persisting them.
-
-To enable workers for these actions, apply these flags with
-`--config=android_workers` on the command line:
-
-```
-build:android_workers --strategy=DexBuilder=worker
-build:android_workers --strategy=Javac=worker
-build:android_workers --strategy=Desugar=worker
-
-# A wrapper flag for these resource processing actions:
-# - AndroidResourceParser
-# - AndroidResourceValidator
-# - AndroidResourceCompiler
-# - RClassGenerator
-# - AndroidResourceLink
-# - AndroidAapt2
-# - AndroidAssetMerger
-# - AndroidResourceMerger
-# - AndroidCompiledResourceMerger
-build:android_workers --persistent_android_resource_processor
-```
-
-The default number of persistent workers created per action is `4`. We have
-[measured improved build performance](https://github.com/bazelbuild/bazel/issues/8586#issuecomment-500070549)
-by capping the number of instances for each action to `1` or `2`, although this
-may vary depending on the system Bazel is running on, and the project being
-built.
-
-To cap the number of instances for an action, apply these flags:
-
-```
-build:android_workers --worker_max_instances=DexBuilder=2
-build:android_workers --worker_max_instances=Javac=2
-build:android_workers --worker_max_instances=Desugar=2
-build:android_workers --worker_max_instances=AaptPackage=2
-# .. and so on for each action you're interested in.
-```
-
-**Using AAPT2**
-
-[`aapt2`](https://developer.android.com/studio/command-line/aapt2) has improved
-performance over `aapt` and also creates smaller APKs. To use `aapt2`, use the
-`--android_aapt=aapt2` flag or set `aapt2` on the `aapt_version` attribute of
-`android_binary` and `android_local_test`.
-
-**SSD optimizations**
-
-The `--experimental_multi_threaded_digest` flag is useful for optimizing digest
-computation on SSDs.
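-
-As with the other flags on this page, this one can be grouped behind a
-`bazelrc` config. A minimal sketch (the `ssd` config name is arbitrary):
-
-```
-build:ssd --experimental_multi_threaded_digest
-```
-
-which is then enabled with `bazel build --config=ssd //my:target` (the target
-label is a placeholder).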
diff --git a/8.1.1/docs/android-instrumentation-test.mdx b/8.1.1/docs/android-instrumentation-test.mdx deleted file mode 100644 index bf0ff76..0000000 --- a/8.1.1/docs/android-instrumentation-test.mdx +++ /dev/null @@ -1,579 +0,0 @@ ---- -title: 'Android Instrumentation Tests' ---- - - - -_If you're new to Bazel, start with the [Building Android with -Bazel](/start/android-app ) tutorial._ - -![Running Android instrumentation tests in parallel](/docs/images/android_test.gif "Android instrumentation test") - -**Figure 1.** Running parallel Android instrumentation tests. - -[`android_instrumentation_test`](/reference/be/android#android_instrumentation_test) -allows developers to test their apps on Android emulators and devices. -It utilizes real Android framework APIs and the Android Test Library. - -For hermeticity and reproducibility, Bazel creates and launches Android -emulators in a sandbox, ensuring that tests always run from a clean state. Each -test gets an isolated emulator instance, allowing tests to run in parallel -without passing states between them. - -For more information on Android instrumentation tests, check out the [Android -developer -documentation](https://developer.android.com/training/testing/unit-testing/instrumented-unit-tests.html). - -Please file issues in the [GitHub issue tracker](https://github.com/bazelbuild/bazel/issues). - -## How it works - -When you run `bazel test` on an `android_instrumentation_test` target for the -first time, Bazel performs the following steps: - -1. Builds the test APK, APK under test, and their transitive dependencies -2. Creates, boots, and caches clean emulator states -3. Starts the emulator -4. Installs the APKs -5. Runs tests utilizing the [Android Test Orchestrator](https://developer.android.com/training/testing/junit-runner.html#using-android-test-orchestrator) -6. Shuts down the emulator -7. Reports the results - -In subsequent test runs, Bazel boots the emulator from the clean, cached state -created in step 2, so there are no leftover states from previous runs. Caching -emulator state also speeds up test runs. - -## Prerequisites - -Ensure your environment satisfies the following prerequisites: - -- **Linux**. Tested on Ubuntu 16.04, and 18.04. - -- **Bazel 0.12.0** or later. Verify the version by running `bazel info release`. - -```posix-terminal -bazel info release -``` -This results in output similar to the following: - -```none {:.devsite-disable-click-to-copy} -release 4.1.0 -``` - -- **KVM**. Bazel requires emulators to have [hardware - acceleration](https://developer.android.com/studio/run/emulator-acceleration.html#accel-check) - with KVM on Linux. You can follow these - [installation instructions](https://help.ubuntu.com/community/KVM/Installation) - for Ubuntu. - -To verify that KVM has the correct configuration, run: - -```posix-terminal -apt-get install cpu-checker && kvm-ok -``` - -If it prints the following message, you have the correct configuration: - -```none {:.devsite-disable-click-to-copy} -INFO: /dev/kvm exists -KVM acceleration can be used -``` - -- **Xvfb**. To run headless tests (for example, on CI servers), Bazel requires - the [X virtual framebuffer](https://www.x.org/archive/X11R7.6/doc/man/man1/Xvfb.1.xhtml). 
-
-To install it, run:
-
-```posix-terminal
-apt-get install xvfb
-```
-Verify that `Xvfb` is installed correctly and is installed at `/usr/bin/Xvfb`
-by running:
-
-```posix-terminal
-which Xvfb
-```
-The output is the following:
-
-```{:.devsite-disable-click-to-copy}
-/usr/bin/Xvfb
-```
-
-- **32-bit Libraries**. Some of the binaries used by the test infrastructure are
-  32-bit, so on 64-bit machines, ensure that 32-bit binaries can be run. For
-  Ubuntu, install these 32-bit libraries:
-
-```posix-terminal
-sudo apt-get install libc6:i386 libncurses5:i386 libstdc++6:i386 lib32z1 libbz2-1.0:i386
-```
-
-## Getting started
-
-Here is a typical target dependency graph of an `android_instrumentation_test`:
-
-![The target dependency graph on an Android instrumentation test](/docs/images/android_instrumentation_test.png "Target dependency graph")
-
-**Figure 2.** Target dependency graph of an `android_instrumentation_test`.
-
-
-### BUILD file
-
-The graph translates into a `BUILD` file like this:
-
-```python
-android_instrumentation_test(
-    name = "my_test",
-    test_app = ":my_test_app",
-    target_device = "@android_test_support//tools/android/emulated_devices/generic_phone:android_23_x86",
-)
-
-# Test app and library
-android_binary(
-    name = "my_test_app",
-    instruments = ":my_app",
-    manifest = "AndroidTestManifest.xml",
-    deps = [":my_test_lib"],
-    # ...
-)
-
-android_library(
-    name = "my_test_lib",
-    srcs = glob(["javatest/**/*.java"]),
-    deps = [
-        ":my_app_lib",
-        "@maven//:androidx_test_core",
-        "@maven//:androidx_test_runner",
-        "@maven//:androidx_test_espresso_espresso_core",
-    ],
-    # ...
-)
-
-# Target app and library under test
-android_binary(
-    name = "my_app",
-    manifest = "AndroidManifest.xml",
-    deps = [":my_app_lib"],
-    # ...
-)
-
-android_library(
-    name = "my_app_lib",
-    srcs = glob(["java/**/*.java"]),
-    deps = [
-        "@maven//:androidx_appcompat_appcompat",
-        "@maven//:androidx_annotation_annotation",
-    ]
-    # ...
-)
-```
-
-The main attributes of the rule `android_instrumentation_test` are:
-
-- `test_app`: An `android_binary` target. This target contains test code and
-  dependencies like Espresso and UIAutomator. The selected `android_binary`
-  target is required to specify an `instruments` attribute pointing to another
-  `android_binary`, which is the app under test.
-
-- `target_device`: An `android_device` target. This target describes the
-  specifications of the Android emulator which Bazel uses to create, launch and
-  run the tests. See the [section on choosing an Android
-  device](#android-device-target) for more information.
-
-The test app's `AndroidManifest.xml` must include [an `<instrumentation>`
-tag](https://developer.android.com/studio/test/#configure_instrumentation_manifest_settings).
-This tag must specify the attributes for the **package of the target app** and
-the **fully qualified class name of the instrumentation test runner**,
-`androidx.test.runner.AndroidJUnitRunner`.
-
-Here is an example `AndroidTestManifest.xml` for the test app (the package
-names are illustrative):
-
-```xml
-<?xml version="1.0" encoding="UTF-8"?>
-<manifest xmlns:android="http://schemas.android.com/apk/res/android"
-          package="com.example.android.app.test">
-
-  <instrumentation
-      android:name="androidx.test.runner.AndroidJUnitRunner"
-      android:targetPackage="com.example.android.app" />
-
-  <uses-sdk
-      android:minSdkVersion="16"
-      android:targetSdkVersion="27" />
-
-  <application />
-
-</manifest>
-```
-
-### WORKSPACE dependencies
-
-In order to use this rule, your project needs to depend on these external
-repositories:
-
-- `@androidsdk`: The Android SDK. Download this through Android Studio.
-
-- `@android_test_support`: Hosts the test runner, emulator launcher, and
-  `android_device` targets. You can find the [latest release
-  here](https://github.com/android/android-test/releases).
-
-Enable these dependencies by adding the following lines to your `WORKSPACE`
-file:
-
-```python
-# Android SDK
-android_sdk_repository(
-    name = "androidsdk",
-    path = "/path/to/sdk", # or set ANDROID_HOME
-)
-
-# Android Test Support
-ATS_COMMIT = "$COMMIT_HASH"
-http_archive(
-    name = "android_test_support",
-    strip_prefix = "android-test-%s" % ATS_COMMIT,
-    urls = ["https://github.com/android/android-test/archive/%s.tar.gz" % ATS_COMMIT],
-)
-load("@android_test_support//:repo.bzl", "android_test_repositories")
-android_test_repositories()
-```
-
-## Maven dependencies
-
-For managing dependencies on Maven artifacts from repositories, such as [Google
-Maven](https://maven.google.com) or [Maven Central](https://central.maven.org),
-you should use a Maven resolver, such as
-[`rules_jvm_external`](https://github.com/bazelbuild/rules_jvm_external).
-
-The rest of this page shows how to use `rules_jvm_external` to
-resolve and fetch dependencies from Maven repositories.
-
-## Choosing an android_device target
-
-`android_instrumentation_test.target_device` specifies which Android device to
-run the tests on. These `android_device` targets are defined in
-[`@android_test_support`](https://github.com/google/android-testing-support-library/tree/master/tools/android/emulated_devices).
-
-For example, you can query for the sources for a particular target by running:
-
-```posix-terminal
-bazel query --output=build @android_test_support//tools/android/emulated_devices/generic_phone:android_23_x86
-```
-Which results in output that looks similar to:
-
-```python
-# .../external/android_test_support/tools/android/emulated_devices/generic_phone/BUILD:43:1
-android_device(
-    name = "android_23_x86",
-    visibility = ["//visibility:public"],
-    tags = ["requires-kvm"],
-    generator_name = "generic_phone",
-    generator_function = "make_device",
-    generator_location = "tools/android/emulated_devices/generic_phone/BUILD:43",
-    vertical_resolution = 800,
-    horizontal_resolution = 480,
-    ram = 2048,
-    screen_density = 240,
-    cache = 32,
-    vm_heap = 256,
-    system_image = "@android_test_support//tools/android/emulated_devices/generic_phone:android_23_x86_images",
-    default_properties = "@android_test_support//tools/android/emulated_devices/generic_phone:_android_23_x86_props",
-)
-```
-
-The device target names use this template:
-
-```
-@android_test_support//tools/android/emulated_devices/<device_type>:<system>_<api_level>_x86_qemu2
-```
-
-In order to launch an `android_device`, the `system_image` for the selected API
-level is required. To download the system image, use Android SDK's
-`tools/bin/sdkmanager`. For example, to download the system image for
-`generic_phone:android_23_x86`, run `$sdk/tools/bin/sdkmanager
-"system-images;android-23;default;x86"`.
-
-To see the full list of supported `android_device` targets in
-`@android_test_support`, run the following command:
-
-```posix-terminal
-bazel query 'filter("x86_qemu2$", kind(android_device, @android_test_support//tools/android/emulated_devices/...:*))'
-```
-
-Bazel currently supports x86-based emulators only. For better performance, use
-`QEMU2` `android_device` targets instead of `QEMU` ones.
-
-## Running tests
-
-To run tests, add these lines to your project's
-`<project root>/.bazelrc` file.
- -``` -# Configurations for testing with Bazel -# Select a configuration by running -# `bazel test //my:target --config={headless, gui, local_device}` - -# Headless instrumentation tests (No GUI) -test:headless --test_arg=--enable_display=false - -# Graphical instrumentation tests. Ensure that $DISPLAY is set. -test:gui --test_env=DISPLAY -test:gui --test_arg=--enable_display=true - -# Testing with a local emulator or device. Ensure that `adb devices` lists the -# device. -# Run tests serially. -test:local_device --test_strategy=exclusive -# Use the local device broker type, as opposed to WRAPPED_EMULATOR. -test:local_device --test_arg=--device_broker_type=LOCAL_ADB_SERVER -# Uncomment and set $device_id if there is more than one connected device. -# test:local_device --test_arg=--device_serial_number=$device_id -``` - -Then, use one of the configurations to run tests: - -- `bazel test //my/test:target --config=gui` -- `bazel test //my/test:target --config=headless` -- `bazel test //my/test:target --config=local_device` - -Use __only one configuration__ or tests will fail. - -### Headless testing - -With `Xvfb`, it is possible to test with emulators without the graphical -interface, also known as headless testing. To disable the graphical interface -when running tests, pass the test argument `--enable_display=false` to Bazel: - -```posix-terminal -bazel test //my/test:target --test_arg=--enable_display=false -``` - -### GUI testing - -If the `$DISPLAY` environment variable is set, it's possible to enable the -graphical interface of the emulator while the test is running. To do this, pass -these test arguments to Bazel: - -```posix-terminal -bazel test //my/test:target --test_arg=--enable_display=true --test_env=DISPLAY -``` - -### Testing with a local emulator or device - -Bazel also supports testing directly on a locally launched emulator or connected -device. Pass the flags -`--test_strategy=exclusive` and -`--test_arg=--device_broker_type=LOCAL_ADB_SERVER` to enable local testing mode. -If there is more than one connected device, pass the flag -`--test_arg=--device_serial_number=$device_id` where `$device_id` is the id of -the device/emulator listed in `adb devices`. - -## Sample projects - -If you are looking for canonical project samples, see the [Android testing -samples](https://github.com/googlesamples/android-testing#experimental-bazel-support) -for projects using Espresso and UIAutomator. 
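-
-For example, to run one of those samples against a locally connected device
-using the `local_device` config defined above (the target label here is
-illustrative):
-
-```posix-terminal
-bazel test //ui/espresso/BasicSample:BasicSampleInstrumentationTest --config=local_device
-```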
-
-## Espresso setup
-
-If you write UI tests with [Espresso](https://developer.android.com/training/testing/espresso/)
-(`androidx.test.espresso`), you can use the following snippets to set up your
-Bazel workspace with the list of commonly used Espresso artifacts and their
-dependencies:
-
-```
-androidx.test.espresso:espresso-core
-androidx.test:rules
-androidx.test:runner
-javax.inject:javax.inject
-org.hamcrest:java-hamcrest
-junit:junit
-```
-
-One way to organize these dependencies is to create a `//:test_deps` shared
-library in your `<project root>/BUILD.bazel` file:
-
-```python
-java_library(
-    name = "test_deps",
-    visibility = ["//visibility:public"],
-    exports = [
-        "@maven//:androidx_test_espresso_espresso_core",
-        "@maven//:androidx_test_rules",
-        "@maven//:androidx_test_runner",
-        "@maven//:javax_inject_javax_inject",
-        "@maven//:org_hamcrest_java_hamcrest",
-        "@maven//:junit_junit",
-    ],
-)
-```
-
-Then, add the required dependencies in `<project root>/WORKSPACE`:
-
-```python
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-
-RULES_JVM_EXTERNAL_TAG = "2.8"
-RULES_JVM_EXTERNAL_SHA = "79c9850690d7614ecdb72d68394f994fef7534b292c4867ce5e7dec0aa7bdfad"
-
-http_archive(
-    name = "rules_jvm_external",
-    strip_prefix = "rules_jvm_external-%s" % RULES_JVM_EXTERNAL_TAG,
-    sha256 = RULES_JVM_EXTERNAL_SHA,
-    url = "https://github.com/bazelbuild/rules_jvm_external/archive/%s.zip" % RULES_JVM_EXTERNAL_TAG,
-)
-
-load("@rules_jvm_external//:defs.bzl", "maven_install")
-
-maven_install(
-    artifacts = [
-        "junit:junit:4.12",
-        "javax.inject:javax.inject:1",
-        "org.hamcrest:java-hamcrest:2.0.0.0",
-        "androidx.test.espresso:espresso-core:3.1.1",
-        "androidx.test:rules:aar:1.1.1",
-        "androidx.test:runner:aar:1.1.1",
-    ],
-    repositories = [
-        "https://maven.google.com",
-        "https://repo1.maven.org/maven2",
-    ],
-)
-```
-
-Finally, in your test `android_binary` target, add the `//:test_deps`
-dependency:
-
-```python
-android_binary(
-    name = "my_test_app",
-    instruments = "//path/to:app",
-    deps = [
-        "//:test_deps",
-        # ...
-    ],
-    # ...
-)
-```
-
-## Tips
-
-### Reading test logs
-
-Use `--test_output=errors` to print logs for failing tests, or
-`--test_output=all` to print all test output. If you're looking for an
-individual test log, go to
-`$PROJECT_ROOT/bazel-testlogs/path/to/InstrumentationTestTargetName`.
-
-For example, the test logs for the `BasicSample` canonical project are in
-`bazel-testlogs/ui/espresso/BasicSample/BasicSampleInstrumentationTest`. To
-inspect them, run:
-
-```posix-terminal
-tree bazel-testlogs/ui/espresso/BasicSample/BasicSampleInstrumentationTest
-```
-This results in the following output:
-
-```none
-
-$ tree bazel-testlogs/ui/espresso/BasicSample/BasicSampleInstrumentationTest
-.
-├── adb.409923.log -├── broker_logs -│   ├── aapt_binary.10.ok.txt -│   ├── aapt_binary.11.ok.txt -│   ├── adb.12.ok.txt -│   ├── adb.13.ok.txt -│   ├── adb.14.ok.txt -│   ├── adb.15.fail.txt -│   ├── adb.16.ok.txt -│   ├── adb.17.fail.txt -│   ├── adb.18.ok.txt -│   ├── adb.19.fail.txt -│   ├── adb.20.ok.txt -│   ├── adb.21.ok.txt -│   ├── adb.22.ok.txt -│   ├── adb.23.ok.txt -│   ├── adb.24.fail.txt -│   ├── adb.25.ok.txt -│   ├── adb.26.fail.txt -│   ├── adb.27.ok.txt -│   ├── adb.28.fail.txt -│   ├── adb.29.ok.txt -│   ├── adb.2.ok.txt -│   ├── adb.30.ok.txt -│   ├── adb.3.ok.txt -│   ├── adb.4.ok.txt -│   ├── adb.5.ok.txt -│   ├── adb.6.ok.txt -│   ├── adb.7.ok.txt -│   ├── adb.8.ok.txt -│   ├── adb.9.ok.txt -│   ├── android_23_x86.1.ok.txt -│   └── exec-1 -│   ├── adb-2.txt -│   ├── emulator-2.txt -│   └── mksdcard-1.txt -├── device_logcat -│   └── logcat1635880625641751077.txt -├── emulator_itCqtc.log -├── outputs.zip -├── pipe.log.txt -├── telnet_pipe.log.txt -└── tmpuRh4cy - ├── watchdog.err - └── watchdog.out - -4 directories, 41 files -``` - -### Reading emulator logs - -The emulator logs for `android_device` targets are stored in the `/tmp/` -directory with the name `emulator_xxxxx.log`, where `xxxxx` is a -randomly-generated sequence of characters. - -Use this command to find the latest emulator log: - -```posix-terminal -ls -1t /tmp/emulator_*.log | head -n 1 -``` - -### Testing against multiple API levels - -If you would like to test against multiple API levels, you can use a list -comprehension to create test targets for each API level. For example: - -```python -API_LEVELS = [ - "19", - "20", - "21", - "22", -] - -[android_instrumentation_test( - name = "my_test_%s" % API_LEVEL, - test_app = ":my_test_app", - target_device = "@android_test_support//tools/android/emulated_devices/generic_phone:android_%s_x86_qemu2" % API_LEVEL, -) for API_LEVEL in API_LEVELS] -``` - -## Known issues - -- [Forked adb server processes are not terminated after - tests](https://github.com/bazelbuild/bazel/issues/4853) -- While APK building works on all platforms (Linux, macOS, Windows), testing - only works on Linux. -- Even with `--config=local_adb`, users still need to specify - `android_instrumentation_test.target_device`. -- If using a local device or emulator, Bazel does not uninstall the APKs after - the test. Clean the packages by running this command: - -```posix-terminal -adb shell pm list -packages com.example.android.testing | cut -d ':' -f 2 | tr -d '\r' | xargs --L1 -t adb uninstall -``` diff --git a/8.1.1/docs/android-ndk.mdx b/8.1.1/docs/android-ndk.mdx deleted file mode 100644 index b10a566..0000000 --- a/8.1.1/docs/android-ndk.mdx +++ /dev/null @@ -1,292 +0,0 @@ ---- -title: 'Using the Android Native Development Kit with Bazel' ---- - - - -_If you're new to Bazel, please start with the [Building Android with -Bazel](/start/android-app ) tutorial._ - -## Overview - -Bazel can run in many different build configurations, including several that use -the Android Native Development Kit (NDK) toolchain. This means that normal -`cc_library` and `cc_binary` rules can be compiled for Android directly within -Bazel. Bazel accomplishes this by using the `android_ndk_repository` repository -rule. - -## Prerequisites - -Please ensure that you have installed the Android SDK and NDK. - -To set up the SDK and NDK, add the following snippet to your `WORKSPACE`: - -```python -android_sdk_repository( - name = "androidsdk", # Required. Name *must* be "androidsdk". 
-    path = "/path/to/sdk", # Optional. Can be omitted if `ANDROID_HOME` environment variable is set.
-)
-
-android_ndk_repository(
-    name = "androidndk", # Required. Name *must* be "androidndk".
-    path = "/path/to/ndk", # Optional. Can be omitted if `ANDROID_NDK_HOME` environment variable is set.
-)
-```
-
-For more information about the `android_ndk_repository` rule, see the [Build
-Encyclopedia entry](/reference/be/android#android_ndk_repository).
-
-If you're using a recent version of the Android NDK (r22 and beyond), use the
-Starlark implementation of `android_ndk_repository`.
-Follow the instructions in
-[its README](https://github.com/bazelbuild/rules_android_ndk).
-
-## Quick start
-
-To build C++ for Android, simply add `cc_library` dependencies to your
-`android_binary` or `android_library` rules.
-
-For example, given the following `BUILD` file for an Android app:
-
-```python
-# In /app/src/main/BUILD.bazel
-
-cc_library(
-    name = "jni_lib",
-    srcs = ["cpp/native-lib.cpp"],
-)
-
-android_library(
-    name = "lib",
-    srcs = ["java/com/example/android/bazel/MainActivity.java"],
-    resource_files = glob(["res/**/*"]),
-    custom_package = "com.example.android.bazel",
-    manifest = "LibraryManifest.xml",
-    deps = [":jni_lib"],
-)
-
-android_binary(
-    name = "app",
-    deps = [":lib"],
-    manifest = "AndroidManifest.xml",
-)
-```
-
-This `BUILD` file results in the following target graph:
-
-![Example results](/docs/images/android_ndk.png "Build graph results")
-
-**Figure 1.** Build graph of Android project with cc_library dependencies.
-
-To build the app, simply run:
-
-```posix-terminal
-bazel build //app/src/main:app
-```
-
-The `bazel build` command compiles the Java files, Android resource files, and
-`cc_library` rules, and packages everything into an APK:
-
-```posix-terminal
-$ zipinfo -1 bazel-bin/app/src/main/app.apk
-nativedeps
-lib/armeabi-v7a/libapp.so
-classes.dex
-AndroidManifest.xml
-...
-res/...
-...
-META-INF/CERT.SF
-META-INF/CERT.RSA
-META-INF/MANIFEST.MF
-```
-
-Bazel compiles all of the cc_libraries into a single shared object (`.so`) file,
-targeted for the `armeabi-v7a` ABI by default. To change this or build for
-multiple ABIs at the same time, see the section on [configuring the target
-ABI](#configuring-target-abi).
-
-## Example setup
-
-This example is available in the [Bazel examples
-repository](https://github.com/bazelbuild/examples/tree/master/android/ndk).
-
-In the `BUILD.bazel` file, three targets are defined with the `android_binary`,
-`android_library`, and `cc_library` rules.
-
-The `android_binary` top-level target builds the APK.
-
-The `cc_library` target contains a single C++ source file with a JNI function
-implementation:
-
-```c++
-#include <jni.h>
-#include <string>
-
-extern "C"
-JNIEXPORT jstring
-
-JNICALL
-Java_com_example_android_bazel_MainActivity_stringFromJNI(
-        JNIEnv *env,
-        jobject /* this */) {
-    std::string hello = "Hello from C++";
-    return env->NewStringUTF(hello.c_str());
-}
-```
-
-The `android_library` target specifies the Java sources, resource files, and the
-dependency on a `cc_library` target. For this example, `MainActivity.java` loads
-the shared object file `libapp.so`, and defines the method signature for the JNI
-function:
-
-```java
-public class MainActivity extends AppCompatActivity {
-
-    static {
-        System.loadLibrary("app");
-    }
-
-    @Override
-    protected void onCreate(Bundle savedInstanceState) {
-        // ...
- } - - public native String stringFromJNI(); - -} -``` - -Note: The name of the native library is derived from the name of the top -level `android_binary` target. In this example, it is `app`. - -## Configuring the target ABI - -To configure the target ABI, use the `--android_platforms` flag as follows: - -```posix-terminal -bazel build //:app --android_platforms={{ "" }}comma-separated list of platforms{{ "" }} -``` - -Just like the `--platforms` flag, the values passed to `--android_platforms` are -the labels of [`platform`](https://bazel.build/reference/be/platforms-and-toolchains#platform) -targets, using standard constraint values to describe your device. - -For example, for an Android device with a 64-bit ARM processor, you'd define -your platform like this: - -```py -platform( - name = "android_arm64", - constraint_values = [ - "@platforms//os:android", - "@platforms//cpu:arm64", - ], -) -``` - -Every Android `platform` should use the [`@platforms//os:android`](https://github.com/bazelbuild/platforms/blob/33a3b209f94856193266871b1545054afb90bb28/os/BUILD#L36) -OS constraint. To migrate the CPU constraint, check this chart: - -CPU Value | Platform -------------- | ------------------------------------------ -`armeabi-v7a` | `@platforms//cpu:armv7` -`arm64-v8a` | `@platforms//cpu:arm64` -`x86` | `@platforms//cpu:x86_32` -`x86_64` | `@platforms//cpu:x86_64` - -And, of course, for a multi-architecture APK, you pass multiple labels, for -example: `--android_platforms=//:arm64,//:x86_64` (assuming you defined those in -your top-level `BUILD.bazel` file). - -Bazel is unable to select a default Android platform, so one must be defined and -specified with `--android_platforms`. - -Depending on the NDK revision and Android API level, the following ABIs are -available: - -| NDK revision | ABIs | -|--------------|-------------------------------------------------------------| -| 16 and lower | armeabi, armeabi-v7a, arm64-v8a, mips, mips64, x86, x86\_64 | -| 17 and above | armeabi-v7a, arm64-v8a, x86, x86\_64 | - -See [the NDK docs](https://developer.android.com/ndk/guides/abis.html) -for more information on these ABIs. - -Multi-ABI Fat APKs are not recommended for release builds since they increase -the size of the APK, but can be useful for development and QA builds. - -## Selecting a C++ standard - -Use the following flags to build according to a C++ standard: - -| C++ Standard | Flag | -|--------------|-------------------------| -| C++98 | Default, no flag needed | -| C++11 | `--cxxopt=-std=c++11` | -| C++14 | `--cxxopt=-std=c++14` | -| C++17 | `--cxxopt=-std=c++17` | - -For example: - -```posix-terminal -bazel build //:app --cxxopt=-std=c++11 -``` - -Read more about passing compiler and linker flags with `--cxxopt`, `--copt`, and -`--linkopt` in the [User Manual](/docs/user-manual#cxxopt). - -Compiler and linker flags can also be specified as attributes in `cc_library` -using `copts` and `linkopts`. For example: - -```python -cc_library( - name = "jni_lib", - srcs = ["cpp/native-lib.cpp"], - copts = ["-std=c++11"], - linkopts = ["-ldl"], # link against libdl -) -``` - -## Building a `cc_library` for Android without using `android_binary` - -To build a standalone `cc_binary` or `cc_library` for Android without using an -`android_binary`, use the `--platforms` flag. 
-
-For example, assuming you have defined Android platforms in
-`my/platforms/BUILD`:
-
-```posix-terminal
-bazel build //my/cc/jni:target \
-    --platforms=//my/platforms:x86_64
-```
-
-With this approach, the entire build tree is affected.
-
-Note: All of the targets on the command line must be compatible with
-building for Android when specifying these flags, which may make it difficult to
-use [Bazel wild-cards](/run/build#specifying-build-targets) like
-`/...` and `:all`.
-
-These flags can be put into a `bazelrc` config (one for each ABI), in
-`<project>/.bazelrc`:
-
-```
-common:android_x86 --platforms=//my/platforms:x86
-
-common:android_armeabi-v7a --platforms=//my/platforms:armeabi-v7a
-
-# In general
-common:android_<abi> --platforms=//my/platforms:<abi>
-```
-
-Then, to build a `cc_library` for `x86` for example, run:
-
-```posix-terminal
-bazel build //my/cc/jni:target --config=android_x86
-```
-
-In general, use this method for low-level targets (like `cc_library`) or when
-you know exactly what you're building; rely on the automatic configuration
-transitions from `android_binary` for high-level targets where you're expecting
-to build a lot of targets you don't control.
diff --git a/8.1.1/docs/bazel-and-android.mdx b/8.1.1/docs/bazel-and-android.mdx
deleted file mode 100644
index bf3625c..0000000
--- a/8.1.1/docs/bazel-and-android.mdx
+++ /dev/null
@@ -1,45 +0,0 @@
----
-title: 'Android and Bazel'
----
-
-
-
-This page contains resources that help you use Bazel with Android projects. It
-links to a tutorial, build rules, and other information specific to building
-Android projects with Bazel.
-
-## Getting started
-
-The following resources will help you work with Bazel on Android projects:
-
-* [Tutorial: Building an Android app](/start/android-app ). This
-  tutorial is a good place to start learning about Bazel commands and concepts,
-  and how to build Android apps with Bazel.
-* [Codelab: Building Android Apps with Bazel](https://developer.android.com/codelabs/bazel-android-intro#0).
-  This codelab explains how to build Android apps with Bazel.
-
-## Features
-
-Bazel has Android rules for building and testing Android apps, integrating with
-the SDK/NDK, and creating emulator images. There are also Bazel plugins for
-Android Studio and IntelliJ.
-
-* [Android rules](/reference/be/android). The Build Encyclopedia describes the rules
-  for building and testing Android apps with Bazel.
-* [Integration with Android Studio](/install/ide). Bazel is compatible with
-  Android Studio using the [Android Studio with Bazel](https://ij.bazel.build/)
-  plugin.
-* [`mobile-install` for Android](/docs/mobile-install). Bazel's `mobile-install`
-  feature provides automated build-and-deploy functionality for building and
-  testing Android apps directly on Android devices and emulators.
-* [Android instrumentation testing](/docs/android-instrumentation-test) on
-  emulators and devices.
-* [Android NDK integration](/docs/android-ndk). Bazel supports compiling to
-  native code through direct NDK integration and the C++ rules.
-* [Android build performance](/docs/android-build-performance). This page
-  provides information on optimizing build performance for Android apps.
-
-## Further reading
-
-* Integrating with dependencies from Google Maven and Maven Central with [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external).
-* Learn [How Android Builds Work in Bazel](https://blog.bazel.build/2018/02/14/how-android-builds-work-in-bazel.html).
diff --git a/8.1.1/docs/bazel-and-apple.mdx b/8.1.1/docs/bazel-and-apple.mdx deleted file mode 100644 index 6e4a06f..0000000 --- a/8.1.1/docs/bazel-and-apple.mdx +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: 'Apple Apps and Bazel' ---- - - - -This page contains resources that help you use Bazel to build macOS and iOS -projects. It links to a tutorial, build rules, and other information specific to -using Bazel to build and test for those platforms. - -## Working with Bazel - -The following resources will help you work with Bazel on macOS and iOS projects: - -* [Tutorial: Building an iOS app](/start/ios-app) -* [Objective-C build rules](/reference/be/objective-c) -* [General Apple rules](https://github.com/bazelbuild/rules_apple) -* [Integration with Xcode](/install/ide) - -## Migrating to Bazel - -If you currently build your macOS and iOS projects with Xcode, follow the steps -in the migration guide to start building them with Bazel: - -* [Migrating from Xcode to Bazel](/migrate/xcode) - -## Apple apps and new rules - -**Note**: Creating new rules is for advanced build and test scenarios. -You do not need it when getting started with Bazel. - -The following modules, configuration fragments, and providers will help you -[extend Bazel's capabilities](/extending/concepts) -when building your macOS and iOS projects: - -* Modules: - - * [`apple_bitcode_mode`](/rules/lib/builtins/apple_bitcode_mode) - * [`apple_common`](/rules/lib/toplevel/apple_common) - * [`apple_platform`](/rules/lib/builtins/apple_platform) - * [`apple_platform_type`](/rules/lib/builtins/apple_platform_type) - * [`apple_toolchain`](/rules/lib/builtins/apple_toolchain) - -* Configuration fragments: - - * [`apple`](/rules/lib/fragments/apple) - -* Providers: - - * [`ObjcProvider`](/rules/lib/providers/ObjcProvider) - * [`XcodeVersionConfig`](/rules/lib/providers/XcodeVersionConfig) - -## Xcode selection - -If your build requires Xcode, Bazel will select an appropriate version based on -the `--xcode_config` and `--xcode_version` flags. The `--xcode_config` consumes -the set of available Xcode versions and sets a default version if -`--xcode_version` is not passed. This default is overridden by the -`--xcode_version` flag, as long as it is set to an Xcode version that is -represented in the `--xcode_config` target. - -If you do not pass `--xcode_config`, Bazel will use the autogenerated -[`XcodeVersionConfig`](/rules/lib/providers/XcodeVersionConfig) that represents the -Xcode versions available on your host machine. The default version is -the newest available Xcode version. This is appropriate for local execution. - -If you are performing remote builds, you should set `--xcode_config` to an -[`xcode_config`](/reference/be/objective-c#xcode_config) -target whose `versions` attribute is a list of remotely available -[`xcode_version`](/reference/be/objective-c#xcode_version) -targets, and whose `default` attribute is one of these -[`xcode_versions`](/reference/be/objective-c#xcode_version). - -If you are using dynamic execution, you should set `--xcode_config` to an -[`xcode_config`](/reference/be/objective-c#xcode_config) -target whose `remote_versions` attribute is an -[`available_xcodes`](/reference/be/workspace#available_xcodes) -target containing the remotely available Xcode versions, and whose -`local_versions` attribute is an -[`available_xcodes`](/reference/be/workspace#available_xcodes) -target containing the locally available Xcode versions. 
For `local_versions`, -you probably want to use the autogenerated -`@local_config_xcode//:host_available_xcodes`. The default Xcode version is the -newest mutually available version, if there is one, otherwise the default of the -`local_versions` target. If you prefer to use the `local_versions` default -as the default, you can pass `--experimental_prefer_mutual_default=false`. diff --git a/8.1.1/docs/bazel-and-cpp.mdx b/8.1.1/docs/bazel-and-cpp.mdx deleted file mode 100644 index 9ade384..0000000 --- a/8.1.1/docs/bazel-and-cpp.mdx +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: 'C++ and Bazel' ---- - - - -This page contains resources that help you use Bazel with C++ projects. It links -to a tutorial, build rules, and other information specific to building C++ -projects with Bazel. - -## Working with Bazel - -The following resources will help you work with Bazel on C++ projects: - -* [Tutorial: Building a C++ project](/start/cpp) -* [C++ common use cases](/tutorials/cpp-use-cases) -* [C/C++ rules](/reference/be/c-cpp) -* Essential Libraries - - [Abseil](https://abseil.io/docs/cpp/quickstart) - - [Boost](https://github.com/nelhage/rules_boost) - - [HTTPS Requests: CPR and libcurl](https://github.com/hedronvision/bazel-make-cc-https-easy) -* [C++ toolchain configuration](/docs/cc-toolchain-config-reference) -* [Tutorial: Configuring C++ toolchains](/tutorials/ccp-toolchain-config) -* [Integrating with C++ rules](/configure/integrate-cpp) - -## Best practices - -In addition to [general Bazel best practices](/configure/best-practices), below are -best practices specific to C++ projects. - -### BUILD files - -Follow the guidelines below when creating your BUILD files: - -* Each `BUILD` file should contain one [`cc_library`](/reference/be/c-cpp#cc_library) - rule target per compilation unit in the directory. - -* You should granularize your C++ libraries as much as - possible to maximize incrementality and parallelize the build. - -* If there is a single source file in `srcs`, name the library the same as - that C++ file's name. This library should contain C++ file(s), any matching - header file(s), and the library's direct dependencies. For example: - - ```python - cc_library( - name = "mylib", - srcs = ["mylib.cc"], - hdrs = ["mylib.h"], - deps = [":lower-level-lib"] - ) - ``` - -* Use one `cc_test` rule target per `cc_library` target in the file. Name the - target `[library-name]_test` and the source file `[library-name]_test.cc`. - For example, a test target for the `mylib` library target shown above would - look like this: - - ```python - cc_test( - name = "mylib_test", - srcs = ["mylib_test.cc"], - deps = [":mylib"] - ) - ``` - -### Include paths - -Follow these guidelines for include paths: - -* Make all include paths relative to the workspace directory. - -* Use quoted includes (`#include "foo/bar/baz.h"`) for non-system headers, not - angle-brackets (`#include `). - -* Avoid using UNIX directory shortcuts, such as `.` (current directory) or `..` - (parent directory). - -* For legacy or `third_party` code that requires includes pointing outside the - project repository, such as external repository includes requiring a prefix, - use the [`include_prefix`](/reference/be/c-cpp#cc_library.include_prefix) and - [`strip_include_prefix`](/reference/be/c-cpp#cc_library.strip_include_prefix) - arguments on the `cc_library` rule target. - -### Toolchain features - -The following optional [features](/docs/cc-toolchain-config-reference#features) -can improve the hygiene of a C++ project. 
They can be enabled using the -`--features` command-line flag or the `features` attribute of -[`repo`](/external/overview#repo.bazel), -[`package`](/reference/be/functions#package) or `cc_*` rules: - -* The `parse_headers` feature makes it so that the C++ compiler is used to parse - (but not compile) all header files in the built targets and their dependencies - when using the - [`--process_headers_in_dependencies`](/reference/command-line-reference#flag--process_headers_in_dependencies) - flag. This can help catch issues in header-only libraries and ensure that - headers are self-contained and independent of the order in which they are - included. -* The `layering_check` feature enforces that targets only include headers - provided by their direct dependencies. The default toolchain supports this - feature on Linux with `clang` as the compiler. diff --git a/8.1.1/docs/bazel-and-java.mdx b/8.1.1/docs/bazel-and-java.mdx deleted file mode 100644 index e9476aa..0000000 --- a/8.1.1/docs/bazel-and-java.mdx +++ /dev/null @@ -1,343 +0,0 @@ ---- -title: 'Java and Bazel' ---- - - - -This page contains resources that help you use Bazel with Java projects. It -links to a tutorial, build rules, and other information specific to building -Java projects with Bazel. - -## Working with Bazel - -The following resources will help you work with Bazel on Java projects: - -* [Tutorial: Building a Java Project](/start/java) -* [Java rules](/reference/be/java) - -## Migrating to Bazel - -If you currently build your Java projects with Maven, follow the steps in the -migration guide to start building your Maven projects with Bazel: - -* [Migrating from Maven to Bazel](/migrate/maven) - -## Java versions - -There are two relevant versions of Java that are set with configuration flags: - -* the version of the source files in the repository -* the version of the Java runtime that is used to execute the code and to test - it - -### Configuring the version of the source code in your repository - -Without an additional configuration, Bazel assumes all Java source files in the -repository are written in a single Java version. To specify the version of the -sources in the repository add `build --java_language_version={ver}` to -`.bazelrc` file, where `{ver}` is for example `11`. Bazel repository owners -should set this flag so that Bazel and its users can reference the source code's -Java version number. For more details, see -[Java language version flag](/docs/user-manual#java-language-version). - -### Configuring the JVM used to execute and test the code - -Bazel uses one JDK for compilation and another JVM to execute and test the code. - -By default Bazel compiles the code using a JDK it downloads and it executes and -tests the code with the JVM installed on the local machine. Bazel searches for -the JVM using `JAVA_HOME` or path. - -The resulting binaries are compatible with locally installed JVM in system -libraries, which means the resulting binaries depend on what is installed on the -machine. - -To configure the JVM used for execution and testing use `--java_runtime_version` -flag. The default value is `local_jdk`. - -### Hermetic testing and compilation - -To create a hermetic compile, you can use command line flag -`--java_runtime_version=remotejdk_11`. The code is compiled for, executed, and -tested on the JVM downloaded from a remote repository. For more details, see -[Java runtime version flag](/docs/user-manual#java_runtime_version). 
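-
-For example, the following invocation both compiles against and runs the test
-on the downloaded JDK 11 (the target label `//my:test` is a placeholder):
-
-```
-bazel test --java_runtime_version=remotejdk_11 //my:test
-```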
-
-### Configuring compilation and execution of build tools in Java
-
-There is a second pair of JDK and JVM used to build and execute tools, which
-are used in the build process but are not part of the build results. That JDK
-and JVM are controlled using `--tool_java_language_version` and
-`--tool_java_runtime_version`. Default values are `11` and `remotejdk_11`,
-respectively.
-
-#### Compiling using locally installed JDK
-
-Bazel by default compiles using the remote JDK, because it overrides the JDK's
-internals. The compilation toolchains using the locally installed JDK are
-configured, but not used.
-
-To compile using the locally installed JDK, that is, to use the compilation
-toolchains for the local JDK, pass the additional flag
-`--extra_toolchains=@local_jdk//:all`; note, however, that this may not work
-with JDKs from arbitrary vendors.
-
-For more details, see
-[configuring Java toolchains](#config-java-toolchains).
-
-## Best practices
-
-In addition to [general Bazel best practices](/configure/best-practices), below are
-best practices specific to Java projects.
-
-### Directory structure
-
-Prefer Maven's standard directory layout (sources under `src/main/java`, tests
-under `src/test/java`).
-
-### BUILD files
-
-Follow these guidelines when creating your `BUILD` files:
-
-* Use one `BUILD` file per directory containing Java sources, because this
-  improves build performance.
-
-* Every `BUILD` file should contain one `java_library` rule that looks like
-  this:
-
-    ```python
-    java_library(
-        name = "directory-name",
-        srcs = glob(["*.java"]),
-        deps = [...],
-    )
-    ```
-
-* The name of the library should be the name of the directory containing the
-  `BUILD` file. This makes the label of the library shorter: use
-  `"//package"` instead of `"//package:package"`.
-
-* The sources should be a non-recursive [`glob`](/reference/be/functions#glob) of
-  all Java files in the directory.
-
-* Tests should be in a matching directory under `src/test` and depend on this
-  library.
-
-## Creating new rules for advanced Java builds
-
-**Note**: Creating new rules is for advanced build and test scenarios. You do
-not need it when getting started with Bazel.
-
-The following modules, configuration fragments, and providers will help you
-[extend Bazel's capabilities](/extending/concepts) when building your Java
-projects:
-
-* Main Java module: [`java_common`](/rules/lib/toplevel/java_common)
-* Main Java provider: [`JavaInfo`](/rules/lib/providers/JavaInfo)
-* Configuration fragment: [`java`](/rules/lib/fragments/java)
-* Other modules:
-
-    * [`java_annotation_processing`](/rules/lib/builtins/java_annotation_processing)
-    * [`java_compilation_info`](/rules/lib/providers/java_compilation_info)
-    * [`java_output_jars`](/rules/lib/providers/java_output_jars)
-    * [`JavaRuntimeInfo`](/rules/lib/providers/JavaRuntimeInfo)
-    * [`JavaToolchainInfo`](/rules/lib/providers/JavaToolchainInfo)
-
-## Configuring the Java toolchains
-
-Bazel uses two types of Java toolchains:
-- execution, used to execute and test Java binaries, controlled with the
-  `--java_runtime_version` flag
-- compilation, used to compile Java sources, controlled with the
-  `--java_language_version` flag
-
-### Configuring additional execution toolchains
-
-The execution toolchain is the JVM, either local or from a repository, with
-some additional information about its version, operating system, and CPU
-architecture.
-
-Java execution toolchains may be added using the `local_java_repository` or
-`remote_java_repository` repo rules in a module extension.
Adding the rule makes -the JVM available using a flag. When multiple definitions for the same operating -system and CPU architecture are given, the first one is used. - -Example configuration of local JVM: - -```python -load("@rules_java//toolchains:local_java_repository.bzl", "local_java_repository") - -local_java_repository( - name = "additionaljdk", # Can be used with --java_runtime_version=additionaljdk, --java_runtime_version=11 or --java_runtime_version=additionaljdk_11 - version = 11, # Optional, if not set it is autodetected - java_home = "/usr/lib/jdk-15/", # Path to directory containing bin/java -) -``` - -Example configuration of remote JVM: - -```python -load("@rules_java//toolchains:remote_java_repository.bzl", "remote_java_repository") - -remote_java_repository( - name = "openjdk_canary_linux_arm", - prefix = "openjdk_canary", # Can be used with --java_runtime_version=openjdk_canary_11 - version = "11", # or --java_runtime_version=11 - target_compatible_with = [ # Specifies constraints this JVM is compatible with - "@platforms//cpu:arm", - "@platforms//os:linux", - ], - urls = ..., # Other parameters are from http_repository rule. - sha256 = ..., - strip_prefix = ... -) -``` - -### Configuring additional compilation toolchains - -Compilation toolchain is composed of JDK and multiple tools that Bazel uses -during the compilation and that provides additional features, such as: Error -Prone, strict Java dependencies, header compilation, Android desugaring, -coverage instrumentation, and genclass handling for IDEs. - -JavaBuilder is a Bazel-bundled tool that executes compilation, and provides the -aforementioned features. Actual compilation is executed using the internal -compiler by the JDK. The JDK used for compilation is specified by `java_runtime` -attribute of the toolchain. - -Bazel overrides some JDK internals. In case of JDK version > 9, -`java.compiler` and `jdk.compiler` modules are patched using JDK's flag -`--patch_module`. In case of JDK version 8, the Java compiler is patched using -`-Xbootclasspath` flag. - -VanillaJavaBuilder is a second implementation of JavaBuilder, -which does not modify JDK's internal compiler and does not have any of the -additional features. VanillaJavaBuilder is not used by any of the built-in -toolchains. - -In addition to JavaBuilder, Bazel uses several other tools during compilation. - -The `ijar` tool processes `jar` files to remove everything except call -signatures. Resulting jars are called header jars. They are used to improve the -compilation incrementality by only recompiling downstream dependents when the -body of a function changes. - -The `singlejar` tool packs together multiple `jar` files into a single one. - -The `genclass` tool post-processes the output of a Java compilation, and produces -a `jar` containing only the class files for sources that were generated by -annotation processors. - -The `JacocoRunner` tool runs Jacoco over instrumented files and outputs results in -LCOV format. - -The `TestRunner` tool executes JUnit 4 tests in a controlled environment. - -You can reconfigure the compilation by adding `default_java_toolchain` macro to -a `BUILD` file and registering it either by adding `register_toolchains` rule to -the `MODULE.bazel` file or by using -[`--extra_toolchains`](/docs/user-manual#extra-toolchains) flag. - -The toolchain is only used when the `source_version` attribute matches the -value specified by `--java_language_version` flag. 
-
-Example toolchain configuration:
-
-```python
-load(
-    "@rules_java//toolchains:default_java_toolchain.bzl",
-    "default_java_toolchain", "DEFAULT_TOOLCHAIN_CONFIGURATION", "BASE_JDK9_JVM_OPTS", "DEFAULT_JAVACOPTS"
-)
-
-default_java_toolchain(
-    name = "repository_default_toolchain",
-    configuration = DEFAULT_TOOLCHAIN_CONFIGURATION, # One of predefined configurations
-    # Other parameters are from java_toolchain rule:
-    java_runtime = "@rules_java//toolchains:remote_jdk11", # JDK to use for compilation and toolchain's tools execution
-    jvm_opts = BASE_JDK9_JVM_OPTS + ["--enable_preview"], # Additional JDK options
-    javacopts = DEFAULT_JAVACOPTS + ["--enable_preview"], # Additional javac options
-    source_version = "9",
-)
-```
-
-which can be used with `--extra_toolchains=//:repository_default_toolchain_definition`
-or by adding `register_toolchains("//:repository_default_toolchain_definition")`
-to the workspace.
-
-Predefined configurations:
-
-- `DEFAULT_TOOLCHAIN_CONFIGURATION`: all features, supports JDK versions >= 9
-- `VANILLA_TOOLCHAIN_CONFIGURATION`: no additional features, supports JDKs of
-  arbitrary vendors.
-- `PREBUILT_TOOLCHAIN_CONFIGURATION`: same as default, but only use prebuilt
-  tools (`ijar`, `singlejar`)
-- `NONPREBUILT_TOOLCHAIN_CONFIGURATION`: same as default, but all tools are
-  built from sources (this may be useful on an operating system with a
-  different libc)
-
-#### Configuring JVM and Java compiler flags
-
-You may configure JVM and javac flags either with command-line flags or with
-`default_java_toolchain` attributes.
-
-The relevant flags are `--jvmopt`, `--host_jvmopt`, `--javacopt`, and
-`--host_javacopt`.
-
-The relevant `default_java_toolchain` attributes are `javacopts`, `jvm_opts`,
-`javabuilder_jvm_opts`, and `turbine_jvm_opts`.
-
-#### Package specific Java compiler flags configuration
-
-You can configure different Java compiler flags for specific source
-files using the `package_configuration` attribute of `default_java_toolchain`.
-Please refer to the example below.
-
-```python
-load("@rules_java//toolchains:default_java_toolchain.bzl", "default_java_toolchain")
-
-# This is a convenience macro that inherits values from Bazel's default java_toolchain
-default_java_toolchain(
-    name = "toolchain",
-    package_configuration = [
-        ":error_prone",
-    ],
-    visibility = ["//visibility:public"],
-)
-
-# This associates a set of javac flags with a set of packages
-java_package_configuration(
-    name = "error_prone",
-    javacopts = [
-        "-Xep:MissingOverride:ERROR",
-    ],
-    packages = ["error_prone_packages"],
-)
-
-# This is a regular package_group, which is used to specify a set of packages to apply flags to
-package_group(
-    name = "error_prone_packages",
-    packages = [
-        "//foo/...",
-        "-//foo/bar/...", # this is an exclusion
-    ],
-)
-```
-
-#### Multiple versions of Java source code in a single repository
-
-Bazel only supports compiling a single version of Java sources in a build.
-This means that when building a Java test or an application, all
-dependencies are built against the same Java version.
-
-However, separate builds may be executed using different flags.
-
-To make the task of using different flags easier, sets of flags for a specific
-version may be grouped with `.bazelrc` configs:
-
-```python
-build:java8 --java_language_version=8
-build:java8 --java_runtime_version=local_jdk_8
-build:java11 --java_language_version=11
-build:java11 --java_runtime_version=remotejdk_11
-```
-
-These configs can be used with the `--config` flag, for example
-`bazel test --config=java11 //:java11_test`.
diff --git a/8.1.1/docs/bazel-and-javascript.mdx b/8.1.1/docs/bazel-and-javascript.mdx
deleted file mode 100644
index 63d8018..0000000
--- a/8.1.1/docs/bazel-and-javascript.mdx
+++ /dev/null
@@ -1,24 +0,0 @@
----
-title: 'JavaScript and Bazel'
----
-
-
-
-This page contains resources that help you use Bazel with JavaScript projects.
-It links to build rules and other information specific to building JavaScript
-with Bazel.
-
-The following resources will help you work with Bazel on JavaScript projects:
-
-* [NodeJS toolchain](https://github.com/bazelbuild/rules_nodejs)
-* [rules_js](https://github.com/aspect-build/rules_js) - Bazel rules for building JavaScript programs
-* [rules_esbuild](https://github.com/aspect-build/rules_esbuild) - Bazel rules for [esbuild](https://esbuild.github.io) JS bundler
-* [rules_terser](https://github.com/aspect-build/rules_terser) - Bazel rules for [Terser](https://terser.org) - a JavaScript minifier
-* [rules_swc](https://github.com/aspect-build/rules_swc) - Bazel rules for [swc](https://swc.rs)
-* [rules_ts](https://github.com/aspect-build/rules_ts) - Bazel rules for [TypeScript](http://typescriptlang.org)
-* [rules_webpack](https://github.com/aspect-build/rules_webpack) - Bazel rules for [Webpack](https://webpack.js.org)
-* [rules_rollup](https://github.com/aspect-build/rules_rollup) - Bazel rules for [Rollup](https://rollupjs.org) - a JavaScript bundler
-* [rules_jest](https://github.com/aspect-build/rules_jest) - Bazel rules to run tests using [Jest](https://jestjs.io)
-* [rules_jasmine](https://github.com/aspect-build/rules_jasmine) - Bazel rules to run tests using [Jasmine](https://jasmine.github.io/)
-* [rules_cypress](https://github.com/aspect-build/rules_cypress) - Bazel rules to run tests using [Cypress](https://cypress.io)
-* [rules_deno](https://github.com/aspect-build/rules_deno) - Bazel rules for [Deno](http://deno.land)
diff --git a/8.1.1/docs/configurable-attributes.mdx b/8.1.1/docs/configurable-attributes.mdx
deleted file mode 100644
index 3515852..0000000
--- a/8.1.1/docs/configurable-attributes.mdx
+++ /dev/null
@@ -1,1099 +0,0 @@
----
-title: 'Configurable Build Attributes'
----
-
-
-
-**_Configurable attributes_**, commonly known as [`select()`](
-/reference/be/functions#select), is a Bazel feature that lets users toggle the values
-of build rule attributes at the command line.
-
-This can be used, for example, for a multiplatform library that automatically
-chooses the appropriate implementation for the architecture, or for a
-feature-configurable binary that can be customized at build time.
-
-## Example
-
-```python
-# myapp/BUILD
-
-cc_binary(
-    name = "mybinary",
-    srcs = ["main.cc"],
-    deps = select({
-        ":arm_build": [":arm_lib"],
-        ":x86_debug_build": [":x86_dev_lib"],
-        "//conditions:default": [":generic_lib"],
-    }),
-)
-
-config_setting(
-    name = "arm_build",
-    values = {"cpu": "arm"},
-)
-
-config_setting(
-    name = "x86_debug_build",
-    values = {
-        "cpu": "x86",
-        "compilation_mode": "dbg",
-    },
-)
-```
-
-This declares a `cc_binary` that "chooses" its deps based on the flags at the
-command line. Specifically, `deps` becomes:
-
-Command | deps =
-------- | ------
-`bazel build //myapp:mybinary --cpu=arm` | `[":arm_lib"]`
-`bazel build //myapp:mybinary -c dbg --cpu=x86` | `[":x86_dev_lib"]`
-`bazel build //myapp:mybinary --cpu=ppc` | `[":generic_lib"]`
-`bazel build //myapp:mybinary -c dbg --cpu=ppc` | `[":generic_lib"]`
-
-`select()` serves as a placeholder for a value that will be chosen based on
-*configuration conditions*, which are labels referencing [`config_setting`](/reference/be/general#config_setting)
-targets. By using `select()` in a configurable attribute, the attribute
-effectively adopts different values when different conditions hold.
-
-Matches must be unambiguous: if multiple conditions match, then either:
-
-* They all resolve to the same value. For example, when targeting Linux x86,
-  `{"@platforms//os:linux": "Hello", "@platforms//cpu:x86_64": "Hello"}` is
-  unambiguous because both branches resolve to "Hello".
-* One's `values` is a strict superset of all others'. For example,
-  `values = {"cpu": "x86", "compilation_mode": "dbg"}` is an unambiguous
-  specialization of `values = {"cpu": "x86"}`.
-
-The built-in condition [`//conditions:default`](#default-condition) automatically matches when
-nothing else does.
-
-While this example uses `deps`, `select()` works just as well on `srcs`,
-`resources`, `cmd`, and most other attributes. Only a small number of attributes
-are *non-configurable*, and these are clearly annotated. For example,
-`config_setting`'s own
-[`values`](/reference/be/general#config_setting.values) attribute is non-configurable.
-
-## `select()` and dependencies
-
-Certain attributes change the build parameters for all transitive dependencies
-under a target. For example, `genrule`'s `tools` changes `--cpu` to the CPU of
-the machine running Bazel (which, thanks to cross-compilation, may be different
-from the CPU the target is built for). This is known as a
-[configuration transition](/reference/glossary#transition).
-
-Given
-
-```python
-#myapp/BUILD
-
-config_setting(
-    name = "arm_cpu",
-    values = {"cpu": "arm"},
-)
-
-config_setting(
-    name = "x86_cpu",
-    values = {"cpu": "x86"},
-)
-
-genrule(
-    name = "my_genrule",
-    srcs = select({
-        ":arm_cpu": ["g_arm.src"],
-        ":x86_cpu": ["g_x86.src"],
-    }),
-    tools = select({
-        ":arm_cpu": [":tool1"],
-        ":x86_cpu": [":tool2"],
-    }),
-)
-
-cc_binary(
-    name = "tool1",
-    srcs = select({
-        ":arm_cpu": ["armtool.cc"],
-        ":x86_cpu": ["x86tool.cc"],
-    }),
-)
-```
-
-running
-
-```sh
-$ bazel build //myapp:my_genrule --cpu=arm
-```
-
-on an `x86` developer machine binds the build to `g_arm.src`, `tool1`, and
-`x86tool.cc`. Both of the `select`s attached to `my_genrule` use `my_genrule`'s
-build parameters, which include `--cpu=arm`. The `tools` attribute changes
-`--cpu` to `x86` for `tool1` and its transitive dependencies. The `select` on
-`tool1` uses `tool1`'s build parameters, which include `--cpu=x86`.
-
-## Configuration conditions
-
-Each key in a configurable attribute is a label reference to a
-[`config_setting`](/reference/be/general#config_setting) or
-[`constraint_value`](/reference/be/platforms-and-toolchains#constraint_value).
-
-`config_setting` is just a collection of
-expected command line flag settings. By encapsulating these in a target, it's
-easy to maintain "standard" conditions users can reference from multiple places.
-
-`constraint_value` provides support for [multi-platform behavior](#platforms).
-
-### Built-in flags
-
-Flags like `--cpu` are built into Bazel: the build tool natively understands
-them for all builds in all projects.
These are specified with -[`config_setting`](/reference/be/general#config_setting)'s -[`values`](/reference/be/general#config_setting.values) attribute: - -```python -config_setting( - name = "meaningful_condition_name", - values = { - "flag1": "value1", - "flag2": "value2", - ... - }, -) -``` - -`flagN` is a flag name (without `--`, so `"cpu"` instead of `"--cpu"`). `valueN` -is the expected value for that flag. `:meaningful_condition_name` matches if -*every* entry in `values` matches. Order is irrelevant. - -`valueN` is parsed as if it was set on the command line. This means: - -* `values = { "compilation_mode": "opt" }` matches `bazel build -c opt` -* `values = { "force_pic": "true" }` matches `bazel build --force_pic=1` -* `values = { "force_pic": "0" }` matches `bazel build --noforce_pic` - -`config_setting` only supports flags that affect target behavior. For example, -[`--show_progress`](/docs/user-manual#show-progress) isn't allowed because -it only affects how Bazel reports progress to the user. Targets can't use that -flag to construct their results. The exact set of supported flags isn't -documented. In practice, most flags that "make sense" work. - -### Custom flags - -You can model your own project-specific flags with -[Starlark build settings][BuildSettings]. Unlike built-in flags, these are -defined as build targets, so Bazel references them with target labels. - -These are triggered with [`config_setting`](/reference/be/general#config_setting)'s -[`flag_values`](/reference/be/general#config_setting.flag_values) -attribute: - -```python -config_setting( - name = "meaningful_condition_name", - flag_values = { - "//myflags:flag1": "value1", - "//myflags:flag2": "value2", - ... - }, -) -``` - -Behavior is the same as for [built-in flags](#built-in-flags). See [here](https://github.com/bazelbuild/examples/tree/HEAD/configurations/select_on_build_setting) -for a working example. - -[`--define`](/reference/command-line-reference#flag--define) -is an alternative legacy syntax for custom flags (for example -`--define foo=bar`). This can be expressed either in the -[values](/reference/be/general#config_setting.values) attribute -(`values = {"define": "foo=bar"}`) or the -[define_values](/reference/be/general#config_setting.define_values) attribute -(`define_values = {"foo": "bar"}`). `--define` is only supported for backwards -compatibility. Prefer Starlark build settings whenever possible. - -`values`, `flag_values`, and `define_values` evaluate independently. The -`config_setting` matches if all values across all of them match. - -## The default condition - -The built-in condition `//conditions:default` matches when no other condition -matches. - -Because of the "exactly one match" rule, a configurable attribute with no match -and no default condition emits a `"no matching conditions"` error. This can -protect against silent failures from unexpected settings: - -```python -# myapp/BUILD - -config_setting( - name = "x86_cpu", - values = {"cpu": "x86"}, -) - -cc_library( - name = "x86_only_lib", - srcs = select({ - ":x86_cpu": ["lib.cc"], - }), -) -``` - -```sh -$ bazel build //myapp:x86_only_lib --cpu=arm -ERROR: Configurable attribute "srcs" doesn't match this configuration (would -a default condition help?). -Conditions checked: - //myapp:x86_cpu -``` - -For even clearer errors, you can set custom messages with `select()`'s -[`no_match_error`](#custom-error-messages) attribute. 
-
-## Platforms
-
-While the ability to specify multiple flags on the command line provides
-flexibility, it can also be burdensome to individually set each one every time
-you want to build a target.
-[Platforms](/extending/platforms) let you consolidate these into simple bundles.
-
-```python
-# myapp/BUILD
-
-sh_binary(
-    name = "my_rocks",
-    srcs = select({
-        ":basalt": ["pyroxene.sh"],
-        ":marble": ["calcite.sh"],
-        "//conditions:default": ["feldspar.sh"],
-    }),
-)
-
-config_setting(
-    name = "basalt",
-    constraint_values = [
-        ":black",
-        ":igneous",
-    ],
-)
-
-config_setting(
-    name = "marble",
-    constraint_values = [
-        ":white",
-        ":metamorphic",
-    ],
-)
-
-# constraint_setting acts as an enum type, and constraint_value as an enum value.
-constraint_setting(name = "color")
-constraint_value(name = "black", constraint_setting = "color")
-constraint_value(name = "white", constraint_setting = "color")
-constraint_setting(name = "texture")
-constraint_value(name = "smooth", constraint_setting = "texture")
-constraint_setting(name = "type")
-constraint_value(name = "igneous", constraint_setting = "type")
-constraint_value(name = "metamorphic", constraint_setting = "type")
-
-platform(
-    name = "basalt_platform",
-    constraint_values = [
-        ":black",
-        ":igneous",
-    ],
-)
-
-platform(
-    name = "marble_platform",
-    constraint_values = [
-        ":white",
-        ":smooth",
-        ":metamorphic",
-    ],
-)
-```
-
-The platform can be specified on the command line. It activates the
-`config_setting`s that contain a subset of the platform's `constraint_values`,
-allowing those `config_setting`s to match in `select()` expressions.
-
-For example, in order to set the `srcs` attribute of `my_rocks` to `calcite.sh`,
-you can simply run
-
-```sh
-bazel build //myapp:my_rocks --platforms=//myapp:marble_platform
-```
-
-Without platforms, this might look something like
-
-```sh
-bazel build //myapp:my_rocks --define color=white --define texture=smooth --define type=metamorphic
-```
-
-`select()` can also directly read `constraint_value`s:
-
-```python
-constraint_setting(name = "type")
-constraint_value(name = "igneous", constraint_setting = "type")
-constraint_value(name = "metamorphic", constraint_setting = "type")
-sh_binary(
-    name = "my_rocks",
-    srcs = select({
-        ":igneous": ["igneous.sh"],
-        ":metamorphic": ["metamorphic.sh"],
-    }),
-)
-```
-
-This saves the need for boilerplate `config_setting`s when you only need to
-check against single values.
-
-Platforms are still under development. See the
-[documentation](/concepts/platforms) for details.
-
-## Combining `select()`s
-
-`select` can appear multiple times in the same attribute:
-
-```python
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"] +
-           select({
-               ":armeabi_mode": ["armeabi_src.sh"],
-               ":x86_mode": ["x86_src.sh"],
-           }) +
-           select({
-               ":opt_mode": ["opt_extras.sh"],
-               ":dbg_mode": ["dbg_extras.sh"],
-           }),
-)
-```
-
-Note: Some restrictions apply on what can be combined in the `select`s values:
-
- - Duplicate labels can appear in different paths of the same `select`.
- - Duplicate labels can *not* appear within the same path of a `select`.
- - Duplicate labels can *not* appear across multiple combined `select`s
-   (regardless of path).
-
-`select` cannot appear inside another `select`. If you need to nest `select`s
-and your attribute takes other targets as values, use an intermediate target:
-
-```python
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"],
-    deps = select({
-        ":armeabi_mode": [":armeabi_lib"],
-        ...
- }), -) - -sh_library( - name = "armeabi_lib", - srcs = select({ - ":opt_mode": ["armeabi_with_opt.sh"], - ... - }), -) -``` - -If you need a `select` to match when multiple conditions match, consider [AND -chaining](#and-chaining). - -## OR chaining - -Consider the following: - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1": [":standard_lib"], - ":config2": [":standard_lib"], - ":config3": [":standard_lib"], - ":config4": [":special_lib"], - }), -) -``` - -Most conditions evaluate to the same dep. But this syntax is hard to read and -maintain. It would be nice to not have to repeat `[":standard_lib"]` multiple -times. - -One option is to predefine the value as a BUILD variable: - -```python -STANDARD_DEP = [":standard_lib"] - -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1": STANDARD_DEP, - ":config2": STANDARD_DEP, - ":config3": STANDARD_DEP, - ":config4": [":special_lib"], - }), -) -``` - -This makes it easier to manage the dependency. But it still causes unnecessary -duplication. - -For more direct support, use one of the following: - -### `selects.with_or` - -The -[with_or](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectswith_or) -macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md) -module supports `OR`ing conditions directly inside a `select`: - -```python -load("@bazel_skylib//lib:selects.bzl", "selects") -``` - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = selects.with_or({ - (":config1", ":config2", ":config3"): [":standard_lib"], - ":config4": [":special_lib"], - }), -) -``` - -### `selects.config_setting_group` - - -The -[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group) -macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md) -module supports `OR`ing multiple `config_setting`s: - -```python -load("@bazel_skylib//lib:selects.bzl", "selects") -``` - - -```python -config_setting( - name = "config1", - values = {"cpu": "arm"}, -) -config_setting( - name = "config2", - values = {"compilation_mode": "dbg"}, -) -selects.config_setting_group( - name = "config1_or_2", - match_any = [":config1", ":config2"], -) -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1_or_2": [":standard_lib"], - "//conditions:default": [":other_lib"], - }), -) -``` - -Unlike `selects.with_or`, different targets can share `:config1_or_2` across -different attributes. - -It's an error for multiple conditions to match unless one is an unambiguous -"specialization" of the others or they all resolve to the same value. See [here](#configurable-build-example) for details. 
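-
-As a minimal sketch of that specialization rule (the condition names here are
-illustrative, not from Skylib):
-
-```python
-config_setting(
-    name = "x86_any",
-    values = {"cpu": "x86"},
-)
-
-config_setting(
-    name = "x86_dbg",
-    values = {
-        "cpu": "x86",
-        "compilation_mode": "dbg",
-    },
-)
-
-# With --cpu=x86 -c dbg, both conditions match, but :x86_dbg's values are a
-# strict superset of :x86_any's, so a select() over both unambiguously
-# chooses the :x86_dbg branch.
-```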
-
-## AND chaining
-
-If you need a `select` branch to match when multiple conditions match, use the
-[Skylib](https://github.com/bazelbuild/bazel-skylib) macro
-[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group):
-
-```python
-load("@bazel_skylib//lib:selects.bzl", "selects")
-
-config_setting(
-    name = "config1",
-    values = {"cpu": "arm"},
-)
-config_setting(
-    name = "config2",
-    values = {"compilation_mode": "dbg"},
-)
-selects.config_setting_group(
-    name = "config1_and_2",
-    match_all = [":config1", ":config2"],
-)
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"],
-    deps = select({
-        ":config1_and_2": [":standard_lib"],
-        "//conditions:default": [":other_lib"],
-    }),
-)
-```
-
-Unlike OR chaining, existing `config_setting`s can't be directly `AND`ed
-inside a `select`. You have to explicitly wrap them in a `config_setting_group`.
-
-## Custom error messages
-
-By default, when no condition matches, the target the `select()` is attached to
-fails with the error:
-
-```sh
-ERROR: Configurable attribute "deps" doesn't match this configuration (would
-a default condition help?).
-Conditions checked:
-  //tools/cc_target_os:darwin
-  //tools/cc_target_os:android
-```
-
-This can be customized with the [`no_match_error`](/reference/be/functions#select)
-attribute:
-
-```python
-cc_library(
-    name = "my_lib",
-    deps = select(
-        {
-            "//tools/cc_target_os:android": [":android_deps"],
-            "//tools/cc_target_os:windows": [":windows_deps"],
-        },
-        no_match_error = "Please build with an Android or Windows toolchain",
-    ),
-)
-```
-
-```sh
-$ bazel build //myapp:my_lib
-ERROR: Configurable attribute "deps" doesn't match this configuration: Please
-build with an Android or Windows toolchain
-```
-
-## Rules compatibility
-
-Rule implementations receive the *resolved values* of configurable
-attributes. For example, given:
-
-```python
-# myapp/BUILD
-
-some_rule(
-    name = "my_target",
-    some_attr = select({
-        ":foo_mode": [":foo"],
-        ":bar_mode": [":bar"],
-    }),
-)
-```
-
-```sh
-$ bazel build //myapp:my_target --define mode=foo
-```
-
-Rule implementation code sees `ctx.attr.some_attr` as `[":foo"]`.
-
-Macros can accept `select()` clauses and pass them through to native
-rules. But *they cannot directly manipulate them*. For example, there's no way
-for a macro to convert
-
-```python
-select({"foo": "val"}, ...)
-```
-
-to
-
-```python
-select({"foo": "val_with_suffix"}, ...)
-```
-
-This is for two reasons.
-
-First, macros that need to know which path a `select` will choose *cannot work*
-because macros are evaluated in Bazel's [loading phase](/run/build#loading),
-which occurs before flag values are known.
-This is a core Bazel design restriction that's unlikely to change any time soon.
-
-Second, macros that just need to iterate over *all* `select` paths, while
-technically feasible, lack a coherent UI. Further design is necessary to change
-this.
-
-## Bazel query and cquery
-
-Bazel [`query`](/query/guide) operates over Bazel's
-[loading phase](/reference/glossary#loading-phase).
-This means it doesn't know what command line flags a target uses since those
-flags aren't evaluated until later in the build (in the
-[analysis phase](/reference/glossary#analysis-phase)).
-So it can't determine which `select()` branches are chosen.
-
-Bazel [`cquery`](/query/cquery) operates after Bazel's analysis phase, so it has
-all this information and can accurately resolve `select()`s.
- -Consider: - -```python -load("@bazel_skylib//rules:common_settings.bzl", "string_flag") -``` -```python -# myapp/BUILD - -string_flag( - name = "dog_type", - build_setting_default = "cat" -) - -cc_library( - name = "my_lib", - deps = select({ - ":long": [":foo_dep"], - ":short": [":bar_dep"], - }), -) - -config_setting( - name = "long", - flag_values = {":dog_type": "dachshund"}, -) - -config_setting( - name = "short", - flag_values = {":dog_type": "pug"}, -) -``` - -`query` overapproximates `:my_lib`'s dependencies: - -```sh -$ bazel query 'deps(//myapp:my_lib)' -//myapp:my_lib -//myapp:foo_dep -//myapp:bar_dep -``` - -while `cquery` shows its exact dependencies: - -```sh -$ bazel cquery 'deps(//myapp:my_lib)' --//myapp:dog_type=pug -//myapp:my_lib -//myapp:bar_dep -``` - -## FAQ - -### Why doesn't select() work in macros? - -select() *does* work in rules! See [Rules compatibility](#rules-compatibility) for -details. - -The key issue this question usually means is that select() doesn't work in -*macros*. These are different than *rules*. See the -documentation on [rules](/extending/rules) and [macros](/extending/macros) -to understand the difference. -Here's an end-to-end example: - -Define a rule and macro: - -```python -# myapp/defs.bzl - -# Rule implementation: when an attribute is read, all select()s have already -# been resolved. So it looks like a plain old attribute just like any other. -def _impl(ctx): - name = ctx.attr.name - allcaps = ctx.attr.my_config_string.upper() # This works fine on all values. - print("My name is " + name + " with custom message: " + allcaps) - -# Rule declaration: -my_custom_bazel_rule = rule( - implementation = _impl, - attrs = {"my_config_string": attr.string()}, -) - -# Macro declaration: -def my_custom_bazel_macro(name, my_config_string): - allcaps = my_config_string.upper() # This line won't work with select(s). - print("My name is " + name + " with custom message: " + allcaps) -``` - -Instantiate the rule and macro: - -```python -# myapp/BUILD - -load("//myapp:defs.bzl", "my_custom_bazel_rule") -load("//myapp:defs.bzl", "my_custom_bazel_macro") - -my_custom_bazel_rule( - name = "happy_rule", - my_config_string = select({ - "//third_party/bazel_platforms/cpu:x86_32": "first string", - "//third_party/bazel_platforms/cpu:ppc": "second string", - }), -) - -my_custom_bazel_macro( - name = "happy_macro", - my_config_string = "fixed string", -) - -my_custom_bazel_macro( - name = "sad_macro", - my_config_string = select({ - "//third_party/bazel_platforms/cpu:x86_32": "first string", - "//third_party/bazel_platforms/cpu:ppc": "other string", - }), -) -``` - -Building fails because `sad_macro` can't process the `select()`: - -```sh -$ bazel build //myapp:all -ERROR: /myworkspace/myapp/BUILD:17:1: Traceback - (most recent call last): -File "/myworkspace/myapp/BUILD", line 17 -my_custom_bazel_macro(name = "sad_macro", my_config_stri..."})) -File "/myworkspace/myapp/defs.bzl", line 4, in - my_custom_bazel_macro -my_config_string.upper() -type 'select' has no method upper(). -ERROR: error loading package 'myapp': Package 'myapp' contains errors. -``` - -Building succeeds when you comment out `sad_macro`: - -```sh -# Comment out sad_macro so it doesn't mess up the build. -$ bazel build //myapp:all -DEBUG: /myworkspace/myapp/defs.bzl:5:3: My name is happy_macro with custom message: FIXED STRING. -DEBUG: /myworkspace/myapp/hi.bzl:15:3: My name is happy_rule with custom message: FIRST STRING. 
-```
-
-This is impossible to change because *by definition* macros are evaluated before
-Bazel reads the build's command line flags. That means there isn't enough
-information to evaluate select()s.
-
-Macros can, however, pass `select()`s as opaque blobs to rules:
-
-```python
-# myapp/defs.bzl
-
-def my_custom_bazel_macro(name, my_config_string):
-    print("Invoking macro " + name)
-    my_custom_bazel_rule(
-        name = name + "_as_target",
-        my_config_string = my_config_string,
-    )
-```
-
-```sh
-$ bazel build //myapp:sad_macro_less_sad
-DEBUG: /myworkspace/myapp/defs.bzl:23:3: Invoking macro sad_macro_less_sad.
-DEBUG: /myworkspace/myapp/defs.bzl:15:3: My name is sad_macro_less_sad with custom message: FIRST STRING.
-```
-
-### Why does select() always return true?
-
-Because *macros* (but not rules) by definition
-[can't evaluate `select()`s](#faq-select-macro), any attempt to do so
-usually produces an error:
-
-```sh
-ERROR: /myworkspace/myapp/BUILD:17:1: Traceback
-  (most recent call last):
-File "/myworkspace/myapp/BUILD", line 17
-my_custom_bazel_macro(name = "sad_macro", my_config_stri..."}))
-File "/myworkspace/myapp/defs.bzl", line 4, in
-  my_custom_bazel_macro
-my_config_string.upper()
-type 'select' has no method upper().
-```
-
-Booleans are a special case that fail silently, so you should be particularly
-vigilant with them:
-
-```sh
-$ cat myapp/defs.bzl
-def my_boolean_macro(boolval):
-  print("TRUE" if boolval else "FALSE")
-
-$ cat myapp/BUILD
-load("//myapp:defs.bzl", "my_boolean_macro")
-my_boolean_macro(
-    boolval = select({
-        "//third_party/bazel_platforms/cpu:x86_32": True,
-        "//third_party/bazel_platforms/cpu:ppc": False,
-    }),
-)
-
-$ bazel build //myapp:all --cpu=x86
-DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE.
-$ bazel build //myapp:all --cpu=ppc
-DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE.
-```
-
-This happens because macros don't understand the contents of `select()`.
-So what they're really evaluating is the `select()` object itself. According to
-[Pythonic](https://docs.python.org/release/2.5.2/lib/truth.html) design
-standards, all objects aside from a very small number of exceptions
-automatically evaluate to true.
-
-### Can I read select() like a dict?
-
-Macros [can't](#faq-select-macro) evaluate `select()`s because macros evaluate
-before Bazel knows what the build's command line parameters are. Can they at
-least read the `select()`'s dictionary to, for example, add a suffix to each
-value?
-
-Conceptually this is possible, but [it isn't yet a Bazel feature](https://github.com/bazelbuild/bazel/issues/8419).
-What you *can* do today is prepare a straight dictionary, then feed it into a
-`select()`:
-
-```sh
-$ cat myapp/defs.bzl
-def selecty_genrule(name, select_cmd):
-    for key in select_cmd.keys():
-        select_cmd[key] += " WITH SUFFIX"
-    native.genrule(
-        name = name,
-        outs = [name + ".out"],
-        srcs = [],
-        # "|" computes the union of the two dicts.
-        cmd = "echo " + select(select_cmd | {"//conditions:default": "default"})
-            + " > $@"
-    )
-
-$ cat myapp/BUILD
-selecty_genrule(
-    name = "selecty",
-    select_cmd = {
-        "//third_party/bazel_platforms/cpu:x86_32": "x86 mode",
-    },
-)
-
-$ bazel build //myapp:selecty --cpu=x86 && cat bazel-genfiles/myapp/selecty.out
-x86 mode WITH SUFFIX
-```
-
-If you'd like to support both `select()` and native types, you can do this:
-
-```sh
-$ cat myapp/defs.bzl
-def selecty_genrule(name, select_cmd):
-    cmd_suffix = ""
-    if type(select_cmd) == "string":
-        cmd_suffix = select_cmd + " WITH SUFFIX"
-    elif type(select_cmd) == "dict":
-        for key in select_cmd.keys():
-            select_cmd[key] += " WITH SUFFIX"
-        cmd_suffix = select(select_cmd | {"//conditions:default": "default"})
-
-    native.genrule(
-        name = name,
-        outs = [name + ".out"],
-        srcs = [],
-        cmd = "echo " + cmd_suffix + "> $@",
-    )
-```
-
-### Why doesn't select() work with bind()?
-
-First of all, do not use `bind()`. It is deprecated in favor of `alias()`.
-
-The technical answer is that [`bind()`](/reference/be/workspace#bind) is a repo
-rule, not a BUILD rule.
-
-Repo rules do not have a specific configuration, and aren't evaluated in
-the same way as BUILD rules. Therefore, a `select()` in a `bind()` can't
-actually evaluate to any specific branch.
-
-Instead, you should use [`alias()`](/reference/be/general#alias), with a `select()` in
-the `actual` attribute, to perform this type of run-time determination. This
-works correctly, since `alias()` is a BUILD rule, and is evaluated with a
-specific configuration.
-
-You can even have a `bind()` target point to an `alias()`, if needed.
-
-```sh
-$ cat WORKSPACE
-workspace(name = "myapp")
-bind(name = "openssl", actual = "//:ssl")
-http_archive(name = "alternative", ...)
-http_archive(name = "boringssl", ...)
-
-$ cat BUILD
-config_setting(
-    name = "alt_ssl",
-    define_values = {
-        "ssl_library": "alternative",
-    },
-)
-
-alias(
-    name = "ssl",
-    actual = select({
-        "//:alt_ssl": "@alternative//:ssl",
-        "//conditions:default": "@boringssl//:ssl",
-    }),
-)
-```
-
-With this setup, you can pass `--define ssl_library=alternative`, and any target
-that depends on either `//:ssl` or `//external:ssl` will see the alternative
-located at `@alternative//:ssl`.
-
-But really, stop using `bind()`.
-
-### Why doesn't my select() choose what I expect?
-
-If `//myapp:foo` has a `select()` that doesn't choose the condition you expect,
-use [cquery](/query/cquery) and `bazel config` to debug:
-
-If `//myapp:foo` is the top-level target you're building, run:
-
-```sh
-$ bazel cquery //myapp:foo
-//myapp:foo (12e23b9a2b534a)
-```
-
-If you're building some other target `//bar` that depends on
-`//myapp:foo` somewhere in its subgraph, run:
-
-```sh
-$ bazel cquery 'somepath(//bar, //myapp:foo)'
-//bar:bar (3ag3193fee94a2)
-//bar:intermediate_dep (12e23b9a2b534a)
-//myapp:foo (12e23b9a2b534a)
-```
-
-The `(12e23b9a2b534a)` next to `//myapp:foo` is a *hash* of the
-configuration that resolves `//myapp:foo`'s `select()`.
You can inspect its
-values with `bazel config`:
-
-```sh
-$ bazel config 12e23b9a2b534a
-BuildConfigurationValue 12e23b9a2b534a
-Fragment com.google.devtools.build.lib.analysis.config.CoreOptions {
-  cpu: darwin
-  compilation_mode: fastbuild
-  ...
-}
-Fragment com.google.devtools.build.lib.rules.cpp.CppOptions {
-  linkopt: [-Dfoo=bar]
-  ...
-}
-...
-```
-
-Then compare this output against the settings expected by each `config_setting`.
-
-`//myapp:foo` may exist in different configurations in the same build. See the
-[cquery docs](/query/cquery) for guidance on using `somepath` to get the right
-one.
-
-Caution: To prevent restarting the Bazel server, invoke `bazel config` with the
-same command line flags as the `bazel cquery`. The `config` command relies on
-the configuration nodes from the still-running server of the previous command.
-
-### Why doesn't `select()` work with platforms?
-
-Bazel doesn't support configurable attributes checking whether a given platform
-is the target platform because the semantics are unclear.
-
-For example:
-
-```py
-platform(
-    name = "x86_linux_platform",
-    constraint_values = [
-        "@platforms//cpu:x86",
-        "@platforms//os:linux",
-    ],
-)
-
-cc_library(
-    name = "lib",
-    srcs = [...],
-    linkopts = select({
-        ":x86_linux_platform": ["--enable_x86_optimizations"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-In this `BUILD` file, which `select()` should be used if the target platform has both the
-`@platforms//cpu:x86` and `@platforms//os:linux` constraints, but is **not** the
-`:x86_linux_platform` defined here? The author of the `BUILD` file and the user
-who defined the separate platform may have different ideas.
-
-#### What should I do instead?
-
-Instead, define a `config_setting` that matches **any** platform with
-these constraints:
-
-```py
-config_setting(
-    name = "is_x86_linux",
-    constraint_values = [
-        "@platforms//cpu:x86",
-        "@platforms//os:linux",
-    ],
-)
-
-cc_library(
-    name = "lib",
-    srcs = [...],
-    linkopts = select({
-        ":is_x86_linux": ["--enable_x86_optimizations"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-This process defines specific semantics, making it clearer to users what
-platforms meet the desired conditions.
-
-#### What if I really, really want to `select` on the platform?
-
-If your build requirements specifically require checking the platform, you
-can flip the value of the `--platforms` flag in a `config_setting`:
-
-```py
-config_setting(
-    name = "is_specific_x86_linux_platform",
-    values = {
-        "platforms": "//package:x86_linux_platform",
-    },
-)
-
-cc_library(
-    name = "lib",
-    srcs = [...],
-    linkopts = select({
-        ":is_specific_x86_linux_platform": ["--enable_x86_optimizations"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-The Bazel team doesn't endorse doing this; it overly constrains your build and
-confuses users when the expected condition does not match.
-
-[BuildSettings]: /extending/config#user-defined-build-settings
diff --git a/8.1.1/docs/sandboxing.mdx b/8.1.1/docs/sandboxing.mdx
deleted file mode 100644
index 6869795..0000000
--- a/8.1.1/docs/sandboxing.mdx
+++ /dev/null
@@ -1,174 +0,0 @@
----
-title: 'Sandboxing'
----
-
-
-
-This article covers sandboxing in Bazel and debugging your sandboxing
-environment.
-
-*Sandboxing* is a permission-restricting strategy that isolates processes from
-each other or from resources in a system. For Bazel, this means restricting file
-system access.
- -Bazel's file system sandbox runs processes in a working directory that only -contains known inputs, such that compilers and other tools don't see source -files they should not access, unless they know the absolute paths to them. - -Sandboxing doesn't hide the host environment in any way. Processes can freely -access all files on the file system. However, on platforms that support user -namespaces, processes can't modify any files outside their working directory. -This ensures that the build graph doesn't have hidden dependencies that could -affect the reproducibility of the build. - -More specifically, Bazel constructs an `execroot/` directory for each action, -which acts as the action's work directory at execution time. `execroot/` -contains all input files to the action and serves as the container for any -generated outputs. Bazel then uses an operating-system-provided technique, -containers on Linux and `sandbox-exec` on macOS, to constrain the action within -`execroot/`. - -## Reasons for sandboxing - -- Without action sandboxing, Bazel doesn't know if a tool uses undeclared - input files (files that are not explicitly listed in the dependencies of an - action). When one of the undeclared input files changes, Bazel still - believes that the build is up-to-date and won’t rebuild the action. This can - result in an incorrect incremental build. - -- Incorrect reuse of cache entries creates problems during remote caching. A - bad cache entry in a shared cache affects every developer on the project, - and wiping the entire remote cache is not a feasible solution. - -- Sandboxing mimics the behavior of remote execution — if a build works well - with sandboxing, it will likely also work with remote execution. By making - remote execution upload all necessary files (including local tools), you can - significantly reduce maintenance costs for compile clusters compared to - having to install the tools on every machine in the cluster every time you - want to try out a new compiler or make a change to an existing tool. - -## What sandbox strategy to use - -You can choose which kind of sandboxing to use, if any, with the -[strategy flags](user-manual.html#strategy-options). Using the `sandboxed` -strategy makes Bazel pick one of the sandbox implementations listed below, -preferring an OS-specific sandbox to the less hermetic generic one. -[Persistent workers](/remote/persistent) run in a generic sandbox if you pass -the `--worker_sandboxing` flag. - -The `local` (a.k.a. `standalone`) strategy does not do any kind of sandboxing. -It simply executes the action's command line with the working directory set to -the execroot of your workspace. - -`processwrapper-sandbox` is a sandboxing strategy that does not require any -"advanced" features - it should work on any POSIX system out of the box. It -builds a sandbox directory consisting of symlinks that point to the original -source files, executes the action's command line with the working directory set -to this directory instead of the execroot, then moves the known output artifacts -out of the sandbox into the execroot and deletes the sandbox. This prevents the -action from accidentally using any input files that are not declared and from -littering the execroot with unknown output files. - -`linux-sandbox` goes one step further and builds on top of the -`processwrapper-sandbox`. Similar to what Docker does under the hood, it uses -Linux Namespaces (User, Mount, PID, Network and IPC namespaces) to isolate the -action from the host. 
That is, it makes the entire filesystem read-only except -for the sandbox directory, so the action cannot accidentally modify anything on -the host filesystem. This prevents situations like a buggy test accidentally rm --rf'ing your $HOME directory. Optionally, you can also prevent the action from -accessing the network. `linux-sandbox` uses PID namespaces to prevent the action -from seeing any other processes and to reliably kill all processes (even daemons -spawned by the action) at the end. - -`darwin-sandbox` is similar, but for macOS. It uses Apple's `sandbox-exec` tool -to achieve roughly the same as the Linux sandbox. - -Both the `linux-sandbox` and the `darwin-sandbox` do not work in a "nested" -scenario due to restrictions in the mechanisms provided by the operating -systems. Because Docker also uses Linux namespaces for its container magic, you -cannot easily run `linux-sandbox` inside a Docker container, unless you use -`docker run --privileged`. On macOS, you cannot run `sandbox-exec` inside a -process that's already being sandboxed. Thus, in these cases, Bazel -automatically falls back to using `processwrapper-sandbox`. - -If you would rather get a build error — such as to not accidentally build with a -less strict execution strategy — explicitly modify the list of execution -strategies that Bazel tries to use (for example, `bazel build ---spawn_strategy=worker,linux-sandbox`). - -Dynamic execution usually requires sandboxing for local execution. To opt out, -pass the `--experimental_local_lockfree_output` flag. Dynamic execution silently -sandboxes [persistent workers](/remote/persistent). - -## Downsides to sandboxing - -- Sandboxing incurs extra setup and teardown cost. How big this cost is - depends on many factors, including the shape of the build and the - performance of the host OS. For Linux, sandboxed builds are rarely more than - a few percent slower. Setting `--reuse_sandbox_directories` can - mitigate the setup and teardown cost. - -- Sandboxing effectively disables any cache the tool may have. You can - mitigate this by using [persistent workers](/remote/persistent), at - the cost of weaker sandbox guarantees. - -- [Multiplex workers](/remote/multiplex) require explicit worker support - to be sandboxed. Workers that do not support multiplex sandboxing run as - singleplex workers under dynamic execution, which can cost extra memory. - -## Debugging - -Follow the strategies below to debug issues with sandboxing. - -### Deactivated namespaces - -On some platforms, such as -[Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/) -cluster nodes or Debian, user namespaces are deactivated by default due to -security concerns. If the `/proc/sys/kernel/unprivileged_userns_clone` file -exists and contains a 0, you can activate user namespaces by running: - -```posix-terminal - sudo sysctl kernel.unprivileged_userns_clone=1 -``` - -### Rule execution failures - -The sandbox may fail to execute rules because of the system setup. If you see a -message like `namespace-sandbox.c:633: execvp(argv[0], argv): No such file or -directory`, try to deactivate the sandbox with `--strategy=Genrule=local` for -genrules, and `--spawn_strategy=local` for other rules. - -### Detailed debugging for build failures - -If your build failed, use `--verbose_failures` and `--sandbox_debug` to make -Bazel show the exact command it ran when your build failed, including the part -that sets up the sandbox. 
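-
-A minimal sketch of such an invocation (the target label is illustrative):
-
-```posix-terminal
-bazel build //path/to/your:target --verbose_failures --sandbox_debug
-```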
- -Example error message: - -``` -ERROR: path/to/your/project/BUILD:1:1: compilation of rule -'//path/to/your/project:all' failed: - -Sandboxed execution failed, which may be legitimate (such as a compiler error), -or due to missing dependencies. To enter the sandbox environment for easier -debugging, run the following command in parentheses. On command failure, a bash -shell running inside the sandbox will then automatically be spawned - -namespace-sandbox failed: error executing command - (cd /some/path && \ - exec env - \ - LANG=en_US \ - PATH=/some/path/bin:/bin:/usr/bin \ - PYTHONPATH=/usr/local/some/path \ - /some/path/namespace-sandbox @/sandbox/root/path/this-sandbox-name.params -- - /some/path/to/your/some-compiler --some-params some-target) -``` - -You can now inspect the generated sandbox directory and see which files Bazel -created and run the command again to see how it behaves. - -Note that Bazel does not delete the sandbox directory when you use -`--sandbox_debug`. Unless you are actively debugging, you should disable -`--sandbox_debug` because it fills up your disk over time. diff --git a/8.1.1/extending/aspects.mdx b/8.1.1/extending/aspects.mdx deleted file mode 100644 index 4e25125..0000000 --- a/8.1.1/extending/aspects.mdx +++ /dev/null @@ -1,412 +0,0 @@ ---- -title: 'Aspects' ---- - - - -This page explains the basics and benefits of using -[aspects](/rules/lib/globals/bzl#aspect) and provides simple and advanced -examples. - -Aspects allow augmenting build dependency graphs with additional information -and actions. Some typical scenarios when aspects can be useful: - -* IDEs that integrate Bazel can use aspects to collect information about the - project. -* Code generation tools can leverage aspects to execute on their inputs in - *target-agnostic* manner. As an example, `BUILD` files can specify a hierarchy - of [protobuf](https://developers.google.com/protocol-buffers/) library - definitions, and language-specific rules can use aspects to attach - actions generating protobuf support code for a particular language. - -## Aspect basics - -`BUILD` files provide a description of a project’s source code: what source -files are part of the project, what artifacts (_targets_) should be built from -those files, what the dependencies between those files are, etc. Bazel uses -this information to perform a build, that is, it figures out the set of actions -needed to produce the artifacts (such as running compiler or linker) and -executes those actions. Bazel accomplishes this by constructing a _dependency -graph_ between targets and visiting this graph to collect those actions. - -Consider the following `BUILD` file: - -```python -java_library(name = 'W', ...) -java_library(name = 'Y', deps = [':W'], ...) -java_library(name = 'Z', deps = [':W'], ...) -java_library(name = 'Q', ...) -java_library(name = 'T', deps = [':Q'], ...) -java_library(name = 'X', deps = [':Y',':Z'], runtime_deps = [':T'], ...) -``` - -This `BUILD` file defines a dependency graph shown in the following figure: - -![Build graph](/rules/build-graph.png "Build graph") - -**Figure 1.** `BUILD` file dependency graph. - -Bazel analyzes this dependency graph by calling an implementation function of -the corresponding [rule](/extending/rules) (in this case "java_library") for every -target in the above example. 
Rule implementation functions generate actions that -build artifacts, such as `.jar` files, and pass information, such as locations -and names of those artifacts, to the reverse dependencies of those targets in -[providers](/extending/rules#providers). - -Aspects are similar to rules in that they have an implementation function that -generates actions and returns providers. However, their power comes from -the way the dependency graph is built for them. An aspect has an implementation -and a list of all attributes it propagates along. Consider an aspect A that -propagates along attributes named "deps". This aspect can be applied to -a target X, yielding an aspect application node A(X). During its application, -aspect A is applied recursively to all targets that X refers to in its "deps" -attribute (all attributes in A's propagation list). - -Thus a single act of applying aspect A to a target X yields a "shadow graph" of -the original dependency graph of targets shown in the following figure: - -![Build Graph with Aspect](/rules/build-graph-aspects.png "Build graph with aspects") - -**Figure 2.** Build graph with aspects. - -The only edges that are shadowed are the edges along the attributes in -the propagation set, thus the `runtime_deps` edge is not shadowed in this -example. An aspect implementation function is then invoked on all nodes in -the shadow graph similar to how rule implementations are invoked on the nodes -of the original graph. - -## Simple example - -This example demonstrates how to recursively print the source files for a -rule and all of its dependencies that have a `deps` attribute. It shows -an aspect implementation, an aspect definition, and how to invoke the aspect -from the Bazel command line. - -```python -def _print_aspect_impl(target, ctx): - # Make sure the rule has a srcs attribute. - if hasattr(ctx.rule.attr, 'srcs'): - # Iterate through the files that make up the sources and - # print their paths. - for src in ctx.rule.attr.srcs: - for f in src.files.to_list(): - print(f.path) - return [] - -print_aspect = aspect( - implementation = _print_aspect_impl, - attr_aspects = ['deps'], -) -``` - -Let's break the example up into its parts and examine each one individually. - -### Aspect definition - -```python -print_aspect = aspect( - implementation = _print_aspect_impl, - attr_aspects = ['deps'], -) -``` -Aspect definitions are similar to rule definitions, and defined using -the [`aspect`](/rules/lib/globals/bzl#aspect) function. - -Just like a rule, an aspect has an implementation function which in this case is -``_print_aspect_impl``. - -``attr_aspects`` is a list of rule attributes along which the aspect propagates. -In this case, the aspect will propagate along the ``deps`` attribute of the -rules that it is applied to. - -Another common argument for `attr_aspects` is `['*']` which would propagate the -aspect to all attributes of a rule. - -### Aspect implementation - -```python -def _print_aspect_impl(target, ctx): - # Make sure the rule has a srcs attribute. - if hasattr(ctx.rule.attr, 'srcs'): - # Iterate through the files that make up the sources and - # print their paths. - for src in ctx.rule.attr.srcs: - for f in src.files.to_list(): - print(f.path) - return [] -``` - -Aspect implementation functions are similar to the rule implementation -functions. 
They return [providers](/extending/rules#providers), can generate
-[actions](/extending/rules#actions), and take two arguments:
-
-* `target`: the [target](/rules/lib/builtins/Target) the aspect is being applied to.
-* `ctx`: the [`ctx`](/rules/lib/builtins/ctx) object that can be used to access attributes
-  and generate outputs and actions.
-
-The implementation function can access the attributes of the target rule via
-[`ctx.rule.attr`](/rules/lib/builtins/ctx#rule). It can examine providers that are
-provided by the target to which it is applied (via the `target` argument).
-
-Aspects are required to return a list of providers. In this example, the aspect
-does not provide anything, so it returns an empty list.
-
-### Invoking the aspect using the command line
-
-The simplest way to apply an aspect is from the command line using the
-[`--aspects`](/reference/command-line-reference#flag--aspects)
-argument. Assuming the aspect above were defined in a file named `print.bzl`,
-this command:
-
-```bash
-bazel build //MyExample:example --aspects print.bzl%print_aspect
-```
-
-would apply the `print_aspect` to the target `example` and all of the
-target rules that are accessible recursively via the `deps` attribute.
-
-The `--aspects` flag takes one argument, which is a specification of the aspect
-in the format `<extension file label>%<aspect top-level name>`.
-
-## Advanced example
-
-The following example demonstrates using an aspect from a target rule
-that counts files in targets, potentially filtering them by extension.
-It shows how to use a provider to return values, how to use parameters to pass
-an argument into an aspect implementation, and how to invoke an aspect from a rule.
-
-Note: Aspects added in rules' attributes are called *rule-propagated aspects*, as
-opposed to *command-line aspects*, which are specified using the ``--aspects``
-flag.
-
-`file_count.bzl` file:
-
-```python
-FileCountInfo = provider(
-    fields = {
-        'count' : 'number of files'
-    }
-)
-
-def _file_count_aspect_impl(target, ctx):
-    count = 0
-    # Make sure the rule has a srcs attribute.
-    if hasattr(ctx.rule.attr, 'srcs'):
-        # Iterate through the sources counting files
-        for src in ctx.rule.attr.srcs:
-            for f in src.files.to_list():
-                if ctx.attr.extension == '*' or ctx.attr.extension == f.extension:
-                    count = count + 1
-    # Get the counts from our dependencies.
-    for dep in ctx.rule.attr.deps:
-        count = count + dep[FileCountInfo].count
-    return [FileCountInfo(count = count)]
-
-file_count_aspect = aspect(
-    implementation = _file_count_aspect_impl,
-    attr_aspects = ['deps'],
-    attrs = {
-        'extension' : attr.string(values = ['*', 'h', 'cc']),
-    }
-)
-
-def _file_count_rule_impl(ctx):
-    for dep in ctx.attr.deps:
-        print(dep[FileCountInfo].count)
-
-file_count_rule = rule(
-    implementation = _file_count_rule_impl,
-    attrs = {
-        'deps' : attr.label_list(aspects = [file_count_aspect]),
-        'extension' : attr.string(default = '*'),
-    },
-)
-```
-
-`BUILD.bazel` file:
-
-```python
-load('//:file_count.bzl', 'file_count_rule')
-
-cc_library(
-    name = 'lib',
-    srcs = [
-        'lib.h',
-        'lib.cc',
-    ],
-)
-
-cc_binary(
-    name = 'app',
-    srcs = [
-        'app.h',
-        'app.cc',
-        'main.cc',
-    ],
-    deps = ['lib'],
-)
-
-file_count_rule(
-    name = 'file_count',
-    deps = ['app'],
-    extension = 'h',
-)
-```
-
-### Aspect definition
-
-```python
-file_count_aspect = aspect(
-    implementation = _file_count_aspect_impl,
-    attr_aspects = ['deps'],
-    attrs = {
-        'extension' : attr.string(values = ['*', 'h', 'cc']),
-    }
-)
-```
-
-This example shows how the aspect propagates through the ``deps`` attribute.
-
-``attrs`` defines a set of attributes for an aspect. Public aspect attributes
-define parameters and can only be of types ``bool``, ``int`` or ``string``.
-For rule-propagated aspects, ``int`` and ``string`` parameters must have
-``values`` specified on them. This example has a parameter called ``extension``
-that is allowed to have '``*``', '``h``', or '``cc``' as a value.
-
-For rule-propagated aspects, parameter values are taken from the rule requesting
-the aspect, using the attribute of the rule that has the same name and type
-(see the definition of ``file_count_rule``).
-
-For command-line aspects, parameter values can be passed using the
-[``--aspects_parameters``](/reference/command-line-reference#flag--aspects_parameters)
-flag. The ``values`` restriction on ``int`` and ``string`` parameters may be
-omitted.
-
-Aspects are also allowed to have private attributes of types ``label`` or
-``label_list``. Private label attributes can be used to specify dependencies on
-tools or libraries that are needed for actions generated by aspects. There is
-no private attribute defined in this example, but the following code snippet
-demonstrates how you could pass in a tool to an aspect:
-
-```python
-...
-    attrs = {
-        '_protoc' : attr.label(
-            default = Label('//tools:protoc'),
-            executable = True,
-            cfg = "exec"
-        )
-    }
-...
-```
-
-### Aspect implementation
-
-```python
-FileCountInfo = provider(
-    fields = {
-        'count' : 'number of files'
-    }
-)
-
-def _file_count_aspect_impl(target, ctx):
-    count = 0
-    # Make sure the rule has a srcs attribute.
-    if hasattr(ctx.rule.attr, 'srcs'):
-        # Iterate through the sources counting files
-        for src in ctx.rule.attr.srcs:
-            for f in src.files.to_list():
-                if ctx.attr.extension == '*' or ctx.attr.extension == f.extension:
-                    count = count + 1
-    # Get the counts from our dependencies.
-    for dep in ctx.rule.attr.deps:
-        count = count + dep[FileCountInfo].count
-    return [FileCountInfo(count = count)]
-```
-
-Just like a rule implementation function, an aspect implementation function
-returns a list of providers that are accessible to its dependencies.
-
-In this example, the ``FileCountInfo`` is defined as a provider that has one
-field ``count``. It is best practice to explicitly define the fields of a
-provider using the ``fields`` attribute.
-
-The set of providers for an aspect application A(X) is the union of providers
-that come from the implementation of a rule for target X and from the
-implementation of aspect A. The providers that a rule implementation propagates
-are created and frozen before aspects are applied and cannot be modified from an
-aspect. It is an error if a target and an aspect that is applied to it each
-provide a provider with the same type, with the exceptions of
-[`OutputGroupInfo`](/rules/lib/providers/OutputGroupInfo)
-(which is merged, so long as the
-rule and aspect specify different output groups) and
-[`InstrumentedFilesInfo`](/rules/lib/providers/InstrumentedFilesInfo)
-(which is taken from the aspect). This means that aspect implementations may
-never return [`DefaultInfo`](/rules/lib/providers/DefaultInfo).
-
-The parameters and private attributes are passed in the attributes of the
-``ctx``. This example references the ``extension`` parameter and determines
-what files to count.
-
-For returning providers, the values of attributes along which
-the aspect is propagated (from the `attr_aspects` list) are replaced with
-the results of an application of the aspect to them.
For example, if target -X has Y and Z in its deps, `ctx.rule.attr.deps` for A(X) will be [A(Y), A(Z)]. -In this example, ``ctx.rule.attr.deps`` are Target objects that are the -results of applying the aspect to the 'deps' of the original target to which -the aspect has been applied. - -In the example, the aspect accesses the ``FileCountInfo`` provider from the -target's dependencies to accumulate the total transitive number of files. - -### Invoking the aspect from a rule - -```python -def _file_count_rule_impl(ctx): - for dep in ctx.attr.deps: - print(dep[FileCountInfo].count) - -file_count_rule = rule( - implementation = _file_count_rule_impl, - attrs = { - 'deps' : attr.label_list(aspects = [file_count_aspect]), - 'extension' : attr.string(default = '*'), - }, -) -``` - -The rule implementation demonstrates how to access the ``FileCountInfo`` -via the ``ctx.attr.deps``. - -The rule definition demonstrates how to define a parameter (``extension``) -and give it a default value (``*``). Note that having a default value that -was not one of '``cc``', '``h``', or '``*``' would be an error due to the -restrictions placed on the parameter in the aspect definition. - -### Invoking an aspect through a target rule - -```python -load('//:file_count.bzl', 'file_count_rule') - -cc_binary( - name = 'app', -... -) - -file_count_rule( - name = 'file_count', - deps = ['app'], - extension = 'h', -) -``` - -This demonstrates how to pass the ``extension`` parameter into the aspect -via the rule. Since the ``extension`` parameter has a default value in the -rule implementation, ``extension`` would be considered an optional parameter. - -When the ``file_count`` target is built, our aspect will be evaluated for -itself, and all of the targets accessible recursively via ``deps``. - -## References - -* [`aspect` API reference](/rules/lib/globals/bzl#aspect) diff --git a/8.1.1/extending/auto-exec-groups.mdx b/8.1.1/extending/auto-exec-groups.mdx deleted file mode 100644 index abba3d5..0000000 --- a/8.1.1/extending/auto-exec-groups.mdx +++ /dev/null @@ -1,181 +0,0 @@ ---- -title: 'Automatic Execution Groups (AEGs)' ---- - - -Automatic execution groups select an [execution platform][exec_platform] -for each toolchain type. In other words, one target can have multiple -execution platforms without defining execution groups. - -## Quick summary - -Automatic execution groups are closely connected to toolchains. If you are using -toolchains, you need to set them on the affected actions (actions which use an -executable or a tool from a toolchain) by adding `toolchain` parameter. For -example: - -```python -ctx.actions.run( - ..., - executable = ctx.toolchain['@bazel_tools//tools/jdk:toolchain_type'].tool, - ..., - toolchain = '@bazel_tools//tools/jdk:toolchain_type', -) -``` -If the action does not use a tool or executable from a toolchain, and Blaze -doesn't detect that ([the error](#first-error-message) is raised), you can set -`toolchain = None`. - -If you need to use multiple toolchains on a single execution platform (an action -uses executable or tools from two or more toolchains), you need to manually -define [exec_groups][exec_groups] (check -[When should I use a custom exec_group?][multiple_toolchains_exec_groups] -section). - -## History - -Before AEGs, the execution platform was selected on a rule level. For example: - -```python -my_rule = rule( - _impl, - toolchains = ['//tools:toolchain_type_1', '//tools:toolchain_type_2'], -) -``` - -Rule `my_rule` registers two toolchain types. 
-This means that [Toolchain
-Resolution](https://bazel.build/extending/toolchains#toolchain-resolution) was
-used to find an execution platform which supports both toolchain types. The
-selected execution platform was used for each registered action inside the rule,
-unless specified differently with [exec_groups][exec_groups].
-In other words, all actions inside the rule used to have a single execution
-platform even if they used tools from different toolchains (the execution
-platform is selected once per target). This resulted in failures when there was
-no execution platform supporting all toolchains.
-
-## Current state
-
-With AEGs, the execution platform is selected for each toolchain type. The
-implementation function of the earlier example, `my_rule`, would look like:
-
-```python
-def _impl(ctx):
-    ctx.actions.run(
-        mnemonic = "First action",
-        executable = ctx.toolchains['//tools:toolchain_type_1'].tool,
-        toolchain = '//tools:toolchain_type_1',
-    )
-
-    ctx.actions.run(
-        mnemonic = "Second action",
-        executable = ctx.toolchains['//tools:toolchain_type_2'].tool,
-        toolchain = '//tools:toolchain_type_2',
-    )
-```
-
-This rule creates two actions: `First action`, which uses an executable from
-`//tools:toolchain_type_1`, and `Second action`, which uses an executable from
-`//tools:toolchain_type_2`. Before AEGs, both of these actions would be executed
-on a single execution platform which supports both toolchain types. With AEGs,
-by adding the `toolchain` parameter inside the actions, each action executes on
-the execution platform that provides the toolchain. The actions may be executed
-on different execution platforms.
-
-The same applies to [ctx.actions.run_shell][run_shell], where the `toolchain`
-parameter should be added when `tools` come from a toolchain.
-
-## Difference between custom exec groups and automatic exec groups
-
-As the name suggests, AEGs are exec groups created automatically for each
-toolchain type registered on a rule. There is no need to manually specify them,
-unlike the "classic" exec groups.
-
-### When should I use a custom exec_group?
-
-Custom exec_groups are needed only in cases where multiple toolchains need to
-execute on a single execution platform. In all other cases there's no need to
-define custom exec_groups. For example:
-
-```python
-def _impl(ctx):
-    ctx.actions.run(
-        ...,
-        executable = ctx.toolchains['//tools:toolchain_type_1'].tool,
-        tools = [ctx.toolchains['//tools:toolchain_type_2'].tool],
-        exec_group = 'two_toolchains',
-    )
-```
-
-```python
-my_rule = rule(
-    _impl,
-    exec_groups = {
-        "two_toolchains": exec_group(
-            toolchains = ['//tools:toolchain_type_1', '//tools:toolchain_type_2'],
-        ),
-    }
-)
-```
-
-## Migration of AEGs
-
-Internally in google3, Blaze is already using AEGs.
-Externally for Bazel, the migration is in progress. Some rules are already using
-this feature (e.g. Java and C++ rules).
-
-### Which Bazel versions support this migration?
-
-AEGs are fully supported from Bazel 7.
-
-### How to enable AEGs?
-
-Set `--incompatible_auto_exec_groups` to true. More information about the flag
-is available on [the GitHub issue][github_flag].
-
-### How to enable AEGs inside a particular rule?
-
-Set the `_use_auto_exec_groups` attribute on a rule.
-
-```python
-my_rule = rule(
-    _impl,
-    attrs = {
-        "_use_auto_exec_groups": attr.bool(default = True),
-    }
-)
-```
-
-This enables AEGs only in `my_rule`, and its actions start using the new logic
-when selecting the execution platform. This attribute overrides the
-incompatible flag.
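-
-As a quick end-to-end sketch (not part of the original page; the toolchain type
-label and the `.tool` field are placeholders following the examples above), a
-rule that opts in via this attribute would tag its actions with the toolchain
-they use:
-
-```python
-def _my_rule_impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name + ".out")
-    ctx.actions.run(
-        executable = ctx.toolchains['//tools:toolchain_type_1'].tool,
-        outputs = [out],
-        # With AEGs enabled, this action runs on the execution platform
-        # resolved for '//tools:toolchain_type_1', not a rule-level platform.
-        toolchain = '//tools:toolchain_type_1',
-    )
-    return [DefaultInfo(files = depset([out]))]
-
-my_rule = rule(
-    _my_rule_impl,
-    attrs = {
-        "_use_auto_exec_groups": attr.bool(default = True),
-    },
-    toolchains = ['//tools:toolchain_type_1'],
-)
-```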
- -### How to disable AEGs in case of an error? - -Set `--incompatible_auto_exec_groups` to false to completely disable AEGs in -your project ([flag's GitHub issue][github_flag]), or disable a particular rule -by setting `_use_auto_exec_groups` attribute to `False` -([more details about the attribute](#how-enable-particular-rule)). - -### Error messages while migrating to AEGs - -#### Couldn't identify if tools are from implicit dependencies or a toolchain. Please set the toolchain parameter. If you're not using a toolchain, set it to 'None'. - * In this case you get a stack of calls before the error happened and you can - clearly see which exact action needs the toolchain parameter. Check which - toolchain is used for the action and set it with the toolchain param. If no - toolchain is used inside the action for tools or executable, set it to - `None`. - -#### Action declared for non-existent toolchain '[toolchain_type]'. - * This means that you've set the toolchain parameter on the action but didn't -register it on the rule. Register the toolchain or set `None` inside the action. - -## Additional material - -For more information, check design document: -[Automatic exec groups for toolchains][aegs_design_doc]. - -[exec_platform]: https://bazel.build/extending/platforms#:~:text=Execution%20%2D%20a%20platform%20on%20which%20build%20tools%20execute%20build%20actions%20to%20produce%20intermediate%20and%20final%20outputs. -[exec_groups]: https://bazel.build/extending/exec-groups -[github_flag]: https://github.com/bazelbuild/bazel/issues/17134 -[aegs_design_doc]: https://docs.google.com/document/d/1-rbP_hmKs9D639YWw5F_JyxPxL2bi6dSmmvj_WXak9M/edit#heading=h.5mcn15i0e1ch -[run_shell]: https://bazel.build/rules/lib/builtins/actions#run_shell -[multiple_toolchains_exec_groups]: /extending/auto-exec-groups#when-should-use-exec-groups diff --git a/8.1.1/extending/concepts.mdx b/8.1.1/extending/concepts.mdx deleted file mode 100644 index eb1d6b8..0000000 --- a/8.1.1/extending/concepts.mdx +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: 'Extension Overview' ---- - - - - -This page describes how to extend the BUILD language using macros -and rules. - -Bazel extensions are files ending in `.bzl`. Use a -[load statement](/concepts/build-files#load) to import a symbol from an extension. - -Before learning the more advanced concepts, first: - -* Read about the [Starlark language](/rules/language), used in both the - `BUILD` and `.bzl` files. - -* Learn how you can [share variables](/build/share-variables) - between two `BUILD` files. - -## Macros and rules - -A macro is a function that instantiates rules. Macros come in two flavors: -[symbolic macros](/extending/macros) (new in Bazel 8) and [legacy -macros](/extending/legacy-macros). The two flavors of macros are defined -differently, but behave almost the same from the point of view of a user. A -macro is useful when a `BUILD` file is getting too repetitive or too complex, as -it lets you reuse some code. The function is evaluated as soon as the `BUILD` -file is read. After the evaluation of the `BUILD` file, Bazel has little -information about macros. If your macro generates a `genrule`, Bazel will -behave *almost* as if you declared that `genrule` in the `BUILD` file. (The one -exception is that targets declared in a symbolic macro have [special visibility -semantics](/extending/macros#visibility): a symbolic macro can hide its internal -targets from the rest of the package.) - -A [rule](/extending/rules) is more powerful than a macro. 
It can access Bazel -internals and have full control over what is going on. It may for example pass -information to other rules. - -If you want to reuse simple logic, start with a macro; we recommend a symbolic -macro, unless you need to support older Bazel versions. If a macro becomes -complex, it is often a good idea to make it a rule. Support for a new language -is typically done with a rule. Rules are for advanced users, and most users will -never have to write one; they will only load and call existing rules. - -## Evaluation model - -A build consists of three phases. - -* **Loading phase**. First, load and evaluate all extensions and all `BUILD` - files that are needed for the build. The execution of the `BUILD` files simply - instantiates rules (each time a rule is called, it gets added to a graph). - This is where macros are evaluated. - -* **Analysis phase**. The code of the rules is executed (their `implementation` - function), and actions are instantiated. An action describes how to generate - a set of outputs from a set of inputs, such as "run gcc on hello.c and get - hello.o". You must list explicitly which files will be generated before - executing the actual commands. In other words, the analysis phase takes - the graph generated by the loading phase and generates an action graph. - -* **Execution phase**. Actions are executed, when at least one of their outputs is - required. If a file is missing or if a command fails to generate one output, - the build fails. Tests are also run during this phase. - -Bazel uses parallelism to read, parse and evaluate the `.bzl` files and `BUILD` -files. A file is read at most once per build and the result of the evaluation is -cached and reused. A file is evaluated only once all its dependencies (`load()` -statements) have been resolved. By design, loading a `.bzl` file has no visible -side-effect, it only defines values and functions. - -Bazel tries to be clever: it uses dependency analysis to know which files must -be loaded, which rules must be analyzed, and which actions must be executed. For -example, if a rule generates actions that you don't need for the current build, -they will not be executed. - -## Creating extensions - -* [Create your first macro](/rules/macro-tutorial) in order to reuse some code. - Then [learn more about macros](/extending/macros) and [using them to create - "custom verbs"](/rules/verbs-tutorial). - -* [Follow the rules tutorial](/rules/rules-tutorial) to get started with rules. - Next, you can read more about the [rules concepts](/extending/rules). - -The two links below will be very useful when writing your own extensions. Keep -them within reach: - -* The [API reference](/rules/lib) - -* [Examples](https://github.com/bazelbuild/examples/tree/master/rules) - -## Going further - -In addition to [macros](/extending/macros) and [rules](/extending/rules), you -may want to write [aspects](/extending/aspects) and [repository -rules](/extending/repo). - -* Use [Buildifier](https://github.com/bazelbuild/buildtools) - consistently to format and lint your code. - -* Follow the [`.bzl` style guide](/rules/bzl-style). - -* [Test](/rules/testing) your code. - -* [Generate documentation](https://skydoc.bazel.build/) to help your users. - -* [Optimize the performance](/rules/performance) of your code. - -* [Deploy](/rules/deploying) your extensions to other people. 
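-
-As a concrete illustration of the macro concept described above, here is a
-minimal sketch of a legacy macro (the file name and `genrule` arguments are
-illustrative, not from an existing codebase) that instantiates one rule each
-time it is called from a `BUILD` file:
-
-```python
-# my_macros.bzl (hypothetical file name)
-def copy_file_macro(name, src):
-    """Instantiates a genrule that copies `src` into `<name>.txt`."""
-    native.genrule(
-        name = name,
-        srcs = [src],
-        outs = [name + ".txt"],
-        cmd = "cp $< $@",
-    )
-```
-
-Loading this macro and calling `copy_file_macro(name = "docs", src = "README.md")`
-in a `BUILD` file behaves almost exactly as if the `genrule` had been declared
-there directly, as described in the macros and rules section above.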
diff --git a/8.1.1/extending/depsets.mdx b/8.1.1/extending/depsets.mdx deleted file mode 100644 index 2aa8a1f..0000000 --- a/8.1.1/extending/depsets.mdx +++ /dev/null @@ -1,346 +0,0 @@ ---- -title: 'Depsets' ---- - - - -[Depsets](/rules/lib/builtins/depset) are a specialized data structure for efficiently -collecting data across a target’s transitive dependencies. They are an essential -element of rule processing. - -The defining feature of depset is its time- and space-efficient union operation. -The depset constructor accepts a list of elements ("direct") and a list of other -depsets ("transitive"), and returns a depset representing a set containing all the -direct elements and the union of all the transitive sets. Conceptually, the -constructor creates a new graph node that has the direct and transitive nodes -as its successors. Depsets have a well-defined ordering semantics, based on -traversal of this graph. - -Example uses of depsets include: - -* Storing the paths of all object files for a program’s libraries, which can - then be passed to a linker action through a provider. - -* For an interpreted language, storing the transitive source files that are - included in an executable's runfiles. - -## Description and operations - -Conceptually, a depset is a directed acyclic graph (DAG) that typically looks -similar to the target graph. It is constructed from the leaves up to the root. -Each target in a dependency chain can add its own contents on top of the -previous without having to read or copy them. - -Each node in the DAG holds a list of direct elements and a list of child nodes. -The contents of the depset are the transitive elements, such as the direct elements -of all the nodes. A new depset can be created using the -[depset](/rules/lib/globals/bzl#depset) constructor: it accepts a list of direct -elements and another list of child nodes. - -```python -s = depset(["a", "b", "c"]) -t = depset(["d", "e"], transitive = [s]) - -print(s) # depset(["a", "b", "c"]) -print(t) # depset(["d", "e", "a", "b", "c"]) -``` - -To retrieve the contents of a depset, use the -[to_list()](/rules/lib/builtins/depset#to_list) method. It returns a list of all transitive -elements, not including duplicates. There is no way to directly inspect the -precise structure of the DAG, although this structure does affect the order in -which the elements are returned. - -```python -s = depset(["a", "b", "c"]) - -print("c" in s.to_list()) # True -print(s.to_list() == ["a", "b", "c"]) # True -``` - -The allowed items in a depset are restricted, just as the allowed keys in -dictionaries are restricted. In particular, depset contents may not be mutable. - -Depsets use reference equality: a depset is equal to itself, but unequal to any -other depset, even if they have the same contents and same internal structure. - -```python -s = depset(["a", "b", "c"]) -t = s -print(s == t) # True - -t = depset(["a", "b", "c"]) -print(s == t) # False - -d = {} -d[s] = None -d[t] = None -print(len(d)) # 2 -``` - -To compare depsets by their contents, convert them to sorted lists. - -```python -s = depset(["a", "b", "c"]) -t = depset(["c", "b", "a"]) -print(sorted(s.to_list()) == sorted(t.to_list())) # True -``` - -There is no ability to remove elements from a depset. If this is needed, you -must read out the entire contents of the depset, filter the elements you want to -remove, and reconstruct a new depset. This is not particularly efficient. 
- -```python -s = depset(["a", "b", "c"]) -t = depset(["b", "c"]) - -# Compute set difference s - t. Precompute t.to_list() so it's not done -# in a loop, and convert it to a dictionary for fast membership tests. -t_items = {e: None for e in t.to_list()} -diff_items = [x for x in s.to_list() if x not in t_items] -# Convert back to depset if it's still going to be used for union operations. -s = depset(diff_items) -print(s) # depset(["a"]) -``` - -### Order - -The `to_list` operation performs a traversal over the DAG. The kind of traversal -depends on the *order* that was specified at the time the depset was -constructed. It is useful for Bazel to support multiple orders because sometimes -tools care about the order of their inputs. For example, a linker action may -need to ensure that if `B` depends on `A`, then `A.o` comes before `B.o` on the -linker’s command line. Other tools might have the opposite requirement. - -Three traversal orders are supported: `postorder`, `preorder`, and -`topological`. The first two work exactly like [tree -traversals](https://en.wikipedia.org/wiki/Tree_traversal#Depth-first_search) -except that they operate on DAGs and skip already visited nodes. The third order -works as a topological sort from root to leaves, essentially the same as -preorder except that shared children are listed only after all of their parents. -Preorder and postorder operate as left-to-right traversals, but note that within -each node direct elements have no order relative to children. For topological -order, there is no left-to-right guarantee, and even the -all-parents-before-child guarantee does not apply in the case that there are -duplicate elements in different nodes of the DAG. - -```python -# This demonstrates different traversal orders. - -def create(order): - cd = depset(["c", "d"], order = order) - gh = depset(["g", "h"], order = order) - return depset(["a", "b", "e", "f"], transitive = [cd, gh], order = order) - -print(create("postorder").to_list()) # ["c", "d", "g", "h", "a", "b", "e", "f"] -print(create("preorder").to_list()) # ["a", "b", "e", "f", "c", "d", "g", "h"] -``` - -```python -# This demonstrates different orders on a diamond graph. - -def create(order): - a = depset(["a"], order=order) - b = depset(["b"], transitive = [a], order = order) - c = depset(["c"], transitive = [a], order = order) - d = depset(["d"], transitive = [b, c], order = order) - return d - -print(create("postorder").to_list()) # ["a", "b", "c", "d"] -print(create("preorder").to_list()) # ["d", "b", "a", "c"] -print(create("topological").to_list()) # ["d", "b", "c", "a"] -``` - -Due to how traversals are implemented, the order must be specified at the time -the depset is created with the constructor’s `order` keyword argument. If this -argument is omitted, the depset has the special `default` order, in which case -there are no guarantees about the order of any of its elements (except that it -is deterministic). - -## Full example - -This example is available at -[https://github.com/bazelbuild/examples/tree/main/rules/depsets](https://github.com/bazelbuild/examples/tree/main/rules/depsets). - -Suppose there is a hypothetical interpreted language Foo. In order to build -each `foo_binary` you need to know all the `*.foo` files that it directly or -indirectly depends on. - -```python -# //depsets:BUILD - -load(":foo.bzl", "foo_library", "foo_binary") - -# Our hypothetical Foo compiler. 
-py_binary(
-    name = "foocc",
-    srcs = ["foocc.py"],
-)
-
-foo_library(
-    name = "a",
-    srcs = ["a.foo", "a_impl.foo"],
-)
-
-foo_library(
-    name = "b",
-    srcs = ["b.foo", "b_impl.foo"],
-    deps = [":a"],
-)
-
-foo_library(
-    name = "c",
-    srcs = ["c.foo", "c_impl.foo"],
-    deps = [":a"],
-)
-
-foo_binary(
-    name = "d",
-    srcs = ["d.foo"],
-    deps = [":b", ":c"],
-)
-```
-
-```python
-# //depsets:foocc.py
-
-# "Foo compiler" that just concatenates its inputs to form its output.
-import sys
-
-if __name__ == "__main__":
-    # argv[1] is the output path; argv[2:] are the input paths.
-    assert len(sys.argv) >= 2
-    output = open(sys.argv[1], "wt")
-    for path in sys.argv[2:]:
-        src = open(path, "rt")
-        output.write(src.read())
-```
-
-Here, the transitive sources of the binary `d` are all of the `*.foo` files in
-the `srcs` fields of `a`, `b`, `c`, and `d`. In order for the `foo_binary`
-target to know about any file besides `d.foo`, the `foo_library` targets need to
-pass them along in a provider. Each library receives the providers from its own
-dependencies, adds its own immediate sources, and passes on a new provider with
-the augmented contents. The `foo_binary` rule does the same, except that instead
-of returning a provider, it uses the complete list of sources to construct a
-command line for an action.
-
-Here's a complete implementation of the `foo_library` and `foo_binary` rules.
-
-```python
-# //depsets/foo.bzl
-
-# A provider with one field, transitive_sources.
-FooFiles = provider(fields = ["transitive_sources"])
-
-def get_transitive_srcs(srcs, deps):
-    """Obtain the source files for a target and its transitive dependencies.
-
-    Args:
-      srcs: a list of source files
-      deps: a list of targets that are direct dependencies
-    Returns:
-      a collection of the transitive sources
-    """
-    return depset(
-        srcs,
-        transitive = [dep[FooFiles].transitive_sources for dep in deps])
-
-def _foo_library_impl(ctx):
-    trans_srcs = get_transitive_srcs(ctx.files.srcs, ctx.attr.deps)
-    return [FooFiles(transitive_sources = trans_srcs)]
-
-foo_library = rule(
-    implementation = _foo_library_impl,
-    attrs = {
-        "srcs": attr.label_list(allow_files = True),
-        "deps": attr.label_list(),
-    },
-)
-
-def _foo_binary_impl(ctx):
-    foocc = ctx.executable._foocc
-    out = ctx.outputs.out
-    trans_srcs = get_transitive_srcs(ctx.files.srcs, ctx.attr.deps)
-    srcs_list = trans_srcs.to_list()
-    ctx.actions.run(executable = foocc,
-                    arguments = [out.path] + [src.path for src in srcs_list],
-                    inputs = srcs_list + [foocc],
-                    outputs = [out])
-
-foo_binary = rule(
-    implementation = _foo_binary_impl,
-    attrs = {
-        "srcs": attr.label_list(allow_files = True),
-        "deps": attr.label_list(),
-        "_foocc": attr.label(default = Label("//depsets:foocc"),
-                             allow_files = True, executable = True, cfg = "exec")
-    },
-    outputs = {"out": "%{name}.out"},
-)
-```
-
-You can test this by copying these files into a fresh package, renaming the
-labels appropriately, creating the source `*.foo` files with dummy content, and
-building the `d` target.
-
-## Performance
-
-To see the motivation for using depsets, consider what would happen if
-`get_transitive_srcs()` collected its sources in a list.
-
-```python
-def get_transitive_srcs(srcs, deps):
-    trans_srcs = []
-    for dep in deps:
-        trans_srcs += dep[FooFiles].transitive_sources
-    trans_srcs += srcs
-    return trans_srcs
-```
-
-This does not take into account duplicates, so the source files for `a`
-will appear twice on the command line and twice in the contents of the output
-file.
-
-An alternative is to use a general set, which can be simulated by a
-dictionary where the keys are the elements and all the keys map to `True`.
-
-```python
-def get_transitive_srcs(srcs, deps):
-    trans_srcs = {}
-    for dep in deps:
-        for file in dep[FooFiles].transitive_sources:
-            trans_srcs[file] = True
-    for file in srcs:
-        trans_srcs[file] = True
-    return trans_srcs
-```
-
-This gets rid of the duplicates, but it makes the order of the command line
-arguments (and therefore the contents of the files) unspecified, although still
-deterministic.
-
-Moreover, both approaches are asymptotically worse than the depset-based
-approach. Consider the case where there is a long chain of dependencies on
-Foo libraries. Processing every rule requires copying all of the transitive
-sources that came before it into a new data structure. This means that the
-time and space cost for analyzing an individual library or binary target
-is proportional to its own height in the chain. For a chain of length n,
-foolib_1 ← foolib_2 ← … ← foolib_n, the overall cost is effectively O(n^2).
-
-Generally speaking, depsets should be used whenever you are accumulating
-information through your transitive dependencies. This helps ensure that
-your build scales well as your target graph grows deeper.
-
-Finally, it's important not to retrieve the contents of the depset
-unnecessarily in rule implementations. One call to `to_list()`
-at the end in a binary rule is fine, since the overall cost is just O(n). It's
-when many non-terminal targets try to call `to_list()` that quadratic behavior
-occurs.
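-
-One way to follow this advice in practice is to hand the depset itself to
-`ctx.actions.args()` and to the action's `inputs`, so that it is only flattened
-at execution time. The following sketch is not part of the original example; it
-reuses `get_transitive_srcs()` and the `foocc` tool from above:
-
-```python
-def _foo_binary_impl(ctx):
-    foocc = ctx.executable._foocc
-    out = ctx.outputs.out
-    trans_srcs = get_transitive_srcs(ctx.files.srcs, ctx.attr.deps)
-    args = ctx.actions.args()
-    args.add(out)
-    # add_all() accepts the depset directly; no to_list() during analysis.
-    args.add_all(trans_srcs)
-    ctx.actions.run(
-        executable = foocc,
-        arguments = [args],
-        inputs = depset([foocc], transitive = [trans_srcs]),
-        outputs = [out],
-    )
-```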
-
-For more information about using depsets efficiently, see the
-[performance](/rules/performance) page.
-
-## API Reference
-
-Please see [here](/rules/lib/builtins/depset) for more details.
-
diff --git a/8.1.1/extending/exec-groups.mdx b/8.1.1/extending/exec-groups.mdx
deleted file mode 100644
index ba145e5..0000000
--- a/8.1.1/extending/exec-groups.mdx
+++ /dev/null
@@ -1,167 +0,0 @@
----
-title: 'Execution Groups'
----
-
-
-
-Execution groups allow for multiple execution platforms within a single target.
-Each execution group has its own [toolchain](/extending/toolchains) dependencies and
-performs its own [toolchain resolution](/extending/toolchains#toolchain-resolution).
-
-## Background
-
-Execution groups allow the rule author to define sets of actions, each with a
-potentially different execution platform. Multiple execution platforms can allow
-actions to execute differently, for example compiling an iOS app on a remote
-(Linux) worker and then linking/code signing on a local Mac worker.
-
-Being able to define groups of actions also helps alleviate the usage of action
-mnemonics as a proxy for specifying actions. Mnemonics are not guaranteed to be
-unique and can only reference a single action. This is especially helpful in
-allocating extra resources to specific memory- and processing-intensive actions
-like linking in C++ builds without over-allocating to less demanding tasks.
-
-## Defining execution groups
-
-During rule definition, rule authors can
-[declare](/rules/lib/globals/bzl#exec_group)
-a set of execution groups. On each execution group, the rule author can specify
-everything needed to select an execution platform for that execution group,
-namely any constraints via `exec_compatible_with` and toolchain types via
-`toolchain`.
-
-```python
-# foo.bzl
-my_rule = rule(
-    _impl,
-    exec_groups = {
-        "link": exec_group(
-            exec_compatible_with = ["@platforms//os:linux"],
-            toolchains = ["//foo:toolchain_type"],
-        ),
-        "test": exec_group(
-            toolchains = ["//foo_tools:toolchain_type"],
-        ),
-    },
-    attrs = {
-        "_compiler": attr.label(cfg = config.exec("link"))
-    },
-)
-```
-
-In the code snippet above, you can see that tool dependencies can also specify
-a transition to an exec group using the
-[`cfg`](/rules/lib/toplevel/attr#label)
-attribute param and the
-[`config`](/rules/lib/toplevel/config)
-module. The module exposes an `exec` function which takes a single string
-parameter: the name of the exec group for which the dependency should be
-built.
-
-As on native rules, the `test` execution group is present by default on Starlark
-test rules.
-
-## Accessing execution groups
-
-In the rule implementation, you can declare that actions should be run on the
-execution platform of an execution group. You can do this by using the `exec_group`
-param of action generating methods, specifically
-[`ctx.actions.run`](/rules/lib/builtins/actions#run) and
-[`ctx.actions.run_shell`](/rules/lib/builtins/actions#run_shell).
-
-```python
-# foo.bzl
-def _impl(ctx):
-    ctx.actions.run(
-        inputs = [ctx.file._some_tool, ctx.files.srcs[0]],
-        exec_group = "compile",
-        # ...
-    )
-```
-
-Rule authors can also access the [resolved toolchains](/extending/toolchains#toolchain-resolution)
-of execution groups, similarly to how you
-can access the resolved toolchain of a target:
-
-```python
-# foo.bzl
-def _impl(ctx):
-    foo_info = ctx.exec_groups["link"].toolchains["//foo:toolchain_type"].fooinfo
-    ctx.actions.run(
-        inputs = [foo_info, ctx.files.srcs[0]],
-        exec_group = "link",
-        # ...
-    )
-```
-
-Note: If an action uses a toolchain from an execution group, but doesn't specify
-that execution group in the action declaration, that may potentially cause
-issues. A mismatch like this may not immediately cause failures, but is a latent
-problem.
-
-## Using execution groups to set execution properties
-
-Execution groups are integrated with the
-[`exec_properties`](/reference/be/common-definitions#common-attributes)
-attribute that exists on every rule and allows the target writer to specify a
-string dict of properties that is then passed to the execution machinery. For
-example, if you wanted to set some property, say memory, for the target and give
-certain actions a higher memory allocation, you would write an `exec_properties`
-entry with an execution-group-augmented key, such as:
-
-```python
-# BUILD
-my_rule(
-    name = 'my_target',
-    exec_properties = {
-        'mem': '12g',
-        'link.mem': '16g',
-    },
-    ...
-)
-```
-
-All actions with `exec_group = "link"` would see the exec properties
-dictionary as `{"mem": "16g"}`. As you see here, execution-group-level
-settings override target-level settings.
-
-### Execution groups for native rules
-
-The following execution groups are available for actions defined by native rules:
-
-* `test`: Test runner actions.
-* `cpp_link`: C++ linking actions.
-
-### Execution groups and platform execution properties
-
-It is possible to define `exec_properties` for arbitrary execution groups on
-platform targets (unlike `exec_properties` set directly on a target, where
-properties for unknown execution groups are rejected). Targets then inherit the
-execution platform's `exec_properties` that affect the default execution group
-and any other relevant execution groups.
- -For example, suppose running a C++ test requires some resource to be available, -but it isn't required for compiling and linking; this can be modelled as -follows: - -```python -constraint_setting(name = "resource") -constraint_value(name = "has_resource", constraint_setting = ":resource") - -platform( - name = "platform_with_resource", - constraint_values = [":has_resource"], - exec_properties = { - "test.resource": "...", - }, -) - -cc_test( - name = "my_test", - srcs = ["my_test.cc"], - exec_compatible_with = [":has_resource"], -) -``` - -`exec_properties` defined directly on targets take precedence over those that -are inherited from the execution platform. diff --git a/8.1.1/extending/platforms.mdx b/8.1.1/extending/platforms.mdx deleted file mode 100644 index 94e6290..0000000 --- a/8.1.1/extending/platforms.mdx +++ /dev/null @@ -1,273 +0,0 @@ ---- -title: 'Platforms' ---- - - - -Bazel can build and test code on a variety of hardware, operating systems, and -system configurations, using many different versions of build tools such as -linkers and compilers. To help manage this complexity, Bazel has a concept of -*constraints* and *platforms*. A constraint is a dimension in which build or -production environments may differ, such as CPU architecture, the presence or -absence of a GPU, or the version of a system-installed compiler. A platform is a -named collection of choices for these constraints, representing the particular -resources that are available in some environment. - -Modeling the environment as a platform helps Bazel to automatically select the -appropriate -[toolchains](/extending/toolchains) -for build actions. Platforms can also be used in combination with the -[config_setting](/reference/be/general#config_setting) -rule to write [configurable attributes](/docs/configurable-attributes). - -Bazel recognizes three roles that a platform may serve: - -* **Host** - the platform on which Bazel itself runs. -* **Execution** - a platform on which build tools execute build actions to - produce intermediate and final outputs. -* **Target** - a platform on which a final output resides and executes. - -Bazel supports the following build scenarios regarding platforms: - -* **Single-platform builds** (default) - host, execution, and target platforms - are the same. For example, building a Linux executable on Ubuntu running on - an Intel x64 CPU. - -* **Cross-compilation builds** - host and execution platforms are the same, but - the target platform is different. For example, building an iOS app on macOS - running on a MacBook Pro. - -* **Multi-platform builds** - host, execution, and target platforms are all - different. - -Tip: for detailed instructions on migrating your project to platforms, see -[Migrating to Platforms](/concepts/platforms). - -## Defining constraints and platforms - -The space of possible choices for platforms is defined by using the -[`constraint_setting`][constraint_setting] and -[`constraint_value`][constraint_value] rules within `BUILD` files. -`constraint_setting` creates a new dimension, while -`constraint_value` creates a new value for a given dimension; together they -effectively define an enum and its possible values. For example, the following -snippet of a `BUILD` file introduces a constraint for the system's glibc version -with two possible values. 
-
-[constraint_setting]: /reference/be/platforms-and-toolchains#constraint_setting
-[constraint_value]: /reference/be/platforms-and-toolchains#constraint_value
-
-```python
-constraint_setting(name = "glibc_version")
-
-constraint_value(
-    name = "glibc_2_25",
-    constraint_setting = ":glibc_version",
-)
-
-constraint_value(
-    name = "glibc_2_26",
-    constraint_setting = ":glibc_version",
-)
-```
-
-Constraints and their values may be defined across different packages in the
-workspace. They are referenced by label and subject to the usual visibility
-controls. If visibility allows, you can extend an existing constraint setting by
-defining your own value for it.
-
-The [`platform`](/reference/be/platforms-and-toolchains#platform) rule introduces a new platform with
-certain choices of constraint values. The
-following creates a platform named `linux_x86`, and says that it describes any
-environment that runs a Linux operating system on an x86_64 architecture with a
-glibc version of 2.25. (See below for more on Bazel's built-in constraints.)
-
-```python
-platform(
-    name = "linux_x86",
-    constraint_values = [
-        "@platforms//os:linux",
-        "@platforms//cpu:x86_64",
-        ":glibc_2_25",
-    ],
-)
-```
-
-Note: It is an error for a platform to specify more than one value of the
-same constraint setting, such as `@platforms//cpu:x86_64` and
-`@platforms//cpu:arm` for `@platforms//cpu:cpu`.
-
-## Generally useful constraints and platforms
-
-To keep the ecosystem consistent, the Bazel team maintains a repository with
-constraint definitions for the most popular CPU architectures and operating
-systems. These are all located in
-[https://github.com/bazelbuild/platforms](https://github.com/bazelbuild/platforms).
-
-Bazel ships with the following special platform definition:
-`@platforms//host` (aliased as `@bazel_tools//tools:host_platform`). This is the
-autodetected host platform value, and represents the platform of the system
-Bazel is running on.
-
-## Specifying a platform for a build
-
-You can specify the host and target platforms for a build using the following
-command-line flags:
-
-* `--host_platform` - defaults to `@bazel_tools//tools:host_platform`
-  * This target is aliased to `@platforms//host`, which is backed by a repo
-    rule that detects the host OS and CPU and writes the platform target.
-  * There's also `@platforms//host:constraints.bzl`, which exposes
-    an array called `HOST_CONSTRAINTS`, which can be used in other BUILD and
-    Starlark files.
-* `--platforms` - defaults to the host platform
-  * This means that when no other flags are set,
-    `@platforms//host` is the target platform.
-  * If `--host_platform` is set and not `--platforms`, the value of
-    `--host_platform` is both the host and target platform.
-
-## Skipping incompatible targets
-
-When building for a specific target platform it is often desirable to skip
-targets that will never work on that platform. For example, your Windows device
-driver is likely going to generate lots of compiler errors when building on a
-Linux machine with `//...`. Use the
-[`target_compatible_with`](/reference/be/common-definitions#common.target_compatible_with)
-attribute to tell Bazel what target platform constraints your code has.
-
-The simplest use of this attribute restricts a target to a single platform.
-The target will not be built for any platform that doesn't satisfy all of the
-constraints. The following example restricts `win_driver_lib.cc` to 64-bit
-Windows.
- -```python -cc_library( - name = "win_driver_lib", - srcs = ["win_driver_lib.cc"], - target_compatible_with = [ - "@platforms//cpu:x86_64", - "@platforms//os:windows", - ], -) -``` - -`:win_driver_lib` is *only* compatible for building with 64-bit Windows and -incompatible with all else. Incompatibility is transitive. Any targets -that transitively depend on an incompatible target are themselves considered -incompatible. - -### When are targets skipped? - -Targets are skipped when they are considered incompatible and included in the -build as part of a target pattern expansion. For example, the following two -invocations skip any incompatible targets found in a target pattern expansion. - -```console -$ bazel build --platforms=//:myplatform //... -``` - -```console -$ bazel build --platforms=//:myplatform //:all -``` - -Incompatible tests in a [`test_suite`](/reference/be/general#test_suite) are -similarly skipped if the `test_suite` is specified on the command line with -[`--expand_test_suites`](/reference/command-line-reference#flag--expand_test_suites). -In other words, `test_suite` targets on the command line behave like `:all` and -`...`. Using `--noexpand_test_suites` prevents expansion and causes -`test_suite` targets with incompatible tests to also be incompatible. - -Explicitly specifying an incompatible target on the command line results in an -error message and a failed build. - -```console -$ bazel build --platforms=//:myplatform //:target_incompatible_with_myplatform -... -ERROR: Target //:target_incompatible_with_myplatform is incompatible and cannot be built, but was explicitly requested. -... -FAILED: Build did NOT complete successfully -``` - -Incompatible explicit targets are silently skipped if -`--skip_incompatible_explicit_targets` is enabled. - -### More expressive constraints - -For more flexibility in expressing constraints, use the -`@platforms//:incompatible` -[`constraint_value`](/reference/be/platforms-and-toolchains#constraint_value) -that no platform satisfies. - -Use [`select()`](/reference/be/functions#select) in combination with -`@platforms//:incompatible` to express more complicated restrictions. For -example, use it to implement basic OR logic. The following marks a library -compatible with macOS and Linux, but no other platforms. - -Note: An empty constraints list is equivalent to "compatible with everything". - -```python -cc_library( - name = "unixish_lib", - srcs = ["unixish_lib.cc"], - target_compatible_with = select({ - "@platforms//os:osx": [], - "@platforms//os:linux": [], - "//conditions:default": ["@platforms//:incompatible"], - }), -) -``` - -The above can be interpreted as follows: - -1. When targeting macOS, the target has no constraints. -2. When targeting Linux, the target has no constraints. -3. Otherwise, the target has the `@platforms//:incompatible` constraint. Because - `@platforms//:incompatible` is not part of any platform, the target is - deemed incompatible. - -To make your constraints more readable, use -[skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects.with_or()`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectswith_or). - -You can express inverse compatibility in a similar way. The following example -describes a library that is compatible with everything _except_ for ARM. 
-
-```python
-cc_library(
-    name = "non_arm_lib",
-    srcs = ["non_arm_lib.cc"],
-    target_compatible_with = select({
-        "@platforms//cpu:arm": ["@platforms//:incompatible"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-### Detecting incompatible targets using `bazel cquery`
-
-You can use the
-[`IncompatiblePlatformProvider`](/rules/lib/providers/IncompatiblePlatformProvider)
-in `bazel cquery`'s [Starlark output
-format](/query/cquery#output-format-definition) to distinguish
-incompatible targets from compatible ones.
-
-This can be used to filter out incompatible targets. The example below will
-only print the labels for targets that are compatible. Incompatible targets are
-not printed.
-
-```console
-$ cat example.cquery
-
-def format(target):
-  if "IncompatiblePlatformProvider" not in providers(target):
-    return target.label
-  return ""
-
-
-$ bazel cquery //... --output=starlark --starlark:file=example.cquery
-```
-
-### Known Issues
-
-Incompatible targets [ignore visibility
-restrictions](https://github.com/bazelbuild/bazel/issues/16044).
diff --git a/8.1.1/extending/repo.mdx b/8.1.1/extending/repo.mdx
deleted file mode 100644
index b878f03..0000000
--- a/8.1.1/extending/repo.mdx
+++ /dev/null
@@ -1,161 +0,0 @@
----
-title: 'Repository Rules'
----
-
-
-
-This page covers how to define repository rules, with examples.
-
-An [external repository](/external/overview#repository) is a directory tree
-containing source files usable in a Bazel build, which is generated on demand by
-running its corresponding **repo rule**. Repos can be defined in a multitude of
-ways, but ultimately, each repo is defined by invoking a repo rule, just as
-build targets are defined by invoking build rules. They can be used to depend on
-third-party libraries (such as Maven packaged libraries) but also to generate
-`BUILD` files specific to the host Bazel is running on.
-
-## Repository rule definition
-
-In a `.bzl` file, use the
-[repository_rule](/rules/lib/globals/bzl#repository_rule) function to define a
-new repo rule and store it in a global variable. After a repo rule is defined,
-it can be invoked as a function to define repos. This invocation is usually
-performed from inside a [module extension](/external/extension) implementation
-function.
-
-The two major components of a repo rule definition are its attribute schema and
-implementation function. The attribute schema determines the names and types of
-attributes passed to a repo rule invocation, and the implementation function is
-run when the repo needs to be fetched.
-
-## Attributes
-
-Attributes are arguments passed to the repo rule invocation. The schema of
-attributes accepted by a repo rule is specified using the `attrs` argument when
-the repo rule is defined with a call to `repository_rule`. An example defining
-`url` and `sha256` attributes as strings:
-
-```python
-http_archive = repository_rule(
-    implementation = _impl,
-    attrs = {
-        "url": attr.string(mandatory = True),
-        "sha256": attr.string(mandatory = True),
-    }
-)
-```
-
-To access an attribute within the implementation function, use
-`repository_ctx.attr.<attrname>`:
-
-```python
-def _impl(repository_ctx):
-    url = repository_ctx.attr.url
-    checksum = repository_ctx.attr.sha256
-```
-
-All `repository_rule`s have the implicitly defined attribute `name`.
This is a -string attribute that behaves somewhat magically: when specified as an input to -a repo rule invocation, it takes an apparent repo name; but when read from the -repo rule's implementation function using `repository_ctx.attr.name`, it returns -the canonical repo name. - -## Implementation function - -Every repo rule requires an `implementation` function. It contains the actual -logic of the rule and is executed strictly in the Loading Phase. - -The function has exactly one input parameter, `repository_ctx`. The function -returns either `None` to signify that the rule is reproducible given the -specified parameters, or a dict with a set of parameters for that rule that -would turn that rule into a reproducible one generating the same repo. For -example, for a rule tracking a git repository that would mean returning a -specific commit identifier instead of a floating branch that was originally -specified. - -The input parameter `repository_ctx` can be used to access attribute values, and -non-hermetic functions (finding a binary, executing a binary, creating a file in -the repository or downloading a file from the Internet). See [the API -docs](/rules/lib/builtins/repository_ctx) for more context. Example: - -```python -def _impl(repository_ctx): - repository_ctx.symlink(repository_ctx.attr.path, "") - -local_repository = repository_rule( - implementation=_impl, - ...) -``` - -## When is the implementation function executed? - -The implementation function of a repo rule is executed when Bazel needs a target -from that repository, for example when another target (in another repo) depends -on it or if it is mentioned on the command line. The implementation function is -then expected to create the repo in the file system. This is called "fetching" -the repo. - -In contrast to regular targets, repos are not necessarily re-fetched when -something changes that would cause the repo to be different. This is because -there are things that Bazel either cannot detect changes to or it would cause -too much overhead on every build (for example, things that are fetched from the -network). Therefore, repos are re-fetched only if one of the following things -changes: - -* The attributes passed to the repo rule invocation. -* The Starlark code comprising the implementation of the repo rule. -* The value of any environment variable passed to `repository_ctx`'s - `getenv()` method or declared with the `environ` attribute of the - [`repository_rule`](/rules/lib/globals/bzl#repository_rule). The values of - these environment variables can be hard-wired on the command line with the - [`--repo_env`](/reference/command-line-reference#flag--repo_env) flag. -* The existence, contents, and type of any paths being - [`watch`ed](/rules/lib/builtins/repository_ctx#watch) in the implementation - function of the repo rule. - * Certain other methods of `repository_ctx` with a `watch` parameter, such - as `read()`, `execute()`, and `extract()`, can also cause paths to be - watched. - * Similarly, [`repository_ctx.watch_tree`](/rules/lib/builtins/repository_ctx#watch_tree) - and [`path.readdir`](/rules/lib/builtins/path#readdir) can cause paths - to be watched in other ways. -* When `bazel fetch --force` is executed. - -There are two parameters of `repository_rule` that control when the repositories -are re-fetched: - -* If the `configure` flag is set, the repository is re-fetched on `bazel - fetch --force --configure` (non-`configure` repositories are not - re-fetched). 
-* If the `local` flag is set, in addition to the above cases, the repo is also
-  re-fetched when the Bazel server restarts.
-
-## Forcing refetch of external repos
-
-Sometimes, an external repo can become outdated without any change to its
-definition or dependencies. For example, a repo fetching sources might follow a
-particular branch of a third-party repository, and new commits are available on
-that branch. In this case, you can ask Bazel to refetch all external repos
-unconditionally by calling `bazel fetch --force --all`.
-
-Moreover, some repo rules inspect the local machine and might become outdated if
-the local machine was upgraded. In this case, you can ask Bazel to refetch only
-those external repos whose [`repository_rule`](/rules/lib/globals#repository_rule)
-definition has the `configure` attribute set, by using `bazel fetch --force
---configure`.
-
-## Examples
-
-- [C++ auto-configured
-  toolchain](https://cs.opensource.google/bazel/bazel/+/master:tools/cpp/cc_configure.bzl;drc=644b7d41748e09eff9e47cbab2be2263bb71f29a;l=176):
-  uses a repo rule to automatically create the C++ configuration files for
-  Bazel by looking for the local C++ compiler, the environment, and the flags
-  the C++ compiler supports.
-
-- [Go repositories](https://github.com/bazelbuild/rules_go/blob/67bc217b6210a0922d76d252472b87e9a6118fdf/go/private/go_repositories.bzl#L195)
-  uses several `repository_rule`s to define the list of dependencies needed to
-  use the Go rules.
-
-- [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external)
-  creates an external repository called `@maven` by default that generates
-  build targets for every Maven artifact in the transitive dependency tree.
diff --git a/8.1.1/extending/rules.mdx b/8.1.1/extending/rules.mdx
deleted file mode 100644
index c91939e..0000000
--- a/8.1.1/extending/rules.mdx
+++ /dev/null
@@ -1,1244 +0,0 @@
----
-title: 'Rules'
----
-
-
-
-A **rule** defines a series of [**actions**](#actions) that Bazel performs on
-inputs to produce a set of outputs, which are referenced in
-[**providers**](#providers) returned by the rule's
-[**implementation function**](#implementation_function). For example, a C++
-binary rule might:
-
-1. Take a set of `.cpp` source files (inputs).
-2. Run `g++` on the source files (action).
-3. Return the `DefaultInfo` provider with the executable output and other files
-   to make available at runtime.
-4. Return the `CcInfo` provider with C++-specific information gathered from the
-   target and its dependencies.
-
-From Bazel's perspective, `g++` and the standard C++ libraries are also inputs
-to this rule. As a rule writer, you must consider not only the user-provided
-inputs to a rule, but also all of the tools and libraries required to execute
-the actions.
-
-Before creating or modifying any rule, ensure you are familiar with Bazel's
-[build phases](/extending/concepts). It is important to understand the three
-phases of a build (loading, analysis, and execution). It is also useful to
-learn about [macros](/extending/macros) to understand the difference between rules and
-macros. To get started, first review the [Rules Tutorial](/rules/rules-tutorial).
-Then, use this page as a reference.
-
-A few rules are built into Bazel itself. These *native rules*, such as
-`genrule` and `filegroup`, provide some core support.
-By defining your own rules, you can add support for languages and tools
-that Bazel doesn't support natively.
- -Bazel provides an extensibility model for writing rules using the -[Starlark](/rules/language) language. These rules are written in `.bzl` files, which -can be loaded directly from `BUILD` files. - -When defining your own rule, you get to decide what attributes it supports and -how it generates its outputs. - -The rule's `implementation` function defines its exact behavior during the -[analysis phase](/extending/concepts#evaluation-model). This function doesn't run any -external commands. Rather, it registers [actions](#actions) that will be used -later during the execution phase to build the rule's outputs, if they are -needed. - -## Rule creation - -In a `.bzl` file, use the [rule](/rules/lib/globals/bzl#rule) function to define a new -rule, and store the result in a global variable. The call to `rule` specifies -[attributes](#attributes) and an -[implementation function](#implementation_function): - -```python -example_library = rule( - implementation = _example_library_impl, - attrs = { - "deps": attr.label_list(), - ... - }, -) -``` - -This defines a [rule kind](/query/language#kind) named `example_library`. - -The call to `rule` also must specify if the rule creates an -[executable](#executable-rules) output (with `executable = True`), or specifically -a test executable (with `test = True`). If the latter, the rule is a *test rule*, -and the name of the rule must end in `_test`. - -## Target instantiation - -Rules can be [loaded](/concepts/build-files#load) and called in `BUILD` files: - -```python -load('//some/pkg:rules.bzl', 'example_library') - -example_library( - name = "example_target", - deps = [":another_target"], - ... -) -``` - -Each call to a build rule returns no value, but has the side effect of defining -a target. This is called *instantiating* the rule. This specifies a name for the -new target and values for the target's [attributes](#attributes). - -Rules can also be called from Starlark functions and loaded in `.bzl` files. -Starlark functions that call rules are called [Starlark macros](/extending/macros). -Starlark macros must ultimately be called from `BUILD` files, and can only be -called during the [loading phase](/extending/concepts#evaluation-model), when `BUILD` -files are evaluated to instantiate targets. - -## Attributes - -An *attribute* is a rule argument. Attributes can provide specific values to a -target's [implementation](#implementation_function), or they can refer to other -targets, creating a graph of dependencies. - -Rule-specific attributes, such as `srcs` or `deps`, are defined by passing a map -from attribute names to schemas (created using the [`attr`](/rules/lib/toplevel/attr) -module) to the `attrs` parameter of `rule`. -[Common attributes](/reference/be/common-definitions#common-attributes), such as -`name` and `visibility`, are implicitly added to all rules. Additional -attributes are implicitly added to -[executable and test rules](#executable-rules) specifically. Attributes which -are implicitly added to a rule can't be included in the dictionary passed to -`attrs`. - -### Dependency attributes - -Rules that process source code usually define the following attributes to handle -various [types of dependencies](/concepts/dependencies#types_of_dependencies): - -* `srcs` specifies source files processed by a target's actions. Often, the - attribute schema specifies which file extensions are expected for the sort - of source file the rule processes. 
Rules for languages with header files
-  generally specify a separate `hdrs` attribute for headers processed by a
-  target and its consumers.
-* `deps` specifies code dependencies for a target. The attribute schema should
-  specify which [providers](#providers) those dependencies must provide. (For
-  example, `cc_library` provides `CcInfo`.)
-* `data` specifies files to be made available at runtime to any executable
-  which depends on a target. Its schema should allow arbitrary files to be
-  specified.
-
-```python
-example_library = rule(
-    implementation = _example_library_impl,
-    attrs = {
-        "srcs": attr.label_list(allow_files = [".example"]),
-        "hdrs": attr.label_list(allow_files = [".header"]),
-        "deps": attr.label_list(providers = [ExampleInfo]),
-        "data": attr.label_list(allow_files = True),
-        ...
-    },
-)
-```
-
-These are examples of *dependency attributes*. Any attribute that specifies
-an input label (those defined with
-[`attr.label_list`](/rules/lib/toplevel/attr#label_list),
-[`attr.label`](/rules/lib/toplevel/attr#label), or
-[`attr.label_keyed_string_dict`](/rules/lib/toplevel/attr#label_keyed_string_dict))
-specifies dependencies of a certain type
-between a target and the targets whose labels (or the corresponding
-[`Label`](/rules/lib/builtins/Label) objects) are listed in that attribute when the target
-is defined. The repository, and possibly the path, for these labels is resolved
-relative to the defined target.
-
-```python
-example_library(
-    name = "my_target",
-    deps = [":other_target"],
-)
-
-example_library(
-    name = "other_target",
-    ...
-)
-```
-
-In this example, `other_target` is a dependency of `my_target`, and therefore
-`other_target` is analyzed first. It is an error if there is a cycle in the
-dependency graph of targets.
-
-### Private attributes and implicit dependencies
-
-A dependency attribute with a default value creates an *implicit dependency*. It
-is implicit because it's a part of the target graph that the user doesn't
-specify in a `BUILD` file. Implicit dependencies are useful for hard-coding a
-relationship between a rule and a *tool* (a build-time dependency, such as a
-compiler), since most of the time a user is not interested in specifying what
-tool the rule uses. Inside the rule's implementation function, this is treated
-the same as other dependencies.
-
-If you want to provide an implicit dependency without allowing the user to
-override that value, you can make the attribute *private* by giving it a name
-that begins with an underscore (`_`). Private attributes must have default
-values. It generally only makes sense to use private attributes for implicit
-dependencies.
-
-```python
-example_library = rule(
-    implementation = _example_library_impl,
-    attrs = {
-        ...
-        "_compiler": attr.label(
-            default = Label("//tools:example_compiler"),
-            allow_single_file = True,
-            executable = True,
-            cfg = "exec",
-        ),
-    },
-)
-```
-
-In this example, every target of type `example_library` has an implicit
-dependency on the compiler `//tools:example_compiler`. This allows
-`example_library`'s implementation function to generate actions that invoke the
-compiler, even though the user did not pass its label as an input. Since
-`_compiler` is a private attribute, it follows that `ctx.attr._compiler`
-will always point to `//tools:example_compiler` in all targets of this rule
-type. Alternatively, you can name the attribute `compiler` without the
-underscore and keep the default value.
This allows users to substitute a -different compiler if necessary, but it requires no awareness of the compiler's -label. - -Implicit dependencies are generally used for tools that reside in the same -repository as the rule implementation. If the tool comes from the -[execution platform](/extending/platforms) or a different repository instead, the -rule should obtain that tool from a [toolchain](/extending/toolchains). - -### Output attributes - -*Output attributes*, such as [`attr.output`](/rules/lib/toplevel/attr#output) and -[`attr.output_list`](/rules/lib/toplevel/attr#output_list), declare an output file that the -target generates. These differ from dependency attributes in two ways: - -* They define output file targets instead of referring to targets defined - elsewhere. -* The output file targets depend on the instantiated rule target, instead of - the other way around. - -Typically, output attributes are only used when a rule needs to create outputs -with user-defined names which can't be based on the target name. If a rule has -one output attribute, it is typically named `out` or `outs`. - -Output attributes are the preferred way of creating *predeclared outputs*, which -can be specifically depended upon or -[requested at the command line](#requesting_output_files). - -## Implementation function - -Every rule requires an `implementation` function. These functions are executed -strictly in the [analysis phase](/extending/concepts#evaluation-model) and transform the -graph of targets generated in the loading phase into a graph of -[actions](#actions) to be performed during the execution phase. As such, -implementation functions can't actually read or write files. - -Rule implementation functions are usually private (named with a leading -underscore). Conventionally, they are named the same as their rule, but suffixed -with `_impl`. - -Implementation functions take exactly one parameter: a -[rule context](/rules/lib/builtins/ctx), conventionally named `ctx`. They return a list of -[providers](#providers). - -### Targets - -Dependencies are represented at analysis time as [`Target`](/rules/lib/builtins/Target) -objects. These objects contain the [providers](#providers) generated when the -target's implementation function was executed. - -[`ctx.attr`](/rules/lib/builtins/ctx#attr) has fields corresponding to the names of each -dependency attribute, containing `Target` objects representing each direct -dependency using that attribute. For `label_list` attributes, this is a list of -`Targets`. For `label` attributes, this is a single `Target` or `None`. - -A list of provider objects are returned by a target's implementation function: - -```python -return [ExampleInfo(headers = depset(...))] -``` - -Those can be accessed using index notation (`[]`), with the type of provider as -a key. These can be [custom providers](#custom_providers) defined in Starlark or -[providers for native rules](/rules/lib/providers) available as Starlark -global variables. - -For example, if a rule takes header files using a `hdrs` attribute and provides -them to the compilation actions of the target and its consumers, it could -collect them like so: - -```python -def _example_library_impl(ctx): - ... - transitive_headers = [hdr[ExampleInfo].headers for hdr in ctx.attr.hdrs] -``` - -There's a legacy struct style, which is strongly discouraged and rules should be -[migrated away from it](#migrating_from_legacy_providers). - -### Files - -Files are represented by [`File`](/rules/lib/builtins/File) objects. 
Since Bazel doesn't -perform file I/O during the analysis phase, these objects can't be used to -directly read or write file content. Rather, they are passed to action-emitting -functions (see [`ctx.actions`](/rules/lib/builtins/actions)) to construct pieces of the -action graph. - -A `File` can either be a source file or a generated file. Each generated file -must be an output of exactly one action. Source files can't be the output of -any action. - -For each dependency attribute, the corresponding field of -[`ctx.files`](/rules/lib/builtins/ctx#files) contains a list of the default outputs of all -dependencies using that attribute: - -```python -def _example_library_impl(ctx): - ... - headers = depset(ctx.files.hdrs, transitive = transitive_headers) - srcs = ctx.files.srcs - ... -``` - -[`ctx.file`](/rules/lib/builtins/ctx#file) contains a single `File` or `None` for -dependency attributes whose specs set `allow_single_file = True`. -[`ctx.executable`](/rules/lib/builtins/ctx#executable) behaves the same as `ctx.file`, but only -contains fields for dependency attributes whose specs set `executable = True`. - -### Declaring outputs - -During the analysis phase, a rule's implementation function can create outputs. -Since all labels have to be known during the loading phase, these additional -outputs have no labels. `File` objects for outputs can be created using -[`ctx.actions.declare_file`](/rules/lib/builtins/actions#declare_file) and -[`ctx.actions.declare_directory`](/rules/lib/builtins/actions#declare_directory). -Often, the names of outputs are based on the target's name, -[`ctx.label.name`](/rules/lib/builtins/ctx#label): - -```python -def _example_library_impl(ctx): - ... - output_file = ctx.actions.declare_file(ctx.label.name + ".output") - ... -``` - -For *predeclared outputs*, like those created for -[output attributes](#output_attributes), `File` objects instead can be retrieved -from the corresponding fields of [`ctx.outputs`](/rules/lib/builtins/ctx#outputs). - -### Actions - -An action describes how to generate a set of outputs from a set of inputs, for -example "run gcc on hello.c and get hello.o". When an action is created, Bazel -doesn't run the command immediately. It registers it in a graph of dependencies, -because an action can depend on the output of another action. For example, in C, -the linker must be called after the compiler. - -General-purpose functions that create actions are defined in -[`ctx.actions`](/rules/lib/builtins/actions): - -* [`ctx.actions.run`](/rules/lib/builtins/actions#run), to run an executable. -* [`ctx.actions.run_shell`](/rules/lib/builtins/actions#run_shell), to run a shell - command. -* [`ctx.actions.write`](/rules/lib/builtins/actions#write), to write a string to a file. -* [`ctx.actions.expand_template`](/rules/lib/builtins/actions#expand_template), to - generate a file from a template. - -[`ctx.actions.args`](/rules/lib/builtins/actions#args) can be used to efficiently -accumulate the arguments for actions. It avoids flattening depsets until -execution time: - -```python -def _example_library_impl(ctx): - ... 
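    # Note: the depsets assembled below are handed to Args unflattened;
    # Args expands them to file paths only when the action executes.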
    transitive_headers = [dep[ExampleInfo].headers for dep in ctx.attr.deps]
    headers = depset(ctx.files.hdrs, transitive = transitive_headers)
    srcs = ctx.files.srcs
    inputs = depset(srcs, transitive = [headers])
    output_file = ctx.actions.declare_file(ctx.label.name + ".output")

    args = ctx.actions.args()
    args.add_joined("-h", headers, join_with = ",")
    args.add_joined("-s", srcs, join_with = ",")
    args.add("-o", output_file)

    ctx.actions.run(
        mnemonic = "ExampleCompile",
        executable = ctx.executable._compiler,
        arguments = [args],
        inputs = inputs,
        outputs = [output_file],
    )
    ...
```

Actions take a list or depset of input files and generate a (non-empty) list of
output files. The set of input and output files must be known during the
[analysis phase](/extending/concepts#evaluation-model). It might depend on the
value of attributes, including providers from dependencies, but it can't depend
on the result of the execution. For example, if your action runs the unzip
command, you must specify which files you expect to be inflated (before running
unzip). Actions which create a variable number of files internally can wrap
those in a single file (such as a zip, tar, or other archive format).

Actions must list all of their inputs. Listing inputs that are not used is
permitted, but inefficient.

Actions must create all of their outputs. They may write other files, but
anything not in outputs won't be available to consumers. All declared outputs
must be written by some action.

Actions are comparable to pure functions: they should depend only on the
provided inputs, and avoid accessing computer information, username, clock,
network, or I/O devices (except for reading inputs and writing outputs). This
is important because the output will be cached and reused.

Dependencies are resolved by Bazel, which decides which actions to execute. It
is an error if there is a cycle in the dependency graph. Creating an action
doesn't guarantee that it will be executed; that depends on whether its outputs
are needed for the build.

### Providers

Providers are pieces of information that a rule exposes to other rules that
depend on it. This data can include output files, libraries, parameters to pass
on a tool's command line, or anything else a target's consumers should know
about.

Since a rule's implementation function can only read providers from the
instantiated target's immediate dependencies, rules need to forward any
information from a target's dependencies that needs to be known by a target's
consumers, generally by accumulating that into a
[`depset`](/rules/lib/builtins/depset).

A target's providers are specified by a list of provider objects returned by
the implementation function.

Implementation functions can also be written in a legacy style where the
implementation function returns a [`struct`](/rules/lib/builtins/struct)
instead of a list of provider objects. This style is strongly discouraged and
rules should be [migrated away from it](#migrating_from_legacy_providers).

#### Default outputs

A target's *default outputs* are the outputs that are requested by default when
the target is requested for build at the command line. For example, a
`java_library` target `//pkg:foo` has `foo.jar` as a default output, so that
will be built by the command `bazel build //pkg:foo`.
Default outputs are specified by the `files` parameter of
[`DefaultInfo`](/rules/lib/providers/DefaultInfo):

```python
def _example_library_impl(ctx):
    ...
    return [
        DefaultInfo(files = depset([output_file]), ...),
        ...
    ]
```

If `DefaultInfo` is not returned by a rule implementation or the `files`
parameter is not specified, `DefaultInfo.files` defaults to all
*predeclared outputs* (generally, those created by
[output attributes](#output_attributes)).

Rules that perform actions should provide default outputs, even if those
outputs are not expected to be directly used. Actions that are not in the graph
of the requested outputs are pruned. If an output is only used by a target's
consumers, those actions won't be performed when the target is built in
isolation. This makes debugging more difficult because rebuilding just the
failing target won't reproduce the failure.

#### Runfiles

Runfiles are a set of files used by a target at runtime (as opposed to build
time). During the [execution phase](/extending/concepts#evaluation-model),
Bazel creates a directory tree containing symlinks pointing to the runfiles.
This stages the environment for the binary so it can access the runfiles during
runtime.

Runfiles can be added manually during rule creation.
[`runfiles`](/rules/lib/builtins/runfiles) objects can be created by the
`runfiles` method on the rule context,
[`ctx.runfiles`](/rules/lib/builtins/ctx#runfiles), and passed to the
`runfiles` parameter on `DefaultInfo`. The executable output of
[executable rules](#executable-rules) is implicitly added to the runfiles.

Some rules specify attributes, generally named
[`data`](/reference/be/common-definitions#common.data), whose outputs are added
to a target's runfiles. Runfiles should also be merged in from `data`, as well
as from any attributes which might provide code for eventual execution,
generally `srcs` (which might contain `filegroup` targets with associated
`data`) and `deps`.

```python
def _example_library_impl(ctx):
    ...
    runfiles = ctx.runfiles(files = ctx.files.data)
    transitive_runfiles = []
    for runfiles_attr in (
        ctx.attr.srcs,
        ctx.attr.hdrs,
        ctx.attr.deps,
        ctx.attr.data,
    ):
        for target in runfiles_attr:
            transitive_runfiles.append(target[DefaultInfo].default_runfiles)
    runfiles = runfiles.merge_all(transitive_runfiles)
    return [
        DefaultInfo(..., runfiles = runfiles),
        ...
    ]
```

#### Custom providers

Providers can be defined using the [`provider`](/rules/lib/globals/bzl#provider)
function to convey rule-specific information:

```python
ExampleInfo = provider(
    "Info needed to compile/link Example code.",
    fields = {
        "headers": "depset of header Files from transitive dependencies.",
        "files_to_link": "depset of Files from compilation.",
    },
)
```

Rule implementation functions can then construct and return provider instances:

```python
def _example_library_impl(ctx):
    ...
    return [
        ...
        ExampleInfo(
            headers = headers,
            files_to_link = depset(
                [output_file],
                transitive = [
                    dep[ExampleInfo].files_to_link for dep in ctx.attr.deps
                ],
            ),
        )
    ]
```

##### Custom initialization of providers

It's possible to guard the instantiation of a provider with custom
preprocessing and validation logic. This can be used to ensure that all
provider instances satisfy certain invariants, or to give users a cleaner API
for obtaining an instance.
This is done by passing an `init` callback to the
[`provider`](/rules/lib/globals/bzl.html#provider) function. If this callback
is given, the return type of `provider()` changes to be a tuple of two values:
the provider symbol that is the ordinary return value when `init` is not used,
and a "raw constructor".

In this case, when the provider symbol is called, instead of directly returning
a new instance, it will forward the arguments along to the `init` callback. The
callback's return value must be a dict mapping field names (strings) to values;
this is used to initialize the fields of the new instance. Note that the
callback may have any signature, and if the arguments don't match the signature
an error is reported as if the callback were invoked directly.

The raw constructor, by contrast, will bypass the `init` callback.

The following example uses `init` to preprocess and validate its arguments:

```python
# //pkg:exampleinfo.bzl

_core_headers = [...]  # private constant representing standard library files

# Keyword-only arguments are preferred.
def _exampleinfo_init(*, files_to_link, headers = None, allow_empty_files_to_link = False):
    if not files_to_link and not allow_empty_files_to_link:
        fail("files_to_link may not be empty")
    all_headers = depset(_core_headers, transitive = headers)
    return {"files_to_link": files_to_link, "headers": all_headers}

ExampleInfo, _new_exampleinfo = provider(
    fields = ["files_to_link", "headers"],
    init = _exampleinfo_init,
)
```

A rule implementation may then instantiate the provider as follows:

```python
ExampleInfo(
    files_to_link = my_files_to_link,  # may not be empty
    headers = my_headers,  # will automatically include the core headers
)
```

The raw constructor can be used to define alternative public factory functions
that don't go through the `init` logic. For example, `exampleinfo.bzl` could
define:

```python
def make_barebones_exampleinfo(headers):
    """Returns an ExampleInfo with no files_to_link and only the specified headers."""
    return _new_exampleinfo(files_to_link = depset(), headers = headers)
```

Typically, the raw constructor is bound to a variable whose name begins with an
underscore (`_new_exampleinfo` above), so that user code can't load it and
generate arbitrary provider instances.

Another use for `init` is to prevent the user from calling the provider symbol
altogether, and force them to use a factory function instead:

```python
def _exampleinfo_init_banned(*args, **kwargs):
    fail("Do not call ExampleInfo(). Use make_exampleinfo() instead.")

ExampleInfo, _new_exampleinfo = provider(
    ...
    init = _exampleinfo_init_banned)

def make_exampleinfo(...):
    ...
    return _new_exampleinfo(...)
```

## Executable rules and test rules

Executable rules define targets that can be invoked by a `bazel run` command.
Test rules are a special kind of executable rule whose targets can also be
invoked by a `bazel test` command. Executable and test rules are created by
setting the respective
[`executable`](/rules/lib/globals/bzl#rule.executable) or
[`test`](/rules/lib/globals/bzl#rule.test) argument to `True` in the call to
`rule`:

```python
example_binary = rule(
    implementation = _example_binary_impl,
    executable = True,
    ...
)

example_test = rule(
    implementation = _example_binary_impl,
    test = True,
    ...
)
```

Test rules must have names that end in `_test`.
(Test *target* names also often end in `_test` by convention, but this is not
required.) Non-test rules must not have this suffix.

Both kinds of rules must produce an executable output file (which may or may
not be predeclared) that will be invoked by the `run` or `test` commands. To
tell Bazel which of a rule's outputs to use as this executable, pass it as the
`executable` argument of a returned
[`DefaultInfo`](/rules/lib/providers/DefaultInfo) provider. That `executable`
is added to the default outputs of the rule (so you don't need to pass that to
both `executable` and `files`). It's also implicitly added to the
[runfiles](#runfiles):

```python
def _example_binary_impl(ctx):
    executable = ctx.actions.declare_file(ctx.label.name)
    ...
    return [
        DefaultInfo(executable = executable, ...),
        ...
    ]
```

The action that generates this file must set the executable bit on the file.
For a [`ctx.actions.run`](/rules/lib/builtins/actions#run) or
[`ctx.actions.run_shell`](/rules/lib/builtins/actions#run_shell) action this
should be done by the underlying tool that is invoked by the action. For a
[`ctx.actions.write`](/rules/lib/builtins/actions#write) action, pass
`is_executable = True`.

As [legacy behavior](#deprecated_predeclared_outputs), executable rules have a
special `ctx.outputs.executable` predeclared output. This file serves as the
default executable if you don't specify one using `DefaultInfo`; it must not be
used otherwise. This output mechanism is deprecated because it doesn't support
customizing the executable file's name at analysis time.

See examples of an
[executable rule](https://github.com/bazelbuild/examples/blob/main/rules/executable/fortune.bzl)
and a
[test rule](https://github.com/bazelbuild/examples/blob/main/rules/test_rule/line_length.bzl).

[Executable rules](/reference/be/common-definitions#common-attributes-binaries)
and [test rules](/reference/be/common-definitions#common-attributes-tests) have
additional attributes implicitly defined, in addition to those added for
[all rules](/reference/be/common-definitions#common-attributes). The defaults
of implicitly-added attributes can't be changed, though this can be worked
around by wrapping a private rule in a [Starlark macro](/extending/macros)
which alters the default:

```python
def example_test(size = "small", **kwargs):
    _example_test(size = size, **kwargs)

_example_test = rule(
    ...
)
```

### Runfiles location

When an executable target is run with `bazel run` (or `test`), the root of the
runfiles directory is adjacent to the executable. The paths relate as follows:

```python
# Given launcher_path and runfile_file:
runfiles_root = launcher_path.path + ".runfiles"
workspace_name = ctx.workspace_name
runfile_path = runfile_file.short_path
execution_root_relative_path = "%s/%s/%s" % (
    runfiles_root, workspace_name, runfile_path)
```

The path to a `File` under the runfiles directory corresponds to
[`File.short_path`](/rules/lib/builtins/File#short_path).

The binary executed directly by `bazel` is adjacent to the root of the
`runfiles` directory. However, binaries called *from* the runfiles can't make
the same assumption. To mitigate this, each binary should provide a way to
accept its runfiles root as a parameter, via an environment variable or a
command-line argument or flag. This allows a binary to pass the correct
canonical runfiles root on to the binaries it calls, as in the sketch below.
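A minimal sketch, assuming the rule author chooses to forward the root
explicitly; the helper name is hypothetical, while `ctx.workspace_name` and
`File.short_path` are real API:

```python
# Sketch only: the path of `f` relative to the runfiles root, i.e. the
# "<workspace_name>/<short_path>" layout described above. How the value
# is forwarded (environment variable or flag) is up to the rule author.
def _runfiles_relative_path(ctx, f):
    return "%s/%s" % (ctx.workspace_name, f.short_path)
```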
If the runfiles root is not passed in explicitly, a binary can guess that it
was the first binary called and look for an adjacent runfiles directory.

## Advanced topics

### Requesting output files

A single target can have several output files. When a `bazel build` command is
run, some of the outputs of the targets given to the command are considered to
be *requested*. Bazel only builds these requested files and the files that they
directly or indirectly depend on. (In terms of the action graph, Bazel only
executes the actions that are reachable as transitive dependencies of the
requested files.)

In addition to [default outputs](#default_outputs), any *predeclared output*
can be explicitly requested on the command line. Rules can specify predeclared
outputs using [output attributes](#output_attributes). In that case, the user
explicitly chooses labels for outputs when they instantiate the rule. To obtain
[`File`](/rules/lib/builtins/File) objects for output attributes, use the
corresponding attribute of [`ctx.outputs`](/rules/lib/builtins/ctx#outputs).
Rules can
[implicitly define predeclared outputs](#deprecated_predeclared_outputs) based
on the target name as well, but this feature is deprecated.

In addition to default outputs, there are *output groups*, which are
collections of output files that may be requested together. These can be
requested with
[`--output_groups`](/reference/command-line-reference#flag--output_groups).
For example, if a target `//pkg:mytarget` is of a rule type that has a
`debug_files` output group, these files can be built by running
`bazel build //pkg:mytarget --output_groups=debug_files`. Since
non-predeclared outputs don't have labels, they can only be requested by
appearing in the default outputs or an output group.

Output groups can be specified with the
[`OutputGroupInfo`](/rules/lib/providers/OutputGroupInfo) provider. Note that
unlike many built-in providers, `OutputGroupInfo` can take parameters with
arbitrary names to define output groups with that name:

```python
def _example_library_impl(ctx):
    ...
    debug_file = ctx.actions.declare_file(ctx.label.name + ".pdb")
    ...
    return [
        DefaultInfo(files = depset([output_file]), ...),
        OutputGroupInfo(
            debug_files = depset([debug_file]),
            all_files = depset([output_file, debug_file]),
        ),
        ...
    ]
```

Also unlike most providers, `OutputGroupInfo` can be returned by both an
[aspect](/extending/aspects) and the rule target to which that aspect is
applied, as long as they don't define the same output groups. In that case, the
resulting providers are merged.

Note that `OutputGroupInfo` generally shouldn't be used to convey specific
sorts of files from a target to the actions of its consumers. Define
[rule-specific providers](#custom_providers) for that instead.

### Configurations

Imagine that you want to build a C++ binary for a different architecture. The
build can be complex and involve multiple steps. Some of the intermediate
binaries, like compilers and code generators, have to run on
[the execution platform](/extending/platforms#overview) (which could be your
host, or a remote executor). Some binaries, like the final output, must be
built for the target architecture.

For this reason, Bazel has a concept of "configurations" and transitions. The
topmost targets (the ones requested on the command line) are built in the
"target" configuration, while tools that should run on the execution platform
are built in an "exec" configuration.
Rules may generate different actions based on the configuration, for instance
to change the CPU architecture that is passed to the compiler. In some cases,
the same library may be needed for different configurations. If this happens,
it will be analyzed and potentially built multiple times.

By default, Bazel builds a target's dependencies in the same configuration as
the target itself, in other words without transitions. When a dependency is a
tool that's needed to help build the target, the corresponding attribute should
specify a transition to an exec configuration. This causes the tool and all its
dependencies to build for the execution platform.

For each dependency attribute, you can use `cfg` to decide if dependencies
should build in the same configuration or transition to an exec configuration.
If a dependency attribute has the flag `executable = True`, `cfg` must be set
explicitly. This is to guard against accidentally building a tool for the wrong
configuration.
[See example](https://github.com/bazelbuild/examples/blob/main/rules/actions_run/execute.bzl)

In general, sources, dependent libraries, and executables that will be needed
at runtime can use the same configuration.

Tools that are executed as part of the build (such as compilers or code
generators) should be built for an exec configuration. In this case, specify
`cfg = "exec"` in the attribute.

Otherwise, executables that are used at runtime (such as part of a test) should
be built for the target configuration. In this case, specify `cfg = "target"`
in the attribute.

`cfg = "target"` doesn't actually do anything: it's purely a convenience value
to help rule designers be explicit about their intentions. When
`executable = False`, which means `cfg` is optional, only set this when it
truly helps readability.

You can also use `cfg = my_transition` to use
[user-defined transitions](/extending/config#user-defined-transitions), which
allow rule authors a great deal of flexibility in changing configurations, with
the drawback of
[making the build graph larger and less comprehensible](/extending/config#memory-and-performance-considerations).

**Note**: Historically, Bazel didn't have the concept of execution platforms,
and instead all build actions were considered to run on the host machine. Bazel
versions before 6.0 created a distinct "host" configuration to represent this.
If you see references to "host" in code or old documentation, that's what this
refers to. We recommend using Bazel 6.0 or newer to avoid this extra conceptual
overhead.

### Configuration fragments

Rules may access
[configuration fragments](/rules/lib/fragments) such as
`cpp` and `java`. However, all required fragments must be declared in
order to avoid access errors:

```python
def _impl(ctx):
    # Using ctx.fragments.cpp leads to an error since it was not declared.
    x = ctx.fragments.java
    ...

my_rule = rule(
    implementation = _impl,
    fragments = ["java"],  # Required fragments of the target configuration
    ...
)
```

### Runfiles symlinks

Normally, the relative path of a file in the runfiles tree is the same as the
relative path of that file in the source tree or generated output tree. If
these need to be different for some reason, you can specify the `root_symlinks`
or `symlinks` arguments. The `root_symlinks` is a dictionary mapping paths to
files, where the paths are relative to the root of the runfiles directory.
The `symlinks` dictionary is the same, but paths are implicitly prefixed with
the name of the main workspace (*not* the name of the repository containing the
current target).

```python
    ...
    runfiles = ctx.runfiles(
        root_symlinks = {"some/path/here.foo": ctx.file.some_data_file2},
        symlinks = {"some/path/here.bar": ctx.file.some_data_file3},
    )
    # Creates something like:
    # sometarget.runfiles/
    #     some/
    #         path/
    #             here.foo -> some_data_file2
    #     <workspace_name>/
    #         some/
    #             path/
    #                 here.bar -> some_data_file3
```

If `symlinks` or `root_symlinks` is used, be careful not to map two different
files to the same path in the runfiles tree. This will cause the build to fail
with an error describing the conflict. To fix, you will need to modify your
`ctx.runfiles` arguments to remove the collision. This checking will be done
for any targets using your rule, as well as targets of any kind that depend on
those targets. This is especially risky if your tool is likely to be used
transitively by another tool; symlink names must be unique across the runfiles
of a tool and all of its dependencies.

### Code coverage

When the [`coverage`](/reference/command-line-reference#coverage) command is
run, the build may need to add coverage instrumentation for certain targets.
The build also gathers the list of source files that are instrumented. The
subset of targets that are considered is controlled by the flag
[`--instrumentation_filter`](/reference/command-line-reference#flag--instrumentation_filter).
Test targets are excluded, unless
[`--instrument_test_targets`](/reference/command-line-reference#flag--instrument_test_targets)
is specified.

If a rule implementation adds coverage instrumentation at build time, it needs
to account for that in its implementation function.
[ctx.coverage_instrumented](/rules/lib/builtins/ctx#coverage_instrumented)
returns `True` in coverage mode if a target's sources should be instrumented:

```python
# Are this rule's sources instrumented?
if ctx.coverage_instrumented():
    # Do something to turn on coverage for this compile action
```

Logic that always needs to be on in coverage mode (whether a target's sources
specifically are instrumented or not) can be conditioned on
[ctx.configuration.coverage_enabled](/rules/lib/builtins/configuration#coverage_enabled).

If the rule directly includes sources from its dependencies before compilation
(such as header files), it may also need to turn on compile-time
instrumentation if the dependencies' sources should be instrumented:

```python
# Are this rule's sources or any of the sources for its direct dependencies
# in deps instrumented?
if (ctx.configuration.coverage_enabled and
    (ctx.coverage_instrumented() or
     any([ctx.coverage_instrumented(dep) for dep in ctx.attr.deps]))):
    # Do something to turn on coverage for this compile action
```

Rules also should provide information about which attributes are relevant for
coverage with the `InstrumentedFilesInfo` provider, constructed using
[`coverage_common.instrumented_files_info`](/rules/lib/toplevel/coverage_common#instrumented_files_info).
The `dependency_attributes` parameter of `instrumented_files_info` should list
all runtime dependency attributes, including code dependencies like `deps` and
data dependencies like `data`. The `source_attributes` parameter should list
the rule's source files attributes if coverage instrumentation might be added:

```python
def _example_library_impl(ctx):
    ...
    return [
        ...
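        # The InstrumentedFilesInfo constructed below tells `bazel coverage`
        # which attributes carry sources to instrument (source_attributes)
        # and which carry runtime dependencies (dependency_attributes).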
        coverage_common.instrumented_files_info(
            ctx,
            dependency_attributes = ["deps", "data"],
            # Omitted if coverage is not supported for this rule:
            source_attributes = ["srcs", "hdrs"],
        ),
        ...
    ]
```

If `InstrumentedFilesInfo` is not returned, a default one is created with each
non-tool [dependency attribute](#dependency_attributes) that doesn't set
[`cfg`](#configuration) to `"exec"` in the attribute schema in
`dependency_attributes`. (This isn't ideal behavior, since it puts attributes
like `srcs` in `dependency_attributes` instead of `source_attributes`, but it
avoids the need for explicit coverage configuration for all rules in the
dependency chain.)

### Validation Actions

Sometimes you need to validate something about the build, and the information
required to do that validation is available only in artifacts (source files or
generated files). Because this information is in artifacts, rules can't do this
validation at analysis time because rules can't read files. Instead, actions
must do this validation at execution time. When validation fails, the action
will fail, and hence so will the build.

Examples of validations that might be run are static analysis, linting,
dependency and consistency checks, and style checks.

Validation actions can also help to improve build performance by moving parts
of actions that are not required for building artifacts into separate actions.
For example, if a single action that does compilation and linting can be
separated into a compilation action and a linting action, then the linting
action can be run as a validation action and run in parallel with other
actions.

These "validation actions" often don't produce anything that is used elsewhere
in the build, since they only need to assert things about their inputs. This
presents a problem though: If a validation action doesn't produce anything that
is used elsewhere in the build, how does a rule get the action to run?
Historically, the approach was to have the validation action output an empty
file, and artificially add that output to the inputs of some other important
action in the build.

This works, because Bazel will always run the validation action when the
compile action is run, but this has significant drawbacks:

1. The validation action is in the critical path of the build. Because Bazel
thinks the empty output is required to run the compile action, it will run the
validation action first, even though the compile action will ignore the input.
This reduces parallelism and slows down builds.

2. If other actions in the build might run instead of the compile action, then
the empty outputs of validation actions need to be added to those actions as
well (`java_library`'s source jar output, for example). This is also a problem
if new actions that might run instead of the compile action are added later,
and the empty validation output is accidentally left off.

The solution to these problems is to use the Validations Output Group.

#### Validations Output Group

The Validations Output Group is an output group designed to hold the otherwise
unused outputs of validation actions, so that they don't need to be
artificially added to the inputs of other actions.

This group is special in that its outputs are always requested, regardless of
the value of the `--output_groups` flag, and regardless of how the target is
depended upon (for example, on the command line, as a dependency, or through
implicit outputs of the target).
Note that normal caching and incrementality -still apply: if the inputs to the validation action have not changed and the -validation action previously succeeded, then the validation action won't be -run. - - - -Using this output group still requires that validation actions output some file, -even an empty one. This might require wrapping some tools that normally don't -create outputs so that a file is created. - -A target's validation actions are not run in three cases: - -* When the target is depended upon as a tool -* When the target is depended upon as an implicit dependency (for example, an - attribute that starts with "_") -* When the target is built in the exec configuration. - -It is assumed that these targets have their own -separate builds and tests that would uncover any validation failures. - -#### Using the Validations Output Group - -The Validations Output Group is named `_validation` and is used like any other -output group: - -```python -def _rule_with_validation_impl(ctx): - - ctx.actions.write(ctx.outputs.main, "main output\n") - ctx.actions.write(ctx.outputs.implicit, "implicit output\n") - - validation_output = ctx.actions.declare_file(ctx.attr.name + ".validation") - ctx.actions.run( - outputs = [validation_output], - executable = ctx.executable._validation_tool, - arguments = [validation_output.path], - ) - - return [ - DefaultInfo(files = depset([ctx.outputs.main])), - OutputGroupInfo(_validation = depset([validation_output])), - ] - - -rule_with_validation = rule( - implementation = _rule_with_validation_impl, - outputs = { - "main": "%{name}.main", - "implicit": "%{name}.implicit", - }, - attrs = { - "_validation_tool": attr.label( - default = Label("//validation_actions:validation_tool"), - executable = True, - cfg = "exec" - ), - } -) -``` - -Notice that the validation output file is not added to the `DefaultInfo` or the -inputs to any other action. The validation action for a target of this rule kind -will still run if the target is depended upon by label, or any of the target's -implicit outputs are directly or indirectly depended upon. - -It is usually important that the outputs of validation actions only go into the -validation output group, and are not added to the inputs of other actions, as -this could defeat parallelism gains. Note however that Bazel doesn't -have any special checking to enforce this. Therefore, you should test -that validation action outputs are not added to the inputs of any actions in the -tests for Starlark rules. For example: - -```python -load("@bazel_skylib//lib:unittest.bzl", "analysistest") - -def _validation_outputs_test_impl(ctx): - env = analysistest.begin(ctx) - - actions = analysistest.target_actions(env) - target = analysistest.target_under_test(env) - validation_outputs = target.output_groups._validation.to_list() - for action in actions: - for validation_output in validation_outputs: - if validation_output in action.inputs.to_list(): - analysistest.fail(env, - "%s is a validation action output, but is an input to action %s" % ( - validation_output, action)) - - return analysistest.end(env) - -validation_outputs_test = analysistest.make(_validation_outputs_test_impl) -``` - -#### Validation Actions Flag - -Running validation actions is controlled by the `--run_validations` command line -flag, which defaults to true. 
- -## Deprecated features - -### Deprecated predeclared outputs - -There are two **deprecated** ways of using predeclared outputs: - -* The [`outputs`](/rules/lib/globals/bzl#rule.outputs) parameter of `rule` specifies - a mapping between output attribute names and string templates for generating - predeclared output labels. Prefer using non-predeclared outputs and - explicitly adding outputs to `DefaultInfo.files`. Use the rule target's - label as input for rules which consume the output instead of a predeclared - output's label. - -* For [executable rules](#executable-rules), `ctx.outputs.executable` refers - to a predeclared executable output with the same name as the rule target. - Prefer declaring the output explicitly, for example with - `ctx.actions.declare_file(ctx.label.name)`, and ensure that the command that - generates the executable sets its permissions to allow execution. Explicitly - pass the executable output to the `executable` parameter of `DefaultInfo`. - -### Runfiles features to avoid - -[`ctx.runfiles`](/rules/lib/builtins/ctx#runfiles) and the [`runfiles`](/rules/lib/builtins/runfiles) -type have a complex set of features, many of which are kept for legacy reasons. -The following recommendations help reduce complexity: - -* **Avoid** use of the `collect_data` and `collect_default` modes of - [`ctx.runfiles`](/rules/lib/builtins/ctx#runfiles). These modes implicitly collect - runfiles across certain hardcoded dependency edges in confusing ways. - Instead, add files using the `files` or `transitive_files` parameters of - `ctx.runfiles`, or by merging in runfiles from dependencies with - `runfiles = runfiles.merge(dep[DefaultInfo].default_runfiles)`. - -* **Avoid** use of the `data_runfiles` and `default_runfiles` of the - `DefaultInfo` constructor. Specify `DefaultInfo(runfiles = ...)` instead. - The distinction between "default" and "data" runfiles is maintained for - legacy reasons. For example, some rules put their default outputs in - `data_runfiles`, but not `default_runfiles`. Instead of using - `data_runfiles`, rules should *both* include default outputs and merge in - `default_runfiles` from attributes which provide runfiles (often - [`data`](/reference/be/common-definitions#common-attributes.data)). - -* When retrieving `runfiles` from `DefaultInfo` (generally only for merging - runfiles between the current rule and its dependencies), use - `DefaultInfo.default_runfiles`, **not** `DefaultInfo.data_runfiles`. - -### Migrating from legacy providers - -Historically, Bazel providers were simple fields on the `Target` object. They -were accessed using the dot operator, and they were created by putting the field -in a [`struct`](/rules/lib/builtins/struct) returned by the rule's -implementation function instead of a list of provider objects: - -```python -return struct(example_info = struct(headers = depset(...))) -``` - -Such providers can be retrieved from the corresponding field of the `Target` object: - -```python -transitive_headers = [hdr.example_info.headers for hdr in ctx.attr.hdrs] -``` - -*This style is deprecated and should not be used in new code;* see following for -information that may help you migrate. The new provider mechanism avoids name -clashes. It also supports data hiding, by requiring any code accessing a -provider instance to retrieve it using the provider symbol. - -For the moment, legacy providers are still supported. A rule can return both -legacy and modern providers as follows: - -```python -def _old_rule_impl(ctx): - ... 
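    # MyInfo below is assumed to be a modern provider defined earlier in
    # this file with provider(); the struct fields become legacy providers.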
- legacy_data = struct(x = "foo", ...) - modern_data = MyInfo(y = "bar", ...) - # When any legacy providers are returned, the top-level returned value is a - # struct. - return struct( - # One key = value entry for each legacy provider. - legacy_info = legacy_data, - ... - # Additional modern providers: - providers = [modern_data, ...]) -``` - -If `dep` is the resulting `Target` object for an instance of this rule, the -providers and their contents can be retrieved as `dep.legacy_info.x` and -`dep[MyInfo].y`. - -In addition to `providers`, the returned struct can also take several other -fields that have special meaning (and thus don't create a corresponding legacy -provider): - -* The fields `files`, `runfiles`, `data_runfiles`, `default_runfiles`, and - `executable` correspond to the same-named fields of - [`DefaultInfo`](/rules/lib/providers/DefaultInfo). It is not allowed to specify any of - these fields while also returning a `DefaultInfo` provider. - -* The field `output_groups` takes a struct value and corresponds to an - [`OutputGroupInfo`](/rules/lib/providers/OutputGroupInfo). - -In [`provides`](/rules/lib/globals/bzl#rule.provides) declarations of rules, and in -[`providers`](/rules/lib/toplevel/attr#label_list.providers) declarations of dependency -attributes, legacy providers are passed in as strings and modern providers are -passed in by their `Info` symbol. Be sure to change from strings to symbols -when migrating. For complex or large rule sets where it is difficult to update -all rules atomically, you may have an easier time if you follow this sequence of -steps: - -1. Modify the rules that produce the legacy provider to produce both the legacy - and modern providers, using the preceding syntax. For rules that declare they - return the legacy provider, update that declaration to include both the - legacy and modern providers. - -2. Modify the rules that consume the legacy provider to instead consume the - modern provider. If any attribute declarations require the legacy provider, - also update them to instead require the modern provider. Optionally, you can - interleave this work with step 1 by having consumers accept or require either - provider: Test for the presence of the legacy provider using - `hasattr(target, 'foo')`, or the new provider using `FooInfo in target`. - -3. Fully remove the legacy provider from all rules. diff --git a/8.1.1/extending/toolchains.mdx b/8.1.1/extending/toolchains.mdx deleted file mode 100644 index b904cbe..0000000 --- a/8.1.1/extending/toolchains.mdx +++ /dev/null @@ -1,600 +0,0 @@ ---- -title: 'Toolchains' ---- - - - -This page describes the toolchain framework, which is a way for rule authors to -decouple their rule logic from platform-based selection of tools. It is -recommended to read the [rules](/extending/rules) and [platforms](/extending/platforms) -pages before continuing. This page covers why toolchains are needed, how to -define and use them, and how Bazel selects an appropriate toolchain based on -platform constraints. - -## Motivation - -Let's first look at the problem toolchains are designed to solve. Suppose you -are writing rules to support the "bar" programming language. Your `bar_binary` -rule would compile `*.bar` files using the `barc` compiler, a tool that itself -is built as another target in your workspace. Since users who write `bar_binary` -targets shouldn't have to specify a dependency on the compiler, you make it an -implicit dependency by adding it to the rule definition as a private attribute. 
- -```python -bar_binary = rule( - implementation = _bar_binary_impl, - attrs = { - "srcs": attr.label_list(allow_files = True), - ... - "_compiler": attr.label( - default = "//bar_tools:barc_linux", # the compiler running on linux - providers = [BarcInfo], - ), - }, -) -``` - -`//bar_tools:barc_linux` is now a dependency of every `bar_binary` target, so -it'll be built before any `bar_binary` target. It can be accessed by the rule's -implementation function just like any other attribute: - -```python -BarcInfo = provider( - doc = "Information about how to invoke the barc compiler.", - # In the real world, compiler_path and system_lib might hold File objects, - # but for simplicity they are strings for this example. arch_flags is a list - # of strings. - fields = ["compiler_path", "system_lib", "arch_flags"], -) - -def _bar_binary_impl(ctx): - ... - info = ctx.attr._compiler[BarcInfo] - command = "%s -l %s %s" % ( - info.compiler_path, - info.system_lib, - " ".join(info.arch_flags), - ) - ... -``` - -The issue here is that the compiler's label is hardcoded into `bar_binary`, yet -different targets may need different compilers depending on what platform they -are being built for and what platform they are being built on -- called the -*target platform* and *execution platform*, respectively. Furthermore, the rule -author does not necessarily even know all the available tools and platforms, so -it is not feasible to hardcode them in the rule's definition. - -A less-than-ideal solution would be to shift the burden onto users, by making -the `_compiler` attribute non-private. Then individual targets could be -hardcoded to build for one platform or another. - -```python -bar_binary( - name = "myprog_on_linux", - srcs = ["mysrc.bar"], - compiler = "//bar_tools:barc_linux", -) - -bar_binary( - name = "myprog_on_windows", - srcs = ["mysrc.bar"], - compiler = "//bar_tools:barc_windows", -) -``` - -You can improve on this solution by using `select` to choose the `compiler` -[based on the platform](/docs/configurable-attributes): - -```python -config_setting( - name = "on_linux", - constraint_values = [ - "@platforms//os:linux", - ], -) - -config_setting( - name = "on_windows", - constraint_values = [ - "@platforms//os:windows", - ], -) - -bar_binary( - name = "myprog", - srcs = ["mysrc.bar"], - compiler = select({ - ":on_linux": "//bar_tools:barc_linux", - ":on_windows": "//bar_tools:barc_windows", - }), -) -``` - -But this is tedious and a bit much to ask of every single `bar_binary` user. -If this style is not used consistently throughout the workspace, it leads to -builds that work fine on a single platform but fail when extended to -multi-platform scenarios. It also does not address the problem of adding support -for new platforms and compilers without modifying existing rules or targets. - -The toolchain framework solves this problem by adding an extra level of -indirection. Essentially, you declare that your rule has an abstract dependency -on *some* member of a family of targets (a toolchain type), and Bazel -automatically resolves this to a particular target (a toolchain) based on the -applicable platform constraints. Neither the rule author nor the target author -need know the complete set of available platforms and toolchains. - -## Writing rules that use toolchains - -Under the toolchain framework, instead of having rules depend directly on tools, -they instead depend on *toolchain types*. 
A toolchain type is a simple target -that represents a class of tools that serve the same role for different -platforms. For instance, you can declare a type that represents the bar -compiler: - -```python -# By convention, toolchain_type targets are named "toolchain_type" and -# distinguished by their package path. So the full path for this would be -# //bar_tools:toolchain_type. -toolchain_type(name = "toolchain_type") -``` - -The rule definition in the previous section is modified so that instead of -taking in the compiler as an attribute, it declares that it consumes a -`//bar_tools:toolchain_type` toolchain. - -```python -bar_binary = rule( - implementation = _bar_binary_impl, - attrs = { - "srcs": attr.label_list(allow_files = True), - ... - # No `_compiler` attribute anymore. - }, - toolchains = ["//bar_tools:toolchain_type"], -) -``` - -The implementation function now accesses this dependency under `ctx.toolchains` -instead of `ctx.attr`, using the toolchain type as the key. - -```python -def _bar_binary_impl(ctx): - ... - info = ctx.toolchains["//bar_tools:toolchain_type"].barcinfo - # The rest is unchanged. - command = "%s -l %s %s" % ( - info.compiler_path, - info.system_lib, - " ".join(info.arch_flags), - ) - ... -``` - -`ctx.toolchains["//bar_tools:toolchain_type"]` returns the -[`ToolchainInfo` provider](/rules/lib/toplevel/platform_common#ToolchainInfo) -of whatever target Bazel resolved the toolchain dependency to. The fields of the -`ToolchainInfo` object are set by the underlying tool's rule; in the next -section, this rule is defined such that there is a `barcinfo` field that wraps -a `BarcInfo` object. - -Bazel's procedure for resolving toolchains to targets is described -[below](#toolchain-resolution). Only the resolved toolchain target is actually -made a dependency of the `bar_binary` target, not the whole space of candidate -toolchains. - -### Mandatory and Optional Toolchains - -By default, when a rule expresses a toolchain type dependency using a bare label -(as shown above), the toolchain type is considered to be **mandatory**. If Bazel -is unable to find a matching toolchain (see -[Toolchain resolution](#toolchain-resolution) below) for a mandatory toolchain -type, this is an error and analysis halts. - -It is possible instead to declare an **optional** toolchain type dependency, as -follows: - -```python -bar_binary = rule( - ... - toolchains = [ - config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = False), - ], -) -``` - -When an optional toolchain type cannot be resolved, analysis continues, and the -result of `ctx.toolchains["//bar_tools:toolchain_type"]` is `None`. - -The [`config_common.toolchain_type`](/rules/lib/toplevel/config_common#toolchain_type) -function defaults to mandatory. - -The following forms can be used: - -- Mandatory toolchain types: - - `toolchains = ["//bar_tools:toolchain_type"]` - - `toolchains = [config_common.toolchain_type("//bar_tools:toolchain_type")]` - - `toolchains = [config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = True)]` -- Optional toolchain types: - - `toolchains = [config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = False)]` - -```python -bar_binary = rule( - ... - toolchains = [ - "//foo_tools:toolchain_type", - config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = False), - ], -) -``` - -You can mix and match forms in the same rule, also. 
However, if the same toolchain type is listed multiple times, Bazel uses the
most strict form, where mandatory is more strict than optional.

### Writing aspects that use toolchains

Aspects have access to the same toolchain API as rules: you can define required
toolchain types, access toolchains via the context, and use them to generate
new actions using the toolchain.

```py
bar_aspect = aspect(
    implementation = _bar_aspect_impl,
    attrs = {},
    toolchains = ['//bar_tools:toolchain_type'],
)

def _bar_aspect_impl(target, ctx):
    toolchain = ctx.toolchains['//bar_tools:toolchain_type']
    # Use the toolchain provider like in a rule.
    return []
```

## Defining toolchains

To define some toolchains for a given toolchain type, you need three things:

1. A language-specific rule representing the kind of tool or tool suite. By
   convention this rule's name is suffixed with "\_toolchain".

   1. **Note:** The `\_toolchain` rule cannot create any build actions.
      Rather, it collects artifacts from other rules and forwards them to the
      rule that uses the toolchain. That rule is responsible for creating all
      build actions.

2. Several targets of this rule type, representing versions of the tool or
   tool suite for different platforms.

3. For each such target, an associated target of the generic
   [`toolchain`](/reference/be/platforms-and-toolchains#toolchain)
   rule, to provide metadata used by the toolchain framework. This `toolchain`
   target also refers to the `toolchain_type` associated with this toolchain.
   This means that a given `_toolchain` rule could be associated with any
   `toolchain_type`, and that it is only in a `toolchain` instance that uses
   this `_toolchain` rule that the rule is associated with a `toolchain_type`.

For our running example, here's a definition for a `bar_toolchain` rule. Our
example has only a compiler, but other tools such as a linker could also be
grouped underneath it.

```python
def _bar_toolchain_impl(ctx):
    toolchain_info = platform_common.ToolchainInfo(
        barcinfo = BarcInfo(
            compiler_path = ctx.attr.compiler_path,
            system_lib = ctx.attr.system_lib,
            arch_flags = ctx.attr.arch_flags,
        ),
    )
    return [toolchain_info]

bar_toolchain = rule(
    implementation = _bar_toolchain_impl,
    attrs = {
        "compiler_path": attr.string(),
        "system_lib": attr.string(),
        "arch_flags": attr.string_list(),
    },
)
```

The rule must return a `ToolchainInfo` provider, which becomes the object that
the consuming rule retrieves using `ctx.toolchains` and the label of the
toolchain type. `ToolchainInfo`, like `struct`, can hold arbitrary field-value
pairs. The specification of exactly what fields are added to the
`ToolchainInfo` should be clearly documented at the toolchain type. In this
example, the values are returned wrapped in a `BarcInfo` object to reuse the
schema defined above; this style may be useful for validation and code reuse.

Now you can define targets for specific `barc` compilers.

```python
bar_toolchain(
    name = "barc_linux",
    arch_flags = [
        "--arch=Linux",
        "--debug_everything",
    ],
    compiler_path = "/path/to/barc/on/linux",
    system_lib = "/usr/lib/libbarc.so",
)

bar_toolchain(
    name = "barc_windows",
    arch_flags = [
        "--arch=Windows",
        # Different flags, no debug support on windows.
    ],
    compiler_path = "C:\\path\\on\\windows\\barc.exe",
    system_lib = "C:\\path\\on\\windows\\barclib.dll",
)
```

Finally, you create `toolchain` definitions for the two `bar_toolchain`
targets.
-These definitions link the language-specific targets to the toolchain type and -provide the constraint information that tells Bazel when the toolchain is -appropriate for a given platform. - -```python -toolchain( - name = "barc_linux_toolchain", - exec_compatible_with = [ - "@platforms//os:linux", - "@platforms//cpu:x86_64", - ], - target_compatible_with = [ - "@platforms//os:linux", - "@platforms//cpu:x86_64", - ], - toolchain = ":barc_linux", - toolchain_type = ":toolchain_type", -) - -toolchain( - name = "barc_windows_toolchain", - exec_compatible_with = [ - "@platforms//os:windows", - "@platforms//cpu:x86_64", - ], - target_compatible_with = [ - "@platforms//os:windows", - "@platforms//cpu:x86_64", - ], - toolchain = ":barc_windows", - toolchain_type = ":toolchain_type", -) -``` - -The use of relative path syntax above suggests these definitions are all in the -same package, but there's no reason the toolchain type, language-specific -toolchain targets, and `toolchain` definition targets can't all be in separate -packages. - -See the [`go_toolchain`](https://github.com/bazelbuild/rules_go/blob/master/go/private/go_toolchain.bzl) -for a real-world example. - -### Toolchains and configurations - -An important question for rule authors is, when a `bar_toolchain` target is -analyzed, what [configuration](/reference/glossary#configuration) does it see, and what transitions -should be used for dependencies? The example above uses string attributes, but -what would happen for a more complicated toolchain that depends on other targets -in the Bazel repository? - -Let's see a more complex version of `bar_toolchain`: - -```python -def _bar_toolchain_impl(ctx): - # The implementation is mostly the same as above, so skipping. - pass - -bar_toolchain = rule( - implementation = _bar_toolchain_impl, - attrs = { - "compiler": attr.label( - executable = True, - mandatory = True, - cfg = "exec", - ), - "system_lib": attr.label( - mandatory = True, - cfg = "target", - ), - "arch_flags": attr.string_list(), - }, -) -``` - -The use of [`attr.label`](/rules/lib/toplevel/attr#label) is the same as for a standard rule, -but the meaning of the `cfg` parameter is slightly different. - -The dependency from a target (called the "parent") to a toolchain via toolchain -resolution uses a special configuration transition called the "toolchain -transition". The toolchain transition keeps the configuration the same, except -that it forces the execution platform to be the same for the toolchain as for -the parent (otherwise, toolchain resolution for the toolchain could pick any -execution platform, and wouldn't necessarily be the same as for parent). This -allows any `exec` dependencies of the toolchain to also be executable for the -parent's build actions. Any of the toolchain's dependencies which use `cfg = -"target"` (or which don't specify `cfg`, since "target" is the default) are -built for the same target platform as the parent. This allows toolchain rules to -contribute both libraries (the `system_lib` attribute above) and tools (the -`compiler` attribute) to the build rules which need them. The system libraries -are linked into the final artifact, and so need to be built for the same -platform, whereas the compiler is a tool invoked during the build, and needs to -be able to run on the execution platform. - -## Registering and building with toolchains - -At this point all the building blocks are assembled, and you just need to make -the toolchains available to Bazel's resolution procedure. 
This is done by -registering the toolchain, either in a `MODULE.bazel` file using -`register_toolchains()`, or by passing the toolchains' labels on the command -line using the `--extra_toolchains` flag. - -```python -register_toolchains( - "//bar_tools:barc_linux_toolchain", - "//bar_tools:barc_windows_toolchain", - # Target patterns are also permitted, so you could have also written: - # "//bar_tools:all", - # or even - # "//bar_tools/...", -) -``` - -When using target patterns to register toolchains, the order in which the -individual toolchains are registered is determined by the following rules: - -* The toolchains defined in a subpackage of a package are registered before the - toolchains defined in the package itself. -* Within a package, toolchains are registered in the lexicographical order of - their names. - -Now when you build a target that depends on a toolchain type, an appropriate -toolchain will be selected based on the target and execution platforms. - -```python -# my_pkg/BUILD - -platform( - name = "my_target_platform", - constraint_values = [ - "@platforms//os:linux", - ], -) - -bar_binary( - name = "my_bar_binary", - ... -) -``` - -```sh -bazel build //my_pkg:my_bar_binary --platforms=//my_pkg:my_target_platform -``` - -Bazel will see that `//my_pkg:my_bar_binary` is being built with a platform that -has `@platforms//os:linux` and therefore resolve the -`//bar_tools:toolchain_type` reference to `//bar_tools:barc_linux_toolchain`. -This will end up building `//bar_tools:barc_linux` but not -`//bar_tools:barc_windows`. - -## Toolchain resolution - -Note: [Some Bazel rules](/concepts/platforms#status) do not yet support -toolchain resolution. - -For each target that uses toolchains, Bazel's toolchain resolution procedure -determines the target's concrete toolchain dependencies. The procedure takes as -input a set of required toolchain types, the target platform, the list of -available execution platforms, and the list of available toolchains. Its outputs -are a selected toolchain for each toolchain type as well as a selected execution -platform for the current target. - -The available execution platforms and toolchains are gathered from the -external dependency graph via -[`register_execution_platforms`](/rules/lib/globals/module#register_execution_platforms) -and -[`register_toolchains`](/rules/lib/globals/module#register_toolchains) calls in -`MODULE.bazel` files. -Additional execution platforms and toolchains may also be specified on the -command line via -[`--extra_execution_platforms`](/reference/command-line-reference#flag--extra_execution_platforms) -and -[`--extra_toolchains`](/reference/command-line-reference#flag--extra_toolchains). -The host platform is automatically included as an available execution platform. -Available platforms and toolchains are tracked as ordered lists for determinism, -with preference given to earlier items in the list. - -The set of available toolchains, in priority order, is created from -`--extra_toolchains` and `register_toolchains`: - -1. Toolchains registered using `--extra_toolchains` are added first. (Within - these, the **last** toolchain has highest priority.) -2. Toolchains registered using `register_toolchains` in the transitive external - dependency graph, in the following order: (Within these, the **first** - mentioned toolchain has highest priority.) - 1. Toolchains registered by the root module (as in, the `MODULE.bazel` at the - workspace root); - 2. 
Toolchains registered in the user's `WORKSPACE` file, including in any
-   macros invoked from there;
-   3. Toolchains registered by non-root modules (as in, dependencies specified by
-   the root module, and their dependencies, and so forth);
-   4. Toolchains registered in the "WORKSPACE suffix"; this is only used by
-   certain native rules bundled with the Bazel installation.
-
-**NOTE:** [Pseudo-targets like `:all`, `:*`, and
-`/...`](/run/build#specifying-build-targets) are ordered by Bazel's package
-loading mechanism, which uses a lexicographic ordering.
-
-The resolution steps are as follows.
-
-1. A `target_compatible_with` or `exec_compatible_with` clause *matches* a
-   platform if, for each `constraint_value` in its list, the platform also has
-   that `constraint_value` (either explicitly or as a default).
-
-   If the platform has `constraint_value`s from `constraint_setting`s not
-   referenced by the clause, these do not affect matching.
-
-1. If the target being built specifies the
-   [`exec_compatible_with` attribute](/reference/be/common-definitions#common.exec_compatible_with)
-   (or its rule definition specifies the
-   [`exec_compatible_with` argument](/rules/lib/globals/bzl#rule.exec_compatible_with)),
-   the list of available execution platforms is filtered to remove
-   any that do not match the execution constraints.
-
-1. The list of available toolchains is filtered to remove any toolchains
-   specifying `target_settings` that don't match the current configuration.
-
-1. For each available execution platform, you associate each toolchain type with
-   the first available toolchain, if any, that is compatible with this execution
-   platform and the target platform.
-
-1. Any execution platform that failed to find a compatible mandatory toolchain
-   for one of its toolchain types is ruled out. Of the remaining platforms, the
-   first one becomes the current target's execution platform, and its associated
-   toolchains (if any) become dependencies of the target.
-
-The chosen execution platform is used to run all actions that the target
-generates.
-
-In cases where the same target can be built in multiple configurations (such as
-for different CPUs) within the same build, the resolution procedure is applied
-independently to each version of the target.
-
-If the rule uses [execution groups](/extending/exec-groups), each execution
-group performs toolchain resolution separately, and each has its own execution
-platform and toolchains.
-
-## Debugging toolchains
-
-If you are adding toolchain support to an existing rule, use the
-`--toolchain_resolution_debug=regex` flag. During toolchain resolution, the flag
-provides verbose output for toolchain types or target names that match the
-regex. You can use `.*` to output all information. Bazel will output names of
-toolchains it checks and skips during the resolution process.
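-
-For example, to trace how the `//bar_tools:toolchain_type` from the earlier
-example is resolved while building `//my_pkg:my_bar_binary` (a minimal sketch
-reusing the illustrative labels from above):
-
-```sh
-# Print resolution details only for toolchain types matching the regex.
-bazel build //my_pkg:my_bar_binary \
-    --toolchain_resolution_debug=//bar_tools:toolchain_type
-```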
-
-If you'd like to see which [`cquery`](/query/cquery) dependencies are from toolchain
-resolution, use `cquery`'s [`--transitions`](/query/cquery#transitions) flag:
-
-```
-# Find all direct dependencies of //cc:my_cc_lib. This includes explicitly
-# declared dependencies, implicit dependencies, and toolchain dependencies.
-
-$ bazel cquery 'deps(//cc:my_cc_lib, 1)'
-//cc:my_cc_lib (96d6638)
-@bazel_tools//tools/cpp:toolchain (96d6638)
-@bazel_tools//tools/def_parser:def_parser (HOST)
-//cc:my_cc_dep (96d6638)
-@local_config_platform//:host (96d6638)
-@bazel_tools//tools/cpp:toolchain_type (96d6638)
-//:default_host_platform (96d6638)
-@local_config_cc//:cc-compiler-k8 (HOST)
-//cc:my_cc_lib.cc (null)
-@bazel_tools//tools/cpp:grep-includes (HOST)
-
-# Which of these are from toolchain resolution?
-$ bazel cquery 'deps(//cc:my_cc_lib, 1)' --transitions=lite | grep "toolchain dependency"
-  [toolchain dependency]#@local_config_cc//:cc-compiler-k8#HostTransition -> b6df211
-```
diff --git a/8.1.1/external/advanced.mdx b/8.1.1/external/advanced.mdx
deleted file mode 100644
index 26ece4d..0000000
--- a/8.1.1/external/advanced.mdx
+++ /dev/null
@@ -1,183 +0,0 @@
----
-title: 'Advanced topics on external dependencies'
----
-
-
-
-## Shadowing dependencies in WORKSPACE
-
-Note: This section applies to the [WORKSPACE
-system](/external/overview#workspace-system) only. For
-[Bzlmod](/external/overview#bzlmod), use a [multiple-version
-override](/external/module#multiple-version_override).
-
-Whenever possible, keep a single version policy in your project. A single
-version is required for dependencies that you compile against and that end up
-in your final binary. For other cases, you can shadow dependencies:
-
-myproject/WORKSPACE
-
-```python
-workspace(name = "myproject")
-
-local_repository(
-    name = "A",
-    path = "../A",
-)
-local_repository(
-    name = "B",
-    path = "../B",
-)
-```
-
-A/WORKSPACE
-
-```python
-workspace(name = "A")
-
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-http_archive(
-    name = "testrunner",
-    urls = ["https://github.com/testrunner/v1.zip"],
-    sha256 = "...",
-)
-```
-
-B/WORKSPACE
-
-```python
-workspace(name = "B")
-
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-http_archive(
-    name = "testrunner",
-    urls = ["https://github.com/testrunner/v2.zip"],
-    sha256 = "...",
-)
-```
-
-Both dependencies `A` and `B` depend on different versions of `testrunner`.
-Include both in `myproject` without conflict by giving them distinct names in
-`myproject/WORKSPACE`:
-
-```python
-workspace(name = "myproject")
-
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-http_archive(
-    name = "testrunner-v1",
-    urls = ["https://github.com/testrunner/v1.zip"],
-    sha256 = "...",
-)
-http_archive(
-    name = "testrunner-v2",
-    urls = ["https://github.com/testrunner/v2.zip"],
-    sha256 = "...",
-)
-local_repository(
-    name = "A",
-    path = "../A",
-    repo_mapping = {"@testrunner": "@testrunner-v1"},
-)
-local_repository(
-    name = "B",
-    path = "../B",
-    repo_mapping = {"@testrunner": "@testrunner-v2"},
-)
-```
-
-You can also use this mechanism to join diamonds. For example, if `A` and `B`
-have the same dependency but call it by different names, join those dependencies
-in `myproject/WORKSPACE`.
-
-## Overriding repositories from the command line
-
-To override a declared repository with a local repository from the command line,
-use the
-[`--override_repository`](/reference/command-line-reference#flag--override_repository)
-flag. Using this flag changes the contents of external repositories without
-changing your source code.
-
-For example, to override `@foo` to the local directory `/path/to/local/foo`,
-pass the `--override_repository=foo=/path/to/local/foo` flag.
-
-Use cases include:
-
-* Debugging issues. For example, to override an `http_archive` repository to a
-  local directory where you can make changes more easily.
-* Vendoring. If you are in an environment where you cannot make network calls,
-  override the network-based repository rules to point to local directories
-  instead.
-
-Note: With [Bzlmod](/external/overview#bzlmod), remember to use canonical repo
-names here. Alternatively, use the
-[`--override_module`](/reference/command-line-reference#flag--override_module)
-flag to override a module to a local directory, similar to the
-[`local_path_override`](/rules/lib/globals/module#local_path_override) directive in
-`MODULE.bazel`.
-
-## Using proxies
-
-Bazel picks up proxy addresses from the `HTTPS_PROXY` and `HTTP_PROXY`
-environment variables (if set) and uses them to download `HTTP` and `HTTPS`
-files.
-
-## Support for IPv6
-
-On IPv6-only machines, Bazel can download dependencies with no changes. However,
-on dual-stack IPv4/IPv6 machines Bazel follows the same convention as Java,
-preferring IPv4 if enabled. In some situations, for example when the IPv4
-network cannot resolve/reach external addresses, this can cause `Network
-unreachable` exceptions and build failures. In these cases, you can override
-Bazel's behavior to prefer IPv6 by using the
-[`java.net.preferIPv6Addresses=true` system
-property](https://docs.oracle.com/javase/8/docs/api/java/net/doc-files/net-properties.html).
-Specifically:
-
-* Use the `--host_jvm_args=-Djava.net.preferIPv6Addresses=true` [startup
-  option](/docs/user-manual#startup-options), for example by adding the
-  following line in your [`.bazelrc` file](/run/bazelrc):
-
-  `startup --host_jvm_args=-Djava.net.preferIPv6Addresses=true`
-
-* When running Java build targets that need to connect to the internet (such
-  as for integration tests), use the
-  `--jvmopt=-Djava.net.preferIPv6Addresses=true` [tool
-  flag](/docs/user-manual#jvmopt). For example, include in your [`.bazelrc`
-  file](/run/bazelrc):
-
-  `build --jvmopt=-Djava.net.preferIPv6Addresses=true`
-
-* If you are using [`rules_jvm_external`](https://github.com/bazelbuild/rules_jvm_external)
-  for dependency version resolution, also add
-  `-Djava.net.preferIPv6Addresses=true` to the `COURSIER_OPTS` environment
-  variable to [provide JVM options for
-  Coursier](https://github.com/bazelbuild/rules_jvm_external#provide-jvm-options-for-coursier-with-coursier_opts).
-
-## Offline builds
-
-Sometimes you may wish to run a build offline, such as when traveling on an
-airplane. For such simple use cases, prefetch the needed repositories with
-`bazel fetch` or `bazel sync`. To disable fetching further repositories during
-the build, use the option `--nofetch`.
-
-For true offline builds, where a different entity supplies all needed files,
-Bazel supports the option `--distdir`. This flag tells Bazel to look first into
-the directories specified by that option when a repository rule asks Bazel to
-fetch a file with [`ctx.download`](/rules/lib/builtins/repository_ctx#download) or
-[`ctx.download_and_extract`](/rules/lib/builtins/repository_ctx#download_and_extract).
-If a hash of the needed file is provided, Bazel looks for a file matching the
-basename of the first URL and uses the local copy if the hash matches.
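-
-For example, the following sketch shows both approaches (the directory path is
-illustrative):
-
-```sh
-# Simple case: prefetch everything while online, then forbid further fetches.
-bazel fetch //...
-bazel build --nofetch //...
-
-# True offline case: let repository rules read pre-downloaded files from a
-# local directory instead of the network.
-bazel build --distdir=/path/to/distdir //...
-```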
-
-Bazel itself uses this technique to bootstrap offline from the [distribution
-artifact](https://github.com/bazelbuild/bazel-website/blob/master/designs/_posts/2016-10-11-distribution-artifact.md).
-It does so by [collecting all the needed external
-dependencies](https://github.com/bazelbuild/bazel/blob/5cfa0303d6ac3b5bd031ff60272ce80a704af8c2/WORKSPACE#L116)
-in an internal
-[`distdir_tar`](https://github.com/bazelbuild/bazel/blob/5cfa0303d6ac3b5bd031ff60272ce80a704af8c2/distdir.bzl#L44).
-
-Bazel allows execution of arbitrary commands in repository rules without knowing
-if they call out to the network, and so cannot enforce fully offline builds. To
-test if a build works correctly offline, manually block off the network (as
-Bazel does in its [bootstrap
-test](https://cs.opensource.google/bazel/bazel/+/master:src/test/shell/bazel/BUILD;l=1073;drc=88c426e73cc0eb0a41c0d7995e36acd94e7c9a48)).
diff --git a/8.1.1/external/lockfile.mdx b/8.1.1/external/lockfile.mdx
deleted file mode 100644
index f2a75b2..0000000
--- a/8.1.1/external/lockfile.mdx
+++ /dev/null
@@ -1,277 +0,0 @@
----
-keywords: product:Bazel,lockfile,Bzlmod
-title: 'Bazel Lockfile'
----
-
-
-
-The lockfile feature in Bazel enables the recording of the specific versions of
-the software libraries or packages required by a project. It achieves this by
-storing the result of module resolution and extension evaluation. The lockfile
-promotes reproducible builds, ensuring consistent development environments.
-Additionally, it enhances build efficiency by allowing Bazel to skip the parts
-of the resolution process that are unaffected by changes in project
-dependencies. Furthermore, the lockfile improves stability by preventing
-unexpected updates or breaking changes in external libraries, thereby reducing
-the risk of introducing bugs.
-
-## Lockfile Generation
-
-The lockfile is generated under the workspace root with the name
-`MODULE.bazel.lock`. It is created or updated during the build process,
-specifically after module resolution and extension evaluation. Importantly, it
-only includes dependencies that are included in the current invocation of the
-build.
-
-When changes occur in the project that affect its dependencies, the lockfile is
-automatically updated to reflect the new state. This ensures that the lockfile
-remains focused on the specific set of dependencies required for the current
-build, providing an accurate representation of the project's resolved
-dependencies.
-
-## Lockfile Usage
-
-The lockfile can be controlled by the flag
-[`--lockfile_mode`](/reference/command-line-reference#flag--lockfile_mode) to
-customize the behavior of Bazel when the project state differs from the
-lockfile. The available modes are:

-* `update` (Default): Use the information that is present in the lockfile to
-  skip downloads of known registry files and to avoid re-evaluating extensions
-  whose results are still up-to-date. If information is missing, it will
-  be added to the lockfile. In this mode, Bazel also avoids refreshing
-  mutable information, such as yanked versions, for dependencies that haven't
-  changed.
-* `refresh`: Like `update`, but mutable information is always refreshed when
-  switching to this mode and roughly every hour while in this mode.
-* `error`: Like `update`, but if any information is missing or out-of-date,
-  Bazel will fail with an error. This mode never changes the lockfile or
-  performs network requests during resolution. Module extensions that marked
-  themselves as `reproducible` may still perform network requests, but are
-  expected to always produce the same result.
-* `off`: The lockfile is neither checked nor updated.
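-
-For example, a CI setup might verify the checked-in lockfile without ever
-modifying it, while developers refresh it locally (a minimal sketch; the target
-pattern is illustrative):
-
-```bash
-# CI: fail if the lockfile is missing information or out-of-date.
-bazel build --lockfile_mode=error //...
-
-# Locally: re-run resolution and refresh mutable information such as
-# yanked versions.
-bazel mod deps --lockfile_mode=refresh
-```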
-
-## Lockfile Benefits
-
-The lockfile offers several benefits and can be utilized in various ways:
-
-- **Reproducible builds.** By capturing the specific versions of the software
-  libraries it depends on, the lockfile ensures that builds are reproducible
-  across different environments and over time. Developers can rely on
-  consistent and predictable results when building their projects.
-
-- **Fast incremental resolutions.** The lockfile enables Bazel to avoid
-  downloading registry files that were already used in a previous build.
-  This significantly improves build efficiency, especially in scenarios where
-  resolution can be time-consuming.
-
-- **Stability and risk reduction.** The lockfile helps maintain stability by
-  preventing unexpected updates or breaking changes in external libraries. By
-  locking the dependencies to specific versions, the risk of introducing bugs
-  due to incompatible or untested updates is reduced.
-
-## Lockfile Contents
-
-The lockfile contains all the necessary information to determine whether the
-project state has changed. It also includes the result of building the project
-in the current state. The lockfile consists of two main parts:
-
-1. Hashes of all remote files that are inputs to module resolution.
-2. For each module extension, the lockfile includes inputs that affect it,
-   represented by `bzlTransitiveDigest`, `usagesDigest` and other fields, as
-   well as the output of running that extension, referred to as
-   `generatedRepoSpecs`.
-
-Here is an example that demonstrates the structure of the lockfile, along with
-explanations for each section:
-
-```json
-{
-  "lockFileVersion": 10,
-  "registryFileHashes": {
-    "https://bcr.bazel.build/bazel_registry.json": "8a28e4af...5d5b3497",
-    "https://bcr.bazel.build/modules/foo/1.0/MODULE.bazel": "7cd0312e...5c96ace2",
-    "https://bcr.bazel.build/modules/foo/2.0/MODULE.bazel": "70390338...9fc57589",
-    "https://bcr.bazel.build/modules/foo/2.0/source.json": "7e3a9adf...170d94ad",
-    "https://registry.mycorp.com/modules/foo/1.0/MODULE.bazel": "not found",
-    ...
-  },
-  "selectedYankedVersions": {
-    "foo@2.0": "Yanked for demo purposes"
-  },
-  "moduleExtensions": {
-    "//:extension.bzl%lockfile_ext": {
-      "general": {
-        "bzlTransitiveDigest": "oWDzxG/aLnyY6Ubrfy....+Jp6maQvEPxn0pBM=",
-        "usagesDigest": "aLmqbvowmHkkBPve05yyDNGN7oh7QE9kBADr3QIZTZs=",
-        ...,
-        "generatedRepoSpecs": {
-          "hello": {
-            "bzlFile": "@@//:extension.bzl",
-            ...
-          }
-        }
-      }
-    },
-    "//:extension.bzl%lockfile_ext2": {
-      "os:macos": {
-        "bzlTransitiveDigest": "oWDzxG/aLnyY6Ubrfy....+Jp6maQvEPxn0pBM=",
-        "usagesDigest": "aLmqbvowmHkkBPve05y....yDNGN7oh7r3QIZTZs=",
-        ...,
-        "generatedRepoSpecs": {
-          "hello": {
-            "bzlFile": "@@//:extension.bzl",
-            ...
-          }
-        }
-      },
-      "os:linux": {
-        "bzlTransitiveDigest": "eWDzxG/aLsyY3Ubrto....+Jp4maQvEPxn0pLK=",
-        "usagesDigest": "aLmqbvowmHkkBPve05y....yDNGN7oh7r3QIZTZs=",
-        ...,
-        "generatedRepoSpecs": {
-          "hello": {
-            "bzlFile": "@@//:extension.bzl",
-            ...
-          }
-        }
-      }
-    }
-  }
-}
-```
-
-### Registry File Hashes
-
-The `registryFileHashes` section contains the hashes of all files from
-remote registries accessed during module resolution. Since the resolution
-algorithm is fully deterministic when given the same inputs and all remote
-inputs are hashed, this ensures a fully reproducible resolution result while
-avoiding excessive duplication of remote information in the lockfile. Note that
-this also requires recording when a particular registry didn't contain a certain
-module, but a registry with lower precedence did (see the "not found" entry in
-the example). This inherently mutable information can be updated via
-`bazel mod deps --lockfile_mode=refresh`.
-
-Bazel uses the hashes from the lockfile to look up registry files in the
-repository cache before downloading them, which speeds up subsequent
-resolutions.
-
-### Selected Yanked Versions
-
-The `selectedYankedVersions` section contains the yanked versions of modules
-that were selected by module resolution. Since this usually results in an error
-when trying to build, this section is only non-empty when yanked versions are
-explicitly allowed via `--allow_yanked_versions` or
-`BZLMOD_ALLOW_YANKED_VERSIONS`.
-
-This field is needed since, compared to module files, yanked version information
-is inherently mutable and thus can't be referenced by a hash. This information
-can be updated via `bazel mod deps --lockfile_mode=refresh`.
-
-### Module Extensions
-
-The `moduleExtensions` section is a map that includes only the extensions used
-in the current invocation or in previous ones, while excluding any extensions
-that are no longer utilized. In other words, if an extension is not being used
-anymore across the dependency graph, it is removed from the `moduleExtensions`
-map.
-
-If an extension is independent of the operating system or architecture type,
-this section features only a single "general" entry. Otherwise, multiple
-entries are included, named after the OS, architecture, or both, with each
-corresponding to the result of evaluating the extension on those specifics.
-
-Each entry in the extension map corresponds to a used extension and is
-identified by its containing file and name. The corresponding value for each
-entry contains the relevant information associated with that extension:
-
-1. The `bzlTransitiveDigest` is the digest of the extension implementation
-   and the .bzl files transitively loaded by it.
-2. The `usagesDigest` is the digest of the _usages_ of the extension in the
-   dependency graph, which includes all tags.
-3. Further unspecified fields that track other inputs to the extension,
-   such as contents of files or directories it reads or environment
-   variables it uses.
-4. The `generatedRepoSpecs` encode the repositories created by the
-   extension with the current input.
-5. The optional `moduleExtensionMetadata` field contains metadata provided by
-   the extension such as whether certain repositories it created should be
-   imported via `use_repo` by the root module. This information powers the
-   `bazel mod tidy` command.
-
-Module extensions can opt out of being included in the lockfile by returning
-metadata with `reproducible = True`. By doing so, they promise that they will
-always create the same repositories when given the same inputs.
-
-## Best Practices
-
-To maximize the benefits of the lockfile feature, consider the following best
-practices:
-
-* Regularly update the lockfile to reflect changes in project dependencies or
-  configuration. This ensures that subsequent builds are based on the most
-  up-to-date and accurate set of dependencies. To lock down all extensions
-  at once, run `bazel mod deps --lockfile_mode=update`.
-
-* Include the lockfile in version control to facilitate collaboration and
-  ensure that all team members have access to the same lockfile, promoting
-  consistent development environments across the project. 
- -* Use [`bazelisk`](/install/bazelisk) to run Bazel, and include a - `.bazelversion` file in version control that specifies the Bazel version - corresponding to the lockfile. Because Bazel itself is a dependency of - your build, the lockfile is specific to the Bazel version, and will - change even between [backwards compatible](/release/backward-compatibility) - Bazel releases. Using `bazelisk` ensures that all developers are using - a Bazel version that matches the lockfile. - -By following these best practices, you can effectively utilize the lockfile -feature in Bazel, leading to more efficient, reliable, and collaborative -software development workflows. - -## Merge Conflicts - -The lockfile format is designed to minimize merge conflicts, but they can still -happen. - -### Automatic Resolution - -Bazel provides a custom -[git merge driver](https://git-scm.com/docs/gitattributes#_defining_a_custom_merge_driver) -to help resolve these conflicts automatically. - -Set up the driver by adding this line to a `.gitattributes` file in the root of -your git repository: - -```gitattributes -# A custom merge driver for the Bazel lockfile. -# https://bazel.build/external/lockfile#automatic-resolution -MODULE.bazel.lock merge=bazel-lockfile-merge -``` - -Then each developer who wants to use the driver has to register it once by -following these steps: - -1. Install [jq](https://jqlang.github.io/jq/download/) (1.5 or higher). -2. Run the following commands: - -```bash -jq_script=$(curl https://raw.githubusercontent.com/bazelbuild/bazel/master/scripts/bazel-lockfile-merge.jq) -printf '%s\n' "${jq_script}" | less # to optionally inspect the jq script -git config --global merge.bazel-lockfile-merge.name "Merge driver for the Bazel lockfile (MODULE.bazel.lock)" -git config --global merge.bazel-lockfile-merge.driver "jq -s '${jq_script}' -- %O %A %B > %A.jq_tmp && mv %A.jq_tmp %A" -``` - -### Manual Resolution - -Simple merge conflicts in the `registryFileHashes` and `selectedYankedVersions` -fields can be safely resolved by keeping all the entries from both sides of the -conflict. - -Other types of merge conflicts should not be resolved manually. Instead: - -1. Restore the previous state of the lockfile - via `git reset MODULE.bazel.lock && git checkout MODULE.bazel.lock`. -2. Resolve any conflicts in the `MODULE.bazel` file. -3. Run `bazel mod deps` to update the lockfile. diff --git a/8.1.1/external/module.mdx b/8.1.1/external/module.mdx deleted file mode 100644 index 6a9cf13..0000000 --- a/8.1.1/external/module.mdx +++ /dev/null @@ -1,223 +0,0 @@ ---- -title: 'Bazel modules' ---- - - - -A Bazel **module** is a Bazel project that can have multiple versions, each of -which publishes metadata about other modules that it depends on. This is -analogous to familiar concepts in other dependency management systems, such as a -Maven *artifact*, an npm *package*, a Go *module*, or a Cargo *crate*. - -A module must have a `MODULE.bazel` file at its repo root. This file is the -module's manifest, declaring its name, version, list of direct dependencies, and -other information. For a basic example: - -```python -module(name = "my-module", version = "1.0") - -bazel_dep(name = "rules_cc", version = "0.0.1") -bazel_dep(name = "protobuf", version = "3.19.0") -``` - -See the [full list](/rules/lib/globals/module) of directives available in -`MODULE.bazel` files. 
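-
-For example, a module can expose a dependency to its own code under a different
-apparent repo name via the `repo_name` attribute of `bazel_dep`, discussed
-further below (a minimal sketch; the chosen name is illustrative):
-
-```python
-bazel_dep(name = "protobuf", version = "3.19.0", repo_name = "com_google_protobuf")
-```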
-
-To perform module resolution, Bazel starts by reading the root module's
-`MODULE.bazel` file, and then repeatedly requests any dependency's
-`MODULE.bazel` file from a [Bazel registry](/external/registry) until it
-discovers the entire dependency graph.
-
-By default, Bazel then [selects](#version-selection) one version of each module
-to use. Bazel represents each module with a repo, and consults the registry
-again to learn how to define each of the repos.
-
-## Version format
-
-Bazel has a diverse ecosystem and projects use various versioning schemes. The
-most popular by far is [SemVer](https://semver.org), but there are
-also prominent projects using different schemes such as
-[Abseil](https://github.com/abseil/abseil-cpp/releases), whose
-versions are date-based (for example, `20210324.2`).
-
-For this reason, Bzlmod adopts a more relaxed version of the SemVer spec. The
-differences include:
-
-* SemVer prescribes that the "release" part of the version must consist of 3
-  segments: `MAJOR.MINOR.PATCH`. In Bazel, this requirement is loosened so
-  that any number of segments is allowed.
-* In SemVer, each of the segments in the "release" part must be digits only.
-  In Bazel, this is loosened to allow letters too, and the comparison
-  semantics match the "identifiers" in the "prerelease" part.
-* Additionally, the semantics of major, minor, and patch version increases are
-  not enforced. However, see [compatibility level](#compatibility_level) for
-  details on how we denote backwards compatibility.
-
-Any valid SemVer version is a valid Bazel module version. Additionally, two
-SemVer versions `a` and `b` compare `a < b` if and only if the same holds when
-they're compared as Bazel module versions.
-
-## Version selection
-
-Consider the diamond dependency problem, a staple in the versioned dependency
-management space. Suppose you have the dependency graph:
-
-```
-       A 1.0
-      /     \
-   B 1.0   C 1.1
-     |       |
-   D 1.0   D 1.1
-```
-
-Which version of `D` should be used? To resolve this question, Bzlmod uses the
-[Minimal Version Selection](https://research.swtch.com/vgo-mvs)
-(MVS) algorithm introduced in the Go module system. MVS assumes that all new
-versions of a module are backwards compatible, and so picks the highest version
-specified by any dependent (`D 1.1` in our example). It's called "minimal"
-because `D 1.1` is the earliest version that could satisfy our requirements —
-even if `D 1.2` or newer exists, we don't select them. Using MVS creates a
-version selection process that is *high-fidelity* and *reproducible*.
-
-### Yanked versions
-
-The registry can declare certain versions as *yanked* if they should be avoided
-(such as for security vulnerabilities). Bazel throws an error when selecting a
-yanked version of a module. To fix this error, either upgrade to a newer,
-non-yanked version, or use the
-[`--allow_yanked_versions`](/reference/command-line-reference#flag--allow_yanked_versions)
-flag to explicitly allow the yanked version.
-
-## Compatibility level
-
-In Go, MVS's assumption about backwards compatibility works because it treats
-backwards incompatible versions of a module as a separate module. In terms of
-SemVer, that means `A 1.x` and `A 2.x` are considered distinct modules, and can
-coexist in the resolved dependency graph. This is, in turn, made possible by
-encoding the major version in the package path in Go, so there aren't any
-compile-time or linking-time conflicts. 
- -Bazel, however, cannot provide such guarantees, so it needs the "major version" -number in order to detect backwards incompatible versions. This number is called -the *compatibility level*, and is specified by each module version in its -`module()` directive. With this information, Bazel can throw an error when it -detects that versions of the same module with different compatibility levels -exist in the resolved dependency graph. - -## Overrides - -Specify overrides in the `MODULE.bazel` file to alter the behavior of Bazel -module resolution. Only the root module's overrides take effect — if a module is -used as a dependency, its overrides are ignored. - -Each override is specified for a certain module name, affecting all of its -versions in the dependency graph. Although only the root module's overrides take -effect, they can be for transitive dependencies that the root module does not -directly depend on. - -### Single-version override - -The [`single_version_override`](/rules/lib/globals/module#single_version_override) -serves multiple purposes: - -* With the `version` attribute, you can pin a dependency to a specific - version, regardless of which versions of the dependency are requested in the - dependency graph. -* With the `registry` attribute, you can force this dependency to come from a - specific registry, instead of following the normal [registry - selection](/external/registry#selecting_registries) process. -* With the `patch*` attributes, you can specify a set of patches to apply to - the downloaded module. - -These attributes are all optional and can be mixed and matched with each other. - -### Multiple-version override - -A [`multiple_version_override`](/rules/lib/globals/module#multiple_version_override) -can be specified to allow multiple versions of the same module to coexist in the -resolved dependency graph. - -You can specify an explicit list of allowed versions for the module, which must -all be present in the dependency graph before resolution — there must exist -*some* transitive dependency depending on each allowed version. After -resolution, only the allowed versions of the module remain, while Bazel upgrades -other versions of the module to the nearest higher allowed version at the same -compatibility level. If no higher allowed version at the same compatibility -level exists, Bazel throws an error. - -For example, if versions `1.1`, `1.3`, `1.5`, `1.7`, and `2.0` exist in the -dependency graph before resolution and the major version is the compatibility -level: - -* A multiple-version override allowing `1.3`, `1.7`, and `2.0` results in - `1.1` being upgraded to `1.3`, `1.5` being upgraded to `1.7`, and other - versions remaining the same. -* A multiple-version override allowing `1.5` and `2.0` results in an error, as - `1.7` has no higher version at the same compatibility level to upgrade to. -* A multiple-version override allowing `1.9` and `2.0` results in an error, as - `1.9` is not present in the dependency graph before resolution. - -Additionally, users can also override the registry using the `registry` -attribute, similarly to single-version overrides. - -### Non-registry overrides - -Non-registry overrides completely remove a module from version resolution. Bazel -does not request these `MODULE.bazel` files from a registry, but instead from -the repo itself. 
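-
-For example, the root module can replace a dependency with a patched fork
-fetched directly from Git (a minimal sketch using `git_override`; the remote
-and commit are illustrative):
-
-```python
-git_override(
-    module_name = "rules_cc",
-    remote = "https://github.com/my-org/rules_cc.git",  # hypothetical fork
-    commit = "0123456789abcdef0123456789abcdef01234567",  # hypothetical commit
-)
-```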
-
-Bazel supports the following non-registry overrides:
-
-* [`archive_override`](/rules/lib/globals/module#archive_override)
-* [`git_override`](/rules/lib/globals/module#git_override)
-* [`local_path_override`](/rules/lib/globals/module#local_path_override)
-
-## Define repos that don't represent Bazel modules
-
-With `bazel_dep`, you can define repos that represent other Bazel modules.
-Sometimes there is a need to define a repo that does _not_ represent a Bazel
-module; for example, one that contains a plain JSON file to be read as data.
-
-In this case, you could use the [`use_repo_rule`
-directive](/rules/lib/globals/module#use_repo_rule) to directly define a repo
-by invoking a repo rule. This repo will only be visible to the module it's
-defined in.
-
-Under the hood, this is implemented using the same mechanism as [module
-extensions](/external/extension), which lets you define repos with more
-flexibility.
-
-## Repository names and strict deps
-
-The [apparent name](/external/overview#apparent-repo-name) of a repo backing a
-module to its direct dependents defaults to its module name, unless the
-`repo_name` attribute of the [`bazel_dep`](/rules/lib/globals/module#bazel_dep)
-directive says otherwise. Note that this means a module can only find its direct
-dependencies. This helps prevent accidental breakages due to changes in
-transitive dependencies.
-
-The [canonical name](/external/overview#canonical-repo-name) of a repo backing a
-module is either `<module_name>+<version>` (for example, `bazel_skylib+1.0.3`)
-or `<module_name>+` (for example, `bazel_features+`), depending on whether there
-are multiple versions of the module in the entire dependency graph (see
-[`multiple_version_override`](/rules/lib/globals/module#multiple_version_override)).
-Note that **the canonical name format** is not an API you should depend on and
-**is subject to change at any time**. Instead of hard-coding the canonical name,
-use a supported way to get it directly from Bazel:
-
-* In BUILD and `.bzl` files, use
-  [`Label.repo_name`](/rules/lib/builtins/Label#repo_name) on a `Label` instance
-  constructed from a label string given by the apparent name of the repo, e.g.,
-  `Label("@bazel_skylib").repo_name`.
-* When looking up runfiles, use
-  [`$(rlocationpath ...)`](https://bazel.build/reference/be/make-variables#predefined_label_variables)
-  or one of the runfiles libraries in
-  `@bazel_tools//tools/{bash,cpp,java}/runfiles` or, for a ruleset `rules_foo`,
-  in `@rules_foo//foo/runfiles`.
-* When interacting with Bazel from an external tool such as an IDE or language
-  server, use the `bazel mod dump_repo_mapping` command to get the mapping from
-  apparent names to canonical names for a given set of repositories.
-
-[Module extensions](/external/extension) can also introduce additional repos
-into the visible scope of a module.
diff --git a/8.1.1/help.mdx b/8.1.1/help.mdx
deleted file mode 100644
index b2976e6..0000000
--- a/8.1.1/help.mdx
+++ /dev/null
@@ -1,53 +0,0 @@
----
-title: 'Getting Help'
----
-
-
-
-This page lists Bazel resources beyond the documentation and covers how to get
-support from the Bazel team and community. 
- -## Search existing material - -In addition to the documentation, you can find helpful information by searching: - -* [Bazel user group](https://groups.google.com/g/bazel-discuss) -* [Bazel GitHub Discussions](https://github.com/bazelbuild/bazel/discussions) -* [Bazel blog](https://blog.bazel.build/) -* [Stack Overflow](https://stackoverflow.com/questions/tagged/bazel) -* [`awesome-bazel` resources](https://github.com/jin/awesome-bazel) - -## Watch videos - -There are recordings of Bazel talks at various conferences, such as: - -* Bazel’s annual conference, BazelCon: - * [BazelCon 2023](https://www.youtube.com/playlist?list=PLxNYxgaZ8Rsefrwb_ySGRi_bvQejpO_Tj) - * [BazelCon 2022](https://youtube.com/playlist?list=PLxNYxgaZ8RsdH4GCIZ69dzxQCOPyuNlpF) - * [BazelCon 2021](https://www.youtube.com/playlist?list=PLxNYxgaZ8Rsc3auKhtfIB4qXAYf7whEux) - * [BazelCon 2020](https://www.youtube.com/playlist?list=PLxNYxgaZ8RseRybXNbopHRv6-wGmFr04n) - * [BazelCon 2019](https://youtu.be/eymphDN7No4?t=PLxNYxgaZ8Rsf-7g43Z8LyXct9ax6egdSj) - * [BazelCon 2018](https://youtu.be/DVYRg6b2UBo?t=PLxNYxgaZ8Rsd3Nmvl1W1B4I6nK1674ezp) - * [BazelCon 2017](https://youtu.be/3eFllvz8_0k?t=PLxNYxgaZ8RseY0KmkXQSt0StE71E7yizG) -* Bazel day on [Google Open Source Live](https://opensourcelive.withgoogle.com/events/bazel) - - -## Ask the Bazel community - -If there are no existing answers, you can ask the community by: - -* Emailing the [Bazel user group](https://groups.google.com/g/bazel-discuss) -* Starting a discussion on [GitHub](https://github.com/bazelbuild/bazel/discussions) -* Asking a question on [Stack Overflow](https://stackoverflow.com/questions/tagged/bazel) -* Chatting with other Bazel contributors on [Slack](https://slack.bazel.build/) -* Consulting a [Bazel community expert](/community/experts) - -## Understand Bazel's support level - -Please read the [release page](/release) to understand Bazel's release model and -what level of support Bazel provides. - -## File a bug - -If you encounter a bug or want to request a feature, file a [GitHub -Issue](https://github.com/bazelbuild/bazel/issues). diff --git a/8.1.1/install/bazelisk.mdx b/8.1.1/install/bazelisk.mdx deleted file mode 100644 index a3189cb..0000000 --- a/8.1.1/install/bazelisk.mdx +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: 'Installing / Updating Bazel using Bazelisk' ---- - - - -## Installing Bazel - -[Bazelisk](https://github.com/bazelbuild/bazelisk) is the -recommended way to install Bazel on Ubuntu, Windows, and macOS. It automatically -downloads and installs the appropriate version of Bazel. Use Bazelisk if you -need to switch between different versions of Bazel depending on the current -working directory, or to always keep Bazel updated to the latest release. - -For more details, see -[the official README](https://github.com/bazelbuild/bazelisk/blob/master/README.md). - -## Updating Bazel - -Bazel has a [backward compatibility policy](/release/backward-compatibility) -(see [guidance for rolling out incompatible -changes](/contribute/breaking-changes) if you -are the author of one). That page summarizes best practices on how to test and -migrate your project with upcoming incompatible changes and how to provide -feedback to the incompatible change authors. - -### Managing Bazel versions with Bazelisk - -[Bazelisk](https://github.com/bazelbuild/bazelisk) helps you manage -Bazel versions. - -Bazelisk can: - -* Auto-update Bazel to the latest LTS or rolling release. -* Build the project with a Bazel version specified in the .bazelversion - file. 
Check that file into your version control to ensure reproducibility
-  of your builds.
-* Help migrate your project for incompatible changes (see above).
-* Easily try release candidates.
-
-### Recommended migration process
-
-Within minor updates to any LTS release, any
-project can be prepared for the next release without breaking
-compatibility with the current release. However, there may be
-backward-incompatible changes between major LTS versions.
-
-Follow this process to migrate from one major version to another:
-
-1. Read the release notes to get advice on how to migrate to the next version.
-1. Major incompatible changes should have an associated `--incompatible_*` flag
-   and a corresponding GitHub issue:
-    * Migration guidance is available in the associated GitHub issue.
-    * Tooling is available to help with the migration for some incompatible
-      changes. For example, [buildifier](https://github.com/bazelbuild/buildtools/releases).
-    * Report migration problems by commenting on the associated GitHub issue.
-
-After migration, you can continue to build your projects without worrying about
-backward-compatibility until the next major release.
diff --git a/8.1.1/install/compile-source.mdx b/8.1.1/install/compile-source.mdx
deleted file mode 100644
index a228b22..0000000
--- a/8.1.1/install/compile-source.mdx
+++ /dev/null
@@ -1,299 +0,0 @@
----
-title: 'Compiling Bazel from Source'
---

-
-
-This page describes how to install Bazel from source and provides
-troubleshooting tips for common issues.
-
-To build Bazel from source, you can do one of the following:
-
-* Build it [using an existing Bazel binary](#build-bazel-using-bazel)
-
-* Build it [without an existing Bazel binary](#bootstrap-bazel), which is known
-  as _bootstrapping_.
-
-## Build Bazel using Bazel
-
-### Summary
-
-1. Get the latest Bazel release from the
-   [GitHub release page](https://github.com/bazelbuild/bazel/releases) or with
-   [Bazelisk](https://github.com/bazelbuild/bazelisk).
-
-2. [Download Bazel's sources from GitHub](https://github.com/bazelbuild/bazel/archive/master.zip)
-   and extract them somewhere.
-   Alternatively, you can `git clone` the source tree from https://github.com/bazelbuild/bazel
-
-3. Install the same prerequisites as for bootstrapping (see
-   [for Unix-like systems](#bootstrap-unix-prereq) or
-   [for Windows](#bootstrap-windows-prereq)).
-
-4. Build a development build of Bazel using Bazel:
-   `bazel build //src:bazel-dev` (or `bazel build //src:bazel-dev.exe` on
-   Windows)
-
-5. The resulting binary is at `bazel-bin/src/bazel-dev`
-   (or `bazel-bin\src\bazel-dev.exe` on Windows). You can copy it wherever you
-   like and use it immediately without further installation.
-
-Detailed instructions follow below.
-
-### Step 1: Get the latest Bazel release
-
-**Goal**: Install or download a release version of Bazel. Make sure you can run
-it by typing `bazel` in a terminal.
-
-**Reason**: To build Bazel from a GitHub source tree, you need a pre-existing
-Bazel binary. You can install one from a package manager or download one from
-GitHub. See [Installing Bazel](/install). (Or you can [build from
-scratch (bootstrap)](#bootstrap-bazel).)
-
-**Troubleshooting**:
-
-* If you cannot run Bazel by typing `bazel` in a terminal:
-
-    * Maybe your Bazel binary's directory is not on the PATH.
-
-      This is not a big problem. Instead of typing `bazel`, you will need to
-      type the full path.
-
-    * Maybe the Bazel binary itself is not called `bazel` (on Unixes) or
-      `bazel.exe` (on Windows).
-
-      This is not a big problem. You can either rename the binary, or type the
-      binary's name instead of `bazel`.
-
-    * Maybe the binary is not executable (on Unixes).
-
-      You must make the binary executable by running `chmod +x /path/to/bazel`.
-
-### Step 2: Download Bazel's sources from GitHub
-
-If you are familiar with Git, then just `git clone https://github.com/bazelbuild/bazel`
-
-Otherwise:
-
-1. Download the
-   [latest sources as a zip file](https://github.com/bazelbuild/bazel/archive/master.zip).
-
-2. Extract the contents somewhere.
-
-   For example, create a `bazel-src` directory under your home directory and
-   extract there.
-
-### Step 3: Install prerequisites
-
-Install the same prerequisites as for bootstrapping (see below) -- JDK, C++
-compiler, MSYS2 (if you are building on Windows), etc.
-
-### Step 4a: Build Bazel on Ubuntu Linux, macOS, and other Unix-like systems
-
-For instructions for Windows, see [Build Bazel on Windows](#build-bazel-on-windows).
-
-**Goal**: Run Bazel to build a custom Bazel binary (`bazel-bin/src/bazel-dev`).
-
-**Instructions**:
-
-1. Start a Bash terminal
-
-2. `cd` into the directory where you extracted (or cloned) Bazel's sources.
-
-   For example if you extracted the sources under your home directory, run:
-
-       cd ~/bazel-src
-
-3. Build Bazel from source:
-
-       bazel build //src:bazel-dev
-
-   Alternatively you can run `bazel build //src:bazel --compilation_mode=opt`
-   to yield a smaller binary, but it's slower to build.
-
-   You can build with the `--stamp --embed_label=X.Y.Z` flags to embed a Bazel
-   version for the binary so that `bazel --version` outputs the given version.
-
-4. The output will be at `bazel-bin/src/bazel-dev` (or `bazel-bin/src/bazel`).
-
-### Step 4b: Build Bazel on Windows
-
-For instructions for Unix-like systems, see
-[Ubuntu Linux, macOS, and other Unix-like systems](#build-bazel-on-unixes).
-
-**Goal**: Run Bazel to build a custom Bazel binary
-(`bazel-bin\src\bazel-dev.exe`).
-
-**Instructions**:
-
-1. Start Command Prompt (Start Menu > Run > "cmd.exe")
-
-2. `cd` into the directory where you extracted (or cloned) Bazel's sources.
-
-   For example if you extracted the sources under your home directory, run:
-
-       cd %USERPROFILE%\bazel-src
-
-3. Build Bazel from source:
-
-       bazel build //src:bazel-dev.exe
-
-   Alternatively you can run `bazel build //src:bazel.exe
-   --compilation_mode=opt` to yield a smaller binary, but it's slower to build.
-
-   You can build with the `--stamp --embed_label=X.Y.Z` flags to embed a Bazel
-   version for the binary so that `bazel --version` outputs the given version.
-
-4. The output will be at `bazel-bin\src\bazel-dev.exe` (or
-   `bazel-bin\src\bazel.exe`).
-
-### Step 5: Install the built binary
-
-Actually, there's nothing to install.
-
-The output of the previous step is a self-contained Bazel binary. You can copy
-it to any directory and use it immediately. (It's useful if that directory is on
-your PATH so that you can run "bazel" everywhere.)
-
----
-
-## Build Bazel from scratch (bootstrapping)
-
-You can also build Bazel from scratch, without using an existing Bazel binary.
-
-### Step 1: Download Bazel's sources (distribution archive)
-
-(This step is the same for all platforms.)
-
-1. Download `bazel-<version>-dist.zip` from
-   [GitHub](https://github.com/bazelbuild/bazel/releases), for example
-   `bazel-0.28.1-dist.zip`.
-
-   **Attention**:
-
-   - There is a **single, architecture-independent** distribution archive.
-     There are no architecture-specific or OS-specific distribution archives. 
-
-   - These sources are **not the same as the GitHub source tree**. You
-     have to use the distribution archive to bootstrap Bazel. You cannot
-     use a source tree cloned from GitHub. (The distribution archive contains
-     generated source files that are required for bootstrapping and are not part
-     of the normal Git source tree.)
-
-2. Unpack the distribution archive somewhere on disk.
-
-   You should verify the signature made by Bazel's
-   [release key](https://bazel.build/bazel-release.pub.gpg) 3D5919B448457EE0.
-
-### Step 2a: Bootstrap Bazel on Ubuntu Linux, macOS, and other Unix-like systems
-
-For instructions for Windows, see [Bootstrap Bazel on Windows](#bootstrap-windows).
-
-#### 2.1. Install the prerequisites
-
-* **Bash**
-
-* **zip, unzip**
-
-* **C++ build toolchain**
-
-* **JDK.** Version 21 is required.
-
-* **Python.** Versions 2 and 3 are supported; installing one of them is
-  enough.
-
-For example, on Ubuntu Linux you can install these requirements using the
-following command:
-
-```sh
-sudo apt-get install build-essential openjdk-21-jdk python zip unzip
-```
-
-#### 2.2. Bootstrap Bazel on Unix
-
-1. Open a shell or Terminal window.
-
-2. `cd` to the directory where you unpacked the distribution archive.
-
-3. Run the compilation script: `env EXTRA_BAZEL_ARGS="--tool_java_runtime_version=local_jdk" bash ./compile.sh`.
-
-The compiled output is placed into `output/bazel`. This is a self-contained
-Bazel binary, without an embedded JDK. You can copy it anywhere or use it
-in-place. For convenience, copy this binary to a directory that's on your
-`PATH` (such as `/usr/local/bin` on Linux).
-
-To build the `bazel` binary in a reproducible way, also set
-[`SOURCE_DATE_EPOCH`](https://reproducible-builds.org/specs/source-date-epoch/)
-in the "Run the compilation script" step.
-
-### Step 2b: Bootstrap Bazel on Windows
-
-For instructions for Unix-like systems, see
-[Bootstrap Bazel on Ubuntu Linux, macOS, and other Unix-like systems](#bootstrap-unix).
-
-#### 2.1. Install the prerequisites
-
-* [MSYS2 shell](https://msys2.github.io/)
-
-* **The MSYS2 packages for zip and unzip.** Run the following command in the MSYS2 shell:
-
-  ```
-  pacman -S zip unzip patch
-  ```
-
-* **The Visual C++ compiler.** Install the Visual C++ compiler either as part
-  of Visual Studio 2015 or newer, or by installing the latest [Build Tools
-  for Visual Studio 2017](https://aka.ms/BuildTools).
-
-* **JDK.** Version 21 is required.
-
-* **Python.** Versions 2 and 3 are supported; installing one of them is
-  enough. You need the Windows-native version (downloadable from
-  [https://www.python.org](https://www.python.org)). Versions installed via
-  pacman in MSYS2 will not work.
-
-#### 2.2. Bootstrap Bazel on Windows
-
-1. Open the MSYS2 shell.
-
-2. Set the following environment variables:
-   * Either `BAZEL_VS` or `BAZEL_VC` (they are *not* the same): Set to the
-     path to the Visual Studio directory (BAZEL\_VS) or to the Visual
-     C++ directory (BAZEL\_VC). Setting one of them is enough.
-   * `BAZEL_SH`: Path of the MSYS2 `bash.exe`. See the command in the
-     examples below.
-
-     Do not set this to `C:\Windows\System32\bash.exe`. (You have that file
-     if you installed Windows Subsystem for Linux.) Bazel does not support
-     this version of `bash.exe`.
-   * `PATH`: Add the Python directory.
-   * `JAVA_HOME`: Set to the JDK directory. 
-
-   **Example** (using BAZEL\_VS):
-
-       export BAZEL_VS="C:/Program Files (x86)/Microsoft Visual Studio/2017/BuildTools"
-       export BAZEL_SH="$(cygpath -m $(realpath $(which bash)))"
-       export PATH="/c/python27:$PATH"
-       export JAVA_HOME="C:/Program Files/Java/jdk-21"
-
-   or (using BAZEL\_VC):
-
-       export BAZEL_VC="C:/Program Files (x86)/Microsoft Visual Studio/2017/BuildTools/VC"
-       export BAZEL_SH="$(cygpath -m $(realpath $(which bash)))"
-       export PATH="/c/python27:$PATH"
-       export JAVA_HOME="C:/Program Files/Java/jdk-21"
-
-3. `cd` to the directory where you unpacked the distribution archive.
-
-4. Run the compilation script: `env EXTRA_BAZEL_ARGS="--tool_java_runtime_version=local_jdk" ./compile.sh`
-
-The compiled output is placed into `output/bazel.exe`. This is a self-contained
-Bazel binary, without an embedded JDK. You can copy it anywhere or use it
-in-place. For convenience, copy this binary to a directory that's on
-your `PATH`.
-
-To build the `bazel.exe` binary in a reproducible way, also set
-[`SOURCE_DATE_EPOCH`](https://reproducible-builds.org/specs/source-date-epoch/)
-in the "Run the compilation script" step.
-
-You don't need to run Bazel from the MSYS2 shell. You can run Bazel from the
-Command Prompt (`cmd.exe`) or PowerShell.
diff --git a/8.1.1/install/completion.mdx b/8.1.1/install/completion.mdx
deleted file mode 100644
index 856784c..0000000
--- a/8.1.1/install/completion.mdx
+++ /dev/null
@@ -1,129 +0,0 @@
----
-title: 'Command-Line Completion'
----
-
-
-
-You can enable command-line completion (also known as tab-completion) in Bash
-and Zsh. This lets you tab-complete command names, flag names, flag values,
-and target names.
-
-## Bash
-
-Bazel comes with a Bash completion script.
-
-If you installed Bazel:
-
-* From the APT repository, then you're done -- the Bash completion script is
-  already installed in `/etc/bash_completion.d`.
-
-* From Homebrew, then you're done -- the Bash completion script is
-  already installed in `$(brew --prefix)/etc/bash_completion.d`.
-
-* From the installer downloaded from GitHub, then:
-    1. Locate the absolute path of the completion file. The installer copied it
-       to the `bin` directory.
-
-       Example: if you ran the installer with `--user`, this will be
-       `$HOME/.bazel/bin`. If you ran the installer as root, this will be
-       `/usr/local/lib/bazel/bin`.
-    2. Do one of the following:
-        * Either copy this file to your completion directory (if you have
-          one).
-
-          Example: on Ubuntu this is the `/etc/bash_completion.d` directory.
-        * Or source the completion file from Bash's RC file.
-
-          Add a line similar to the one below to your `~/.bashrc` (on Ubuntu)
-          or `~/.bash_profile` (on macOS), using your completion file's
-          absolute path:
-
-          ```
-          source /path/to/bazel-complete.bash
-          ```
-
-* Via [bootstrapping](/install/compile-source), then:
-    1. Build the completion script:
-
-       ```
-       bazel build //scripts:bazel-complete.bash
-       ```
-    2. The completion file is built under
-       `bazel-bin/scripts/bazel-complete.bash`.
-
-       Do one of the following:
-        * Copy this file to your completion directory, if you have
-          one.
-
-          Example: on Ubuntu this is the `/etc/bash_completion.d` directory.
-        * Copy it somewhere on your local disk, such as to `$HOME`, and
-          source the completion file from Bash's RC file. 
-
-          Add a line similar to the one below to your `~/.bashrc` (on Ubuntu)
-          or `~/.bash_profile` (on macOS), using your completion file's
-          absolute path:
-
-          ```
-          source /path/to/bazel-complete.bash
-          ```
-
-## Zsh
-
-Bazel comes with a Zsh completion script.
-
-If you installed Bazel:
-
-* From the APT repository, then you're done -- the Zsh completion script is
-  already installed in `/usr/share/zsh/vendor-completions`.
-
-  > If you have a heavily customized `.zshrc` and the autocomplete
-  > does not function, try one of the following solutions:
-  >
-  > Add the following to your `.zshrc`:
-  >
-  >     ```
-  >     zstyle :compinstall filename '~/.zshrc'
-  >
-  >     autoload -Uz compinit
-  >     compinit
-  >     ```
-  >
-  > or
-  >
-  > Follow the instructions
-  > [here](https://stackoverflow.com/questions/58331977/bazel-tab-auto-complete-in-zsh-not-working)
-  >
-  > If you are using `oh-my-zsh`, you may want to install and enable
-  > the `zsh-autocomplete` plugin. If you'd prefer not to, use one of the
-  > solutions described above.
-
-* From Homebrew, then you're done -- the Zsh completion script is
-  already installed in `$(brew --prefix)/share/zsh/site-functions`.
-
-* From the installer downloaded from GitHub, then:
-    1. Locate the absolute path of the completion file. The installer copied it
-       to the `bin` directory.
-
-       Example: if you ran the installer with `--user`, this will be
-       `$HOME/.bazel/bin`. If you ran the installer as root, this will be
-       `/usr/local/lib/bazel/bin`.
-
-    2. Add this script to a directory on your `$fpath`:
-
-       ```
-       fpath[1,0]=~/.zsh/completion/
-       mkdir -p ~/.zsh/completion/
-       cp /path/from/above/step/_bazel ~/.zsh/completion
-       ```
-
-       You may have to call `rm -f ~/.zcompdump; compinit`
-       the first time to make it work.
-
-    3. Optionally, add the following to your `.zshrc`.
-
-       ```
-       # This way the completion script does not have to parse Bazel's options
-       # repeatedly. The directory in cache-path must be created manually.
-       zstyle ':completion:*' use-cache on
-       zstyle ':completion:*' cache-path ~/.zsh/cache
-       ```
diff --git a/8.1.1/install/docker-container.mdx b/8.1.1/install/docker-container.mdx
deleted file mode 100644
index 3a5d017..0000000
--- a/8.1.1/install/docker-container.mdx
+++ /dev/null
@@ -1,135 +0,0 @@
----
-title: 'Getting Started with Bazel Docker Container'
----
-
-
-
-This page provides details on the contents of the Bazel container, how to build
-the [abseil-cpp](https://github.com/abseil/abseil-cpp) project using Bazel
-inside the Bazel container, and how to build this project directly
-from the host machine using the Bazel container with directory mounting.
-
-## Build Abseil project from your host machine with directory mounting
-
-The instructions in this section allow you to build using the Bazel container
-with the sources checked out in your host environment. A container is started up
-for each build command you execute. Build results are cached in your host
-environment so they can be reused across builds.
-
-Clone the project to a directory on your host machine.
-
-```posix-terminal
-git clone --depth 1 --branch 20220623.1 https://github.com/abseil/abseil-cpp.git /src/workspace
-```
-
-Create a folder that will hold cached results shared across builds.
-
-```posix-terminal
-mkdir -p /tmp/build_output/
-```
-
-Use the Bazel container to build the project and make the build
-outputs available in the output folder on your host machine. 
-
-```posix-terminal
-docker run \
-  -e USER="$(id -u)" \
-  -u="$(id -u)" \
-  -v /src/workspace:/src/workspace \
-  -v /tmp/build_output:/tmp/build_output \
-  -w /src/workspace \
-  gcr.io/bazel-public/bazel:latest \
-  --output_user_root=/tmp/build_output \
-  build //absl/...
-```
-
-Build the project with sanitizers by adding the `--config=asan|tsan|msan` build
-flag to select AddressSanitizer (asan), ThreadSanitizer (tsan) or
-MemorySanitizer (msan) accordingly.
-
-```posix-terminal
-docker run \
-  -e USER="$(id -u)" \
-  -u="$(id -u)" \
-  -v /src/workspace:/src/workspace \
-  -v /tmp/build_output:/tmp/build_output \
-  -w /src/workspace \
-  gcr.io/bazel-public/bazel:latest \
-  --output_user_root=/tmp/build_output \
-  build --config={asan | tsan | msan} -- //absl/... -//absl/types:variant_test
-```
-
-## Build Abseil project from inside the container
-
-The instructions in this section allow you to build using the Bazel container
-with the sources inside the container. By starting a container at the beginning
-of your development workflow and making changes in the workspace within the
-container, build results will be cached.
-
-Start a shell in the Bazel container:
-
-```posix-terminal
-docker run --interactive --entrypoint=/bin/bash gcr.io/bazel-public/bazel:latest
-```
-
-Each container id is unique. In the instructions below, the container id is `5a99103747c6`.
-
-Clone the project.
-
-```posix-terminal
-ubuntu@5a99103747c6:~$ git clone --depth 1 --branch 20220623.1 https://github.com/abseil/abseil-cpp.git && cd abseil-cpp/
-```
-
-Do a regular build.
-
-```posix-terminal
-ubuntu@5a99103747c6:~/abseil-cpp$ bazel build //absl/...
-```
-
-Build the project with sanitizers by adding the `--config=asan|tsan|msan`
-build flag to select AddressSanitizer (asan), ThreadSanitizer (tsan) or
-MemorySanitizer (msan) accordingly.
-
-```posix-terminal
-ubuntu@5a99103747c6:~/abseil-cpp$ bazel build --config={asan | tsan | msan} -- //absl/... -//absl/types:variant_test
-```
-
-## Explore the Bazel container
-
-If you haven't already, start an interactive shell inside the Bazel container.
-
-```posix-terminal
-docker run -it --entrypoint=/bin/bash gcr.io/bazel-public/bazel:latest
-ubuntu@5a99103747c6:~$
-```
-
-Explore the container contents.
-
-```posix-terminal
-ubuntu@5a99103747c6:~$ gcc --version
-gcc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0
-Copyright (C) 2019 Free Software Foundation, Inc.
-This is free software; see the source for copying conditions. There is NO
-warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-
-ubuntu@5a99103747c6:~$ java -version
-openjdk version "1.8.0_362"
-OpenJDK Runtime Environment (build 1.8.0_362-8u372-ga~us1-0ubuntu1~20.04-b09)
-OpenJDK 64-Bit Server VM (build 25.362-b09, mixed mode)
-
-ubuntu@5a99103747c6:~$ python -V
-Python 3.8.10
-
-ubuntu@5a99103747c6:~$ bazel version
-WARNING: Invoking Bazel in batch mode since it is not invoked from within a workspace (below a directory having a WORKSPACE file).
-Extracting Bazel installation... 
-Build label: 6.2.1 -Build target: bazel-out/k8-opt/bin/src/main/java/com/google/devtools/build/lib/bazel/BazelServer_deploy.jar -Build time: Fri Jun 2 16:59:58 2023 (1685725198) -Build timestamp: 1685725198 -Build timestamp as int: 1685725198 -``` - -## Explore the Bazel Dockerfile - -If you want to check how the Bazel Docker image is built, you can find its Dockerfile at [bazelbuild/continuous-integration/bazel/oci](https://github.com/bazelbuild/continuous-integration/tree/master/bazel/oci). diff --git a/8.1.1/install/ide.mdx b/8.1.1/install/ide.mdx deleted file mode 100644 index f70919b..0000000 --- a/8.1.1/install/ide.mdx +++ /dev/null @@ -1,122 +0,0 @@ ---- -title: 'Integrating Bazel with IDEs' ---- - - - -This page covers how to integrate Bazel with IDEs, such as IntelliJ, Android -Studio, and CLion (or build your own IDE plugin). It also includes links to -installation and plugin details. - -IDEs integrate with Bazel in a variety of ways, from features that allow Bazel -executions from within the IDE, to awareness of Bazel structures such as syntax -highlighting of the `BUILD` files. - -If you are interested in developing an editor or IDE plugin for Bazel, please -join the `#ide` channel on the [Bazel Slack](https://slack.bazel.build) or start -a discussion on [GitHub](https://github.com/bazelbuild/bazel/discussions). - -## IDEs and editors - -### IntelliJ, Android Studio, and CLion - -[Official plugin](http://ij.bazel.build) for IntelliJ, Android Studio, and -CLion. The plugin is [open source](https://github.com/bazelbuild/intellij). - -This is the open source version of the plugin used internally at Google. - -Features: - -* Interop with language-specific plugins. Supported languages include Java, - Scala, and Python. -* Import `BUILD` files into the IDE with semantic awareness of Bazel targets. -* Make your IDE aware of Starlark, the language used for Bazel's `BUILD` and - `.bzl`files -* Build, test, and execute binaries directly from the IDE -* Create configurations for debugging and running binaries. - -To install, go to the IDE's plugin browser and search for `Bazel`. - -To manually install older versions, download the zip files from JetBrains' -Plugin Repository and install the zip file from the IDE's plugin browser: - -* [Android Studio - plugin](https://plugins.jetbrains.com/plugin/9185-android-studio-with-bazel) -* [IntelliJ - plugin](https://plugins.jetbrains.com/plugin/8609-intellij-with-bazel) -* [CLion plugin](https://plugins.jetbrains.com/plugin/9554-clion-with-bazel) - -### Xcode - -[rules_xcodeproj](https://github.com/buildbuddy-io/rules_xcodeproj), -[Tulsi](https://tulsi.bazel.build), and -[XCHammer](https://github.com/pinterest/xchammer) generate Xcode -projects from Bazel `BUILD` files. - -### Visual Studio Code - -Official plugin for VS Code. - -Features: - -* Bazel Build Targets tree -* Starlark debugger for `.bzl` files during a build (set breakpoints, step - through code, inspect variables, and so on) - -Find [the plugin on the Visual Studio -marketplace](https://marketplace.visualstudio.com/items?itemName=BazelBuild.vscode-bazel). -The plugin is [open source](https://github.com/bazelbuild/vscode-bazel). - -See also: [Autocomplete for Source Code](#autocomplete-for-source-code) - -### Atom - -Find the [`language-bazel` package](https://atom.io/packages/language-bazel) -on the Atom package manager. 
- -See also: [Autocomplete for Source Code](#autocomplete-for-source-code) - -### Vim - -See [`bazelbuild/vim-bazel` on GitHub](https://github.com/bazelbuild/vim-bazel) - -See also: [Autocomplete for Source Code](#autocomplete-for-source-code) - -### Emacs - -See [`bazelbuild/bazel-emacs-mode` on -GitHub](https://github.com/bazelbuild/emacs-bazel-mode) - -See also: [Autocomplete for Source Code](#autocomplete-for-source-code) - -### Visual Studio - -[Lavender](https://github.com/tmandry/lavender) is an experimental project for -generating Visual Studio projects that use Bazel for building. - -### Eclipse - -[Bazel Eclipse Feature](https://github.com/salesforce/bazel-eclipse) -is a set of plugins for importing Bazel packages into an Eclipse workspace as -Eclipse projects. - -## Autocomplete for Source Code - -### C Language Family (C++, C, Objective-C, and Objective-C++) - -[`hedronvision/bazel-compile-commands-extractor`](https://github.com/hedronvision/bazel-compile-commands-extractor) enables autocomplete, smart navigation, quick fixes, and more in a wide variety of extensible editors, including VSCode, Vim, Emacs, Atom, and Sublime. It lets language servers, like clangd and ccls, and other types of tooling, draw upon Bazel's understanding of how `cc` and `objc` code will be compiled, including how it configures cross-compilation for other platforms. - -### Java - -[`georgewfraser/java-language-server`](https://github.com/georgewfraser/java-language-server) - Java Language Server (LSP) with support for Bazel-built projects - -## Automatically run build and test on file change - -[Bazel watcher](https://github.com/bazelbuild/bazel-watcher) is a -tool for building Bazel targets when source files change. - -## Building your own IDE plugin - -Read the [**IDE support** blog -post](https://blog.bazel.build/2016/06/10/ide-support.html) to learn more about -the Bazel APIs to use when building an IDE plugin. diff --git a/8.1.1/install/index.mdx b/8.1.1/install/index.mdx deleted file mode 100644 index 10f53c4..0000000 --- a/8.1.1/install/index.mdx +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: 'Installing Bazel' ---- - - - -This page describes the various platforms supported by Bazel and links -to the packages for more details. - -[Bazelisk](/install/bazelisk) is the recommended way to install Bazel on [Ubuntu Linux](/install/ubuntu), [macOS](/install/os-x), and [Windows](/install/windows). - -You can find available Bazel releases on our [release page](/release). - -## Community-supported packages - -Bazel community members maintain these packages. The Bazel team doesn't -officially support them. Contact the package maintainers for support. 
- -* [Arch Linux][arch] -* [CentOS 6](https://github.com/sub-mod/bazel-builds) -* [Debian](https://qa.debian.org/developer.php?email=team%2Bbazel%40tracker.debian.org) -* [FreeBSD](https://www.freshports.org/devel/bazel) -* [Gentoo](https://packages.gentoo.org/packages/dev-util/bazel) -* [Homebrew](https://formulae.brew.sh/formula/bazel) -* [Nixpkgs](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/tools/build-managers/bazel) -* [openSUSE](/install/suse) -* [Parabola](https://www.parabola.nu/packages/?q=bazel) -* [Scoop](https://github.com/scoopinstaller/scoop-main/blob/master/bucket/bazel.json) -* [Raspberry Pi](https://github.com/koenvervloesem/bazel-on-arm/blob/master/README.md) - -## Community-supported architectures - -* [ppc64el](https://ftp2.osuosl.org/pub/ppc64el/bazel/) - -For other platforms, you can try to [compile from source](/install/compile-source). - -[arch]: https://archlinux.org/packages/extra/x86_64/bazel/ diff --git a/8.1.1/install/os-x.mdx b/8.1.1/install/os-x.mdx deleted file mode 100644 index 9a0f3f8..0000000 --- a/8.1.1/install/os-x.mdx +++ /dev/null @@ -1,141 +0,0 @@ ---- -title: 'Installing Bazel on macOS' ---- - - - -This page describes how to install Bazel on macOS and set up your environment. - -You can install Bazel on macOS using one of the following methods: - -* *Recommended*: [Use Bazelisk](/install/bazelisk) -* [Use Homebrew](#install-on-mac-os-x-homebrew) -* [Use the binary installer](#install-with-installer-mac-os-x) -* [Compile Bazel from source](/install/compile-source) - -Bazel comes with two completion scripts. After installing Bazel, you can: - -* Access the [bash completion script](/install/completion#bash) -* Install the [zsh completion script](/install/completion#zsh) - -

-<h2 id="install-on-mac-os-x-homebrew">Installing using Homebrew</h2>

- -### Step 1: Install Homebrew on macOS - -Install [Homebrew](https://brew.sh/) (a one-time step): - -```posix-terminal -/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" -``` - -### Step 2: Install Bazel via Homebrew - -Install the Bazel package via Homebrew as follows: - -```posix-terminal -brew install bazel -``` - -All set! You can confirm Bazel is installed successfully by running the -following command: - -```posix-terminal -bazel --version -``` - -Once installed, you can upgrade to a newer version of Bazel using the -following command: - -```posix-terminal -brew upgrade bazel -``` - -
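-
-Homebrew tracks the latest Bazel release. If you need to stay on the currently
-installed version for a while, one option is to pin the formula so that
-`brew upgrade` skips it -- this is standard Homebrew behavior, not a
-Bazel-specific feature:
-
-```posix-terminal
-# Hold the currently installed Bazel version in place.
-brew pin bazel
-
-# Allow Bazel to be upgraded again later.
-brew unpin bazel
-```
-
-For per-project version pinning, [Bazelisk](/install/bazelisk) with a
-`.bazelversion` file is the more common approach.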

-<h2 id="install-with-installer-mac-os-x">Installing using the binary installer</h2>

-
-The binary installers are on Bazel's
-[GitHub releases page](https://github.com/bazelbuild/bazel/releases).
-
-The installer contains the Bazel binary. Some additional libraries
-must also be installed for Bazel to work.
-
-### Step 1: Install Xcode command line tools
-
-If you don't intend to use `ios_*` rules, it is sufficient to install the Xcode
-command line tools package by using `xcode-select`:
-
-```posix-terminal
-xcode-select --install
-```
-
-Otherwise, for `ios_*` rule support, you must have Xcode 6.1 or later with iOS
-SDK 8.1 installed on your system.
-
-Download Xcode from the
-[App Store](https://apps.apple.com/us/app/xcode/id497799835) or the
-[Apple Developer site](https://developer.apple.com/download/more/?=xcode).
-
-Once Xcode is installed, accept the license agreement for all users with the
-following command:
-
-```posix-terminal
-sudo xcodebuild -license accept
-```
-
-### Step 2: Download the Bazel installer
-
-Next, download the Bazel binary installer named
-`bazel-<version>-installer-darwin-x86_64.sh` from the
-[Bazel releases page on GitHub](https://github.com/bazelbuild/bazel/releases).
-
-**On macOS Catalina or newer (macOS >= 10.15)**, due to Apple's new app signing requirements,
-you need to download the installer from the terminal using `curl`, replacing
-the version variable with the Bazel version you want to download:
-
-```posix-terminal
-export BAZEL_VERSION=5.2.0
-
-curl -fLO "https://github.com/bazelbuild/bazel/releases/download/$BAZEL_VERSION/bazel-$BAZEL_VERSION-installer-darwin-x86_64.sh"
-```
-
-This is a temporary workaround until the macOS release flow supports
-signing ([#9304](https://github.com/bazelbuild/bazel/issues/9304)).
-
-### Step 3: Run the installer
-
-Run the Bazel installer as follows:
-
-```posix-terminal
-chmod +x "bazel-$BAZEL_VERSION-installer-darwin-x86_64.sh"
-
-./bazel-$BAZEL_VERSION-installer-darwin-x86_64.sh --user
-```
-
-The `--user` flag installs Bazel to the `$HOME/bin` directory on your system and
-sets the `.bazelrc` path to `$HOME/.bazelrc`. Use the `--help` command to see
-additional installation options.
-
-If you are **on macOS Catalina or newer (macOS >= 10.15)** and get an error that _**“bazel-real” cannot be
-opened because the developer cannot be verified**_, you need to re-download
-the installer from the terminal using `curl` as a workaround; see Step 2 above.
-
-### Step 4: Set up your environment
-
-If you ran the Bazel installer with the `--user` flag as above, the Bazel
-executable is installed in your `$HOME/bin` directory.
-It's a good idea to add this directory to your default paths, as follows:
-
-```posix-terminal
-export PATH="$PATH:$HOME/bin"
-```
-
-You can also add this command to your `~/.bashrc`, `~/.zshrc`, or `~/.profile`
-file.
-
-All set! You can confirm Bazel is installed successfully by running the
-following command:
-
-```posix-terminal
-bazel --version
-```
-
-To update to a newer release of Bazel, download and install the desired version.
-
diff --git a/8.1.1/install/suse.mdx b/8.1.1/install/suse.mdx
deleted file mode 100644
index a4d2e9e..0000000
--- a/8.1.1/install/suse.mdx
+++ /dev/null
@@ -1,35 +0,0 @@
----
-title: 'Installing Bazel on openSUSE Tumbleweed & Leap'
----
-
-
-This page describes how to install Bazel on openSUSE Tumbleweed and Leap.
-
-Note: The Bazel team does not officially maintain openSUSE support.
-For issues using Bazel on openSUSE, please file a ticket at
-[bugzilla.opensuse.org](https://bugzilla.opensuse.org/).
-
-Packages are provided for openSUSE Tumbleweed and Leap. You can find all
-available Bazel versions via openSUSE's [software search](https://software.opensuse.org/search?utf8=%E2%9C%93&baseproject=ALL&q=bazel).
-
-The commands below must be run either via `sudo` or while logged in as `root`.
-
-## Installing Bazel on openSUSE
-
-Run the following commands to install the package. If you need a specific
-version, you can install it via the version-specific `bazelXXX` package;
-otherwise, just `bazel` is enough.
-
-To install the latest version of Bazel, run:
-
-```posix-terminal
-zypper install bazel
-```
-
-You can also install a specific version of Bazel by specifying the package
-version with `bazel<version>`. For example, to install
-Bazel 4.2, run:
-
-```posix-terminal
-zypper install bazel4.2
-```
diff --git a/8.1.1/install/ubuntu.mdx b/8.1.1/install/ubuntu.mdx
deleted file mode 100644
index a31bd2f..0000000
--- a/8.1.1/install/ubuntu.mdx
+++ /dev/null
@@ -1,166 +0,0 @@
----
-title: 'Installing Bazel on Ubuntu'
----
-
-
-This page describes the options for installing Bazel on Ubuntu.
-It also provides links to the Bazel completion scripts and the binary installer,
-if needed as a backup option (for example, if you don't have admin access).
-
-Supported Ubuntu Linux platforms:
-
-* 22.04 (LTS)
-* 20.04 (LTS)
-* 18.04 (LTS)
-
-Bazel should be compatible with other Ubuntu releases and Debian
-"stretch" and above, but is untested and not guaranteed to work.
-
-Install Bazel on Ubuntu using one of the following methods:
-
-* *Recommended*: [Use Bazelisk](/install/bazelisk)
-* [Use our custom APT repository](#install-on-ubuntu)
-* [Use the binary installer](#binary-installer)
-* [Use the Bazel Docker container](#docker-container)
-* [Compile Bazel from source](/install/compile-source)
-
-**Note:** For Arm-based systems, the APT repository does not contain an `arm64`
-release, and there is no binary installer available. Either use Bazelisk or
-compile from source.
-
-Bazel comes with two completion scripts. After installing Bazel, you can:
-
-* Access the [bash completion script](/install/completion#bash)
-* Install the [zsh completion script](/install/completion#zsh)
-
-## Using Bazel's apt repository
-
-### Step 1: Add Bazel distribution URI as a package source
-
-**Note:** This is a one-time setup step.
-
-```posix-terminal
-sudo apt install apt-transport-https curl gnupg -y
-
-curl -fsSL https://bazel.build/bazel-release.pub.gpg | gpg --dearmor >bazel-archive-keyring.gpg
-
-sudo mv bazel-archive-keyring.gpg /usr/share/keyrings
-
-echo "deb [arch=amd64 signed-by=/usr/share/keyrings/bazel-archive-keyring.gpg] https://storage.googleapis.com/bazel-apt stable jdk1.8" | sudo tee /etc/apt/sources.list.d/bazel.list
-```
-
-The component name "jdk1.8" is kept only for legacy reasons and doesn't relate
-to supported or included JDK versions. Bazel releases are Java-version agnostic.
-Changing the "jdk1.8" component name would break existing users of the repo.
-
-### Step 2: Install and update Bazel
-
-```posix-terminal
-sudo apt update && sudo apt install bazel
-```
-
-Once installed, you can upgrade to a newer version of Bazel as part of your normal system updates:
-
-```posix-terminal
-sudo apt update && sudo apt full-upgrade
-```
-
-The `bazel` package always installs the latest stable version of Bazel.
-You can install specific, older versions of Bazel in addition to the latest
-one, for example:
-
-```posix-terminal
-sudo apt install bazel-1.0.0
-```
-
-This installs Bazel 1.0.0 as `/usr/bin/bazel-1.0.0` on your system. This
-can be useful if you need a specific version of Bazel to build a project, for
-example because it uses a `.bazelversion` file to explicitly state with which
-Bazel version it should be built.
-
-Optionally, you can set `bazel` to a specific version by creating a symlink:
-
-```posix-terminal
-sudo ln -s /usr/bin/bazel-1.0.0 /usr/bin/bazel
-
-bazel --version  # 1.0.0
-```
-
-### Step 3: Install a JDK (optional)
-
-Bazel includes a private, bundled JRE as its runtime and doesn't require you to
-install any specific version of Java.
-
-However, if you want to build Java code using Bazel, you have to install a JDK.
-
-```posix-terminal
-sudo apt install default-jdk
-```
-
-## Using the binary installer
-
-Generally, you should use the apt repository, but the binary installer
-can be useful if you don't have admin permissions on your machine or
-can't add custom repositories.
-
-The binary installers can be downloaded from Bazel's [GitHub releases page](https://github.com/bazelbuild/bazel/releases).
-
-The installer contains the Bazel binary and extracts it into your `$HOME/bin`
-folder. Some additional libraries must be installed manually for Bazel to work.
-
-### Step 1: Install required packages
-
-Bazel needs a C++ compiler and unzip / zip in order to work:
-
-```posix-terminal
-sudo apt install g++ unzip zip
-```
-
-If you want to build Java code using Bazel, install a JDK:
-
-```posix-terminal
-sudo apt-get install default-jdk
-```
-
-### Step 2: Run the installer
-
-Next, download the Bazel binary installer named `bazel-<version>-installer-linux-x86_64.sh`
-from the [Bazel releases page on GitHub](https://github.com/bazelbuild/bazel/releases).
-
-Run it as follows:
-
-```posix-terminal
-chmod +x bazel-<version>-installer-linux-x86_64.sh
-
-./bazel-<version>-installer-linux-x86_64.sh --user
-```
-
-The `--user` flag installs Bazel to the `$HOME/bin` directory on your system and
-sets the `.bazelrc` path to `$HOME/.bazelrc`. Use the `--help` command to see
-additional installation options.
-
-### Step 3: Set up your environment
-
-If you ran the Bazel installer with the `--user` flag as above, the Bazel
-executable is installed in your `$HOME/bin` directory.
-It's a good idea to add this directory to your default paths, as follows:
-
-```posix-terminal
-export PATH="$PATH:$HOME/bin"
-```
-
-You can also add this command to your `~/.bashrc` or `~/.zshrc` file to make it
-permanent.
-
-## Using the Bazel Docker container
-
-We publish a Docker container with Bazel installed for each Bazel version at `gcr.io/bazel-public/bazel`.
-You can use the Docker container as follows:
-
-```
-$ docker pull gcr.io/bazel-public/bazel:<version>
-```
-
-The Docker container is built by [these steps](https://github.com/bazelbuild/continuous-integration/tree/master/bazel/oci).
-
diff --git a/8.1.1/migrate/index.mdx b/8.1.1/migrate/index.mdx
deleted file mode 100644
index 10f53c4..0000000
--- a/8.1.1/migrate/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: 'Migrating to Bazel'
----
-
-
-This page links to migration guides for Bazel.
- -* [Maven](/migrate/maven) -* [Xcode](/migrate/xcode) -* [CocoaPods](/migrate/cocoapods) diff --git a/8.1.1/migrate/maven.mdx b/8.1.1/migrate/maven.mdx deleted file mode 100644 index 38aaffc..0000000 --- a/8.1.1/migrate/maven.mdx +++ /dev/null @@ -1,241 +0,0 @@ ---- -title: 'Migrating from Maven to Bazel' ---- - - - -This page describes how to migrate from Maven to Bazel, including the -prerequisites and installation steps. It describes the differences between Maven -and Bazel, and provides a migration example using the Guava project. - -When migrating from any build tool to Bazel, it's best to have both build tools -running in parallel until you have fully migrated your development team, CI -system, and any other relevant systems. You can run Maven and Bazel in the same -repository. - -Note: While Bazel supports downloading and publishing Maven artifacts with -[rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external) -, it does not directly support Maven-based plugins. Maven plugins can't be -directly run by Bazel since there's no Maven compatibility layer. - -## Before you begin - -* [Install Bazel](/install) if it's not yet installed. -* If you're new to Bazel, go through the tutorial [Introduction to Bazel: - Build Java](/start/java) before you start migrating. The tutorial explains - Bazel's concepts, structure, and label syntax. - -## Differences between Maven and Bazel - -* Maven uses top-level `pom.xml` file(s). Bazel supports multiple build files - and multiple targets per `BUILD` file, allowing for builds that are more - incremental than Maven's. -* Maven takes charge of steps for the deployment process. Bazel does not - automate deployment. -* Bazel enables you to express dependencies between languages. -* As you add new sections to the project, with Bazel you may need to add new - `BUILD` files. Best practice is to add a `BUILD` file to each new Java - package. - -## Migrate from Maven to Bazel - -The steps below describe how to migrate your project to Bazel: - -1. [Create the MODULE.bazel file](#1-build) -2. [Create one BUILD file](#2-build) -3. [Create more BUILD files](#3-build) -4. [Build using Bazel](#4-build) - -Examples below come from a migration of the [Guava -project](https://github.com/google/guava) from Maven to Bazel. The -Guava project used is release `v31.1`. The examples using Guava do not walk -through each step in the migration, but they do show the files and contents that -are generated or added manually for the migration. - -``` -$ git clone https://github.com/google/guava.git && cd guava -$ git checkout v31.1 -``` - -### 1. Create the MODULE.bazel file - -Create a file named `MODULE.bazel` at the root of your project. If your project -has no external dependencies, this file can be empty. - -If your project depends on files or packages that are not in one of the -project's directories, specify these external dependencies in the MODULE.bazel -file. You can use `rules_jvm_external` to manage dependencies from Maven. For -instructions about using this ruleset, see [the -README](https://github.com/bazelbuild/rules_jvm_external/#rules_jvm_external) -. - -#### Guava project example: external dependencies - -You can list the external dependencies of the [Guava -project](https://github.com/google/guava) with the -[`rules_jvm_external`](https://github.com/bazelbuild/rules_jvm_external) -ruleset. 
- -Add the following snippet to the `MODULE.bazel` file: - -```python -bazel_dep(name = "rules_jvm_external", version = "6.2") -maven = use_extension("@rules_jvm_external//:extensions.bzl", "maven") -maven.install( - artifacts = [ - "com.google.code.findbugs:jsr305:3.0.2", - "com.google.errorprone:error_prone_annotations:2.11.0", - "com.google.j2objc:j2objc-annotations:1.3", - "org.codehaus.mojo:animal-sniffer-annotations:1.20", - "org.checkerframework:checker-qual:3.12.0", - ], - repositories = [ - "https://repo1.maven.org/maven2", - ], -) -use_repo(maven, "maven") -``` - -### 2. Create one BUILD file - -Now that you have your workspace defined and external dependencies (if -applicable) listed, you need to create `BUILD` files to describe how your -project should be built. Unlike Maven with its one `pom.xml` file, Bazel can use -many `BUILD` files to build a project. These files specify multiple build -targets, which allow Bazel to produce incremental builds. - -Add `BUILD` files in stages. Start with adding one `BUILD` file at the root of -your project and using it to do an initial build using Bazel. Then, you refine -your build by adding more `BUILD` files with more granular targets. - -1. In the same directory as your `MODULE.bazel` file, create a text file and - name it `BUILD`. - -2. In this `BUILD` file, use the appropriate rule to create one target to build - your project. Here are some tips: - - * Use the appropriate rule: - * To build projects with a single Maven module, use the - `java_library` rule as follows: - - ```python - java_library( - name = "everything", - srcs = glob(["src/main/java/**/*.java"]), - resources = glob(["src/main/resources/**"]), - deps = ["//:all-external-targets"], - ) - ``` - - * To build projects with multiple Maven modules, use the - `java_library` rule as follows: - - ```python - java_library( - name = "everything", - srcs = glob([ - "Module1/src/main/java/**/*.java", - "Module2/src/main/java/**/*.java", - ... - ]), - resources = glob([ - "Module1/src/main/resources/**", - "Module2/src/main/resources/**", - ... - ]), - deps = ["//:all-external-targets"], - ) - ``` - - * To build binaries, use the `java_binary` rule: - - ```python - java_binary( - name = "everything", - srcs = glob(["src/main/java/**/*.java"]), - resources = glob(["src/main/resources/**"]), - deps = ["//:all-external-targets"], - main_class = "com.example.Main" - ) - ``` - - * Specify the attributes: - * `name`: Give the target a meaningful name. In the examples - above, the target is called "everything." - * `srcs`: Use globbing to list all .java files in your project. - * `resources`: Use globbing to list all resources in your project. - * `deps`: You need to determine which external dependencies your - project needs. - * Take a look at the [example below of this top-level BUILD - file](#guava-2) from the migration of the Guava project. - -3. Now that you have a `BUILD` file at the root of your project, build your - project to ensure that it works. On the command line, from your workspace - directory, use `bazel build //:everything` to build your project with Bazel. - - The project has now been successfully built with Bazel. You will need to add - more `BUILD` files to allow incremental builds of the project. - -#### Guava project example: start with one BUILD file - -When migrating the Guava project to Bazel, initially one `BUILD` file is used to -build the entire project. 
Here are the contents of this initial `BUILD` file in -the workspace directory: - -```python -java_library( - name = "everything", - srcs = glob([ - "guava/src/**/*.java", - "futures/failureaccess/src/**/*.java", - ]), - javacopts = ["-XepDisableAllChecks"], - deps = [ - "@maven//:com_google_code_findbugs_jsr305", - "@maven//:com_google_errorprone_error_prone_annotations", - "@maven//:com_google_j2objc_j2objc_annotations", - "@maven//:org_checkerframework_checker_qual", - "@maven//:org_codehaus_mojo_animal_sniffer_annotations", - ], -) -``` - -### 3. Create more BUILD files (optional) - -Bazel does work with just one `BUILD file`, as you saw after completing your -first build. You should still consider breaking the build into smaller chunks by -adding more `BUILD` files with granular targets. - -Multiple `BUILD` files with multiple targets will give the build increased -granularity, allowing: - -* increased incremental builds of the project, -* increased parallel execution of the build, -* better maintainability of the build for future users, and -* control over visibility of targets between packages, which can prevent - issues such as libraries containing implementation details leaking into - public APIs. - -Tips for adding more `BUILD` files: - -* You can start by adding a `BUILD` file to each Java package. Start with Java - packages that have the fewest dependencies and work you way up to packages - with the most dependencies. -* As you add `BUILD` files and specify targets, add these new targets to the - `deps` sections of targets that depend on them. Note that the `glob()` - function does not cross package boundaries, so as the number of packages - grows the files matched by `glob()` will shrink. -* Any time you add a `BUILD` file to a `main` directory, ensure that you add a - `BUILD` file to the corresponding `test` directory. -* Take care to limit visibility properly between packages. -* To simplify troubleshooting errors in your setup of `BUILD` files, ensure - that the project continues to build with Bazel as you add each build file. - Run `bazel build //...` to ensure all of your targets still build. - -### 4. Build using Bazel - -You've been building using Bazel as you add `BUILD` files to validate the setup -of the build. - -When you have `BUILD` files at the desired granularity, you can use Bazel to -produce all of your builds. diff --git a/8.1.1/migrate/xcode.mdx b/8.1.1/migrate/xcode.mdx deleted file mode 100644 index 986cd11..0000000 --- a/8.1.1/migrate/xcode.mdx +++ /dev/null @@ -1,280 +0,0 @@ ---- -title: 'Migrating from Xcode to Bazel' ---- - - - -This page describes how to build or test an Xcode project with Bazel. It -describes the differences between Xcode and Bazel, and provides the steps for -converting an Xcode project to a Bazel project. It also provides troubleshooting -solutions to address common errors. - -## Differences between Xcode and Bazel - -* Bazel requires you to explicitly specify every build target and its - dependencies, plus the corresponding build settings via build rules. - -* Bazel requires all files on which the project depends to be present within - the workspace directory or specified as dependencies in the `MODULE.bazel` - file. - -* When building Xcode projects with Bazel, the `BUILD` file(s) become the - source of truth. 
If you work on the project in Xcode, you must generate a - new version of the Xcode project that matches the `BUILD` files using - [rules_xcodeproj](https://github.com/buildbuddy-io/rules_xcodeproj/) - whenever you update the `BUILD` files. Certain changes to the `BUILD` files - such as adding dependencies to a target don't require regenerating the - project which can speed up development. If you're not using Xcode, the - `bazel build` and `bazel test` commands provide build and test capabilities - with certain limitations described later in this guide. - -## Before you begin - -Before you begin, do the following: - -1. [Install Bazel](/install) if you have not already done so. - -2. If you're not familiar with Bazel and its concepts, complete the [iOS app - tutorial](/start/ios-app)). You should understand the Bazel workspace, - including the `MODULE.bazel` and `BUILD` files, as well as the concepts of - targets, build rules, and Bazel packages. - -3. Analyze and understand the project's dependencies. - -### Analyze project dependencies - -Unlike Xcode, Bazel requires you to explicitly declare all dependencies for -every target in the `BUILD` file. - -For more information on external dependencies, see [Working with external -dependencies](/docs/external). - -## Build or test an Xcode project with Bazel - -To build or test an Xcode project with Bazel, do the following: - -1. [Create the `MODULE.bazel` file](#create-workspace) - -2. [(Experimental) Integrate SwiftPM dependencies](#integrate-swiftpm) - -3. [Create a `BUILD` file:](#create-build-file) - - a. [Add the application target](#add-app-target) - - b. [(Optional) Add the test target(s)](#add-test-target) - - c. [Add the library target(s)](#add-library-target) - -4. [(Optional) Granularize the build](#granularize-build) - -5. [Run the build](#run-build) - -6. [Generate the Xcode project with rules_xcodeproj](#generate-the-xcode-project-with-rules_xcodeproj) - -### Step 1: Create the `MODULE.bazel` file - -Create a `MODULE.bazel` file in a new directory. This directory becomes the -Bazel workspace root. If the project uses no external dependencies, this file -can be empty. If the project depends on files or packages that are not in one of -the project's directories, specify these external dependencies in the -`MODULE.bazel` file. - -Note: Place the project source code within the directory tree containing the -`MODULE.bazel` file. - -### Step 2: (Experimental) Integrate SwiftPM dependencies - -To integrate SwiftPM dependencies into the Bazel workspace with -[swift_bazel](https://github.com/cgrindel/swift_bazel), you must -convert them into Bazel packages as described in the [following -tutorial](https://chuckgrindel.com/swift-packages-in-bazel-using-swift_bazel/) -. - -Note: SwiftPM support is a manual process with many variables. SwiftPM -integration with Bazel has not been fully verified and is not officially -supported. - -### Step 3: Create a `BUILD` file - -Once you have defined the workspace and external dependencies, you need to -create a `BUILD` file that tells Bazel how the project is structured. 
Create the `BUILD` file at the root of the Bazel workspace and configure it to do an
-initial build of the project as follows:
-
-* [Step 3a: Add the application target](#step-3a-add-the-application-target)
-* [Step 3b: (Optional) Add the test target(s)](#step-3b-optional-add-the-test-target-s)
-* [Step 3c: Add the library target(s)](#step-3c-add-the-library-target-s)
-
-**Tip:** To learn more about packages and other Bazel concepts, see [Workspaces,
-packages, and targets](/concepts/build-ref).
-
-#### Step 3a: Add the application target
-
-Add a
-[`macos_application`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-macos.md#macos_application)
-or an
-[`ios_application`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-ios.md#ios_application)
-rule target. This target builds a macOS or iOS application bundle, respectively.
-In the target, specify the following at the minimum:
-
-* `bundle_id` - the bundle ID (reverse-DNS path followed by app name) of the
-  binary.
-
-* `provisioning_profile` - provisioning profile from your Apple Developer
-  account (if building for an iOS device).
-
-* `families` (iOS only) - whether to build the application for iPhone, iPad,
-  or both.
-
-* `infoplists` - list of .plist files to merge into the final Info.plist file.
-
-* `minimum_os_version` - the minimum version of macOS or iOS that the
-  application supports. This ensures Bazel builds the application with the
-  correct API levels.
-
-#### Step 3b: (Optional) Add the test target(s)
-
-Bazel's [Apple build
-rules](https://github.com/bazelbuild/rules_apple) support running
-unit and UI tests on all Apple platforms. Add test targets as follows:
-
-* [`macos_unit_test`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-macos.md#macos_unit_test)
-  to run library-based and application-based unit tests on macOS.
-
-* [`ios_unit_test`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-ios.md#ios_unit_test)
-  to build and run library-based unit tests on iOS.
-
-* [`ios_ui_test`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-ios.md#ios_ui_test)
-  to build and run user interface tests in the iOS simulator.
-
-* Similar test rules exist for
-  [tvOS](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-tvos.md),
-  [watchOS](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-watchos.md)
-  and
-  [visionOS](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-visionos.md).
-
-At the minimum, specify a value for the `minimum_os_version` attribute. While
-other packaging attributes, such as `bundle_identifier` and `infoplists`,
-default to most commonly used values, ensure that those defaults are compatible
-with the project and adjust them as necessary. For tests that require the iOS
-simulator, also specify the `ios_application` target name as the value of the
-`test_host` attribute.
-
-#### Step 3c: Add the library target(s)
-
-Add an [`objc_library`](/reference/be/objective-c#objc_library) target for each
-Objective-C library and a
-[`swift_library`](https://github.com/bazelbuild/rules_swift/blob/master/doc/rules.md#swift_library)
-target for each Swift library on which the application and/or tests depend.
-
-Add the library targets as follows:
-
-* Add the application library targets as dependencies to the application
-  targets.
-
-* Add the test library targets as dependencies to the test targets.
-
-* List the implementation sources in the `srcs` attribute.
- -* List the headers in the `hdrs` attribute. - -Note: You can use the [`glob`](/reference/be/functions#glob) function to include -all sources and/or headers of a certain type. Use it carefully as it might -include files you do not want Bazel to build. - -You can browse existing examples for various types of applications directly in -the [rules_apple examples -directory](https://github.com/bazelbuild/rules_apple/tree/master/examples/). For -example: - -* [macOS application targets](https://github.com/bazelbuild/rules_apple/tree/master/examples/macos) - -* [iOS applications targets](https://github.com/bazelbuild/rules_apple/tree/master/examples/ios) - -* [Multi platform applications (macOS, iOS, watchOS, tvOS)](https://github.com/bazelbuild/rules_apple/tree/master/examples/multi_platform) - -For more information on build rules, see [Apple Rules for -Bazel](https://github.com/bazelbuild/rules_apple). - -At this point, it is a good idea to test the build: - -`bazel build //:` - -### Step 4: (Optional) Granularize the build - -If the project is large, or as it grows, consider chunking it into multiple -Bazel packages. This increased granularity provides: - -* Increased incrementality of builds, - -* Increased parallelization of build tasks, - -* Better maintainability for future users, - -* Better control over source code visibility across targets and packages. This - prevents issues such as libraries containing implementation details leaking - into public APIs. - -Tips for granularizing the project: - -* Put each library in its own Bazel package. Start with those requiring the - fewest dependencies and work your way up the dependency tree. - -* As you add `BUILD` files and specify targets, add these new targets to the - `deps` attributes of targets that depend on them. - -* The `glob()` function does not cross package boundaries, so as the number of - packages grows the files matched by `glob()` will shrink. - -* When adding a `BUILD` file to a `main` directory, also add a `BUILD` file to - the corresponding `test` directory. - -* Enforce healthy visibility limits across packages. - -* Build the project after each major change to the `BUILD` files and fix build - errors as you encounter them. - -### Step 5: Run the build - -Run the fully migrated build to ensure it completes with no errors or warnings. -Run every application and test target individually to more easily find sources -of any errors that occur. - -For example: - -```posix-terminal -bazel build //:my-target -``` - -### Step 6: Generate the Xcode project with rules_xcodeproj - -When building with Bazel, the `MODULE.bazel` and `BUILD` files become the source -of truth about the build. To make Xcode aware of this, you must generate a -Bazel-compatible Xcode project using -[rules_xcodeproj](https://github.com/buildbuddy-io/rules_xcodeproj#features) -. - -### Troubleshooting - -Bazel errors can arise when it gets out of sync with the selected Xcode version, -like when you apply an update. Here are some things to try if you're -experiencing errors with Xcode, for example "Xcode version must be specified to -use an Apple CROSSTOOL". - -* Manually run Xcode and accept any terms and conditions. - -* Use Xcode select to indicate the correct version, accept the license, and - clear Bazel's state. - -```posix-terminal - sudo xcode-select -s /Applications/Xcode.app/Contents/Developer - - sudo xcodebuild -license - - bazel sync --configure -``` - -* If this does not work, you may also try running `bazel clean --expunge`. 
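-
-Before rerunning the build, it can also help to confirm which Xcode
-installation is actually selected. A quick check with the standard Xcode
-command-line tools (nothing Bazel-specific):
-
-```posix-terminal
-# Print the currently selected developer directory.
-xcode-select -p
-
-# Print the version of the selected Xcode.
-xcodebuild -version
-```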
-
-Note: If you've saved your Xcode to a different path, you can use
-`xcode-select -s` to point to that path.
diff --git a/8.1.1/query/aquery.mdx b/8.1.1/query/aquery.mdx
deleted file mode 100644
index 2176ff6..0000000
--- a/8.1.1/query/aquery.mdx
+++ /dev/null
@@ -1,385 +0,0 @@
----
-title: 'Action Graph Query (aquery)'
----
-
-
-The `aquery` command allows you to query for actions in your build graph.
-It operates on the post-analysis Configured Target Graph and exposes
-information about **Actions, Artifacts and their relationships.**
-
-`aquery` is useful when you are interested in the properties of the Actions/Artifacts
-generated from the Configured Target Graph. For example, the actual commands run
-and their inputs/outputs/mnemonics.
-
-The tool accepts several command-line [options](#command-options).
-Notably, the aquery command runs on top of a regular Bazel build and inherits
-the set of options available during a build.
-
-It supports the same set of functions that is available to traditional
-`query`, except for `siblings`, `buildfiles`, and `tests`.
-
-An example `aquery` output (without specific details):
-
-```
-$ bazel aquery 'deps(//some:label)'
-action 'Writing file some_file_name'
-  Mnemonic: ...
-  Target: ...
-  Configuration: ...
-  ActionKey: ...
-  Inputs: [...]
-  Outputs: [...]
-```
-
-## Basic syntax
-
-A simple example of the syntax for `aquery` is as follows:
-
-`bazel aquery "aquery_function(function(//target))"`
-
-The query expression (in quotes) consists of the following:
-
-* `aquery_function(...)`: functions specific to `aquery`.
-  More details [below](#using-aquery-functions).
-* `function(...)`: the standard [functions](/query/language#functions)
-  available to traditional `query`.
-* `//target` is the label of the target of interest.
-
-```
-# aquery examples:
-# Get the action graph generated while building //src/target_a
-$ bazel aquery '//src/target_a'
-
-# Get the action graph generated while building all dependencies of //src/target_a
-$ bazel aquery 'deps(//src/target_a)'
-
-# Get the action graph generated while building all dependencies of //src/target_a
-# whose input filenames match the regex ".*cpp".
-$ bazel aquery 'inputs(".*cpp", deps(//src/target_a))'
-```
-
-## Using aquery functions
-
-There are three `aquery` functions:
-
-* `inputs`: filter actions by inputs.
-* `outputs`: filter actions by outputs.
-* `mnemonic`: filter actions by mnemonic.
-
-`expr ::= inputs(word, expr)`
-
-The `inputs` operator returns the actions generated from building `expr`,
-whose input filenames match the regex provided by `word`.
-
-`$ bazel aquery 'inputs(".*cpp", deps(//src/target_a))'`
-
-The `outputs` and `mnemonic` functions share a similar syntax.
-
-You can also combine functions to achieve the AND operation. For example:
-
-```
-$ bazel aquery 'mnemonic("Cpp.*", (inputs(".*cpp", inputs("foo.*", //src/target_a))))'
-```
-
-The above command would find all actions involved in building `//src/target_a`,
-whose mnemonics match `"Cpp.*"` and inputs match the patterns
-`".*cpp"` and `"foo.*"`.
-
-Important: aquery functions can't be nested inside non-aquery functions.
-Conceptually, this makes sense since the output of aquery functions is Actions,
-not Configured Targets.
-
-An example of the syntax error produced:
-
-```
-$ bazel aquery 'deps(inputs(".*cpp", //src/target_a))'
-ERROR: aquery filter functions (inputs, outputs, mnemonic) produce actions,
-and therefore can't be the input of other function types: deps
-deps(inputs(".*cpp", //src/target_a))
-```
-
-## Options
-
-### Build options
-
-`aquery` runs on top of a regular Bazel build and thus inherits the set of
-[options](/reference/command-line-reference#build-options)
-available during a build.
-
-### Aquery options
-
-#### `--output=(text|summary|proto|jsonproto|textproto), default=text`
-
-The default output format (`text`) is human-readable;
-use `proto`, `textproto`, or `jsonproto` for a machine-readable format.
-The proto message is `analysis.ActionGraphContainer`.
-
-#### `--include_commandline, default=true`
-
-Includes the content of the action command lines in the output (potentially large).
-
-#### `--include_artifacts, default=true`
-
-Includes names of the action inputs and outputs in the output (potentially large).
-
-#### `--include_aspects, default=true`
-
-Whether to include Aspect-generated actions in the output.
-
-#### `--include_param_files, default=false`
-
-Include the content of the param files used in the command (potentially large).
-
-Warning: Enabling this flag will automatically enable the `--include_commandline` flag.
-
-#### `--include_file_write_contents, default=false`
-
-Include file contents for the `actions.write()` action and the contents of the
-manifest file for the `SourceSymlinkManifest` action. The file contents are
-returned in the `file_contents` field with the proto output formats
-(`--output=proto|textproto|jsonproto`). With `--output=text`, the output
-contains a line of the form:
-
-```
-FileWriteContents: [...]
-```
-
-#### `--skyframe_state, default=false`
-
-Without performing extra analysis, dump the Action Graph from Skyframe.
-
-Note: Specifying a target with `--skyframe_state` is currently not supported.
-This flag is only available with `--output=proto` or `--output=textproto`.
-
-## Other tools and features
-
-### Querying against the state of Skyframe
-
-[Skyframe](/reference/skyframe) is the evaluation and
-incrementality model of Bazel. On each Bazel server instance, Skyframe stores the dependency graph
-constructed from the previous runs of the [Analysis phase](/run/build#analysis).
-
-In some cases, it is useful to query the Action Graph on Skyframe.
-An example use case would be:
-
-1. Run `bazel build //target_a`
-2. Run `bazel build //target_b`
-3. File `foo.out` was generated.
-
-_As a Bazel user, I want to determine if `foo.out` was generated from building
-`//target_a` or `//target_b`_.
-
-One could run `bazel aquery 'outputs("foo.out", //target_a)'` and
-`bazel aquery 'outputs("foo.out", //target_b)'` to figure out the action responsible
-for creating `foo.out`, and in turn the target. However, the number of different
-targets previously built can be larger than 2, which makes running multiple `aquery`
-commands a hassle.
-
-As an alternative, the `--skyframe_state` flag can be used:
-
-```
-# List all actions on Skyframe's action graph
-$ bazel aquery --output=proto --skyframe_state
-
-# or
-
-# List all actions on Skyframe's action graph, whose output matches "foo.out"
-$ bazel aquery --output=proto --skyframe_state 'outputs("foo.out")'
-```
-
-With `--skyframe_state` mode, `aquery` takes the content of the Action Graph
-that Skyframe keeps on the Bazel instance, (optionally) performs filtering on
-it, and outputs the content, without re-running the analysis phase.
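-
-As a sketch of how the `--skyframe_state` output can be skimmed without extra
-tooling, the `textproto` form can be piped through standard text utilities.
-The field name below follows the `analysis.ActionGraphContainer` message
-mentioned above; the exact formatting may vary between Bazel versions:
-
-```
-# Count the actions currently on Skyframe's action graph, grouped by mnemonic.
-$ bazel aquery --output=textproto --skyframe_state | grep 'mnemonic:' | sort | uniq -c
-```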
-
-#### Special considerations
-
-##### Output format
-
-`--skyframe_state` is currently only available for `--output=proto`
-and `--output=textproto`.
-
-##### Non-inclusion of target labels in the query expression
-
-Currently, `--skyframe_state` queries the whole action graph that exists on Skyframe,
-regardless of the targets. Having the target label specified in the query together with
-`--skyframe_state` is considered a syntax error:
-
-```
-# WRONG: Target Included
-$ bazel aquery --output=proto --skyframe_state //target_a
-ERROR: Error while parsing '//target_a)': Specifying build target(s) [//target_a] with --skyframe_state is currently not supported.
-
-# WRONG: Target Included
-$ bazel aquery --output=proto --skyframe_state 'inputs(".*.java", //target_a)'
-ERROR: Error while parsing '//target_a)': Specifying build target(s) [//target_a] with --skyframe_state is currently not supported.
-
-# CORRECT: Without Target
-$ bazel aquery --output=proto --skyframe_state
-$ bazel aquery --output=proto --skyframe_state 'inputs(".*.java")'
-```
-
-### Comparing aquery outputs
-
-You can compare the outputs of two different aquery invocations using the `aquery_differ` tool.
-For instance, when you make some changes to your rule definition and want to verify that the
-command lines being run did not change, `aquery_differ` is the tool for that.
-
-The tool is available in the [bazelbuild/bazel](https://github.com/bazelbuild/bazel/tree/master/tools/aquery_differ) repository.
-To use it, clone the repository to your local machine. An example usage:
-
-```
-$ bazel run //tools/aquery_differ -- \
---before=/path/to/before.proto \
---after=/path/to/after.proto \
---input_type=proto \
---attrs=cmdline \
---attrs=inputs
-```
-
-The above command returns the difference between the `before` and `after` aquery outputs:
-which actions were present in one but not the other, which actions have different
-command lines/inputs in each aquery output, and so on. The result of running the above command would be:
-
-```
-Aquery output 'after' change contains an action that generates the following outputs that aquery output 'before' change doesn't:
-...
-/list of output files/
-...
-
-[cmdline]
-Difference in the action that generates the following output(s):
-  /path/to/abc.out
---- /path/to/before.proto
-+++ /path/to/after.proto
-@@ -1,3 +1,3 @@
-  ...
-  /cmdline diff, in unified diff format/
-  ...
-```
-
-#### Command options
-
-`--before, --after`: The aquery output files to be compared.
-
-`--input_type=(proto|text_proto), default=proto`: the format of the input
-files. Support is provided for `proto` and `textproto` aquery output.
-
-`--attrs=(cmdline|inputs), default=cmdline`: the attributes of actions
-to be compared.
-
-### Aspect-on-aspect
-
-It is possible for [Aspects](/extending/aspects)
-to be applied on top of each other. The aquery output of the action generated by
-these Aspects would then include the _Aspect path_, which is the sequence of
-Aspects applied to the target which generated the action.
-
-An example of Aspect-on-Aspect:
-
-```
-t0
-^
-| <- a1
-t1
-^
-| <- a2
-t2
-```
-
-Let `t_i` be a target of rule `r_i`, which applies an Aspect `a_i`
-to its dependencies.
-
-Assume that `a2` generates an action `X` when applied to target `t0`. The text output of
-`bazel aquery --include_aspects 'deps(//t2)'` for action `X` would be:
-
-```
-action ...
-  Mnemonic: ...
-  Target: //my_pkg:t0
-  Configuration: ...
-  AspectDescriptors: [//my_pkg:rule.bzl%**a2**(foo=...)
- -> //my_pkg:rule.bzl%**a1**(bar=...)] - ... -``` - -This means that action `X` was generated by Aspect `a2` applied onto -`a1(t0)`, where `a1(t0)` is the result of Aspect `a1` applied -onto target `t0`. - -Each `AspectDescriptor` has the following format: - -``` - AspectClass([param=value,...]) -``` - -`AspectClass` could be the name of the Aspect class (for native Aspects) or -`bzl_file%aspect_name` (for Starlark Aspects). `AspectDescriptor` are -sorted in topological order of the -[dependency graph](/extending/aspects#aspect_basics). - -### Linking with the JSON profile - -While aquery provides information about the actions being run in a build (why they're being run, -their inputs/outputs), the [JSON profile](/rules/performance#performance-profiling) -tells us the timing and duration of their execution. -It is possible to combine these 2 sets of information via a common denominator: an action's primary output. - -To include actions' outputs in the JSON profile, generate the profile with -`--experimental_include_primary_output --noslim_profile`. -Slim profiles are incompatible with the inclusion of primary outputs. An action's primary output -is included by default by aquery. - -We don't currently provide a canonical tool to combine these 2 data sources, but you should be -able to build your own script with the above information. - -## Known issues - -### Handling shared actions - -Sometimes actions are -[shared](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/actions/Actions.java;l=59;drc=146d51aa1ec9dcb721a7483479ef0b1ac21d39f1) -between configured targets. - -In the execution phase, those shared actions are -[simply considered as one](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/actions/Actions.java;l=241;drc=003b8734036a07b496012730964ac220f486b61f) and only executed once. -However, aquery operates on the pre-execution, post-analysis action graph, and hence treats these -like separate actions whose output Artifacts have the exact same `execPath`. As a result, -equivalent Artifacts appear duplicated. - -The list of aquery issues/planned features can be found on -[GitHub](https://github.com/bazelbuild/bazel/labels/team-Performance). - -## FAQs - -### The ActionKey remains the same even though the content of an input file changed. - -In the context of aquery, the `ActionKey` refers to the `String` gotten from -[ActionAnalysisMetadata#getKey](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/actions/ActionAnalysisMetadata.java;l=89;drc=8b856f5484f0117b2aebc302f849c2a15f273310): - -``` - Returns a string encoding all of the significant behaviour of this Action that might affect the - output. The general contract of `getKey` is this: if the work to be performed by the - execution of this action changes, the key must change. - - ... - - Examples of changes that should affect the key are: - - - Changes to the BUILD file that materially affect the rule which gave rise to this Action. - - Changes to the command-line options, environment, or other global configuration resources - which affect the behaviour of this kind of Action (other than changes to the names of the - input/output files, which are handled externally). - - An upgrade to the build tools which changes the program logic of this kind of Action - (typically this is achieved by incorporating a UUID into the key, which is changed each - time the program logic of this action changes). 
-  Note the following exception: for actions that discover inputs, the key must change if any
-  input names change or else action validation may falsely validate.
-```
-
-This excludes the changes to the content of the input files, and is not to be confused with
-[RemoteCacheClient#ActionKey](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/remote/common/RemoteCacheClient.java;l=38;drc=21577f202eb90ce94a337ebd2ede824d609537b6).
-
-## Updates
-
-For any issues/feature requests, please file an issue [here](https://github.com/bazelbuild/bazel/issues/new).
diff --git a/8.1.1/query/cquery.mdx b/8.1.1/query/cquery.mdx
deleted file mode 100644
index bd829c8..0000000
--- a/8.1.1/query/cquery.mdx
+++ /dev/null
@@ -1,646 +0,0 @@
----
-title: 'Configurable Query (cquery)'
----
-
-
-`cquery` is a variant of [`query`](/query/language) that correctly handles
-[`select()`](/docs/configurable-attributes) and build options' effects on the build
-graph.
-
-It achieves this by running over the results of Bazel's [analysis
-phase](/extending/concepts#evaluation-model),
-which integrates these effects. `query`, by contrast, runs over the results of
-Bazel's loading phase, before options are evaluated.
-
-For example:
-
-```
-$ cat > tree/BUILD <<EOF
-sh_library(
-    name = "ash",
-    deps = select({
-        ":excelsior": [":manna-ash"],
-        ":americana": [":white-ash"],
-        "//conditions:default": [":common-ash"],
-    }),
-)
-sh_library(name = "manna-ash")
-sh_library(name = "white-ash")
-sh_library(name = "common-ash")
-config_setting(
-    name = "excelsior",
-    values = {"define": "species=excelsior"},
-)
-config_setting(
-    name = "americana",
-    values = {"define": "species=americana"},
-)
-EOF
-```
-
-```
-# Traditional query: query doesn't know which select() branch you will choose,
-# so it conservatively lists all possible choices, including all used config_settings.
-$ bazel query "deps(//tree:ash)" --noimplicit_deps
-//tree:americana
-//tree:ash
-//tree:common-ash
-//tree:excelsior
-//tree:manna-ash
-//tree:white-ash
-
-# cquery: cquery lets you set build options at the command line and chooses
-# the exact dependencies that implies (and also the config_setting targets).
-$ bazel cquery "deps(//tree:ash)" --define species=excelsior --noimplicit_deps
-//tree:ash (9f87702)
-//tree:manna-ash (9f87702)
-//tree:americana (9f87702)
-//tree:excelsior (9f87702)
-```
-
-Each result includes a [unique identifier](#configurations) `(9f87702)` of
-the [configuration](/reference/glossary#configuration) the
-target is built with.
-
-Since `cquery` runs over the configured target graph, it doesn't have insight
-into artifacts like build actions nor access to [`test_suite`](/reference/be/general#test_suite)
-rules as they are not configured targets. For the former, see [`aquery`](/query/aquery).
-
-## Basic syntax
-
-A simple `cquery` call looks like:
-
-`bazel cquery "function(//target)"`
-
-The query expression `"function(//target)"` consists of the following:
-
-* **`function(...)`** is the function to run on the target. `cquery`
-  supports most
-  of `query`'s [functions](/query/language#functions), plus a
-  few new ones.
-* **`//target`** is the expression fed to the function. In this example, the
-  expression is a simple target. But the query language also allows nesting of functions.
-  See the [Query guide](/query/guide) for examples.
-
-`cquery` requires a target to run through the [loading and analysis](/extending/concepts#evaluation-model)
-phases.
Unless otherwise specified, `cquery` parses the target(s) listed in the -query expression. See [`--universe_scope`](#universe-scope) -for querying dependencies of top-level build targets. - -## Configurations - -The line: - -``` -//tree:ash (9f87702) -``` - -means `//tree:ash` was built in a configuration with ID `9f87702`. For most -targets, this is an opaque hash of the build option values defining the -configuration. - -To see the configuration's complete contents, run: - -``` -$ bazel config 9f87702 -``` - -`9f87702` is a prefix of the complete ID. This is because complete IDs are -SHA-256 hashes, which are long and hard to follow. `cquery` understands any valid -prefix of a complete ID, similar to -[Git short hashes](https://git-scm.com/book/en/v2/Git-Tools-Revision-Selection#_revision_selection). - To see complete IDs, run `$ bazel config`. - -## Target pattern evaluation - -`//foo` has a different meaning for `cquery` than for `query`. This is because -`cquery` evaluates _configured_ targets and the build graph may have multiple -configured versions of `//foo`. - -For `cquery`, a target pattern in the query expression evaluates -to every configured target with a label that matches that pattern. Output is -deterministic, but `cquery` makes no ordering guarantee beyond the -[core query ordering contract](/query/language#graph-order). - -This produces subtler results for query expressions than with `query`. -For example, the following can produce multiple results: - -``` -# Analyzes //foo in the target configuration, but also analyzes -# //genrule_with_foo_as_tool which depends on an exec-configured -# //foo. So there are two configured target instances of //foo in -# the build graph. -$ bazel cquery //foo --universe_scope=//foo,//genrule_with_foo_as_tool -//foo (9f87702) -//foo (exec) -``` - -If you want to precisely declare which instance to query over, use -the [`config`](#config) function. - -See `query`'s [target pattern -documentation](/query/language#target-patterns) for more information on target patterns. - -## Functions - -Of the [set of functions](/query/language#functions "list of query functions") -supported by `query`, `cquery` supports all but -[`allrdeps`](/query/language#allrdeps), -[`buildfiles`](/query/language#buildfiles), -[`rbuildfiles`](/query/language#rbuildfiles), -[`siblings`](/query/language#siblings), [`tests`](/query/language#tests), and -[`visible`](/query/language#visible). - -`cquery` also introduces the following new functions: - -### config - -`expr ::= config(expr, word)` - -The `config` operator attempts to find the configured target for -the label denoted by the first argument and configuration specified by the -second argument. - -Valid values for the second argument are `null` or a -[custom configuration hash](#configurations). Hashes can be retrieved from `$ -bazel config` or a previous `cquery`'s output. - -Examples: - -``` -$ bazel cquery "config(//bar, 3732cc8)" --universe_scope=//foo -``` - -``` -$ bazel cquery "deps(//foo)" -//bar (exec) -//baz (exec) - -$ bazel cquery "config(//baz, 3732cc8)" -``` - -If not all results of the first argument can be found in the specified -configuration, only those that can be found are returned. If no results -can be found in the specified configuration, the query fails. - -## Options - -### Build options - -`cquery` runs over a regular Bazel build and thus inherits the set of -[options](/reference/command-line-reference#build-options) available during a build. 
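-
-Any flag that affects the build configuration also changes `cquery`'s results.
-Continuing the `//tree:ash` example from the top of this page, switching the
-`--define` value selects a different `select()` branch:
-
-```
-# Same query, different configuration: this resolves the americana branch,
-# so //tree:white-ash appears in the result in place of //tree:manna-ash.
-$ bazel cquery "deps(//tree:ash)" --define species=americana --noimplicit_deps
-```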
-
-### Using cquery options
-
-#### `--universe_scope` (comma-separated list)
-
-Often, the dependencies of configured targets go through
-[transitions](/extending/rules#configurations),
-which causes their configuration to differ from that of their dependents. This flag
-allows you to query a target as if it were built as a dependency or a transitive
-dependency of another target. For example:
-
-```
-# x/BUILD
-genrule(
-    name = "my_gen",
-    srcs = ["x.in"],
-    outs = ["x.cc"],
-    cmd = "$(locations :tool) $< >$@",
-    tools = [":tool"],
-)
-cc_binary(
-    name = "tool",
-    srcs = ["tool.cpp"],
-)
-```
-
-Genrules configure their tools in the
-[exec configuration](/extending/rules#configurations)
-so the following queries would produce the following outputs:
-
-| Query | Target Built | Output |
-| ----- | ------------ | ------ |
-| `bazel cquery "//x:tool"` | `//x:tool` | `//x:tool (target config)` |
-| `bazel cquery "//x:tool" --universe_scope="//x:my_gen"` | `//x:my_gen` | `//x:tool (exec config)` |
-
-If this flag is set, its contents are built. _If it's not set, all targets
-mentioned in the query expression are built_ instead. The transitive closure of the
-built targets is used as the universe of the query. Either way, the targets to
-be built must be buildable at the top level (that is, compatible with top-level
-options). `cquery` returns results in the transitive closure of these
-top-level targets.
-
-Even if it's possible to build all targets in a query expression at the top
-level, it may be beneficial to not do so. For example, explicitly setting
-`--universe_scope` could prevent building targets multiple times in
-configurations you don't care about. It could also help specify which configuration version of a
-target you're looking for (since it's not currently possible
-to fully specify this any other way). You should set this flag
-if your query expression is more complex than `deps(//foo)`.
-
-#### `--implicit_deps` (boolean, default=True)
-
-Setting this flag to false filters out all results that aren't explicitly set in
-the BUILD file and are instead set elsewhere by Bazel. This includes filtering resolved
-toolchains.
-
-#### `--tool_deps` (boolean, default=True)
-
-Setting this flag to false filters out all configured targets for which the
-path from the queried target to them crosses a transition between the target
-configuration and the
-[non-target configurations](/extending/rules#configurations).
-If the queried target is in the target configuration, setting `--notool_deps` will
-only return targets that also are in the target configuration. If the queried
-target is in a non-target configuration, setting `--notool_deps` will only return
-targets also in non-target configurations. This setting generally does not affect filtering
-of resolved toolchains.
-
-#### `--include_aspects` (boolean, default=True)
-
-Include dependencies added by [aspects](/extending/aspects).
-
-If this flag is disabled, `cquery somepath(X, Y)` and
-`cquery deps(X) | grep 'Y'` omit Y if X only depends on it through an aspect.
-
-## Output formats
-
-By default, cquery outputs results in a dependency-ordered list of label and configuration pairs.
-There are other options for exposing the results as well.
-
-### Transitions
-
-```
---transitions=lite
---transitions=full
-```
-
-Configuration [transitions](/extending/rules#configurations)
-are used to build targets underneath the top-level targets in different
-configurations than the top-level targets.
-
-For example, a target might impose a transition to the exec configuration on all
-dependencies in its `tools` attribute. These are known as attribute
-transitions. Rules can also impose transitions on their own configurations,
-known as rule class transitions. This output format prints information about
-these transitions, such as what type they are and the effect they have on build
-options.
-
-This output format is triggered by the `--transitions` flag, which by default is
-set to `NONE`. It can be set to `FULL` or `LITE` mode. `FULL` mode outputs
-information about rule class transitions and attribute transitions, including a
-detailed diff of the options before and after the transition. `LITE` mode
-outputs the same information without the options diff.
-
-### Protocol message output
-
-```
---output=proto
-```
-
-This option causes the resulting targets to be printed in a binary protocol
-buffer form. The definition of the protocol buffer can be found at
-[src/main/protobuf/analysis_v2.proto](https://github.com/bazelbuild/bazel/blob/master/src/main/protobuf/analysis_v2.proto).
-
-`CqueryResult` is the top level message containing the results of the cquery. It
-has a list of `ConfiguredTarget` messages and a list of `Configuration`
-messages. Each `ConfiguredTarget` has a `configuration_id` whose value is equal
-to that of the `id` field from the corresponding `Configuration` message.
-
-#### --[no]proto:include_configurations
-
-By default, cquery results return configuration information as part of each
-configured target. If you'd like to omit this information and get proto output
-that is formatted exactly like query's proto output, set this flag to false.
-
-See [query's proto output documentation](/query/language#output-formats)
-for more proto output-related options.
-
-Note: While selects are resolved both at the top level of returned
-targets and within attributes, all possible inputs for selects are still
-included as `rule_input` fields.
-
-### Graph output
-
-```
---output=graph
-```
-
-This option generates output as a Graphviz-compatible .dot file. See `query`'s
-[graph output documentation](/query/language#display-result-graph) for details. `cquery`
-also supports [`--graph:node_limit`](/query/language#graph-nodelimit) and
-[`--graph:factored`](/query/language#graph-factored).
-
-### Files output
-
-```
---output=files
-```
-
-This option prints a list of the output files produced by each target matched
-by the query, similar to the list printed at the end of a `bazel build`
-invocation. The output contains only the files advertised in the requested
-output groups, as determined by the
-[`--output_groups`](/reference/command-line-reference#flag--output_groups) flag.
-It does include source files.
-
-All paths emitted by this output format are relative to the
-[execroot](https://bazel.build/remote/output-directories), which can be obtained
-via `bazel info execution_root`. If the `bazel-out` convenience symlink exists,
-paths to files in the main repository also resolve relative to the workspace
-directory.
-
-Note: The output of `bazel cquery --output=files //pkg:foo` contains the output
-files of `//pkg:foo` in *all* configurations that occur in the build (also see
-the [section on target pattern evaluation](#target-pattern-evaluation)). If that
-is not desired, wrap your query in [`config(..., target)`](#config).
-
-### Defining the output format using Starlark
-
-```
---output=starlark
-```
-
-This output format calls a [Starlark](/rules/language)
-function for each configured target in the query result, and prints the value
-returned by the call. The `--starlark:file` flag specifies the location of a
-Starlark file that defines a function named `format` with a single parameter,
-`target`. This function is called for each [Target](/rules/lib/builtins/Target)
-in the query result. Alternatively, for convenience, you may specify just the
-body of a function declared as `def format(target): return expr` by using the
-`--starlark:expr` flag.
-
-#### 'cquery' Starlark dialect
-
-The cquery Starlark environment differs from a BUILD or .bzl file. It includes
-all core Starlark
-[built-in constants and functions](https://github.com/bazelbuild/starlark/blob/master/spec.md#built-in-constants-and-functions),
-plus a few cquery-specific ones described below, but not (for example) `glob`,
-`native`, or `rule`, and it does not support load statements. 
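-
-As a minimal illustration of this dialect (assuming any buildable target
-`//foo`), the following prints each configured target's label using only the
-core Starlark built-in `str` and the `target` parameter described above:
-
-```
-$ bazel cquery //foo --output=starlark --starlark:expr="str(target.label)"
-```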
-
-##### build_options(target)
-
-`build_options(target)` returns a map whose keys are build option identifiers (see
-[Configurations](/extending/config))
-and whose values are their Starlark values. Build options whose values are not legal Starlark
-values are omitted from this map.
-
-If the target is an input file, `build_options(target)` returns None, as input file
-targets have a null configuration.
-
-##### providers(target)
-
-`providers(target)` returns a map whose keys are names of
-[providers](/extending/rules#providers)
-(for example, `"DefaultInfo"`) and whose values are their Starlark values. Providers
-whose values are not legal Starlark values are omitted from this map.
-
-#### Examples
-
-Print a space-separated list of the base names of all files produced by `//foo`:
-
-```
-  bazel cquery //foo --output=starlark \
-    --starlark:expr="' '.join([f.basename for f in target.files.to_list()])"
-```
-
-Print a space-separated list of the paths of all files produced by **rule** targets in
-`//bar` and its subpackages:
-
-```
-  bazel cquery 'kind(rule, //bar/...)' --output=starlark \
-    --starlark:expr="' '.join([f.path for f in target.files.to_list()])"
-```
-
-Print a list of the mnemonics of all actions registered by `//foo`:
-
-```
-  bazel cquery //foo --output=starlark \
-    --starlark:expr="[a.mnemonic for a in target.actions]"
-```
-
-Print a list of compilation outputs registered by a `cc_library` `//baz`:
-
-```
-  bazel cquery //baz --output=starlark \
-    --starlark:expr="[f.path for f in target.output_groups.compilation_outputs.to_list()]"
-```
-
-Print the value of the command line option `--javacopt` when building `//foo`:
-
-```
-  bazel cquery //foo --output=starlark \
-    --starlark:expr="build_options(target)['//command_line_option:javacopt']"
-```
-
-Print the label of each target with exactly one output. This example uses
-Starlark functions defined in a file.
-
-```
-  $ cat example.cquery
-
-  def has_one_output(target):
-    return len(target.files.to_list()) == 1
-
-  def format(target):
-    if has_one_output(target):
-      return target.label
-    else:
-      return ""
-
-  $ bazel cquery //baz --output=starlark --starlark:file=example.cquery
-```
-
-Print the label of each target which is strictly Python 3. This example uses
-Starlark functions defined in a file.
-
-```
-  $ cat example.cquery
-
-  def format(target):
-    p = providers(target)
-    py_info = p.get("PyInfo")
-    if py_info and py_info.has_py3_only_sources:
-      return target.label
-    else:
-      return ""
-
-  $ bazel cquery //baz --output=starlark --starlark:file=example.cquery
-```
-
-Extract a value from a user-defined provider:
-
-```
-  $ cat some_package/my_rule.bzl
-
-  MyRuleInfo = provider(fields={"color": "the name of a color"})
-
-  def _my_rule_impl(ctx):
-      ...
-      return [MyRuleInfo(color="red")]
-
-  my_rule = rule(
-      implementation = _my_rule_impl,
-      attrs = {...},
-  )
-
-  $ cat example.cquery
-
-  def format(target):
-    p = providers(target)
-    my_rule_info = p.get("//some_package:my_rule.bzl%MyRuleInfo")
-    if my_rule_info:
-      return my_rule_info.color
-    return ""
-
-  $ bazel cquery //baz --output=starlark --starlark:file=example.cquery
-```
-
-## cquery vs. query
-
-`cquery` and `query` complement each other and excel in
-different niches. Consider the following to decide which is right for you:
-
-* `cquery` follows specific `select()` branches to
-  model the exact graph you build. `query` doesn't know which
-  branch the build chooses, so it overapproximates by including all branches.
-* `cquery`'s precision requires building more of the graph than
-  `query` does. Specifically, `cquery`
-  evaluates _configured targets_ while `query` only
-  evaluates _targets_. This takes more time and uses more memory.
-* `cquery`'s interpretation of
-  the [query language](/query/language) introduces ambiguity
-  that `query` avoids. For example,
-  if `"//foo"` exists in two configurations, which one
-  should `cquery "deps(//foo)"` use?
-  The [`config`](#config) function can help with this.
-* As a newer tool, `cquery` lacks support for certain use
-  cases. See [Known issues](#known-issues) for details.
-
-## Known issues
-
-**All targets that `cquery` "builds" must have the same configuration.**
-
-Before evaluating queries, `cquery` triggers a build up to just
-before the point where build actions would execute. The targets it
-"builds" are by default selected from all labels that appear in the query
-expression (this can be overridden
-with [`--universe_scope`](#universe-scope)). These
-must have the same configuration.
-
-While these generally share the top-level "target" configuration,
-rules can change their own configuration with
-[incoming edge transitions](/extending/config#incoming-edge-transitions).
-This is where `cquery` falls short.
-
-Workaround: If possible, set `--universe_scope` to a stricter
-scope. For example:
-
-```
-# This command attempts to build the transitive closures of both //foo and
-# //bar. //bar uses an incoming edge transition to change its --cpu flag.
-$ bazel cquery 'somepath(//foo, //bar)'
-ERROR: Error doing post analysis query: Top-level targets //foo and //bar
-have different configurations (top-level targets with different
-configurations is not supported)
-
-# This command only builds the transitive closure of //foo, under which
-# //bar should exist in the correct configuration.
-$ bazel cquery 'somepath(//foo, //bar)' --universe_scope=//foo
-```
-
-**No support for [`--output=xml`](/query/language#xml).**
-
-**Non-deterministic output.**
-
-`cquery` does not automatically wipe the build graph from
-previous commands and is therefore prone to picking up results from past
-queries. For example, `genrule` exerts an exec transition on
-its `tools` attribute - that is, it configures its tools in the
-[exec configuration](/extending/rules#configurations).
-
-You can see the lingering effects of that transition below.
-
-```
-$ cat > foo/BUILD <<EOF
-genrule(
-    name = "my_gen",
-    srcs = ["x.in"],
-    outs = ["x.cc"],
-    cmd = "$(locations :tool) $< >$@",
-    tools = [":tool"],
-)
-cc_library(
-    name = "tool",
-)
-EOF
-
-$ bazel cquery "//foo:tool"
-tool(target_config)
-
-$ bazel cquery "deps(//foo:my_gen)"
-my_gen (target_config)
-tool (exec_config)
-...
-
-$ bazel cquery "//foo:tool"
-tool(exec_config)
-```
-
-Workaround: change any startup option to force re-analysis of configured targets.
-For example, add `--test_arg=<whatever>` to your build command.
-
-## Troubleshooting
-
-### Recursive target patterns (`/...`)
-
-If you encounter:
-
-```
-$ bazel cquery --universe_scope=//foo:app "somepath(//foo:app, //foo/...)"
-ERROR: Error doing post analysis query: Evaluation failed: Unable to load package '[foo]'
-because package is not in scope. Check that all target patterns in query expression are within the
---universe_scope of this query.
-```
-
-this incorrectly suggests package `//foo` isn't in scope even though
-`--universe_scope=//foo:app` includes it. This is due to design limitations in
-`cquery`. 
As a workaround, explicitly include `//foo/...` in the universe -scope: - -``` -$ bazel cquery --universe_scope=//foo:app,//foo/... "somepath(//foo:app, //foo/...)" -``` - -If that doesn't work (for example, because some target in `//foo/...` can't -build with the chosen build flags), manually unwrap the pattern into its -constituent packages with a pre-processing query: - -``` -# Replace "//foo/..." with a subshell query call (not cquery!) outputting each package, piped into -# a sed call converting "<pkg>" to "//<pkg>:*", piped into a "+"-delimited line merge. -# Output looks like "//foo:*+//foo/bar:*+//foo/baz". -# -$ bazel cquery --universe_scope=//foo:app "somepath(//foo:app, $(bazel query //foo/... ---output=package | sed -e 's/^/\/\//' -e 's/$/:*/' | paste -sd "+" -))" -``` diff --git a/8.1.1/reference/glossary.mdx b/8.1.1/reference/glossary.mdx deleted file mode 100644 index 3b0b497..0000000 --- a/8.1.1/reference/glossary.mdx +++ /dev/null @@ -1,715 +0,0 @@ ---- -title: 'Bazel Glossary' ---- - - - -### Action - -A command to run during the build, for example, a call to a compiler that takes -[artifacts](#artifact) as inputs and produces other artifacts as outputs. -Includes metadata like the command line arguments, action key, environment -variables, and declared input/output artifacts. - -**See also:** [Rules documentation](/extending/rules#actions) - -### Action cache - -An on-disk cache that stores a mapping of executed [actions](#action) to the -outputs they created. The cache key is known as the [action key](#action-key). A -core component for Bazel's incrementality model. The cache is stored in the -output base directory and thus survives Bazel server restarts. - -### Action graph - -An in-memory graph of [actions](#action) and the [artifacts](#artifact) that -these actions read and generate. The graph might include artifacts that exist as -source files (for example, in the file system) as well as generated -intermediate/final artifacts that are not mentioned in `BUILD` files. Produced -during the [analysis phase](#analysis-phase) and used during the [execution -phase](#execution-phase). - -### Action graph query (aquery) - -A [query](#query-concept) tool that can query over build [actions](#action). -This provides the ability to analyze how [build rules](#rule) translate into the -actual work builds do. - -### Action key - -The cache key of an [action](#action). Computed based on action metadata, which -might include the command to be executed in the action, compiler flags, library -locations, or system headers, depending on the action. Enables Bazel to cache or -invalidate individual actions deterministically. - -### Analysis phase - -The second phase of a build. Processes the [target graph](#target-graph) -specified in [`BUILD` files](#build-file) to produce an in-memory [action -graph](#action-graph) that determines the order of actions to run during the -[execution phase](#execution-phase). This is the phase in which rule -implementations are evaluated. - -### Artifact - -A source file or a generated file. Can also be a directory of files, known as -[tree artifacts](#tree-artifact). - -An artifact may be an input to multiple actions, but must only be generated by -at most one action. - -An artifact that corresponds to a [file target](#target) can be addressed by a -label. - -### Aspect - -A mechanism for rules to create additional [actions](#action) in their -dependencies. 
For example, if target A depends on B, one can apply an aspect on -A that traverses *up* a dependency edge to B, and runs additional actions in B -to generate and collect additional output files. These additional actions are -cached and reused between targets requiring the same aspect. Created with the -`aspect()` Starlark Build API function. Can be used, for example, to generate -metadata for IDEs, and create actions for linting. - -**See also:** [Aspects documentation](/extending/aspects) - -### Aspect-on-aspect - -A composition mechanism whereby aspects can be applied to the results -of other aspects. For example, an aspect that generates information for use by -IDEs can be applied on top of an aspect that generates `.java` files from a -proto. - -For an aspect `A` to apply on top of aspect `B`, the [providers](#provider) that -`B` advertises in its [`provides`](/rules/lib/globals#aspect.provides) attribute -must match what `A` declares it wants in its [`required_aspect_providers`](/rules/lib/globals#aspect.required_aspect_providers) -attribute. - -### Attribute - -A parameter to a [rule](#rule), used to express per-target build information. -Examples include `srcs`, `deps`, and `copts`, which respectively declare a -target's source files, dependencies, and custom compiler options. The particular -attributes available for a given target depend on its rule type. - -### .bazelrc - -Bazel’s configuration file used to change the default values for [startup -flags](#startup-flags) and [command flags](#command-flags), and to define common -groups of options that can then be set together on the Bazel command line using -a `--config` flag. Bazel can combine settings from multiple bazelrc files -(systemwide, per-workspace, per-user, or from a custom location), and a -`bazelrc` file may also import settings from other `bazelrc` files. - -### Blaze - -The Google-internal version of Bazel. Google’s main build system for its -mono-repository. - -### BUILD File - -A `BUILD` file is the main configuration file that tells Bazel what software -outputs to build, what their dependencies are, and how to build them. Bazel -takes a `BUILD` file as input and uses the file to create a graph of dependencies -and to derive the actions that must be completed to build intermediate and final -software outputs. A `BUILD` file marks a directory and any sub-directories not -containing a `BUILD` file as a [package](#package), and can contain -[targets](#target) created by [rules](#rule). The file can also be named -`BUILD.bazel`. - -### BUILD.bazel File - -See [`BUILD` File](#build-file). Takes precedence over a `BUILD` file in the same -directory. - -### .bzl File - -A file that defines rules, [macros](#macro), and constants written in -[Starlark](#starlark). These can then be imported into [`BUILD` -files](#build-file) using the `load()` function. - -// TODO: ### Build event protocol - -// TODO: ### Build flag - -### Build graph - -The dependency graph that Bazel constructs and traverses to perform a build. -Includes nodes like [targets](#target), [configured -targets](#configured-target), [actions](#action), and [artifacts](#artifact). A -build is considered complete when all [artifacts](#artifact) on which a set of -requested targets depend are verified as up-to-date. - -### Build setting - -A Starlark-defined piece of [configuration](#configuration). -[Transitions](#transition) can set build settings to change a subgraph's -configuration. 
If exposed to the user as a [command-line flag](#command-flags), it is
-also known as a build flag.
-
-### Clean build
-
-A build that doesn't use the results of earlier builds. This is generally slower
-than an [incremental build](#incremental-build) but commonly considered to be
-more [correct](#correctness). Bazel guarantees both clean and incremental builds
-are always correct.
-
-### Client-server model
-
-The `bazel` command-line client automatically starts a background server on the
-local machine to execute Bazel [commands](#command). The server persists across
-commands but automatically stops after a period of inactivity (or explicitly via
-`bazel shutdown`). Splitting Bazel into a server and client helps amortize JVM
-startup time and supports faster [incremental builds](#incremental-build)
-because the [action graph](#action-graph) remains in memory across commands.
-
-### Command
-
-Used on the command line to invoke different Bazel functions, like `bazel
-build`, `bazel test`, `bazel run`, and `bazel query`.
-
-### Command flags
-
-A set of flags specific to a [command](#command). Command flags are specified
-*after* the command (`bazel build <command flags>`). Flags can be applicable to
-one or more commands. For example, `--configure` is a flag exclusively for the
-`bazel sync` command, but `--keep_going` is applicable to `sync`, `build`,
-`test` and more. Flags are often used for [configuration](#configuration)
-purposes, so changes in flag values can cause Bazel to invalidate in-memory
-graphs and restart the [analysis phase](#analysis-phase).
-
-### Configuration
-
-Information outside of [rule](#rule) definitions that impacts how rules generate
-[actions](#action). Every build has at least one configuration specifying the
-target platform, action environment variables, and command-line [build
-flags](#command-flags). [Transitions](#transition) may create additional
-configurations, such as for host tools or cross-compilation.
-
-**See also:** [Configurations](/extending/rules#configurations)
-
-// TODO: ### Configuration fragment
-
-### Configuration trimming
-
-The process of only including the pieces of [configuration](#configuration) a
-target actually needs. For example, if you build Java binary `//:j` with C++
-dependency `//:c`, it's wasteful to include the value of `--javacopt` in the
-configuration of `//:c` because changing `--javacopt` unnecessarily breaks C++
-build cacheability.
-
-### Configured query (cquery)
-
-A [query](#query-concept) tool that queries over [configured
-targets](#configured-target) (after the [analysis phase](#analysis-phase)
-completes). This means `select()` and [build flags](#command-flags) (such as
-`--platforms`) are accurately reflected in the results.
-
-**See also:** [cquery documentation](/query/cquery)
-
-### Configured target
-
-The result of evaluating a [target](#target) with a
-[configuration](#configuration). The [analysis phase](#analysis-phase) produces
-this by combining the build's options with the targets that need to be built.
-For example, if `//:foo` builds for two different architectures in the same
-build, it has two configured targets: `<//:foo, x86>` and `<//:foo, arm>`.
-
-### Correctness
-
-A build is correct when its output faithfully reflects the state of its
-transitive inputs. To achieve correct builds, Bazel strives to be
-[hermetic](#hermeticity) and reproducible, and to make [build
-analysis](#analysis-phase) and [action execution](#execution-phase)
-deterministic.
-
-### Dependency
-
-A directed edge between two [targets](#target). 
A target `//:foo` has a *target -dependency* on target `//:bar` if `//:foo`'s attribute values contain a -reference to `//:bar`. `//:foo` has an *action dependency* on `//:bar` if an -action in `//:foo` depends on an input [artifact](#artifact) created by an -action in `//:bar`. - -In certain contexts, it could also refer to an _external dependency_; see -[modules](#module). - -### Depset - -A data structure for collecting data on transitive dependencies. Optimized so -that merging depsets is time and space efficient, because it’s common to have -very large depsets (hundreds of thousands of files). Implemented to -recursively refer to other depsets for space efficiency reasons. [Rule](#rule) -implementations should not "flatten" depsets by converting them to lists unless -the rule is at the top level of the build graph. Flattening large depsets incurs -huge memory consumption. Also known as *nested sets* in Bazel's internal -implementation. - -**See also:** [Depset documentation](/extending/depsets) - -### Disk cache - -A local on-disk blob store for the remote caching feature. Can be used in -conjunction with an actual remote blob store. - -### Distdir - -A read-only directory containing files that Bazel would otherwise fetch from the -internet using repository rules. Enables builds to run fully offline. - -### Dynamic execution - -An execution strategy that selects between local and remote execution based on -various heuristics, and uses the execution results of the faster successful -method. Certain [actions](#action) are executed faster locally (for example, -linking) and others are faster remotely (for example, highly parallelizable -compilation). A dynamic execution strategy can provide the best possible -incremental and clean build times. - -### Execution phase - -The third phase of a build. Executes the [actions](#action) in the [action -graph](#action-graph) created during the [analysis phase](#analysis-phase). -These actions invoke executables (compilers, scripts) to read and write -[artifacts](#artifact). *Spawn strategies* control how these actions are -executed: locally, remotely, dynamically, sandboxed, docker, and so on. - -### Execution root - -A directory in the [workspace](#workspace)’s [output base](#output-base) -directory where local [actions](#action) are executed in -non-[sandboxed](#sandboxing) builds. The directory contents are mostly symlinks -of input [artifacts](#artifact) from the workspace. The execution root also -contains symlinks to external repositories as other inputs and the `bazel-out` -directory to store outputs. Prepared during the [loading phase](#loading-phase) -by creating a *symlink forest* of the directories that represent the transitive -closure of packages on which a build depends. Accessible with `bazel info -execution_root` on the command line. - -### File - -See [Artifact](#artifact). - -### Hermeticity - -A build is hermetic if there are no external influences on its build and test -operations, which helps to make sure that results are deterministic and -[correct](#correctness). For example, hermetic builds typically disallow network -access to actions, restrict access to declared inputs, use fixed timestamps and -timezones, restrict access to environment variables, and use fixed seeds for -random number generators - -### Incremental build - -An incremental build reuses the results of earlier builds to reduce build time -and resource usage. Dependency checking and caching aim to produce correct -results for this type of build. 
An incremental build is the opposite of a clean -build. - -// TODO: ### Install base - -### Label - -An identifier for a [target](#target). Generally has the form -`@repo//path/to/package:target`, where `repo` is the (apparent) name of the -[repository](#repository) containing the target, `path/to/package` is the path -to the directory that contains the [`BUILD` file](#build-file) declaring the -target (this directory is also known as the [package](#package)), and `target` -is the name of the target itself. Depending on the situation, parts of this -syntax may be omitted. - -**See also**: [Labels](/concepts/labels) - -### Loading phase - -The first phase of a build where Bazel executes [`BUILD` files](#build-file) to -create [packages](#package). [Macros](#macro) and certain functions like -`glob()` are evaluated in this phase. Interleaved with the second phase of the -build, the [analysis phase](#analysis-phase), to build up a [target -graph](#target-graph). - -### Legacy macro - -A flavor of [macro](#macro) which is declared as an ordinary -[Starlark](#starlark) function, and which runs as a side effect of executing a -`BUILD` file. - -Legacy macros can do anything a function can. This means they can be convenient, -but they can also be harder to read, write, and use. A legacy macro might -unexpectedly mutate its arguments or fail when given a `select()` or ill-typed -argument. - -Contrast with [symbolic macros](#symbolic-macro). - -**See also:** [Legacy macro documentation](/extending/legacy-macros) - -### Macro - -A mechanism to compose multiple [rule](#rule) target declarations together under -a single [Starlark](#starlark) callable. Enables reusing common rule declaration -patterns across `BUILD` files. Expanded to the underlying rule target -declarations during the [loading phase](#loading-phase). - -Comes in two flavors: [symbolic macros](#symbolic-macro) (since Bazel 8) and -[legacy macros](#legacy-macro). - -### Mnemonic - -A short, human-readable string selected by a rule author to quickly understand -what an [action](#action) in the rule is doing. Mnemonics can be used as -identifiers for *spawn strategy* selections. Some examples of action mnemonics -are `Javac` from Java rules, `CppCompile` from C++ rules, and -`AndroidManifestMerger` from Android rules. - -### Module - -A Bazel project that can have multiple versions, each of which can have -dependencies on other modules. This is analogous to familiar concepts in other -dependency management systems, such as a Maven _artifact_, an npm _package_, a -Go _module_, or a Cargo _crate_. Modules form the backbone of Bazel's external -dependency management system. - -Each module is backed by a [repo](#repository) with a `MODULE.bazel` file at its -root. This file contains metadata about the module itself (such as its name and -version), its direct dependencies, and various other data including toolchain -registrations and [module extension](#module-extension) input. - -Module metadata is hosted in Bazel registries. - -**See also:** [Bazel modules](/external/module) - -### Module Extension - -A piece of logic that can be run to generate [repos](#repository) by reading -inputs from across the [module](#module) dependency graph and invoking [repo -rules](#repository-rule). Module extensions have capabilities similar to repo -rules, allowing them to access the internet, perform file I/O, and so on. 
-
-**See also:** [Module extensions](/external/extension)
-
-### Native rules
-
-[Rules](#rule) that are built into Bazel and implemented in Java. Such rules
-appear in [`.bzl` files](#bzl-file) as functions in the `native` module (for
-example, `native.cc_library` or `native.java_library`). User-defined rules
-(non-native) are created using [Starlark](#starlark).
-
-### Output base
-
-A [workspace](#workspace)-specific directory to store Bazel output files. Used
-to separate outputs from the *workspace*'s source tree (the [main
-repo](#repository)). Located in the [output user root](#output-user-root).
-
-### Output groups
-
-A group of files that is expected to be built when Bazel finishes building a
-target. [Rules](#rule) put their usual outputs in the "default output group"
-(e.g. the `.jar` file of a `java_library`, `.a` and `.so` for `cc_library`
-targets). The default output group is the output group whose
-[artifacts](#artifact) are built when a target is requested on the command line.
-Rules can define more named output groups that can be explicitly specified in
-[`BUILD` files](#build-file) (`filegroup` rule) or the command line
-(`--output_groups` flag).
-
-### Output user root
-
-A user-specific directory to store Bazel's outputs. The directory name is
-derived from the user's system username. Prevents output file collisions if
-multiple users are building the same project on the system at the same time.
-Contains subdirectories corresponding to build outputs of individual workspaces,
-also known as [output bases](#output-base).
-
-### Package
-
-The set of [targets](#target) defined by a [`BUILD` file](#build-file). A
-package's name is the `BUILD` file's path relative to the [repo](#repository)
-root. A package can contain subpackages, or subdirectories containing `BUILD`
-files, thus forming a package hierarchy.
-
-### Package group
-
-A [target](#target) representing a set of packages. Often used in `visibility`
-attribute values.
-
-### Platform
-
-A "machine type" involved in a build. This includes the machine Bazel runs on
-(the "host" platform), the machines build tools execute on ("exec" platforms),
-and the machines targets are built for ("target platforms").
-
-### Provider
-
-A schema describing a unit of information to pass between
-[rule targets](#rule-target) along dependency relationships. Typically this
-contains information like compiler options, transitive source or output files,
-and build metadata. Frequently used in conjunction with [depsets](#depset) to
-efficiently store accumulated transitive data. An example of a built-in provider
-is `DefaultInfo`.
-
-Note: The object holding specific data for a given rule target is
-referred to as a "provider instance", although sometimes this is conflated with
-"provider".
-
-**See also:** [Provider documentation](/extending/rules#providers)
-
-### Query (concept)
-
-The process of analyzing a [build graph](#build-graph) to understand
-[target](#target) properties and dependency structures. Bazel supports three
-query variants: [query](#query-command), [cquery](#configured-query), and
-[aquery](#action-graph-query).
-
-### query (command)
-
-A [query](#query-concept) tool that operates over the build's post-[loading
-phase](#loading-phase) [target graph](#target-graph). This is relatively fast,
-but can't analyze the effects of `select()`, [build flags](#command-flags),
-[artifacts](#artifact), or build [actions](#action). 
- -**See also:** [Query how-to](/query/guide), [Query reference](/query/language) - -### Repository - -A directory tree with a boundary marker file at its root, containing source -files that can be used in a Bazel build. Often shortened to just **repo**. - -A repo boundary marker file can be `MODULE.bazel` (signaling that this repo -represents a Bazel module), `REPO.bazel`, or in legacy contexts, `WORKSPACE` or -`WORKSPACE.bazel`. Any repo boundary marker file will signify the boundary of a -repo; multiple such files can coexist in a directory. - -The *main repo* is the repo in which the current Bazel command is being run. - -*External repos* are defined by specifying [modules](#module) in `MODULE.bazel` -files, or invoking [repo rules](#repository-rule) in [module -extensions](#module-extension). They can be fetched on demand to a predetermined -"magical" location on disk. - -Each repo has a unique, constant *canonical* name, and potentially different -*apparent* names when viewed from other repos. - -**See also**: [External dependencies overview](/external/overview) - -### Repository cache - -A shared content-addressable cache of files downloaded by Bazel for builds, -shareable across [workspaces](#workspace). Enables offline builds after the -initial download. Commonly used to cache files downloaded through [repository -rules](#repository-rule) like `http_archive` and repository rule APIs like -`repository_ctx.download`. Files are cached only if their SHA-256 checksums are -specified for the download. - -### Repository rule - -A schema for repository definitions that tells Bazel how to materialize (or -"fetch") a [repository](#repository). Often shortened to just **repo rule**. -Repo rules are invoked by Bazel internally to define repos backed by -[modules](#module), or can be invoked by [module extensions](#module-extension). -Repo rules can access the internet or perform file I/O; the most common repo -rule is `http_archive` to download an archive containing source files from the -internet. - -**See also:** [Repo rule documentation](/extending/repo) - -### Reproducibility - -The property of a build or test that a set of inputs to the build or test will -always produce the same set of outputs every time, regardless of time, method, -or environment. Note that this does not necessarily imply that the outputs are -[correct](#correctness) or the desired outputs. - -### Rule - -A schema for defining [rule targets](#rule-target) in a `BUILD` file, such as -`cc_library`. From the perspective of a `BUILD` file author, a rule consists of -a set of [attributes](#attributes) and black box logic. The logic tells the -rule target how to produce output [artifacts](#artifact) and pass information to -other rule targets. From the perspective of `.bzl` authors, rules are the -primary way to extend Bazel to support new programming languages and -environments. - -Rules are instantiated to produce rule targets in the -[loading phase](#loading-phase). In the [analysis phase](#analysis-phase) rule -targets communicate information to their downstream dependencies in the form of -[providers](#provider), and register [actions](#action) describing how to -generate their output artifacts. These actions are run in the [execution -phase](#execution-phase). - -Note: Historically the term "rule" has been used to refer to a rule target. -This usage was inherited from tools like Make, but causes confusion and should -be avoided for Bazel. 
-
-**See also:** [Rules documentation](/extending/rules)
-
-### Rule target
-
-A [target](#target) that is an instance of a rule. Contrasts with file targets
-and package groups. Not to be confused with [rule](#rule).
-
-### Runfiles
-
-The runtime dependencies of an executable [target](#target). Most commonly, the
-executable is the executable output of a test rule, and the runfiles are runtime
-data dependencies of the test. Before the invocation of the executable (during
-`bazel test`), Bazel prepares the tree of runfiles alongside the test executable
-according to their source directory structure.
-
-**See also:** [Runfiles documentation](/extending/rules#runfiles)
-
-### Sandboxing
-
-A technique to isolate a running [action](#action) inside a restricted and
-temporary [execution root](#execution-root), helping to ensure that it doesn’t
-read undeclared inputs or write undeclared outputs. Sandboxing greatly improves
-[hermeticity](#hermeticity), but usually has a performance cost, and requires
-support from the operating system. The performance cost depends on the platform.
-On Linux, it's not significant, but on macOS it can make sandboxing unusable.
-
-### Skyframe
-
-[Skyframe](/reference/skyframe) is the core parallel, functional, and incremental evaluation framework of Bazel.
-
-// TODO: ### Spawn strategy
-
-### Stamping
-
-A feature to embed additional information into Bazel-built
-[artifacts](#artifact). For example, this can be used for source control, build
-time, and other workspace or environment-related information for release builds.
-Enabled through the `--workspace_status_command` flag and [rules](/extending/rules) that
-support the stamp attribute.
-
-### Starlark
-
-The extension language for writing [rules](/extending/rules) and [macros](#macro). A
-restricted subset of Python (syntactically and grammatically) designed for
-configuration and for better performance. Uses the [`.bzl`
-file](#bzl-file) extension. [`BUILD` files](#build-file) use an even more
-restricted version of Starlark (such as no `def` function definitions). Starlark
-was formerly known as Skylark.
-
-**See also:** [Starlark language documentation](/rules/language)
-
-// TODO: ### Starlark rules
-
-// TODO: ### Starlark rule sandwich
-
-### Startup flags
-
-The set of flags specified between `bazel` and the [command](#command),
-for example, `bazel --host_jvm_debug build`. These flags modify the
-[configuration](#configuration) of the Bazel server, so any modification to
-startup flags causes a server restart. Startup flags are not specific to any
-command.
-
-### Symbolic macro
-
-A flavor of [macro](#macro) which is declared with a [rule](#rule)-like
-[attribute](#attribute) schema, allows hiding internal declared
-[targets](#target) from their own package, and enforces a predictable naming
-pattern on the targets that the macro declares. Designed to avoid some of the
-problems seen in large [legacy macro](#legacy-macro) codebases.
-
-**See also:** [Symbolic macro documentation](/extending/macros)
-
-### Target
-
-An object that is defined in a [`BUILD` file](#build-file) and identified by a
-[label](#label). Targets represent the buildable units of a workspace from
-the perspective of the end user.
-
-A target that is declared by instantiating a [rule](#rule) is called a [rule
-target](#rule-target). Depending on the rule, these may be runnable (like
-`cc_binary`) or testable (like `cc_test`). Rule targets typically depend on
-other targets via their [attributes](#attribute) (such as `deps`); these
-dependencies form the basis of the [target graph](#target-graph).
-
-Aside from rule targets, there are also file targets and [package group](#package-group)
-targets. File targets correspond to [artifacts](#artifact) that are referenced
-within a `BUILD` file. As a special case, the `BUILD` file of any package is
-always considered a source file target in that package.
-
-Targets are discovered during the [loading phase](#loading-phase). During the
-[analysis phase](#analysis-phase), targets are associated with [build
-configurations](#configuration) to form [configured
-targets](#configured-target).
-
-### Target graph
-
-An in-memory graph of [targets](#target) and their dependencies. Produced during
-the [loading phase](#loading-phase) and used as an input to the [analysis
-phase](#analysis-phase).
-
-### Target pattern
-
-A way to specify a group of [targets](#target) on the command line. Commonly
-used patterns are `:all` (all rule targets), `:*` (all rule + file targets),
-`...` (current [package](#package) and all subpackages recursively). Can be used
-in combination, for example, `//...:*` means all rule and file targets in all
-packages recursively from the root of the [workspace](#workspace).
-
-### Tests
-
-Rule [targets](#target) instantiated from test rules, which therefore contain a
-test executable. A return code of zero from the completion of the executable
-indicates test success. The exact contract between Bazel and tests (such as test
-environment variables, test result collection methods) is specified in the [Test
-Encyclopedia](/reference/test-encyclopedia).
-
-### Toolchain
-
-A set of tools to build outputs for a language. Typically, a toolchain includes
-compilers, linkers, interpreters, and/or linters. A toolchain can also vary by
-platform, that is, a Unix compiler toolchain's components may differ for the
-Windows variant, even though the toolchain is for the same language. Selecting
-the right toolchain for the platform is known as toolchain resolution.
-
-### Top-level target
-
-A build [target](#target) is top-level if it’s requested on the Bazel command
-line. For example, if `//:foo` depends on `//:bar`, and `bazel build //:foo` is
-called, then for this build, `//:foo` is top-level, and `//:bar` isn’t
-top-level, although both targets will need to be built. An important difference
-between top-level and non-top-level targets is that [command
-flags](#command-flags) set on the Bazel command line (or via
-[.bazelrc](#bazelrc)) will set the [configuration](#configuration) for top-level
-targets, but might be modified by a [transition](#transition) for non-top-level
-targets.
-
-### Transition
-
-A mapping of [configuration](#configuration) state from one value to another.
-Enables [targets](#target) in the [build graph](#build-graph) to have different
-configurations, even if they were instantiated from the same [rule](#rule). A
-common usage of transitions is with *split* transitions, where certain parts of
-the [target graph](#target-graph) are forked with distinct configurations for
-each fork. For example, one can build an Android APK with native binaries
-compiled for ARM and x86 using split transitions in a single build.
-
-**See also:** [User-defined transitions](/extending/config#user-defined-transitions)
-
-### Tree artifact
-
-An [artifact](#artifact) that represents a collection of files. 
Since these -files are not themselves artifacts, an [action](#action) operating on them must -instead register the tree artifact as its input or output. - -### Visibility - -One of two mechanisms for preventing unwanted dependencies in the build system: -*target visibility* for controlling whether a [target](#target) can be depended -upon by other targets; and *load visibility* for controlling whether a `BUILD` -or `.bzl` file may load a given `.bzl` file. Without context, usually -"visibility" refers to target visibility. - -**See also:** [Visibility documentation](/concepts/visibility) - -### Workspace - -The environment shared by all Bazel commands run from the same [main -repository](#repository). - -Note that historically the concepts of "repository" and "workspace" have been -conflated; the term "workspace" has often been used to refer to the main -repository, and sometimes even used as a synonym of "repository". Such usage -should be avoided for clarity. diff --git a/8.1.1/reference/skyframe.mdx b/8.1.1/reference/skyframe.mdx deleted file mode 100644 index ba9149f..0000000 --- a/8.1.1/reference/skyframe.mdx +++ /dev/null @@ -1,198 +0,0 @@ ---- -title: 'Skyframe' ---- - - - -The parallel evaluation and incrementality model of Bazel. - -## Data model - -The data model consists of the following items: - -* `SkyValue`. Also called nodes. `SkyValues` are immutable objects that - contain all the data built over the course of the build and the inputs of - the build. Examples are: input files, output files, targets and configured - targets. -* `SkyKey`. A short immutable name to reference a `SkyValue`, for example, - `FILECONTENTS:/tmp/foo` or `PACKAGE://foo`. -* `SkyFunction`. Builds nodes based on their keys and dependent nodes. -* Node graph. A data structure containing the dependency relationship between - nodes. -* `Skyframe`. Code name for the incremental evaluation framework Bazel is - based on. - -## Evaluation - -A build is achieved by evaluating the node that represents the build request. - -First, Bazel finds the `SkyFunction` corresponding to the key of the top-level -`SkyKey`. The function then requests the evaluation of the nodes it needs to -evaluate the top-level node, which in turn result in other `SkyFunction` calls, -until the leaf nodes are reached. Leaf nodes are usually ones that represent -input files in the file system. Finally, Bazel ends up with the value of the -top-level `SkyValue`, some side effects (such as output files in the file -system) and a directed acyclic graph of the dependencies between the nodes -involved in the build. - -A `SkyFunction` can request `SkyKeys` in multiple passes if it cannot tell in -advance all of the nodes it needs to do its job. A simple example is evaluating -an input file node that turns out to be a symlink: the function tries to read -the file, realizes that it is a symlink, and thus fetches the file system node -representing the target of the symlink. But that itself can be a symlink, in -which case the original function will need to fetch its target, too. - -The functions are represented in the code by the interface `SkyFunction` and the -services provided to it by an interface called `SkyFunction.Environment`. These -are the things functions can do: - -* Request the evaluation of another node by way of calling `env.getValue`. If - the node is available, its value is returned, otherwise, `null` is returned - and the function itself is expected to return `null`. 
In the latter case,
-  the dependent node is evaluated, and then the original node builder is
-  invoked again, but this time the same `env.getValue` call will return a
-  non-`null` value.
-* Request the evaluation of multiple other nodes by calling `env.getValues()`.
-  This does essentially the same thing, except that the dependent nodes are
-  evaluated in parallel.
-* Do computation during their invocation.
-* Have side effects, for example, writing files to the file system. Care needs
-  to be taken that two different functions avoid stepping on each other's
-  toes. In general, write side effects (where data flows outwards from Bazel)
-  are okay, read side effects (where data flows inwards into Bazel without a
-  registered dependency) are not, because they are an unregistered dependency
-  and as such, can cause incorrect incremental builds.
-
-Well-behaved `SkyFunction` implementations avoid accessing data in any other way
-than requesting dependencies (such as by directly reading the file system),
-because that results in Bazel not registering the data dependency on the file
-that was read, thus resulting in incorrect incremental builds.
-
-Once a function has enough data to do its job, it should return a non-`null`
-value indicating completion.
-
-This evaluation strategy has a number of benefits:
-
-* Hermeticity. If functions only request input data by way of depending on
-  other nodes, Bazel can guarantee that if the input state is the same, the
-  same data is returned. If all sky functions are deterministic, this means
-  that the whole build will also be deterministic.
-* Correct and perfect incrementality. If all the input data of all functions
-  is recorded, Bazel can invalidate only the exact set of nodes that need to
-  be invalidated when the input data changes.
-* Parallelism. Since functions can only interact with each other by way of
-  requesting dependencies, functions that don't depend on each other can be
-  run in parallel and Bazel can guarantee that the result is the same as if
-  they were run sequentially.
-
-## Incrementality
-
-Since functions can only access input data by depending on other nodes, Bazel
-can build up a complete data flow graph from the input files to the output
-files, and use this information to only rebuild those nodes that actually need
-to be rebuilt: the reverse transitive closure of the set of changed input files.
-
-In particular, two possible incrementality strategies exist: the bottom-up one
-and the top-down one. Which one is optimal depends on what the dependency graph
-looks like.
-
-* During bottom-up invalidation, after a graph is built and the set of changed
-  inputs is known, all nodes that transitively depend on the changed files are
-  invalidated. This is optimal if the same top-level node will be built
-  again. Note that bottom-up invalidation requires running `stat()` on all
-  input files of the previous build to determine if they were changed. This
-  can be improved by using `inotify` or a similar mechanism to learn about
-  changed files.
-
-* During top-down invalidation, the transitive closure of the top-level node
-  is checked and only those nodes are kept whose transitive closure is clean.
-  This is better if the node graph is large, but the next build only needs a
-  small subset of it: bottom-up invalidation would invalidate the larger graph
-  of the first build, unlike top-down invalidation, which just walks the small
-  graph of the second build.
-
-Bazel only does bottom-up invalidation.
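-
-As a concrete sketch of bottom-up invalidation (assuming a hypothetical
-`cc_binary` `//app` that depends on `//lib:a` and `//lib:b`):
-
-```
-$ bazel build //app         # clean build: all nodes are evaluated
-$ echo "// tweak" >> lib/a.cc
-$ bazel build //app         # only //lib:a's nodes and their reverse transitive
-                            # closure (up to //app's link action) are re-run;
-                            # //lib:b's nodes are untouched
-```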
-
-To get further incrementality, Bazel uses _change pruning_: if a node is
-invalidated, but upon rebuild, it is discovered that its new value is the same
-as its old value, the nodes that were invalidated due to a change in this node
-are "resurrected".
-
-This is useful, for example, if one changes a comment in a C++ file: then the
-`.o` file generated from it will be the same, thus, it is unnecessary to call
-the linker again.
-
-## Incremental Linking / Compilation
-
-The main limitation of this model is that the invalidation of a node is an
-all-or-nothing affair: when a dependency changes, the dependent node is always
-rebuilt from scratch, even if a better algorithm would exist that would mutate
-the old value of the node based on the changes. A few examples where this would
-be useful:
-
-* Incremental linking
-* When a single class file changes in a JAR file, it is possible to
-  modify the JAR file in-place instead of building it from scratch again.
-
-The reason why Bazel does not support these things in a principled way
-is twofold:
-
-* There were limited performance gains.
-* It is difficult to validate that the result of the mutation is the same as
-  that of a clean rebuild, and Google values builds that are bit-for-bit
-  repeatable.
-
-Until now, it was possible to achieve good enough performance by decomposing an
-expensive build step and achieving partial re-evaluation that way. For example,
-in an Android app, you can split all the classes into multiple groups and dex
-them separately. This way, if classes in a group are unchanged, the dexing does
-not have to be redone.
-
-## Mapping to Bazel concepts
-
-This is a high-level summary of the key `SkyFunction` and `SkyValue`
-implementations Bazel uses to perform a build:
-
-* **FileStateValue**. The result of an `lstat()`. For existent files, the
-  function also computes additional information in order to detect changes to
-  the file. This is the lowest level node in the Skyframe graph and has no
-  dependencies.
-* **FileValue**. Used by anything that cares about the actual contents or
-  resolved path of a file. Depends on the corresponding `FileStateValue` and
-  any symlinks that need to be resolved (such as the `FileValue` for `a/b`
-  needs the resolved path of `a` and the resolved path of `a/b`). The
-  distinction between `FileValue` and `FileStateValue` is important because
-  the latter can be used in cases where the contents of the file are not
-  actually needed. For example, the file contents are irrelevant when
-  evaluating file system globs (such as `srcs=glob(["*/*.java"])`).
-* **DirectoryListingStateValue**. The result of `readdir()`. Like
-  `FileStateValue`, this is the lowest level node and has no dependencies.
-* **DirectoryListingValue**. Used by anything that cares about the entries of
-  a directory. Depends on the corresponding `DirectoryListingStateValue`, as
-  well as the associated `FileValue` of the directory.
-* **PackageValue**. Represents the parsed version of a BUILD file. Depends on
-  the `FileValue` of the associated `BUILD` file, and also transitively on any
-  `DirectoryListingValue` that is used to resolve the globs in the package
-  (the data structure representing the contents of a `BUILD` file internally).
-* **ConfiguredTargetValue**. Represents a configured target, which is a tuple
-  of the set of actions generated during the analysis of a target and
-  information provided to dependent configured targets. Depends on the
-  `PackageValue` the corresponding target is in, the `ConfiguredTargetValues`
-  of direct dependencies, and a special node representing the build
-  configuration.
-* **ArtifactValue**. Represents a file in the build, be it a source or an
-  output artifact. Artifacts are almost equivalent to files, and are used to
-  refer to files during the actual execution of build steps. Source files
-  depend on the `FileValue` of the associated node, and output artifacts
-  depend on the `ActionExecutionValue` of whatever action generates the
-  artifact.
-* **ActionExecutionValue**. Represents the execution of an action. Depends on
-  the `ArtifactValues` of its input files. The action it executes is contained
-  within its SkyKey, which is contrary to the concept that SkyKeys should be
-  small. Note that `ActionExecutionValue` and `ArtifactValue` are unused if
-  the execution phase does not run.
-
-As a visual aid, this diagram shows the relationships between
-SkyFunction implementations after a build of Bazel itself:
-
-![A graph of SkyFunction implementation relationships](/reference/skyframe.png)
diff --git a/8.1.1/release/backward-compatibility.mdx b/8.1.1/release/backward-compatibility.mdx
deleted file mode 100644
index af653cc..0000000
--- a/8.1.1/release/backward-compatibility.mdx
+++ /dev/null
@@ -1,76 +0,0 @@
----
-title: 'Backward Compatibility'
----
-
-
-
-This page provides information about how to handle backward compatibility,
-including migrating from one release to another and how to communicate
-incompatible changes.
-
-Bazel is evolving. Minor versions released as part of an [LTS major
-version](/release#bazel-versioning) are fully backward-compatible. New major LTS
-releases may contain incompatible changes that require some migration effort.
-For more information about Bazel's release model, please check out the [Release
-Model](/release) page.
-
-## Summary
-
-1. It is recommended to use `--incompatible_*` flags for breaking changes.
-1. For every `--incompatible_*` flag, a GitHub issue explains the change in
-   behavior and aims to provide a migration recipe.
-1. Incompatible flags are recommended to be back-ported to the latest LTS
-   release without enabling the flag by default.
-1. APIs and behavior guarded by an `--experimental_*` flag can change at any
-   time.
-1. Never run production builds with `--experimental_*` or `--incompatible_*`
-   flags.
-
-## How to follow this policy
-
-* [For Bazel users - how to update Bazel](/install/bazelisk)
-* [For contributors - best practices for incompatible changes](/contribute/breaking-changes)
-* [For release managers - how to update issue labels and release](https://github.com/bazelbuild/continuous-integration/tree/master/docs/release-playbook.md)
-
-## What is stable functionality?
-
-In general, APIs or behaviors without `--experimental_...` flags are considered
-stable, supported features in Bazel.
-
-This includes:
-
-* Starlark language and APIs
-* Rules bundled with Bazel
-* Bazel APIs such as Remote Execution APIs or Build Event Protocol
-* Flags and their semantics
-
-## Incompatible changes and migration recipes
-
-For every incompatible change in a new release, the Bazel team aims to provide a
-_migration recipe_ that helps you update your code (`BUILD` and `.bzl` files, as
-well as any Bazel usage in scripts, usage of Bazel API, and so on).
-
-Incompatible changes should have an associated `--incompatible_*` flag and a
-corresponding GitHub issue.
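-
-For example, you can test a migration ahead of time by enabling such a flag
-explicitly in your `.bazelrc` (the flag name below is purely illustrative):
-
-```
-# .bazelrc: opt into the new behavior before it becomes the default.
-common --incompatible_example_change
-```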
- -The incompatible flag and relevant changes are recommended to be back-ported to -the latest LTS release without enabling the flag by default. This allows users -to migrate for the incompatible changes before the next LTS release is -available. - -## Communicating incompatible changes - -The primary source of information about incompatible changes is the set of GitHub -issues marked with an ["incompatible-change" -label](https://github.com/bazelbuild/bazel/issues?q=label%3Aincompatible-change). - -For every incompatible change, the issue specifies the following: - -* Name of the flag controlling the incompatible change -* Description of the changed functionality -* Migration recipe - -When an incompatible change is ready for migration with Bazel at HEAD -(therefore, also with the next Bazel rolling release), it should be marked with -the `migration-ready` label. The incompatible change issue is closed when the -incompatible flag is flipped at HEAD. diff --git a/8.1.1/release/index.mdx b/8.1.1/release/index.mdx deleted file mode 100644 index a3cc526..0000000 --- a/8.1.1/release/index.mdx +++ /dev/null @@ -1,216 +0,0 @@ ---- -title: 'Release Model' ---- - - - -As announced in [the original blog -post](https://blog.bazel.build/2020/11/10/long-term-support-release.html), Bazel -4.0 and higher versions provide support for two release tracks: rolling -releases and long term support (LTS) releases. This page covers the latest -information about Bazel's release model. - -## Support matrix - -| LTS release | Support stage | Latest version | End of support | -| ----------- | ------------- | -------------- | -------------- | -| Bazel 9 | Rolling | [Check rolling release page](/release/rolling) | N/A | -| Bazel 8 | Active | [8.0.0](https://github.com/bazelbuild/bazel/releases/tag/8.0.0) | December 2027 | -| Bazel 7 | Maintenance | [7.4.1](https://github.com/bazelbuild/bazel/releases/tag/7.4.1) | December 2026 | -| Bazel 6 | Maintenance | [6.5.0](https://github.com/bazelbuild/bazel/releases/tag/6.5.0) | December 2025 | -| Bazel 5 | Maintenance | [5.4.1](https://github.com/bazelbuild/bazel/releases/tag/5.4.1) | January 2025 | -| Bazel 4 | Deprecated | [4.2.4](https://github.com/bazelbuild/bazel/releases/tag/4.2.4) | January 2024 | - -All Bazel LTS releases can be found on the [release -page](https://github.com/bazelbuild/bazel/releases) on GitHub. - -Note: Bazel versions older than Bazel 5 are no longer supported. Bazel users are -recommended to upgrade to the latest LTS release, or to use rolling releases if -they want to keep up with the latest changes at HEAD. - -## Release versioning - -Bazel uses a _major.minor.patch_ [Semantic -Versioning](https://semver.org/) scheme. - -* A _major release_ contains features that are not backward compatible with - the previous release. Each major Bazel version is an LTS release. -* A _minor release_ contains backward-compatible bug fixes and features - back-ported from the main branch. -* A _patch release_ contains critical bug fixes. - -Additionally, pre-release versions are indicated by appending a hyphen and a -date suffix to the next major version number. - -For example, a new release of each type would result in these version numbers: - -* Major: 6.0.0 -* Minor: 6.1.0 -* Patch: 6.1.2 -* Pre-release: 7.0.0-pre.20230502.1 - -## Support stages - -For each major Bazel version, there are four support stages: - -* **Rolling**: This major version is still in pre-release; the Bazel team - publishes rolling releases from HEAD. -* **Active**: This major version is the current active LTS release.
The Bazel - team backports important features and bug fixes into its minor releases. -* **Maintenance**: This major version is an old LTS release in maintenance - mode. The Bazel team only promises to backport critical bug fixes for - security issues and OS-compatibility issues into this LTS release. -* **Deprecated**: The Bazel team no longer provides support for this major - version; all users should migrate to newer Bazel LTS releases. - -## Release cadence - -Bazel regularly publishes releases for two release tracks. - -### Rolling releases - -* Rolling releases are coordinated with the Google-internal Blaze release and are - released from HEAD around every two weeks. Each one is a preview of the next - Bazel LTS release. -* Rolling releases can ship incompatible changes. Incompatible flags are - recommended for major breaking changes, and rolling out incompatible changes - should follow our [backward compatibility - policy](/release/backward-compatibility). - -### LTS releases - -* _Major release_: A new LTS release is expected to be cut from HEAD roughly - every - 12 months. Once a new LTS release is out, it immediately enters the Active - stage, and the previous LTS release enters the Maintenance stage. -* _Minor release_: New minor versions on the Active LTS track are expected to - be released once every 2 months. -* _Patch release_: New patch versions for LTS releases in Active and - Maintenance stages are expected to be released on demand for critical bug - fixes. -* A Bazel LTS release enters the Deprecated stage after being in the - Maintenance stage for 2 years. - -For planned releases, please check our [release -issues](https://github.com/bazelbuild/bazel/issues?q=is%3Aopen+is%3Aissue+label%3Arelease) -on Github. - -## Release procedure & policies - -For rolling releases, the process is straightforward: about every two weeks, a -new release is created, aligning with the same baseline as the Google internal -Blaze release. Due to the rapid release schedule, we don't backport any changes -to rolling releases. - -For LTS releases, the procedure and policies below are followed: - -1. Determine a baseline commit for the release. - * For a new major LTS release, the baseline commit is the HEAD of the main - branch. - * For a minor or patch release, the baseline commit is the HEAD of the - current latest version of the same LTS release. -1. Create a release branch in the name of `release-X.Y.Z` from the baseline - commit. -1. Backport changes via PRs to the release branch. - * The community can suggest certain commits to be back-ported by replying - "`@bazel-io flag`" on relevant GitHub issues or PRs to mark them as potential - release blockers; the Bazel team triages them and decides whether to - back-port the commits. - * Only backward-compatible commits on the main branch can be back-ported; - additional minor changes to resolve merge conflicts are acceptable. -1. Backport changes using Cherry-Pick Request Issue for Bazel maintainers. - * Bazel maintainers can request to cherry-pick specific commit(s) - to a release branch. This process is initiated by creating a - cherry-pick request on GitHub. Here's how to do it: - 1. Open the [cherry-pick request](https://github.com/bazelbuild/bazel/issues/new?assignees=&labels=&projects=&template=cherry_pick_request.yml) - 2. Fill in the request details - * Title: Provide a concise and descriptive title for the request. - * Commit ID(s): Enter the ID(s) of the commit(s) you want to - cherry-pick. If there are multiple commits, then separate - them with commas.
- * Category: Specify the category of the request. - * Reviewer(s): For multiple reviewers, separate their GitHub - IDs with commas. - 3. Set the milestone - * Find the "Milestone" section and click the setting. - * Select the appropriate X.Y.Z release blockers. This action - triggers the cherry-pick bot to process your request - for the "release-X.Y.Z" branch. - 4. Submit the Issue - * Once all details are filled in and the milestone is set, - submit the issue. - - * The cherry-pick bot will process the request and notify you - if the commit(s) are eligible for cherry-picking. If - the commits are cherry-pickable, which means there's no - merge conflict while cherry-picking the commit, then - the bot will create a new pull request. When the pull - request is approved by a member of the Bazel team, the - commits are cherry-picked and merged to the release branch. - For a visual example of a completed cherry-pick request, - refer to this - [example](https://github.com/bazelbuild/bazel/issues/20230). - -1. Identify release blockers and fix issues found on the release branch. - * The release branch is tested with the same test suite in - [postsubmit](https://buildkite.com/bazel/bazel-bazel) and - [downstream test pipeline](https://buildkite.com/bazel/bazel-at-head-plus-downstream) - on Bazel CI. The Bazel team monitors testing results of the release - branch and fixes any regressions found. -1. Create a new release candidate from the release branch when all known - release blockers are resolved. - * The release candidate is announced on - [bazel-discuss](https://groups.google.com/g/bazel-discuss), and - the Bazel team monitors community bug reports for the candidate. - * If new release blockers are identified, go back to the last step and - create a new release candidate after resolving all the issues. - * New features are not allowed to be added to the release branch after the - first release candidate is created; cherry-picks are limited to critical - fixes only. If a cherry-pick is needed, the requester must answer the - following questions: Why is this change critical, and what benefits does - it provide? What is the likelihood of this change introducing a - regression? -1. Push the release candidate as the official release if no further release - blockers are found. - * For patch releases, push the release at least two business days after - the last release candidate is out. - * For major and minor releases, push the release two business days after - the last release candidate is out, but not earlier than one week after - the first release candidate is out. - * The release is only pushed on a day when the next day is a business - day. - * The release is announced on - [bazel-discuss](https://groups.google.com/g/bazel-discuss), and - the Bazel team monitors and addresses community bug reports for the new - release. - -## Report regressions - -If a user finds a regression in a new Bazel release, release candidate, or even -Bazel at HEAD, please file a bug on -[GitHub](https://github.com/bazelbuild/bazel/issues). You can use -Bazelisk to bisect the culprit commit and include this information in the bug -report. - -For example, if your build succeeds with Bazel 6.1.0 but fails with the second -release candidate of 6.2.0, you can bisect via - -```bash -bazelisk --bisect=6.1.0..release-6.2.0rc2 build //foo:bar -``` - -You can set the `BAZELISK_SHUTDOWN` or `BAZELISK_CLEAN` environment variables to run -the corresponding bazel commands between bisect steps if that is needed to -reproduce the issue.
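- -For example, to run `bazel clean` before each bisect step (assuming, as a -sketch, that the variable only needs to be set to a non-empty value): - -```bash -BAZELISK_CLEAN=1 bazelisk --bisect=6.1.0..release-6.2.0rc2 build //foo:bar -```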
For more details, check out the documentation about the Bazelisk -[bisect feature](https://github.com/bazelbuild/bazelisk#--bisect). - -Remember to upgrade Bazelisk to the latest version to use the bisect -feature. - -## Rule compatibility - -If you are a rule author and want to maintain compatibility with different -Bazel versions, please check out the [Rule -Compatibility](/release/rule-compatibility) page. diff --git a/8.1.1/release/rule-compatibility.mdx b/8.1.1/release/rule-compatibility.mdx deleted file mode 100644 index 05a8a95..0000000 --- a/8.1.1/release/rule-compatibility.mdx +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: 'Rule Compatibility' ---- - - - -Bazel Starlark rules can break compatibility with Bazel LTS releases in the -following two scenarios: - -1. The rule breaks compatibility with future LTS releases because a feature it - depends on is removed from Bazel at HEAD. -1. The rule breaks compatibility with the current or older LTS releases because - a feature it depends on is only available in newer Bazel LTS releases. - -Meanwhile, the rule itself can ship incompatible changes for its users as -well. When combined with breaking changes in Bazel, upgrading the rule version -and Bazel version can often be a source of frustration for Bazel users. This -page covers how rule authors should maintain rule compatibility with Bazel to -make it easier for users to upgrade Bazel and rules. - -## Manageable migration process - -While it's obviously not feasible to guarantee compatibility between every -version of Bazel and every version of the rule, our aim is to ensure that the -migration process remains manageable for Bazel users. A manageable migration -process is defined as a process where **users are not forced to upgrade the -rule's major version and Bazel's major version simultaneously**, thereby -allowing users to handle incompatible changes from one source at a time. - -For example, with the following compatibility matrix: - -* Migrating from rules_foo 1.x + Bazel 4.x to rules_foo 2.x + Bazel 5.x is not - considered manageable, as the users need to upgrade the major version of - rules_foo and Bazel at the same time. -* Migrating from rules_foo 2.x + Bazel 5.x to rules_foo 3.x + Bazel 6.x is - considered manageable, as the users can first upgrade rules_foo from 2.x to - 3.x without changing the major Bazel version, then upgrade Bazel from 5.x to - 6.x. - -| | rules_foo 1.x | rules_foo 2.x | rules_foo 3.x | HEAD | -| --- | --- | --- | --- | --- | -| Bazel 4.x | ✅ | ❌ | ❌ | ❌ | -| Bazel 5.x | ❌ | ✅ | ✅ | ❌ | -| Bazel 6.x | ❌ | ❌ | ✅ | ✅ | -| HEAD | ❌ | ❌ | ❌ | ✅ | - -❌: No version of that major rule version is compatible with the Bazel LTS -release. - -✅: At least one version of the rule is compatible with the latest version of the -Bazel LTS release. - -## Best practices - -As a Bazel rule author, you can ensure a manageable migration process for users -by following these best practices: - -1. The rule should follow [Semantic - Versioning](https://semver.org/): minor versions of the same - major version are backward compatible. -1. The rule at HEAD should be compatible with the latest Bazel LTS release. -1. The rule at HEAD should be compatible with Bazel at HEAD.
To achieve this, - you can: - * Set up your own CI testing with Bazel at HEAD - * Add your project to [Bazel downstream - testing](https://github.com/bazelbuild/continuous-integration/blob/master/docs/downstream-testing.md); - the Bazel team files issues against your project if breaking changes in Bazel - affect your project, and you must follow our [downstream project - policies](https://github.com/bazelbuild/continuous-integration/blob/master/docs/downstream-testing.md#downstream-project-policies) - to address issues in a timely manner. -1. The latest major version of the rule must be compatible with the latest - Bazel LTS release. -1. A new major version of the rule should be compatible with the last Bazel LTS - release supported by the previous major version of the rule. - -Achieving 2 and 3 is the most important task, since it allows you to achieve 4 and -5 naturally. - -To make it easier to keep compatibility with both Bazel at HEAD and the latest -Bazel LTS release, rule authors can: - -* Request backward-compatible features to be back-ported to the latest LTS - release; check out the [release process](/release#release-procedure-policies) - for more details. -* Use [bazel_features](https://github.com/bazel-contrib/bazel_features) - to do Bazel feature detection. - -In general, with the recommended approaches, rules should be able to migrate for -incompatible Bazel changes and make use of new Bazel features at HEAD without -dropping compatibility with the latest Bazel LTS release. diff --git a/8.1.1/remote/bep-examples.mdx b/8.1.1/remote/bep-examples.mdx deleted file mode 100644 index faf11bf..0000000 --- a/8.1.1/remote/bep-examples.mdx +++ /dev/null @@ -1,200 +0,0 @@ ---- -title: 'Build Event Protocol Examples' ---- - - - -The full specification of the Build Event Protocol can be found in its protocol -buffer definition. However, it might be helpful to build up some intuition -before looking at the specification. - -Consider a simple Bazel workspace that consists of two empty shell scripts -`foo.sh` and `foo_test.sh` and the following `BUILD` file: - -```starlark -sh_library( - name = "foo_lib", - srcs = ["foo.sh"], -) - -sh_test( - name = "foo_test", - srcs = ["foo_test.sh"], - deps = [":foo_lib"], -) -``` - -When running `bazel test ...` on this project, the graph of generated -build events will resemble the graph below. The arrows indicate the -aforementioned parent and child relationship. Note that some build events and -most fields have been omitted for brevity. - -![bep-graph](/docs/images/bep-graph.png "BEP graph") - -**Figure 1.** BEP graph. - -Initially, a `BuildStarted` event is published. The event informs us that the -build was invoked through the `bazel test` command and announces child events: - -* `OptionsParsed` -* `WorkspaceStatus` -* `CommandLine` -* `UnstructuredCommandLine` -* `BuildMetadata` -* `BuildFinished` -* `PatternExpanded` -* `Progress` - -The first three events provide information about how Bazel was invoked. - -The `PatternExpanded` build event provides insight -into which specific targets the `...` pattern expanded to: -`//foo:foo_lib` and `//foo:foo_test`. It does so by declaring two -`TargetConfigured` events as children. Note that the `TargetConfigured` event -declares the `Configuration` event as a child event, even though `Configuration` -was posted before the `TargetConfigured` event. - -Besides the parent and child relationship, events may also refer to each other -using their build event identifiers.
For example, in the above graph the -`TargetComplete` event refers to the `NamedSetOfFiles` event in its `fileSets` -field. - -Build events that refer to files don't usually embed the file -names and paths in the event. Instead, they contain the build event identifier -of a `NamedSetOfFiles` event, which will then contain the actual file names and -paths. The `NamedSetOfFiles` event allows a set of files to be reported once and -referred to by many targets. This structure is necessary because otherwise in -some cases the Build Event Protocol output size would grow quadratically with -the number of files. A `NamedSetOfFiles` event may also not have all its files -embedded, but may instead refer to other `NamedSetOfFiles` events through their -build event identifiers. - -Below is an instance of the `TargetComplete` event for the `//foo:foo_lib` -target from the above graph, printed in protocol buffer's JSON representation. -The build event identifier contains the target as an opaque string and refers to -the `Configuration` event using its build event identifier. The event does not -announce any child events. The payload contains information about whether the -target was built successfully, the set of output files, and the kind of target -built. - -```json -{ - "id": { - "targetCompleted": { - "label": "//foo:foo_lib", - "configuration": { - "id": "544e39a7f0abdb3efdd29d675a48bc6a" - } - } - }, - "completed": { - "success": true, - "outputGroup": [{ - "name": "default", - "fileSets": [{ - "id": "0" - }] - }], - "targetKind": "sh_library rule" - } -} -``` - -## Aspect Results in BEP - -Ordinary builds evaluate actions associated with `(target, configuration)` -pairs. When building with [aspects](/extending/aspects) enabled, Bazel -additionally evaluates targets associated with `(target, configuration, -aspect)` triples, for each target affected by a given enabled aspect. - -Evaluation results for aspects are available in BEP despite the absence of -aspect-specific event types. For each `(target, configuration)` pair with an -applicable aspect, Bazel publishes an additional `TargetConfigured` and -`TargetComplete` event bearing the result from applying the aspect to the -target. For example, if `//foo:foo_lib` is built with -`--aspects=aspects/myaspect.bzl%custom_aspect`, this event would also appear in -the BEP: - -```json -{ - "id": { - "targetCompleted": { - "label": "//foo:foo_lib", - "configuration": { - "id": "544e39a7f0abdb3efdd29d675a48bc6a" - }, - "aspect": "aspects/myaspect.bzl%custom_aspect" - } - }, - "completed": { - "success": true, - "outputGroup": [{ - "name": "default", - "fileSets": [{ - "id": "1" - }] - }] - } -} -``` - -Note: The only difference between the IDs is the presence of the `aspect` -field. A tool that does not check the `aspect` ID field and accumulates output -files by target may conflate target outputs with aspect outputs. - -## Consuming `NamedSetOfFiles` - -Determining the artifacts produced by a given target (or aspect) is a common -BEP use-case that can be done efficiently with some preparation. This section -discusses the recursive, shared structure offered by the `NamedSetOfFiles` -event, which matches the structure of a Starlark [Depset](/extending/depsets). - -Consumers must take care to avoid quadratic algorithms when processing -`NamedSetOfFiles` events because large builds can contain tens of thousands of -such events, requiring hundreds of millions of operations in a traversal with -quadratic complexity.
- -![namedsetoffiles-bep-graph](/docs/images/namedsetoffiles-bep-graph.png "NamedSetOfFiles BEP graph") - -**Figure 2.** `NamedSetOfFiles` BEP graph. - -A `NamedSetOfFiles` event always appears in the BEP stream *before* a -`TargetComplete` or `NamedSetOfFiles` event that references it. This is the -inverse of the "parent-child" event relationship, where all but the first event -appear after at least one event announcing them. A `NamedSetOfFiles` event is -announced by a `Progress` event with no semantics. - -Given these ordering and sharing constraints, a typical consumer must buffer all -`NamedSetOfFiles` events until the BEP stream is exhausted. The following JSON -event stream and Python code demonstrate how to populate a map from -target/aspect to built artifacts in the "default" output group, and how to -process the outputs for a subset of built targets/aspects: - -```python -named_sets = {} # type: dict[str, NamedSetOfFiles] -outputs = {} # type: dict[str, dict[str, set[str]]] - -for event in stream: - kind = event.id.WhichOneof("id") - if kind == "named_set": - named_sets[event.id.named_set.id] = event.named_set_of_files - elif kind == "target_completed": - tc = event.id.target_completed - target_id = (tc.label, tc.configuration.id, tc.aspect) - outputs[target_id] = {} - for group in event.completed.output_group: - outputs[target_id][group.name] = {fs.id for fs in group.file_sets} - -for result_id in relevant_subset(outputs.keys()): - # Copy the stored set so the traversal does not mutate `outputs`. - visit = set(outputs[result_id].get("default", ())) - seen_sets = set(visit) - while visit: - set_name = visit.pop() - s = named_sets[set_name] - for f in s.files: - process_file(result_id, f) - for fs in s.file_sets: - if fs.id not in seen_sets: - visit.add(fs.id) - seen_sets.add(fs.id) -``` diff --git a/8.1.1/remote/bep-glossary.mdx b/8.1.1/remote/bep-glossary.mdx deleted file mode 100644 index 3bd11ee..0000000 --- a/8.1.1/remote/bep-glossary.mdx +++ /dev/null @@ -1,416 +0,0 @@ ---- -title: 'Build Event Protocol Glossary' ---- - - - -Each BEP event type has its own semantics, minimally documented in -[build\_event\_stream.proto](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto). -The following glossary describes each event type. - -## Aborted - -Unlike other events, `Aborted` does not have a corresponding ID type, because -the `Aborted` event *replaces* events of other types. This event indicates that -the build terminated early and the event ID it appears under was not produced -normally. `Aborted` contains an enum and a human-friendly description to explain -why the build did not complete. - -For example, if a build is evaluating a target when the user interrupts Bazel, -BEP contains an event like the following: - -```json -{ - "id": { - "targetCompleted": { - "label": "//:foo", - "configuration": { - "id": "544e39a7f0abdb3efdd29d675a48bc6a" - } - } - }, - "aborted": { - "reason": "USER_INTERRUPTED" - } -} -``` - -## ActionExecuted - -Provides details about the execution of a specific -[Action](/rules/lib/actions) in a build. By default, this event is -included in the BEP only for failed actions, to support identifying the root cause -of build failures. Users may set the `--build_event_publish_all_actions` flag -to include all `ActionExecuted` events. - -## BuildFinished - -A single `BuildFinished` event is sent after the command is complete and -includes the exit code for the command. This event provides authoritative -success/failure information.
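- -As a rough illustration, a `BuildFinished` event for a successful command might -be rendered in JSON like this (fields abridged and values made up; see the -proto definition for the authoritative schema): - -```json -{ - "id": { - "buildFinished": {} - }, - "finished": { - "overallSuccess": true, - "exitCode": { - "name": "SUCCESS", - "code": 0 - }, - "finishTimeMillis": "1687462938000" - } -} -```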
- -## BuildMetadata - -Contains the parsed contents of the `--build_metadata` flag. This event exists -to support Bazel integration with other tooling by plumbing external data (such as -identifiers). - -## BuildMetrics - -A single `BuildMetrics` event is sent at the end of every command and includes -counters/gauges useful for quantifying the build tool's behavior during the -command. These metrics indicate work actually done and do not count cached -work that is reused. - -Note that `memory_metrics` may not be populated if there was no Java garbage -collection during the command's execution. Users may set the -`--memory_profile=/dev/null` option, which forces the garbage -collector to run at the end of the command to populate `memory_metrics`. - -```json -{ - "id": { - "buildMetrics": {} - }, - "buildMetrics": { - "actionSummary": { - "actionsExecuted": "1" - }, - "memoryMetrics": {}, - "targetMetrics": { - "targetsLoaded": "9", - "targetsConfigured": "19" - }, - "packageMetrics": { - "packagesLoaded": "5" - }, - "timingMetrics": { - "cpuTimeInMs": "1590", - "wallTimeInMs": "359" - } - } -} -``` - -## BuildStarted - -The first event in a BEP stream, `BuildStarted` includes metadata describing the -command before any meaningful work begins. - -## BuildToolLogs - -A single `BuildToolLogs` event is sent at the end of a command, including URIs -of files generated by the build tool that may aid in understanding or debugging -build tool behavior. Some information may be included inline. - -```json -{ - "id": { - "buildToolLogs": {} - }, - "lastMessage": true, - "buildToolLogs": { - "log": [ - { - "name": "elapsed time", - "contents": "MC4xMjEwMDA=" - }, - { - "name": "process stats", - "contents": "MSBwcm9jZXNzOiAxIGludGVybmFsLg==" - }, - { - "name": "command.profile.gz", - "uri": "file:///tmp/.cache/bazel/_bazel_foo/cde87985ad0bfef34eacae575224b8d1/command.profile.gz" - } - ] - } -} -``` - -## CommandLine - -The BEP contains multiple `CommandLine` events containing representations of all -command-line arguments (including options and uninterpreted arguments). -Each `CommandLine` event has a label in its `StructuredCommandLineId` that -indicates which representation it conveys; three such events appear in the BEP: - -* `"original"`: Reconstructed commandline as Bazel received it from the Bazel - client, without startup options sourced from .rc files. -* `"canonical"`: The effective commandline with .rc files expanded and - invocation policy applied. -* `"tool"`: Populated from the `--experimental_tool_command_line` option. This - is useful to convey the command-line of a tool wrapping Bazel through the BEP. - This could be a base64-encoded `CommandLine` binary protocol buffer message - which is used directly, or a string which is parsed but not interpreted (as - the tool's options may differ from Bazel's). - -## Configuration - -A `Configuration` event is sent for every [`configuration`](/extending/config) -used in the top-level targets in a build. At least one configuration event is -always present. The `id` is reused by the `TargetConfigured` and -`TargetComplete` event IDs and is necessary to disambiguate those events in -multi-configuration builds.
- -```json -{ - "id": { - "configuration": { - "id": "a5d130b0966b4a9ca2d32725aa5baf40e215bcfc4d5cdcdc60f5cc5b4918903b" - } - }, - "configuration": { - "mnemonic": "k8-fastbuild", - "platformName": "k8", - "cpu": "k8", - "makeVariable": { - "COMPILATION_MODE": "fastbuild", - "TARGET_CPU": "k8", - "GENDIR": "bazel-out/k8-fastbuild/bin", - "BINDIR": "bazel-out/k8-fastbuild/bin" - } - } -} -``` - -## ConvenienceSymlinksIdentified - -**Experimental.** If the `--experimental_convenience_symlinks_bep_event` -option is set, a single `ConvenienceSymlinksIdentified` event is produced by -`build` commands to indicate how symlinks in the workspace should be managed. -This enables building tools that invoke Bazel remotely and then arrange the local -workspace as if Bazel had been run locally. - -```json -{ - "id": { - "convenienceSymlinksIdentified":{} - }, - "convenienceSymlinksIdentified": { - "convenienceSymlinks": [ - { - "path": "bazel-bin", - "action": "CREATE", - "target": "execroot/google3/bazel-out/k8-fastbuild/bin" - }, - { - "path": "bazel-genfiles", - "action": "CREATE", - "target": "execroot/google3/bazel-out/k8-fastbuild/genfiles" - }, - { - "path": "bazel-out", - "action": "CREATE", - "target": "execroot/google3/bazel-out" - } - ] - } -} -``` - -## Fetch - -Indicates that a Fetch operation occurred as a part of the command execution. -Unlike other events, if a cached fetch result is re-used, this event does not -appear in the BEP stream. - -## NamedSetOfFiles - -`NamedSetOfFiles` events report a structure matching a -[`depset`](/extending/depsets) of files produced during command evaluation. -Transitively included depsets are identified by `NamedSetOfFilesId`. - -For more information on interpreting a stream's `NamedSetOfFiles` events, see the -[BEP examples page](/remote/bep-examples#consuming-namedsetoffiles). - -## OptionsParsed - -A single `OptionsParsed` event lists all options applied to the command, -separating startup options from command options. It also includes the -[InvocationPolicy](/reference/command-line-reference#flag--invocation_policy), if any. - -```json -{ - "id": { - "optionsParsed": {} - }, - "optionsParsed": { - "startupOptions": [ - "--max_idle_secs=10800", - "--noshutdown_on_low_sys_mem", - "--connect_timeout_secs=30", - "--output_user_root=/tmp/.cache/bazel/_bazel_foo", - "--output_base=/tmp/.cache/bazel/_bazel_foo/a61fd0fbee3f9d6c1e30d54b68655d35", - "--deep_execroot", - "--idle_server_tasks", - "--write_command_log", - "--nowatchfs", - "--nofatal_event_bus_exceptions", - "--nowindows_enable_symlinks", - "--noclient_debug" - ], - "cmdLine": [ - "--enable_platform_specific_config", - "--build_event_json_file=/tmp/bep.json" - ], - "explicitCmdLine": [ - "--build_event_json_file=/tmp/bep.json" - ], - "invocationPolicy": {} - } -} -``` - -## PatternExpanded - -`PatternExpanded` events indicate the set of all targets that match the patterns -supplied on the commandline. For successful commands, a single event is present -with all patterns in the `PatternExpandedId` and all targets in the -`PatternExpanded` event's *children*. If the pattern expands to any -`test_suite`s, the children also include the test targets contained in each -`test_suite`. For each -pattern that fails to resolve, BEP contains an additional [`Aborted`](#aborted) -event with a `PatternExpandedId` identifying the pattern.
- -```json -{ - "id": { - "pattern": { - "pattern":["//base:all"] - } - }, - "children": [ - {"targetConfigured":{"label":"//base:foo"}}, - {"targetConfigured":{"label":"//base:foobar"}} - ], - "expanded": { - "testSuiteExpansions": { - "suiteLabel": "//base:suite", - "testLabels": "//base:foo_test" - } - } -} -``` - -## Progress - -Progress events contain the standard output and standard error produced by Bazel -during command execution. These events are also auto-generated as needed to -announce events that have not been announced by a logical "parent" event (in -particular, [NamedSetOfFiles](#namedsetoffiles)). - -## TargetComplete - -For each `(target, configuration, aspect)` combination that completes the -execution phase, a `TargetComplete` event is included in BEP. The event contains -the target's success/failure and the target's requested output groups. - -```json -{ - "id": { - "targetCompleted": { - "label": "//examples/py:bep", - "configuration": { - "id": "a5d130b0966b4a9ca2d32725aa5baf40e215bcfc4d5cdcdc60f5cc5b4918903b" - } - } - }, - "completed": { - "success": true, - "outputGroup": [ - { - "name": "default", - "fileSets": [ - { - "id": "0" - } - ] - } - ] - } -} -``` - -## TargetConfigured - -For each Target that completes the analysis phase, a `TargetConfigured` event is -included in BEP. This is the authoritative source for a target's "rule kind" -attribute. The configuration(s) applied to the target appear in the announced -*children* of the event. - -For example, building with the `--experimental_multi_cpu` option may produce -the following `TargetConfigured` event for a single target with two -configurations: - -```json -{ - "id": { - "targetConfigured": { - "label": "//starlark_configurations/multi_arch_binary:foo" - } - }, - "children": [ - { - "targetCompleted": { - "label": "//starlark_configurations/multi_arch_binary:foo", - "configuration": { - "id": "c62b30c8ab7b9fc51a05848af9276529842a11a7655c71327ade26d7c894c818" - } - } - }, - { - "targetCompleted": { - "label": "//starlark_configurations/multi_arch_binary:foo", - "configuration": { - "id": "eae0379b65abce68d54e0924c0ebcbf3d3df26c6e84ef7b2be51e8dc5b513c99" - } - } - } - ], - "configured": { - "targetKind": "foo_binary rule" - } -} -``` - -## TargetSummary - -For each `(target, configuration)` pair that is executed, a `TargetSummary` -event is included with an aggregate success result encompassing the configured -target's execution and all aspects applied to that configured target. - -## TestResult - -If testing is requested, a `TestResult` event is sent for each test attempt, -shard, and run per test. This allows BEP consumers to identify precisely which -test actions failed their tests and identify the test outputs (such as logs, -test.xml files) for each test action. - -## TestSummary - -If testing is requested, a `TestSummary` event is sent for each test `(target, -configuration)`, containing information necessary to interpret the test's -results. The number of attempts, shards, and runs per test are included to enable -BEP consumers to differentiate artifacts across these dimensions. The attempts -and runs per test are considered while producing the aggregate `TestStatus` to -differentiate `FLAKY` tests from `FAILED` tests. - -## UnstructuredCommandLine - -Unlike [CommandLine](#commandline), this event carries the unparsed commandline -flags in string form as encountered by the build tool after expanding all -[`.bazelrc`](/run/bazelrc) files and -considering the `--config` flag.
- -The `UnstructuredCommandLine` event may be relied upon to precisely reproduce a -given command execution. - -## WorkspaceConfig - -A single `WorkspaceConfig` event contains configuration information regarding the -workspace, such as the execution root. - -## WorkspaceStatus - -A single `WorkspaceStatus` event contains the result of the [workspace status -command](/docs/user-manual#workspace-status). diff --git a/8.1.1/remote/bep.mdx b/8.1.1/remote/bep.mdx deleted file mode 100644 index bafdaa9..0000000 --- a/8.1.1/remote/bep.mdx +++ /dev/null @@ -1,148 +0,0 @@ ---- -title: 'Build Event Protocol' ---- - - - -The [Build Event -Protocol](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto) -(BEP) allows third-party programs to gain insight into a Bazel invocation. For -example, you could use the BEP to gather information for an IDE -plugin or a dashboard that displays build results. - -The protocol is a set of [protocol -buffer](https://developers.google.com/protocol-buffers/) messages with some -semantics defined on top of them. It includes information about build and test -results, build progress, the build configuration and much more. The BEP is -intended to be consumed programmatically and makes parsing Bazel’s -command line output a thing of the past. - -The Build Event Protocol represents information about a build as events. A -build event is a protocol buffer message consisting of a build event identifier, -a set of child event identifiers, and a payload. - -* __Build Event Identifier:__ Depending on the kind of build event, it might be -an [opaque -string](https://github.com/bazelbuild/bazel/blob/7.1.0/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto#L131-L140) -or [structured -information](https://github.com/bazelbuild/bazel/blob/7.1.0/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto#L194-L205) -revealing more about the build event. A build event identifier is unique within -a build. - -* __Children:__ A build event may announce other build events by including -their build event identifiers in its [children -field](https://github.com/bazelbuild/bazel/blob/7.1.0/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto#L1276). -For example, the `PatternExpanded` build event announces the targets it expands -to as children. The protocol guarantees that all events, except for the first -event, are announced by a previous event. - -* __Payload:__ The payload contains structured information about a build event, -encoded as a protocol buffer message specific to that event. Note that the -payload might not be the expected type, but could be an `Aborted` message -if the build aborted prematurely. - -### Build event graph - -All build events form a directed acyclic graph through their parent and child -relationship. Every build event except for the initial build event has one or -more parent events. Please note that not all parent events of a child event are -necessarily posted before it. When a build is complete (succeeded or failed), -all announced events will have been posted. In case of a Bazel crash or a failed -network transport, some announced build events may never be posted. - -The event graph's structure reflects the lifecycle of a command. Every BEP -graph has the following characteristic shape: - -1.
The root event is always a [`BuildStarted`](/remote/bep-glossary#buildstarted) - event. All other events are its descendants. -1. Immediate children of the BuildStarted event contain metadata about the - command. -1. Events containing data produced by the command, such as files built and test - results, appear before the [`BuildFinished`](/remote/bep-glossary#buildfinished) - event. -1. The [`BuildFinished`](/remote/bep-glossary#buildfinished) event *may* be followed - by events containing summary information about the build (for example, metric - or profiling data). - -## Consuming Build Event Protocol - -### Consume in binary format - -To consume the BEP in a binary format: - -1. Have Bazel serialize the protocol buffer messages to a file by specifying the - option `--build_event_binary_file=/path/to/file`. The file will contain - serialized protocol buffer messages with each message being length delimited. - Each message is prefixed with its length encoded as a variable length integer. - This format can be read using the protocol buffer library’s - [`parseDelimitedFrom(InputStream)`](https://developers.google.com/protocol-buffers/docs/reference/java/com/google/protobuf/AbstractParser#parseDelimitedFrom-java.io.InputStream-) - method. - -2. Then, write a program that extracts the relevant information from the - serialized protocol buffer messages. - -### Consume in text or JSON formats - -The following Bazel command line flags will output the BEP in -human-readable formats, such as text and JSON: - -``` ---build_event_text_file ---build_event_json_file -``` - -## Build Event Service - -The [Build Event -Service](https://github.com/googleapis/googleapis/blob/master/google/devtools/build/v1/publish_build_event.proto) -Protocol is a generic [gRPC](https://www.grpc.io) service for publishing build events. The Build Event -Service protocol is independent of the BEP and treats BEP events as opaque bytes. -Bazel ships with a gRPC client implementation of the Build Event Service protocol that -publishes Build Event Protocol events. One can specify the endpoint to send the -events to using the `--bes_backend=HOST:PORT` flag. If your backend uses gRPC, -you must prefix the address with the appropriate scheme: `grpc://` for plaintext -gRPC and `grpcs://` for gRPC with TLS enabled. - -### Build Event Service flags - -Bazel has several flags related to the Build Event Service protocol, including: - -* `--bes_backend` -* `--[no]bes_lifecycle_events` -* `--bes_results_url` -* `--bes_timeout` -* `--bes_instance_name` - -For a description of each of these flags, see the -[Command-Line Reference](/reference/command-line-reference). - -### Authentication and security - -Bazel’s Build Event Service implementation also supports authentication and TLS. -These settings can be controlled using the flags below. Please note that these -flags are also used for Bazel’s Remote Execution. This implies that the Build -Event Service and Remote Execution endpoints need to share the same -authentication and TLS infrastructure. - -* `--[no]google_default_credentials` -* `--google_credentials` -* `--google_auth_scopes` -* `--tls_certificate` -* `--[no]tls_enabled` - -For a description of each of these flags, see the -[Command-Line Reference](/reference/command-line-reference). - -### Build Event Service and remote caching - -The BEP typically contains many references to log files (test.log, test.xml, -etc.) stored on the machine where Bazel is running.
A remote BES server -typically can't access these files as they are on different machines. A way to -work around this issue is to use Bazel with [remote -caching](/remote/caching). -Bazel will upload all output files to the remote cache (including files -referenced in the BEP) and the BES server can then fetch the referenced files -from the cache. - -See [GitHub issue 3689](https://github.com/bazelbuild/bazel/issues/3689) for -more details. diff --git a/8.1.1/remote/cache-local.mdx b/8.1.1/remote/cache-local.mdx deleted file mode 100644 index e6dc0c0..0000000 --- a/8.1.1/remote/cache-local.mdx +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: 'Debugging Remote Cache Hits for Local Execution' ---- - - - -This page describes how to investigate cache misses in the context of local -execution. - -This page assumes that you have a build and/or test that successfully builds -locally and is set up to utilize remote caching, and that you want to ensure -that the remote cache is being effectively utilized. - -For tips on how to check your cache hit rate and how to compare the execution -logs between two Bazel invocations, see -[Debugging Remote Cache Hits for Remote Execution](/remote/cache-remote). -Everything presented in that guide also applies to remote caching with local -execution. However, local execution presents some additional challenges. - -## Checking your cache hit rate - -Successful remote cache hits will show up in the status line, similar to -[Cache Hits rate with Remote -Execution](/remote/cache-remote#check-cache-hits). - -In the standard output of your Bazel run, you will see something like the -following: - -```none {:.devsite-disable-click-to-copy} - INFO: 7 processes: 3 remote cache hit, 4 linux-sandbox. -``` - -This means that out of 7 attempted actions, 3 got a remote cache hit and 4 -actions did not have cache hits and were executed locally using the `linux-sandbox` -strategy. Local cache hits are not included in this summary. If you are getting -0 processes (or a number lower than expected), run `bazel clean` followed by -your build/test command. - -## Troubleshooting cache hits - -If you are not getting the cache hit rate you are expecting, do the following: - -### Ensure successful communication with the remote endpoint - -To ensure your build is successfully communicating with the remote cache, follow -the steps in this section. - -1. Check your output for warnings - - With remote execution, a failure to talk to the remote endpoint would fail - your build. On the other hand, a cacheable local build would not fail if it - cannot cache. Check the output of your Bazel invocation for warnings, such - as: - - ```none {:.devsite-disable-click-to-copy} - WARNING: Error reading from the remote cache: - ``` - - - or - - ```none {:.devsite-disable-click-to-copy} - WARNING: Error writing to the remote cache: - ``` - - - Such warnings will be followed by an error message detailing the connection - problem, which should help you debug it: for example, a mistyped endpoint name - or incorrectly set credentials. Find and address any such errors. If the error - message you see does not give you enough information, try adding - `--verbose_failures`. - -2. Follow the steps from [Troubleshooting cache hits for remote - execution](/remote/cache-remote#troubleshooting_cache_hits) to - ensure that your cache-writing Bazel invocations are able to get cache hits - on the same machine and across machines. - -3. Ensure your cache-reading Bazel invocations can get cache hits. - - a.
Since cache-reading Bazel invocations will have a different command-line - setup, take additional care to ensure that they are properly set up to - communicate with the remote cache. Ensure the `--remote_cache` flag is set - and there are no warnings in the output. - - b. Ensure your cache-reading Bazel invocations build the same targets as the - cache-writing Bazel invocations. - - c. Follow the same steps as to [ensure caching across - machines](/remote/cache-remote#caching-across-machines), - to ensure caching from your cache-writing Bazel invocation to your - cache-reading Bazel invocation. diff --git a/8.1.1/remote/cache-remote.mdx b/8.1.1/remote/cache-remote.mdx deleted file mode 100644 index a614f4f..0000000 --- a/8.1.1/remote/cache-remote.mdx +++ /dev/null @@ -1,179 +0,0 @@ ---- -title: 'Debugging Remote Cache Hits for Remote Execution' ---- - - - -This page describes how to check your cache hit rate and how to investigate -cache misses in the context of remote execution. - -This page assumes that you have a build and/or test that successfully -utilizes remote execution, and you want to ensure that you are effectively -utilizing the remote cache. - -## Checking your cache hit rate - -In the standard output of your Bazel run, look at the `INFO` line that lists -processes, which roughly correspond to Bazel actions. That line details -where the action was run. Look for the `remote` label, which indicates an action -executed remotely, `linux-sandbox` for actions executed in a local sandbox, -and other values for other execution strategies. An action whose result came -from a remote cache is displayed as `remote cache hit`. - -For example: - -```none {:.devsite-disable-click-to-copy} -INFO: 11 processes: 6 remote cache hit, 3 internal, 2 remote. -``` - -In this example there were 6 remote cache hits, and 2 actions did not have -cache hits and were executed remotely. The 3 internal actions can be ignored; -these are typically tiny internal actions, such as creating symbolic links. Local -cache hits are not included in this summary. If you are getting 0 processes -(or a number lower than expected), run `bazel clean` followed by your build/test -command. - -## Troubleshooting cache hits - -If you are not getting the cache hit rate you are expecting, do the following: - -### Ensure re-running the same build/test command produces cache hits - -1. Run the build(s) and/or test(s) that you expect to populate the cache. The - first time a new build is run on a particular stack, you can expect no remote - cache hits. As part of remote execution, action results are stored in the - cache and a subsequent run should pick them up. - -2. Run `bazel clean`. This command cleans your local cache, which allows - you to investigate remote cache hits without the results being masked by - local cache hits. - -3. Run the build(s) and test(s) that you are investigating again (on the same - machine). - -4. Check the `INFO` line for cache hit rate. If you see no processes except - `remote cache hit` and `internal`, then your cache is being correctly populated and - accessed. In that case, skip to the next section. - -5. A likely source of discrepancy is something non-hermetic in the build causing - the actions to receive different action keys across the two runs. To find - those actions, do the following: - - a.
Re-run the build(s) or test(s) in question to obtain execution logs: - - ```posix-terminal - bazel clean - - bazel --optional-flags build //your:target --execution_log_compact_file=/tmp/exec1.log - ``` - - b. [Compare the execution logs](#comparing-the-execution-logs) between the - two runs. Ensure that the actions are identical across the two log files. - Discrepancies provide a clue about the changes that occurred between the - runs. Update your build to eliminate those discrepancies. - - If you are able to resolve the caching problems and now the repeated run - produces all cache hits, skip to the next section. - - If your action IDs are identical but there are no cache hits, then something - in your configuration is preventing caching. Continue with this section to - check for common problems. - -6. Check that all actions in the execution log have `cacheable` set to true. If - `cacheable` does not appear in the execution log for a given action, that - means that the corresponding rule may have a `no-cache` tag in its - definition in the `BUILD` file. Look at the `mnemonic` and `target_label` - fields in the execution log to help determine where the action is coming - from. - -7. If the actions are identical and `cacheable` but there are no cache hits, it - is possible that your command line includes `--noremote_accept_cached` which - would disable cache lookups for a build. - - If figuring out the actual command line is difficult, use the canonical - command line from the - [Build Event Protocol](/remote/bep) - as follows: - - a. Add `--build_event_text_file=/tmp/bep.txt` to your Bazel command to get - the text version of the log. - - b. Open the text version of the log and search for the - `structured_command_line` message with `command_line_label: "canonical"`. - It will list all the options after expansion. - - c. Search for `remote_accept_cached` and check whether it's set to `false`. - - d. If `remote_accept_cached` is `false`, determine where it is being - set to `false`: either at the command line or in a - [bazelrc](/run/bazelrc#bazelrc-file-locations) file. - -### Ensure caching across machines - -After cache hits are happening as expected on the same machine, run the -same build(s)/test(s) on a different machine. If you suspect that caching is -not happening across machines, do the following: - -1. Make a small modification to your build to avoid hitting existing caches. - -2. Run the build on the first machine: - - ```posix-terminal - bazel clean - - bazel ... build ... --execution_log_compact_file=/tmp/exec1.log - ``` - -3. Run the build on the second machine, ensuring the modification from step 1 - is included: - - ```posix-terminal - bazel clean - - bazel ... build ... --execution_log_compact_file=/tmp/exec2.log - ``` - -4. [Compare the execution logs](#comparing-the-execution-logs) for the two - runs. If the logs are not identical, investigate your build configurations - for discrepancies as well as properties from the host environment leaking - into either of the builds. - -## Comparing the execution logs - -The execution log contains records of actions executed during the build. -Each record describes both the inputs (not only files, but also command line -arguments, environment variables, etc.) and the outputs of the action. Thus, -examination of the log can reveal why an action was reexecuted.
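- -For instance, a single record in the JSON form of the log might look roughly -like the following (an abridged sketch based on the log's `SpawnExec` message; -the values are made up). If the same action's record in a second log shows a -different environment variable value or input digest, that difference is the -likely cause of the cache miss: - -```json -{ - "commandArgs": ["/usr/bin/gcc", "-c", "foo.c", "-o", "foo.o"], - "environmentVariables": [{"name": "PATH", "value": "/usr/bin"}], - "inputs": [{"path": "foo.c", "digest": {"hash": "deadbeef...", "sizeBytes": "42"}}], - "listedOutputs": ["foo.o"], - "cacheable": true, - "mnemonic": "CppCompile" -} -```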
- -The execution log can be produced in one of three formats: -compact (`--execution_log_compact_file`), -binary (`--execution_log_binary_file`) or JSON (`--execution_log_json_file`). -The compact format is recommended, as it produces much smaller files with very -little runtime overhead. The following instructions work for any format. You -can also convert between them using the `//src/tools/execlog:converter` tool. - -To compare logs for two builds that are not sharing cache hits as expected, -do the following: - -1. Get the execution logs from each build and store them as `/tmp/exec1.log` and - `/tmp/exec2.log`. - -2. Download the Bazel source code and build the `//src/tools/execlog:parser` - tool: - - git clone https://github.com/bazelbuild/bazel.git - cd bazel - bazel build //src/tools/execlog:parser - -3. Use the `//src/tools/execlog:parser` tool to convert the logs into a - human-readable text format. In this format, the actions in the second log are - sorted to match the order in the first log, making a comparison easier. - - bazel-bin/src/tools/execlog/parser \ - --log_path=/tmp/exec1.log \ - --log_path=/tmp/exec2.log \ - --output_path=/tmp/exec1.log.txt \ - --output_path=/tmp/exec2.log.txt - -4. Use your favorite text differ to diff `/tmp/exec1.log.txt` and - `/tmp/exec2.log.txt`. diff --git a/8.1.1/remote/caching.mdx b/8.1.1/remote/caching.mdx deleted file mode 100644 index 8fd6adc..0000000 --- a/8.1.1/remote/caching.mdx +++ /dev/null @@ -1,380 +0,0 @@ ---- -title: 'Remote Caching' ---- - - - -This page covers remote caching, setting up a server to host the cache, and -running builds using the remote cache. - -A remote cache is used by a team of developers and/or a continuous integration -(CI) system to share build outputs. If your build is reproducible, the -outputs from one machine can be safely reused on another machine, which can -make builds significantly faster. - -## Overview - -Bazel breaks a build into discrete steps, which are called actions. Each action -has inputs, output names, a command line, and environment variables. Required -inputs and expected outputs are declared explicitly for each action. - -You can set up a server to be a remote cache for build outputs, which are these -action outputs. These outputs consist of a list of output file names and the -hashes of their contents. With a remote cache, you can reuse build outputs -from another user's build rather than building each new output locally. - -To use remote caching: - -* Set up a server as the cache's backend -* Configure the Bazel build to use the remote cache -* Use Bazel version 0.10.0 or later - -The remote cache stores two types of data: - -* The action cache, which is a map of action hashes to action result metadata. -* A content-addressable store (CAS) of output files. - -Note that the remote cache additionally stores the stdout and stderr for every -action. Inspecting Bazel's stdout/stderr is thus not a good signal for -[estimating cache hits](/remote/cache-local). - -### How a build uses remote caching - -Once a server is set up as the remote cache, you use the cache in multiple -ways: - -* Read and write to the remote cache -* Read and/or write to the remote cache except for specific targets -* Only read from the remote cache -* Not use the remote cache at all - -When you run a Bazel build that can read and write to the remote cache, -the build follows these steps: - -1. Bazel creates the graph of targets that need to be built, and then creates -a list of required actions.
Each of these actions has declared inputs -and output filenames. -2. Bazel checks your local machine for existing build outputs and reuses any -that it finds. -3. Bazel checks the cache for existing build outputs. If the output is found, -Bazel retrieves the output. This is a cache hit. -4. For required actions where the outputs were not found, Bazel executes the -actions locally and creates the required build outputs. -5. New build outputs are uploaded to the remote cache. - -## Setting up a server as the cache's backend - -You need to set up a server to act as the cache's backend. An HTTP/1.1 -server can treat Bazel's data as opaque bytes, so many existing servers -can be used as a remote caching backend. Remote caching is supported by Bazel's -[HTTP caching protocol](#http-caching). - -You are responsible for choosing, setting up, and maintaining the backend -server that will store the cached outputs. When choosing a server, consider: - -* Networking speed. For example, if your team is in the same office, you may -want to run your own local server. -* Security. The remote cache will have your binaries and so needs to be secure. -* Ease of management. For example, Google Cloud Storage is a fully managed service. - -There are many backends that can be used for a remote cache. Some options -include: - -* [nginx](#nginx) -* [bazel-remote](#bazel-remote) -* [Google Cloud Storage](#cloud-storage) - -### nginx - -nginx is an open source web server. With its [WebDAV -module](https://nginx.org/en/docs/http/ngx_http_dav_module.html), it can be -used as a remote cache for Bazel. On Debian and Ubuntu you can install the -`nginx-extras` package. On macOS nginx is available via Homebrew: - -```posix-terminal -brew tap denji/nginx - -brew install nginx-full --with-webdav -``` - -Below is an example configuration for nginx. Note that you will need to -change `/path/to/cache/dir` to a valid directory where nginx has permission -to write and read. You may need to change the `client_max_body_size` option to a -larger value if you have larger output files. The server will require other -configuration, such as authentication. - - -Example configuration for `server` section in `nginx.conf`: - -```nginx -location /cache/ { - # The path to the directory where nginx should store the cache contents. - root /path/to/cache/dir; - # Allow PUT - dav_methods PUT; - # Allow nginx to create the /ac and /cas subdirectories. - create_full_put_path on; - # The maximum size of a single file. - client_max_body_size 1G; - allow all; -} -``` - -### bazel-remote - -bazel-remote is an open source remote build cache that you can use on -your infrastructure. It has been successfully used in production at -several companies since early 2018. Note that the Bazel project does -not provide technical support for bazel-remote. - -This cache stores contents on disk and also provides garbage collection -to enforce an upper storage limit and clean unused artifacts. The cache is -available as a Docker image and its code is available on -[GitHub](https://github.com/buchgr/bazel-remote/). -Both the REST and gRPC remote cache APIs are supported. - -Refer to the [GitHub](https://github.com/buchgr/bazel-remote/) -page for instructions on how to use it. - -### Google Cloud Storage - -[Google Cloud Storage](https://cloud.google.com/storage) is a fully managed -object store which provides an -HTTP API that is compatible with Bazel's remote caching protocol. It requires -that you have a Google Cloud account with billing enabled. - -To use Cloud Storage as the cache: - -1.
[Create a storage bucket](https://cloud.google.com/storage/docs/creating-buckets).
-Ensure that you select a bucket location that's closest to you, as network bandwidth
-is important for the remote cache.
-
-2. Create a service account for Bazel to authenticate to Cloud Storage. See
-[Creating a service account](https://cloud.google.com/iam/docs/creating-managing-service-accounts#creating_a_service_account).
-
-3. Generate a secret JSON key and then pass it to Bazel for authentication. Store
-the key securely, as anyone with the key can read and write arbitrary data
-to/from your GCS bucket.
-
-4. Connect to Cloud Storage by adding the following flags to your Bazel command:
-   * Pass the following URL to Bazel by using the flag:
-     `--remote_cache=https://storage.googleapis.com/bucket-name` where `bucket-name` is the name of your storage bucket.
-   * Pass the authentication key using the flag: `--google_credentials=/path/to/your/secret-key.json`, or
-     `--google_default_credentials` to use [Application Authentication](https://cloud.google.com/docs/authentication/production).
-
-5. You can configure Cloud Storage to automatically delete old files. To do so, see
-[Managing Object Lifecycles](https://cloud.google.com/storage/docs/managing-lifecycles).
-
-### Other servers
-
-You can set up any HTTP/1.1 server that supports PUT and GET as the cache's
-backend. Users have reported success with caching backends such as [Hazelcast](https://hazelcast.com),
-[Apache httpd](http://httpd.apache.org), and [AWS S3](https://aws.amazon.com/s3).
-
-## Authentication
-
-As of version 0.11.0, Bazel supports HTTP Basic Authentication.
-You can pass a username and password to Bazel via the remote cache URL. The
-syntax is `https://username:password@hostname.com:port/path`. Note that
-HTTP Basic Authentication transmits the username and password in plaintext over
-the network, so it's critical to always use it with HTTPS.
-
-## HTTP caching protocol
-
-Bazel supports remote caching via HTTP/1.1. The protocol is conceptually simple:
-binary data (BLOBs) is uploaded via PUT requests and downloaded via GET requests.
-Action result metadata is stored under the path `/ac/` and output files are stored
-under the path `/cas/`.
-
-For example, consider a remote cache running under `http://localhost:8080/cache`.
-A Bazel request to download action result metadata for an action with the SHA256
-hash `01ba4719...` looks as follows:
-
-```http
-GET /cache/ac/01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b HTTP/1.1
-Host: localhost:8080
-Accept: */*
-Connection: Keep-Alive
-```
-
-A Bazel request to upload an output file with the SHA256 hash `15e2b0d3...` to
-the CAS looks as follows:
-
-```http
-PUT /cache/cas/15e2b0d3c33891ebb0f1ef609ec419420c20e320ce94c65fbc8c3312448eb225 HTTP/1.1
-Host: localhost:8080
-Accept: */*
-Content-Length: 9
-Connection: Keep-Alive
-
-0x310x320x330x340x350x360x370x380x39
-```
-
-## Run Bazel using the remote cache
-
-Once a server is set up as the remote cache, you need to add flags to your
-Bazel command to use it. See the list of configurations and their flags below.
-
-You may also need to configure authentication, which is specific to your
-chosen server.
-
-You may want to add these flags to a `.bazelrc` file so that you don't
-need to specify them every time you run Bazel.
-Depending on your project and team dynamics, you can add flags to a
-`.bazelrc` file that is:
-
-* On your local machine
-* In your project's workspace, shared with the team
-* On the CI system
-
-### Read from and write to the remote cache
-
-Take care in deciding who has the ability to write to the remote cache. You may
-want only your CI system to be able to write to the remote cache.
-
-Use the following flag to read from and write to the remote cache:
-
-```posix-terminal
-build --remote_cache=http://your.host:port
-```
-
-Besides `HTTP`, the following protocols are also supported: `HTTPS`, `grpc`, `grpcs`.
-
-Use the following flag in addition to the one above to only read from the
-remote cache:
-
-```posix-terminal
-build --remote_upload_local_results=false
-```
-
-### Exclude specific targets from using the remote cache
-
-To exclude specific targets from using the remote cache, tag the target with
-`no-remote-cache`. For example:
-
-```starlark
-java_library(
-    name = "target",
-    tags = ["no-remote-cache"],
-)
-```
-
-### Delete content from the remote cache
-
-Deleting content from the remote cache is part of managing your server.
-How you delete content from the remote cache depends on the server you have
-set up as the cache. When deleting outputs, either delete the entire cache,
-or delete old outputs.
-
-The cached outputs are stored as a set of names and hashes. When deleting
-content, there's no way to distinguish which output belongs to a specific
-build.
-
-You may want to delete content from the cache to:
-
-* Create a clean cache after a cache was poisoned
-* Reduce the amount of storage used by deleting old outputs
-
-### Unix sockets
-
-The remote HTTP cache supports connecting over unix domain sockets. The behavior
-is similar to curl's `--unix-socket` flag. Use the following to configure a unix
-domain socket:
-
-```posix-terminal
-build --remote_cache=http://your.host:port
-build --remote_proxy=unix:/path/to/socket
-```
-
-This feature is unsupported on Windows.
-
-## Disk cache
-
-Bazel can use a directory on the file system as a remote cache. This is
-useful for sharing build artifacts when switching branches and/or working
-on multiple workspaces of the same project, such as multiple checkouts.
-Enable the disk cache as follows:
-
-```posix-terminal
-build --disk_cache=path/to/build/cache
-```
-
-You can pass a user-specific path to the `--disk_cache` flag using the `~` alias
-(Bazel will substitute the current user's home directory). This comes in handy
-when enabling the disk cache for all developers of a project via the project's
-checked-in `.bazelrc` file.
-
-### Garbage collection
-
-Starting with Bazel 7.4, you can use `--experimental_disk_cache_gc_max_size` and
-`--experimental_disk_cache_gc_max_age` to set a maximum size for the disk cache
-or for the age of individual cache entries. Bazel will automatically garbage
-collect the disk cache while idling between builds; the idle timer can be set
-with `--experimental_disk_cache_gc_idle_delay` (defaulting to 5 minutes).
-
-As an alternative to automatic garbage collection, we also provide a [tool](
-https://github.com/bazelbuild/bazel/tree/master/src/tools/diskcache) to run
-garbage collection on demand.
-
-## Known issues
-
-**Input file modification during a build**
-
-When an input file is modified during a build, Bazel might upload invalid
-results to the remote cache.
You can enable change detection with
-the `--experimental_guard_against_concurrent_changes` flag. There
-are no known issues and it will be enabled by default in a future release.
-See [issue #3360] for updates. Generally, avoid modifying source files during a
-build.
-
-**Environment variables leaking into an action**
-
-An action definition contains environment variables. This can be a problem for
-sharing remote cache hits across machines. For example, environments with
-different `$PATH` variables won't share cache hits. Only environment variables
-explicitly whitelisted via `--action_env` are included in an action
-definition. Bazel's Debian/Ubuntu package used to install `/etc/bazel.bazelrc`
-with a whitelist of environment variables including `$PATH`. If you are getting
-fewer cache hits than expected, check that your environment doesn't have an old
-`/etc/bazel.bazelrc` file.
-
-**Bazel does not track tools outside a workspace**
-
-Bazel currently does not track tools outside a workspace. This can be a
-problem if, for example, an action uses a compiler from `/usr/bin/`. Then,
-two users with different compilers installed will wrongly share cache hits
-because the outputs are different but they have the same action hash. See
-[issue #4558](https://github.com/bazelbuild/bazel/issues/4558) for updates.
-
-**Incremental in-memory state is lost when running builds inside Docker containers**
-
-Bazel uses a server/client architecture even when running in a single Docker
-container. On the server side, Bazel maintains an in-memory state which speeds
-up builds. When running builds inside Docker containers, such as in CI, the
-in-memory state is lost and Bazel must rebuild it before using the remote
-cache.
-
-## External links
-
-* **Your Build in a Datacenter:** The Bazel team gave a [talk](https://fosdem.org/2018/schedule/event/datacenter_build/) about remote caching and execution at FOSDEM 2018.
-
-* **Faster Bazel builds with remote caching: a benchmark:** Nicolò Valigi wrote a [blog post](https://nicolovaligi.com/faster-bazel-remote-caching-benchmark.html)
-in which he benchmarks remote caching in Bazel.
- -* [Adapting Rules for Remote Execution](/remote/rules) -* [Troubleshooting Remote Execution](/remote/sandbox) -* [WebDAV module](https://nginx.org/en/docs/http/ngx_http_dav_module.html) -* [Docker image](https://hub.docker.com/r/buchgr/bazel-remote-cache/) -* [bazel-remote](https://github.com/buchgr/bazel-remote/) -* [Google Cloud Storage](https://cloud.google.com/storage) -* [Google Cloud Console](https://cloud.google.com/console) -* [Bucket locations](https://cloud.google.com/storage/docs/bucket-locations) -* [Hazelcast](https://hazelcast.com) -* [Apache httpd](http://httpd.apache.org) -* [AWS S3](https://aws.amazon.com/s3) -* [issue #3360](https://github.com/bazelbuild/bazel/issues/3360) -* [gRPC](https://grpc.io/) -* [gRPC protocol](https://github.com/bazelbuild/remote-apis/blob/main/build/bazel/remote/execution/v2/remote_execution.proto) -* [Buildbarn](https://github.com/buildbarn) -* [Buildfarm](https://github.com/bazelbuild/bazel-buildfarm) -* [BuildGrid](https://gitlab.com/BuildGrid/buildgrid) -* [issue #4558](https://github.com/bazelbuild/bazel/issues/4558) -* [Application Authentication](https://cloud.google.com/docs/authentication/production) -* [NativeLink](https://github.com/TraceMachina/nativelink) diff --git a/8.1.1/remote/creating.mdx b/8.1.1/remote/creating.mdx deleted file mode 100644 index 0e46a07..0000000 --- a/8.1.1/remote/creating.mdx +++ /dev/null @@ -1,261 +0,0 @@ ---- -title: 'Creating Persistent Workers' ---- - - - -[Persistent workers](/remote/persistent) can make your build faster. If -you have repeated actions in your build that have a high startup cost or would -benefit from cross-action caching, you may want to implement your own persistent -worker to perform these actions. - -The Bazel server communicates with the worker using `stdin`/`stdout`. It -supports the use of protocol buffers or JSON strings. - -The worker implementation has two parts: - -* The [worker](#making-worker). -* The [rule that uses the worker](#rule-uses-worker). - -## Making the worker - -A persistent worker upholds a few requirements: - -* It reads - [WorkRequests](https://github.com/bazelbuild/bazel/blob/54a547f30fd582933889b961df1d6e37a3e33d85/src/main/protobuf/worker_protocol.proto#L36) - from its `stdin`. -* It writes - [WorkResponses](https://github.com/bazelbuild/bazel/blob/54a547f30fd582933889b961df1d6e37a3e33d85/src/main/protobuf/worker_protocol.proto#L77) - (and only `WorkResponse`s) to its `stdout`. -* It accepts the `--persistent_worker` flag. The wrapper must recognize the - `--persistent_worker` command-line flag and only make itself persistent if - that flag is passed, otherwise it must do a one-shot compilation and exit. - -If your program upholds these requirements, it can be used as a persistent -worker! - -### Work requests - -A `WorkRequest` contains a list of arguments to the worker, a list of -path-digest pairs representing the inputs the worker can access (this isn’t -enforced, but you can use this info for caching), and a request id, which is 0 -for singleplex workers. - -NOTE: While the protocol buffer specification uses "snake case" (`request_id`), -the JSON protocol uses "camel case" (`requestId`). This document uses camel case -in the JSON examples, but snake case when talking about the field regardless of -protocol. 
-
-```json
-{
-  "arguments" : ["--some_argument"],
-  "inputs" : [
-    { "path": "/path/to/my/file/1", "digest": "fdk3e2ml23d" },
-    { "path": "/path/to/my/file/2", "digest": "1fwqd4qdd" }
-  ],
-  "requestId" : 12
-}
-```
-
-The optional `verbosity` field can be used to request extra debugging output
-from the worker. It is entirely up to the worker what to output and how. Higher
-values indicate more verbose output. Passing the `--worker_verbose` flag to
-Bazel sets the `verbosity` field to 10, but smaller or larger values can be used
-manually for different amounts of output.
-
-The optional `sandbox_dir` field is used only by workers that support
-[multiplex sandboxing](/remote/multiplex).
-
-### Work responses
-
-A `WorkResponse` contains a request id, a zero or nonzero exit code, and an
-output message describing any errors encountered in processing or executing
-the request. A worker should capture the `stdout` and `stderr` of any tool it
-calls and report them through the `WorkResponse`. Writing it to the `stdout` of
-the worker process is unsafe, as it will interfere with the worker protocol.
-Writing it to the `stderr` of the worker process is safe, but the result is
-collected in a per-worker log file instead of ascribed to individual actions.
-
-```json
-{
-  "exitCode" : 1,
-  "output" : "Action failed with the following message:\nCould not find input
-    file \"/path/to/my/file/1\"",
-  "requestId" : 12
-}
-```
-
-As per the norm for protobufs, all fields are optional. However, Bazel requires
-the `WorkRequest` and the corresponding `WorkResponse` to have the same request
-id, so the request id must be specified if it is nonzero. This is a valid
-`WorkResponse`:
-
-```json
-{
-  "requestId" : 12
-}
-```
-
-A `request_id` of 0 indicates a "singleplex" request, used when this request
-cannot be processed in parallel with other requests. The server guarantees that
-a given worker receives requests with either only `request_id` 0 or only
-`request_id` greater than zero. Singleplex requests are sent in serial: for
-example, the server doesn't send another request until it has received a
-response (except for cancel requests, see below).
-
-**Notes**
-
-* Each protocol buffer is preceded by its length in `varint` format (see
-  [`MessageLite.writeDelimitedTo()`](https://developers.google.com/protocol-buffers/docs/reference/java/com/google/protobuf/MessageLite.html#writeDelimitedTo-java.io.OutputStream-)).
-* JSON requests and responses are not preceded by a size indicator.
-* JSON requests uphold the same structure as the protobuf, but use standard
-  JSON and use camel case for all field names.
-* In order to maintain the same backward and forward compatibility properties
-  as protobuf, JSON workers must tolerate unknown fields in these messages,
-  and use the protobuf defaults for missing values.
-* Bazel stores requests as protobufs and converts them to JSON using
-  [protobuf's JSON format](https://cs.opensource.google/protobuf/protobuf/+/master:java/util/src/main/java/com/google/protobuf/util/JsonFormat.java).
-
-### Cancellation
-
-Workers can optionally allow work requests to be cancelled before they finish.
-This is particularly useful in connection with dynamic execution, where local
-execution can regularly be interrupted by a faster remote execution. To allow
-cancellation, add `supports-worker-cancellation: 1` to the
-`execution-requirements` field (see below) and set the
-`--experimental_worker_cancellation` flag.
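-
-For illustration, here is a minimal sketch of how an action might opt in to
-cancellation, modeled on the Starlark example later in this document (the
-`mnemonic`, `executable`, and the `inputs`, `outputs`, and `args_file` values
-are placeholders, not a specific real rule):
-
-```python
-ctx.actions.run(
-    mnemonic = "SomeCompiler",
-    executable = "bin/some_compiler_wrapper",  # placeholder tool
-    inputs = inputs,
-    outputs = outputs,
-    arguments = ["@%s" % args_file.path],
-    execution_requirements = {
-        "supports-workers": "1",
-        # Opt in to cancellation; also pass --experimental_worker_cancellation.
-        "supports-worker-cancellation": "1",
-    },
-)
-```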
- -A **cancel request** is a `WorkRequest` with the `cancel` field set (and -similarly a **cancel response** is a `WorkResponse` with the `was_cancelled` -field set). The only other field that must be in a cancel request or cancel -response is `request_id`, indicating which request to cancel. The `request_id` -field will be 0 for singleplex workers or the non-0 `request_id` of a previously -sent `WorkRequest` for multiplex workers. The server may send cancel requests -for requests that the worker has already responded to, in which case the cancel -request must be ignored. - -Each non-cancel `WorkRequest` message must be answered exactly once, whether or -not it was cancelled. Once the server has sent a cancel request, the worker may -respond with a `WorkResponse` with the `request_id` set and the `was_cancelled` -field set to true. Sending a regular `WorkResponse` is also accepted, but the -`output` and `exit_code` fields will be ignored. - -Once a response has been sent for a `WorkRequest`, the worker must not touch the -files in its working directory. The server is free to clean up the files, -including temporary files. - -## Making the rule that uses the worker - -You'll also need to create a rule that generates actions to be performed by the -worker. Making a Starlark rule that uses a worker is just like -[creating any other rule](https://github.com/bazelbuild/examples/tree/master/rules). - -In addition, the rule needs to contain a reference to the worker itself, and -there are some requirements for the actions it produces. - -### Referring to the worker - -The rule that uses the worker needs to contain a field that refers to the worker -itself, so you'll need to create an instance of a `\*\_binary` rule to define -your worker. If your worker is called `MyWorker.Java`, this might be the -associated rule: - -```python -java_binary( - name = "worker", - srcs = ["MyWorker.Java"], -) -``` - -This creates the "worker" label, which refers to the worker binary. You'll then -define a rule that *uses* the worker. This rule should define an attribute that -refers to the worker binary. - -If the worker binary you built is in a package named "work", which is at the top -level of the build, this might be the attribute definition: - -```python -"worker": attr.label( - default = Label("//work:worker"), - executable = True, - cfg = "exec", -) -``` - -`cfg = "exec"` indicates that the worker should be built to run on your -execution platform rather than on the target platform (i.e., the worker is used -as tool during the build). - -### Work action requirements - -The rule that uses the worker creates actions for the worker to perform. These -actions have a couple of requirements. - -* The *"arguments"* field. This takes a list of strings, all but the last of - which are arguments passed to the worker upon startup. The last element in - the "arguments" list is a `flag-file` (@-preceded) argument. Workers read - the arguments from the specified flagfile on a per-WorkRequest basis. Your - rule can write non-startup arguments for the worker to this flagfile. - -* The *"execution-requirements"* field, which takes a dictionary containing - `"supports-workers" : "1"`, `"supports-multiplex-workers" : "1"`, or both. - - The "arguments" and "execution-requirements" fields are required for all - actions sent to workers. Additionally, actions that should be executed by - JSON workers need to include `"requires-worker-protocol" : "json"` in the - execution requirements field. 
`"requires-worker-protocol" : "proto"` is also - a valid execution requirement, though it’s not required for proto workers, - since they are the default. - - You can also set a `worker-key-mnemonic` in the execution requirements. This - may be useful if you're reusing the executable for multiple action types and - want to distinguish actions by this worker. - -* Temporary files generated in the course of the action should be saved to the - worker's directory. This enables sandboxing. - -Note: To pass an argument starting with a literal `@`, start the argument with -`@@` instead. If an argument is also an external repository label, it will not -be considered a flagfile argument. - -Assuming a rule definition with "worker" attribute described above, in addition -to a "srcs" attribute representing the inputs, an "output" attribute -representing the outputs, and an "args" attribute representing the worker -startup args, the call to `ctx.actions.run` might be: - -```python -ctx.actions.run( - inputs=ctx.files.srcs, - outputs=[ctx.outputs.output], - executable=ctx.executable.worker, - mnemonic="someMnemonic", - execution_requirements={ - "supports-workers" : "1", - "requires-worker-protocol" : "json"}, - arguments=ctx.attr.args + ["@flagfile"] - ) -``` - -For another example, see -[Implementing persistent workers](/remote/persistent#implementation). - -## Examples - -The Bazel code base uses -[Java compiler workers](https://github.com/bazelbuild/bazel/blob/a4251eab6988d6cf4f5e35681fbe2c1b0abe48ef/src/java_tools/buildjar/java/com/google/devtools/build/buildjar/BazelJavaBuilder.java), -in addition to an -[example JSON worker](https://github.com/bazelbuild/bazel/blob/c65f768fec9889bbf1ee934c61d0dc061ea54ca2/src/test/java/com/google/devtools/build/lib/worker/ExampleWorker.java) -that is used in our integration tests. - -You can use their -[scaffolding](https://github.com/bazelbuild/bazel/blob/a4251eab6988d6cf4f5e35681fbe2c1b0abe48ef/src/main/java/com/google/devtools/build/lib/worker/WorkRequestHandler.java) -to make any Java-based tool into a worker by passing in the correct callback. - -For an example of a rule that uses a worker, take a look at Bazel's -[worker integration test](https://github.com/bazelbuild/bazel/blob/22b4dbcaf05756d506de346728db3846da56b775/src/test/shell/integration/bazel_worker_test.sh#L106). - -External contributors have implemented workers in a variety of languages; take a -look at -[Polyglot implementations of Bazel persistent workers](https://github.com/Ubehebe/bazel-worker-examples). -You can -[find many more examples on GitHub](https://github.com/search?q=bazel+workrequest&type=Code)! diff --git a/8.1.1/remote/multiplex.mdx b/8.1.1/remote/multiplex.mdx deleted file mode 100644 index b4b0a0d..0000000 --- a/8.1.1/remote/multiplex.mdx +++ /dev/null @@ -1,113 +0,0 @@ ---- -title: 'Multiplex Workers (Experimental Feature)' ---- - - - -This page describes multiplex workers, how to write multiplex-compatible -rules, and workarounds for certain limitations. - -Caution: Experimental features are subject to change at any time. - -_Multiplex workers_ allow Bazel to handle multiple requests with a single worker -process. For multi-threaded workers, Bazel can use fewer resources to -achieve the same, or better performance. For example, instead of having one -worker process per worker, Bazel can have four multiplexed workers talking to -the same worker process, which can then handle requests in parallel. 
For
-languages like Java and Scala, this saves JVM warm-up time and JIT compilation
-time, and in general it allows using one shared cache between all workers of
-the same type.
-
-## Overview
-
-There are two layers between the Bazel server and the worker process. For certain
-mnemonics that can run processes in parallel, Bazel gets a `WorkerProxy` from
-the worker pool. The `WorkerProxy` forwards requests to the worker process
-sequentially along with a `request_id`; the worker process processes each request
-and sends responses to the `WorkerMultiplexer`. When the `WorkerMultiplexer`
-receives a response, it parses the `request_id` and then forwards the response
-back to the correct `WorkerProxy`. Just as with non-multiplexed workers, all
-communication is done over standard in/out, but the tool cannot just use
-`stderr` for user-visible output ([see below](#output)).
-
-Each worker has a key. Bazel uses the key's hash code (composed of environment
-variables, the execution root, and the mnemonic) to determine which
-`WorkerMultiplexer` to use. `WorkerProxy`s communicate with the same
-`WorkerMultiplexer` if they have the same hash code. Therefore, assuming
-environment variables and the execution root are the same in a single Bazel
-invocation, each unique mnemonic can only have one `WorkerMultiplexer` and one
-worker process. The total number of workers, including regular workers and
-`WorkerProxy`s, is still limited by `--worker_max_instances`.
-
-## Writing multiplex-compatible rules
-
-The rule's worker process should be multi-threaded to take advantage of
-multiplex workers. Protobuf allows a ruleset to parse a single request even
-though there might be multiple requests piling up in the stream. Whenever the
-worker process parses a request from the stream, it should handle the request in
-a new thread. Because different threads could complete and write to the stream at
-the same time, the worker process needs to make sure the responses are written
-atomically (messages don't overlap). Responses must contain the
-`request_id` of the request they're handling (a minimal worker loop along these
-lines is sketched below).
-
-### Handling multiplex output
-
-Multiplex workers need to be more careful about handling their output than
-singleplex workers. Anything sent to `stderr` will go into a single log file
-shared among all `WorkerProxy`s of the same type,
-randomly interleaved between concurrent requests. While redirecting `stdout`
-into `stderr` is a good idea, do not collect that output into the `output`
-field of `WorkResponse`, as that could show the user mangled pieces of output.
-If your tool only sends user-oriented output to `stdout` or `stderr`, you will
-need to change that behaviour before you can enable multiplex workers.
-
-## Enabling multiplex workers
-
-Multiplex workers are not enabled by default. A ruleset can turn on multiplex
-workers by using the `supports-multiplex-workers` tag in the
-`execution_requirements` of an action (just like the `supports-workers` tag
-enables regular workers). As is the case when using regular workers, a worker
-strategy needs to be specified, either at the ruleset level (for example,
-`--strategy=[some_mnemonic]=worker`) or generally at the strategy level (for
-example, `--dynamic_local_strategy=worker,standalone`). No additional flags are
-necessary, and `supports-multiplex-workers` takes precedence over
-`supports-workers`, if both are set. You can turn off multiplex workers
-globally by passing `--noworker_multiplex`.
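-
-To make the requirements from "Writing multiplex-compatible rules" concrete,
-here is a minimal sketch of a multiplex-compatible JSON worker loop. It is
-illustrative only, not a complete worker: it assumes newline-delimited JSON
-`WorkRequest`s on `stdin`, does no real work, and omits `--persistent_worker`
-flag handling and error handling.
-
-```python
-import json
-import sys
-import threading
-
-write_lock = threading.Lock()  # responses must be written atomically
-
-def handle_request(request):
-    # Placeholder for real work: report how many arguments were received.
-    response = {
-        "requestId": request.get("requestId", 0),  # echo the request id back
-        "exitCode": 0,
-        "output": "handled %d argument(s)" % len(request.get("arguments", [])),
-    }
-    with write_lock:  # never interleave concurrent responses
-        sys.stdout.write(json.dumps(response) + "\n")
-        sys.stdout.flush()
-
-# Handle each incoming request in its own thread.
-for line in sys.stdin:
-    threading.Thread(target=handle_request, args=(json.loads(line),)).start()
-```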
- -A ruleset is encouraged to use multiplex workers if possible, to reduce memory -pressure and improve performance. However, multiplex workers are not currently -compatible with [dynamic execution](/remote/dynamic) unless they -implement multiplex sandboxing. Attempting to run non-sandboxed multiplex -workers with dynamic execution will silently use sandboxed -singleplex workers instead. - -## Multiplex sandboxing - -Multiplex workers can be sandboxed by adding explicit support for it in the -worker implementations. While singleplex worker sandboxing can be done by -running each worker process in its own sandbox, multiplex workers share the -process working directory between multiple parallel requests. To allow -sandboxing of multiplex workers, the worker must support reading from and -writing to a subdirectory specified in each request, instead of directly in -its working directory. - -To support multiplex sandboxing, the worker must use the `sandbox_dir` field -from the `WorkRequest` and use that as a prefix for all file reads and writes. -While the `arguments` and `inputs` fields remain unchanged from an unsandboxed -request, the actual inputs are relative to the `sandbox_dir`. The worker must -translate file paths found in `arguments` and `inputs` to read from this -modified path, and must also write all outputs relative to the `sandbox_dir`. -This includes paths such as '.', as well as paths found in files specified -in the arguments (such as ["argfile"](https://docs.oracle.com/javase/7/docs/technotes/tools/windows/javac.html#commandlineargfile) arguments). - -Once a worker supports multiplex sandboxing, the ruleset can declare this -support by adding `supports-multiplex-sandboxing` to the -`execution_requirements` of an action. Bazel will then use multiplex sandboxing -if the `--experimental_worker_multiplex_sandboxing` flag is passed, or if -the worker is used with dynamic execution. - -The worker files of a sandboxed multiplex worker are still relative to the -working directory of the worker process. Thus, if a file is -used both for running the worker and as an input, it must be specified both as -an input in the flagfile argument as well as in `tools`, `executable`, or -`runfiles`. diff --git a/8.1.1/remote/output-directories.mdx b/8.1.1/remote/output-directories.mdx deleted file mode 100644 index bdbe029..0000000 --- a/8.1.1/remote/output-directories.mdx +++ /dev/null @@ -1,144 +0,0 @@ ---- -title: 'Output Directory Layout' ---- - - - -This page covers requirements and layout for output directories. - -## Requirements - -Requirements for an output directory layout: - -* Doesn't collide if multiple users are building on the same box. -* Supports building in multiple workspaces at the same time. -* Supports building for multiple target configurations in the same workspace. -* Doesn't collide with any other tools. -* Is easy to access. -* Is easy to clean, even selectively. -* Is unambiguous, even if the user relies on symbolic links when changing into - their client directory. -* All the build state per user should be underneath one directory ("I'd like to - clean all the .o files from all my clients.") - -## Current layout - -The solution that's currently implemented: - -* Bazel must be invoked from a directory containing a repo boundary file, or a - subdirectory thereof. In other words, Bazel must be invoked from inside a - [repository](../external/overview#repository). Otherwise, an error is - reported. 
-* The _outputRoot_ directory defaults to `${XDG_CACHE_HOME}/bazel` (or - `~/.cache/bazel`, if the `XDG_CACHE_HOME` environment variable is not set) on - Linux, `/private/var/tmp` on macOS, and on Windows it defaults to `%HOME%` if - set, else `%USERPROFILE%` if set, else the result of calling - `SHGetKnownFolderPath()` with the `FOLDERID_Profile` flag set. If the - environment variable `$TEST_TMPDIR` is set, as in a test of Bazel itself, - then that value overrides the default. -* The Bazel user's build state is located beneath `outputRoot/_bazel_$USER`. - This is called the _outputUserRoot_ directory. -* Beneath the `outputUserRoot` directory there is an `install` directory, and in - it is an `installBase` directory whose name is the MD5 hash of the Bazel - installation manifest. -* Beneath the `outputUserRoot` directory, an `outputBase` directory - is also created whose name is the MD5 hash of the path name of the workspace - root. So, for example, if Bazel is running in the workspace root - `/home/user/src/my-project` (or in a directory symlinked to that one), then - an output base directory is created called: - `/home/user/.cache/bazel/_bazel_user/7ffd56a6e4cb724ea575aba15733d113`. You - can also run `echo -n $(pwd) | md5sum` in the workspace root to get the MD5. -* You can use Bazel's `--output_base` startup option to override the default - output base directory. For example, - `bazel --output_base=/tmp/bazel/output build x/y:z`. -* You can also use Bazel's `--output_user_root` startup option to override the - default install base and output base directories. For example: - `bazel --output_user_root=/tmp/bazel build x/y:z`. - -The symlinks for "bazel-<workspace-name>", "bazel-out", "bazel-testlogs", -and "bazel-bin" are put in the workspace directory; these symlinks point to some -directories inside a target-specific directory inside the output directory. -These symlinks are only for the user's convenience, as Bazel itself does not -use them. Also, this is done only if the workspace root is writable. - -## Layout diagram - -The directories are laid out as follows: - -``` -<workspace-name>/ <== The workspace root - bazel-my-project => <..._main> <== Symlink to execRoot - bazel-out => <...bazel-out> <== Convenience symlink to outputPath - bazel-bin => <...bin> <== Convenience symlink to most recent written bin dir $(BINDIR) - bazel-testlogs => <...testlogs> <== Convenience symlink to the test logs directory - -/home/user/.cache/bazel/ <== Root for all Bazel output on a machine: outputRoot - _bazel_$USER/ <== Top level directory for a given user depends on the user name: - outputUserRoot - install/ - fba9a2c87ee9589d72889caf082f1029/ <== Hash of the Bazel install manifest: installBase - _embedded_binaries/ <== Contains binaries and scripts unpacked from the data section of - the bazel executable on first run (such as helper scripts and the - main Java file BazelServer_deploy.jar) - 7ffd56a6e4cb724ea575aba15733d113/ <== Hash of the client's workspace root (such as - /home/user/src/my-project): outputBase - action_cache/ <== Action cache directory hierarchy - This contains the persistent record of the file - metadata (timestamps, and perhaps eventually also MD5 - sums) used by the FilesystemValueChecker. - command.log <== A copy of the stdout/stderr output from the most - recent bazel command. - external/ <== The directory that remote repositories are - downloaded/symlinked into. - server/ <== The Bazel server puts all server-related files (such - as socket file, logs, etc) here. 
- jvm.out <== The debugging output for the server. - execroot/ <== The working directory for all actions. For special - cases such as sandboxing and remote execution, the - actions run in a directory that mimics execroot. - Implementation details, such as where the directories - are created, are intentionally hidden from the action. - Every action can access its inputs and outputs relative - to the execroot directory. - _main/ <== Working tree for the Bazel build & root of symlink forest: execRoot - _bin/ <== Helper tools are linked from or copied to here. - - bazel-out/ <== All actual output of the build is under here: outputPath - _tmp/actions/ <== Action output directory. This contains a file with the - stdout/stderr for every action from the most recent - bazel run that produced output. - local_linux-fastbuild/ <== one subdirectory per unique target BuildConfiguration instance; - this is currently encoded - bin/ <== Bazel outputs binaries for target configuration here: $(BINDIR) - foo/bar/_objs/baz/ <== Object files for a cc_* rule named //foo/bar:baz - foo/bar/baz1.o <== Object files from source //foo/bar:baz1.cc - other_package/other.o <== Object files from source //other_package:other.cc - foo/bar/baz <== foo/bar/baz might be the artifact generated by a cc_binary named - //foo/bar:baz - foo/bar/baz.runfiles/ <== The runfiles symlink farm for the //foo/bar:baz executable. - MANIFEST - _main/ - ... - genfiles/ <== Bazel puts generated source for the target configuration here: - $(GENDIR) - foo/bar.h such as foo/bar.h might be a headerfile generated by //foo:bargen - testlogs/ <== Bazel internal test runner puts test log files here - foo/bartest.log such as foo/bar.log might be an output of the //foo:bartest test with - foo/bartest.status foo/bartest.status containing exit status of the test (such as - PASSED or FAILED (Exit 1), etc) - include/ <== a tree with include symlinks, generated as needed. The - bazel-include symlinks point to here. This is used for - linkstamp stuff, etc. - host/ <== BuildConfiguration for build host (user's workstation), for - building prerequisite tools, that will be used in later stages - of the build (ex: Protocol Compiler) - <packages>/ <== Packages referenced in the build appear as if under a regular workspace -``` - -The layout of the \*.runfiles directories is documented in more detail in the places pointed to by RunfilesSupport. - -## `bazel clean` - -`bazel clean` does an `rm -rf` on the `outputPath` and the `action_cache` -directory. It also removes the workspace symlinks. The `--expunge` option -will clean the entire outputBase. diff --git a/8.1.1/remote/persistent.mdx b/8.1.1/remote/persistent.mdx deleted file mode 100644 index 1a56946..0000000 --- a/8.1.1/remote/persistent.mdx +++ /dev/null @@ -1,265 +0,0 @@ ---- -title: 'Persistent Workers' ---- - - - -This page covers how to use persistent workers, the benefits, requirements, and -how workers affect sandboxing. - -A persistent worker is a long-running process started by the Bazel server, which -functions as a *wrapper* around the actual *tool* (typically a compiler), or is -the *tool* itself. In order to benefit from persistent workers, the tool must -support doing a sequence of compilations, and the wrapper needs to translate -between the tool's API and the request/response format described below. 
The same
-worker might be called with and without the `--persistent_worker` flag in the
-same build, and is responsible for appropriately starting and talking to the
-tool, as well as shutting down workers on exit. Each worker instance is assigned
-(but not chrooted to) a separate working directory under
-`<outputBase>/bazel-workers`.
-
-Using persistent workers is an
-[execution strategy](/docs/user-manual#execution-strategy) that decreases
-start-up overhead, allows more JIT compilation, and enables caching of, for
-example, the abstract syntax trees in the action execution. This strategy
-achieves these improvements by sending multiple requests to a long-running
-process.
-
-Persistent workers are implemented for multiple languages, including Java,
-[Scala](https://github.com/bazelbuild/rules_scala),
-[Kotlin](https://github.com/bazelbuild/rules_kotlin), and more.
-
-Programs using a NodeJS runtime can use the
-[@bazel/worker](https://www.npmjs.com/package/@bazel/worker) helper library to
-implement the worker protocol.
-
-## Using persistent workers
-
-[Bazel 0.27 and higher](https://blog.bazel.build/2019/06/19/list-strategy.html)
-uses persistent workers by default when executing builds, though remote
-execution takes precedence. For actions that do not support persistent workers,
-Bazel falls back to starting a tool instance for each action. You can explicitly
-set your build to use persistent workers by setting the `worker`
-[strategy](/docs/user-manual#execution-strategy) for the applicable tool
-mnemonics. As a best practice, this example includes specifying `local` as a
-fallback to the `worker` strategy:
-
-```posix-terminal
-bazel build //my:target --strategy=Javac=worker,local
-```
-
-Using the workers strategy instead of the local strategy can boost compilation
-speed significantly, depending on the implementation. For Java, builds can be 2–4
-times faster, sometimes more for incremental compilation. Compiling Bazel is
-about 2.5 times as fast with workers. For more details, see the
-"[Choosing number of workers](#number-of-workers)" section.
-
-If you also have a remote build environment that matches your local build
-environment, you can use the experimental
-[*dynamic* strategy](https://blog.bazel.build/2019/02/01/dynamic-spawn-scheduler.html),
-which races a remote execution and a worker execution. To enable the dynamic
-strategy, pass the
-[--experimental_spawn_scheduler](/reference/command-line-reference#flag--experimental_spawn_scheduler)
-flag. This strategy automatically enables workers, so there is no need to
-specify the `worker` strategy, but you can still use `local` or `sandboxed` as
-fallbacks.
-
-## Choosing number of workers
-
-The default number of worker instances per mnemonic is 4, but it can be adjusted
-with the
-[`worker_max_instances`](/reference/command-line-reference#flag--worker_max_instances)
-flag. There is a trade-off between making good use of the available CPUs and the
-amount of JIT compilation and cache hits you get. With more workers, more
-targets will pay start-up costs of running non-JITted code and hitting cold
-caches. If you have a small number of targets to build, a single worker may give
-the best trade-off between compilation speed and resource usage (for example,
-see [issue #8586](https://github.com/bazelbuild/bazel/issues/8586)).
-The `worker_max_instances` flag sets the maximum number of worker instances per -mnemonic and flag set (see below), so in a mixed system you could end up using -quite a lot of memory if you keep the default value. For incremental builds the -benefit of multiple worker instances is even smaller. - -This graph shows the from-scratch compilation times for Bazel (target -`//src:bazel`) on a 6-core hyper-threaded Intel Xeon 3.5 GHz Linux workstation -with 64 GB of RAM. For each worker configuration, five clean builds are run and -the average of the last four are taken. - -![Graph of performance improvements of clean builds](/docs/images/workers-clean-chart.png "Performance improvements of clean builds") - -**Figure 1.** Graph of performance improvements of clean builds. - -For this configuration, two workers give the fastest compile, though at only 14% -improvement compared to one worker. One worker is a good option if you want to -use less memory. - -Incremental compilation typically benefits even more. Clean builds are -relatively rare, but changing a single file between compiles is common, in -particular in test-driven development. The above example also has some non-Java -packaging actions to it that can overshadow the incremental compile time. - -Recompiling the Java sources only -(`//src/main/java/com/google/devtools/build/lib/bazel:BazelServer_deploy.jar`) -after changing an internal string constant in -[AbstractContainerizingSandboxedSpawn.java](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/sandbox/AbstractContainerizingSandboxedSpawn.java) -gives a 3x speed-up (average of 20 incremental builds with one warmup build -discarded): - -![Graph of performance improvements of incremental builds](/docs/images/workers-incremental-chart.png "Performance improvements of incremental builds") - -**Figure 2.** Graph of performance improvements of incremental builds. - -The speed-up depends on the change being made. A speed-up of a factor 6 is -measured in the above situation when a commonly used constant is changed. - -## Modifying persistent workers - -You can pass the -[`--worker_extra_flag`](/reference/command-line-reference#flag--worker_extra_flag) -flag to specify start-up flags to workers, keyed by mnemonic. For instance, -passing `--worker_extra_flag=javac=--debug` turns on debugging for Javac only. -Only one worker flag can be set per use of this flag, and only for one mnemonic. -Workers are not just created separately for each mnemonic, but also for -variations in their start-up flags. Each combination of mnemonic and start-up -flags is combined into a `WorkerKey`, and for each `WorkerKey` up to -`worker_max_instances` workers may be created. See the next section for how the -action configuration can also specify set-up flags. - -Passing the -[`--worker_sandboxing`](/reference/command-line-reference#flag--worker_sandboxing) -flag makes each worker request use a separate sandbox directory for all its -inputs. Setting up the [sandbox](/docs/sandboxing) takes some extra time, -especially on macOS, but gives a better correctness guarantee. - -The -[`--worker_quit_after_build`](/reference/command-line-reference#flag--worker_quit_after_build) -flag is mainly useful for debugging and profiling. This flag forces all workers -to quit once a build is done. You can also pass -[`--worker_verbose`](/reference/command-line-reference#flag--worker_verbose) to -get more output about what the workers are doing. 
This flag is reflected in the
-`verbosity` field in `WorkRequest`, allowing worker implementations to also be
-more verbose.
-
-Workers store their logs in the `<outputBase>/bazel-workers` directory, for
-example
-`/tmp/_bazel_larsrc/191013354bebe14fdddae77f2679c3ef/bazel-workers/worker-1-Javac.log`.
-The file name includes the worker id and the mnemonic. Since there can be more
-than one `WorkerKey` per mnemonic, you may see more than `worker_max_instances`
-log files for a given mnemonic.
-
-For Android builds, see details at the
-[Android Build Performance page](/docs/android-build-performance).
-
-## Implementing persistent workers
-
-See the [creating persistent workers](/remote/creating) page for more
-information on how to make a worker.
-
-This example shows a Starlark configuration for a worker that uses JSON:
-
-```python
-args_file = ctx.actions.declare_file(ctx.label.name + "_args_file")
-ctx.actions.write(
-    output = args_file,
-    # join() needs strings, so write the path of each source file.
-    content = "\n".join(["-g", "-source", "1.5"] + [f.path for f in ctx.files.srcs]),
-)
-ctx.actions.run(
-    mnemonic = "SomeCompiler",
-    executable = "bin/some_compiler_wrapper",
-    inputs = inputs,
-    outputs = outputs,
-    arguments = ["-max_mem=4G", "@%s" % args_file.path],
-    execution_requirements = {
-        "supports-workers" : "1", "requires-worker-protocol" : "json" }
-)
-```
-
-With this definition, the first use of this action would start by executing
-the command line `bin/some_compiler_wrapper -max_mem=4G --persistent_worker`.
-A request to compile `Foo.java` would then look like:
-
-NOTE: While the protocol buffer specification uses "snake case" (`request_id`),
-the JSON protocol uses "camel case" (`requestId`). In this document, we will use
-camel case in the JSON examples, but snake case when talking about the field
-regardless of protocol.
-
-```json
-{
-  "arguments": [ "-g", "-source", "1.5", "Foo.java" ],
-  "inputs": [
-    { "path": "symlinkfarm/input1", "digest": "d49a..." },
-    { "path": "symlinkfarm/input2", "digest": "093d..." }
-  ]
-}
-```
-
-The worker receives this on `stdin` in newline-delimited JSON format (because
-`requires-worker-protocol` is set to JSON). The worker then performs the action,
-and sends a JSON-formatted `WorkResponse` to Bazel on its stdout. Bazel then
-parses this response and manually converts it to a `WorkResponse` proto. To
-communicate with the associated worker using binary-encoded protobuf instead of
-JSON, `requires-worker-protocol` would be set to `proto`, like this:
-
-```
-  execution_requirements = {
-    "supports-workers" : "1",
-    "requires-worker-protocol" : "proto"
-  }
-```
-
-If you do not include `requires-worker-protocol` in the execution requirements,
-Bazel will default the worker communication to use protobuf.
-
-Bazel derives the `WorkerKey` from the mnemonic and the shared flags, so if this
-configuration allowed changing the `max_mem` parameter, a separate worker would
-be spawned for each value used. This can lead to excessive memory consumption if
-too many variations are used.
-
-Each worker can currently only process one request at a time. The experimental
-[multiplex workers](/remote/multiplex) feature allows using multiple
-threads, if the underlying tool is multithreaded and the wrapper is set up to
-understand this.
-
-In [this GitHub repo](https://github.com/Ubehebe/bazel-worker-examples), you can
-see example worker wrappers written in Java as well as in Python.
If you
-are working in JavaScript or TypeScript, the
-[@bazel/worker package](https://www.npmjs.com/package/@bazel/worker)
-and the
-[nodejs worker example](https://github.com/bazelbuild/rules_nodejs/tree/stable/examples/worker)
-might be helpful.
-
-## How do workers affect sandboxing?
-
-By default, using the `worker` strategy does not run the action in a
-[sandbox](/docs/sandboxing), similar to the `local` strategy. You can set the
-`--worker_sandboxing` flag to run all workers inside sandboxes, making sure each
-execution of the tool only sees the input files it's supposed to have. The tool
-may still leak information between requests internally, for instance through a
-cache. Using the `dynamic` strategy
-[requires workers to be sandboxed](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/exec/SpawnStrategyRegistry.java).
-
-To allow correct use of compiler caches with workers, a digest is passed along
-with each input file. Thus the compiler or the wrapper can check if the input is
-still valid without having to read the file.
-
-Even when using the input digests to guard against unwanted caching, sandboxed
-workers offer less strict sandboxing than a pure sandbox, because the tool may
-keep other internal state that has been affected by previous requests.
-
-Multiplex workers can only be sandboxed if the worker implementation supports it,
-and this sandboxing must be separately enabled with the
-`--experimental_worker_multiplex_sandboxing` flag. See more details in
-[the design doc](https://docs.google.com/document/d/1ncLW0hz6uDhNvci1dpzfEoifwTiNTqiBEm1vi-bIIRM/edit).
-
-## Further reading
-
-For more information on persistent workers, see:
-
-* [Original persistent workers blog post](https://blog.bazel.build/2015/12/10/java-workers.html)
-* [Haskell implementation description](https://www.tweag.io/blog/2019-09-25-bazel-ghc-persistent-worker-internship/)
-* [Blog post by Mike Morearty](https://medium.com/@mmorearty/how-to-create-a-persistent-worker-for-bazel-7738bba2cabb)
-* [Front End Development with Bazel: Angular/TypeScript and Persistent Workers
-  w/ Asana](https://www.youtube.com/watch?v=0pgERydGyqo)
-* [Bazel strategies explained](https://jmmv.dev/2019/12/bazel-strategies.html)
-* [Informative worker strategy discussion on the bazel-discuss mailing list](https://groups.google.com/forum/#!msg/bazel-discuss/oAEnuhYOPm8/ol7hf4KWJgAJ)
diff --git a/8.1.1/remote/rbe.mdx b/8.1.1/remote/rbe.mdx
deleted file mode 100644
index 75d4a15..0000000
--- a/8.1.1/remote/rbe.mdx
+++ /dev/null
@@ -1,33 +0,0 @@
----
-title: 'Remote Execution Overview'
----
-
-
-This page covers the benefits, requirements, and options for running Bazel
-with remote execution.
-
-By default, Bazel executes builds and tests on your local machine. Remote
-execution of a Bazel build allows you to distribute build and test actions
-across multiple machines, such as a datacenter.
-
-Remote execution provides the following benefits:
-
-* Faster build and test execution through scaling of nodes available
-  for parallel actions
-* A consistent execution environment for a development team
-* Reuse of build outputs across a development team
-
-Bazel uses an open-source
-[gRPC protocol](https://github.com/bazelbuild/remote-apis)
-to allow for remote execution and remote caching.
- -For a list of commercially supported remote execution services as well as -self-service tools, see -[Remote Execution Services](https://www.bazel.build/remote-execution-services.html) - -## Requirements - -Remote execution of Bazel builds imposes a set of mandatory configuration -constraints on the build. For more information, see -[Adapting Bazel Rules for Remote Execution](/remote/rules). diff --git a/8.1.1/remote/rules.mdx b/8.1.1/remote/rules.mdx deleted file mode 100644 index 340ab02..0000000 --- a/8.1.1/remote/rules.mdx +++ /dev/null @@ -1,180 +0,0 @@ ---- -title: 'Adapting Bazel Rules for Remote Execution' ---- - - - -This page is intended for Bazel users writing custom build and test rules -who want to understand the requirements for Bazel rules in the context of -remote execution. - -Remote execution allows Bazel to execute actions on a separate platform, such as -a datacenter. Bazel uses a -[gRPC protocol](https://github.com/bazelbuild/remote-apis/blob/main/build/bazel/remote/execution/v2/remote_execution.proto) -for its remote execution. You can try remote execution with -[bazel-buildfarm](https://github.com/bazelbuild/bazel-buildfarm), -an open-source project that aims to provide a distributed remote execution -platform. - -This page uses the following terminology when referring to different -environment types or *platforms*: - -* **Host platform** - where Bazel runs. -* **Execution platform** - where Bazel actions run. -* **Target platform** - where the build outputs (and some actions) run. - -## Overview - -When configuring a Bazel build for remote execution, you must follow the -guidelines described in this page to ensure the build executes remotely -error-free. This is due to the nature of remote execution, namely: - -* **Isolated build actions.** Build tools do not retain state and dependencies - cannot leak between them. - -* **Diverse execution environments.** Local build configuration is not always - suitable for remote execution environments. - -This page describes the issues that can arise when implementing custom Bazel -build and test rules for remote execution and how to avoid them. It covers the -following topics: - -* [Invoking build tools through toolchain rules](#toolchain-rules) -* [Managing implicit dependencies](#manage-dependencies) -* [Managing platform-dependent binaries](#manage-binaries) -* [Managing configure-style WORKSPACE rules](#manage-workspace-rules) - -## Invoking build tools through toolchain rules - -A Bazel toolchain rule is a configuration provider that tells a build rule what -build tools, such as compilers and linkers, to use and how to configure them -using parameters defined by the rule's creator. A toolchain rule allows build -and test rules to invoke build tools in a predictable, preconfigured manner -that's compatible with remote execution. For example, use a toolchain rule -instead of invoking build tools via the `PATH`, `JAVA_HOME`, or other local -variables that may not be set to equivalent values (or at all) in the remote -execution environment. 
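-
-As an illustrative sketch (not taken from a real ruleset: the toolchain type
-label and the `compiler` field it exposes are hypothetical), a rule might
-resolve its build tool through a toolchain like this instead of probing `PATH`:
-
-```python
-def _my_compile_impl(ctx):
-    # Resolve the registered toolchain rather than relying on the host's PATH.
-    toolchain = ctx.toolchains["//tools/my_lang:toolchain_type"]
-    out = ctx.actions.declare_file(ctx.label.name + ".out")
-    ctx.actions.run(
-        executable = toolchain.compiler,  # tool provided by the toolchain
-        tools = [toolchain.compiler],
-        arguments = [ctx.file.src.path, "-o", out.path],
-        inputs = [ctx.file.src],
-        outputs = [out],
-    )
-    return [DefaultInfo(files = depset([out]))]
-
-my_compile = rule(
-    implementation = _my_compile_impl,
-    attrs = {"src": attr.label(allow_single_file = True)},
-    toolchains = ["//tools/my_lang:toolchain_type"],
-)
-```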
-
-Toolchain rules currently exist for Bazel build and test rules for
-[Scala](https://github.com/bazelbuild/rules_scala/blob/master/scala/scala_toolchain.bzl),
-[Rust](https://github.com/bazelbuild/rules_rust/blob/main/rust/toolchain.bzl),
-and [Go](https://github.com/bazelbuild/rules_go/blob/master/go/toolchains.rst),
-and new toolchain rules are under way for other languages and tools such as
-[bash](https://docs.google.com/document/d/e/2PACX-1vRCSB_n3vctL6bKiPkIa_RN_ybzoAccSe0ic8mxdFNZGNBJ3QGhcKjsL7YKf-ngVyjRZwCmhi_5KhcX/pub).
-If a toolchain rule does not exist for the tool your rule uses, consider
-[creating a toolchain rule](/extending/toolchains#creating-a-toolchain-rule).
-
-## Managing implicit dependencies
-
-If a build tool can access dependencies across build actions, those actions will
-fail when remotely executed because each remote build action is executed
-separately from others. Some build tools retain state across build actions and
-access dependencies that have not been explicitly included in the tool
-invocation, which will cause remotely executed build actions to fail.
-
-For example, when Bazel instructs a stateful compiler to locally build _foo_,
-the compiler retains references to foo's build outputs. When Bazel then
-instructs the compiler to build _bar_, which depends on _foo_, without
-explicitly stating that dependency in the BUILD file for inclusion in the
-compiler invocation, the action executes successfully as long as the same
-compiler instance executes for both actions (as is typical for local execution).
-However, since in a remote execution scenario each build action executes a
-separate compiler instance, compiler state and _bar_'s implicit dependency on
-_foo_ will be lost and the build will fail.
-
-To help detect and eliminate these dependency problems, Bazel 0.14.1 offers the
-local Docker sandbox, which has the same restrictions for dependencies as remote
-execution. Use the sandbox to prepare your build for remote execution by
-identifying and resolving dependency-related build errors. See [Troubleshooting Bazel Remote Execution with Docker Sandbox](/remote/sandbox)
-for more information.
-
-## Managing platform-dependent binaries
-
-Typically, a binary built on the host platform cannot safely execute on an
-arbitrary remote execution platform due to potentially mismatched dependencies.
-For example, the SingleJar binary supplied with Bazel targets the host platform.
-However, for remote execution, SingleJar must be compiled as part of the process
-of building your code so that it targets the remote execution platform. (See the
-[target selection logic](https://github.com/bazelbuild/bazel/blob/130aeadfd660336572c3da397f1f107f0c89aa8d/tools/jdk/BUILD#L115).)
-
-Do not ship binaries of build tools required by your build with your source code
-unless you are sure they will safely run in your execution platform. Instead, do
-one of the following:
-
-* Ship or externally reference the source code for the tool so that it can be
-  built for the remote execution platform.
-
-* Pre-install the tool into the remote execution environment (for example, a
-  toolchain container) if it's stable enough and use toolchain rules to run it
-  in your build.
-
-## Managing configure-style WORKSPACE rules
-
-Bazel's `WORKSPACE` rules can be used for probing the host platform for tools
-and libraries required by the build, which, for local builds, is also Bazel's
-execution platform.
If the build explicitly depends on local build tools and -artifacts, it will fail during remote execution if the remote execution platform -is not identical to the host platform. - -The following actions performed by `WORKSPACE` rules are not compatible with -remote execution: - -* **Building binaries.** Executing compilation actions in `WORKSPACE` rules - results in binaries that are incompatible with the remote execution platform - if different from the host platform. - -* **Installing `pip` packages.** `pip` packages installed via `WORKSPACE` - rules require that their dependencies be pre-installed on the host platform. - Such packages, built specifically for the host platform, will be - incompatible with the remote execution platform if different from the host - platform. - -* **Symlinking to local tools or artifacts.** Symlinks to tools or libraries - installed on the host platform created via `WORKSPACE` rules will cause the - build to fail on the remote execution platform as Bazel will not be able to - locate them. Instead, create symlinks using standard build actions so that - the symlinked tools and libraries are accessible from Bazel's `runfiles` - tree. Do not use [`repository_ctx.symlink`](/rules/lib/builtins/repository_ctx#symlink) - to symlink target files outside of the external repo directory. - -* **Mutating the host platform.** Avoid creating files outside of the Bazel - `runfiles` tree, creating environment variables, and similar actions, as - they may behave unexpectedly on the remote execution platform. - -To help find potential non-hermetic behavior you can use [Workspace rules log](/remote/workspace). - -If an external dependency executes specific operations dependent on the host -platform, you should split those operations between `WORKSPACE` and build -rules as follows: - -* **Platform inspection and dependency enumeration.** These operations are - safe to execute locally via `WORKSPACE` rules, which can check which - libraries are installed, download packages that must be built, and prepare - required artifacts for compilation. For remote execution, these rules must - also support using pre-checked artifacts to provide the information that - would normally be obtained during host platform inspection. Pre-checked - artifacts allow Bazel to describe dependencies as if they were local. Use - conditional statements or the `--override_repository` flag for this. - -* **Generating or compiling target-specific artifacts and platform mutation**. - These operations must be executed via regular build rules. Actions that - produce target-specific artifacts for external dependencies must execute - during the build. - -To more easily generate pre-checked artifacts for remote execution, you can use -`WORKSPACE` rules to emit generated files. You can run those rules on each new -execution environment, such as inside each toolchain container, and check the -outputs of your remote execution build in to your source repo to reference. - -For example, for Tensorflow's rules for [`cuda`](https://github.com/tensorflow/tensorflow/blob/master/third_party/gpus/cuda_configure.bzl) -and [`python`](https://github.com/tensorflow/tensorflow/blob/master/third_party/py/python_configure.bzl), -the `WORKSPACE` rules produce the following [`BUILD files`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/third_party/toolchains/cpus/py). -For local execution, files produced by checking the host environment are used. 
-For remote execution, a [conditional statement](https://github.com/tensorflow/tensorflow/blob/master/third_party/py/python_configure.bzl#L304) -on an environment variable allows the rule to use files that are checked into -the repo. - -The `BUILD` files declare [`genrules`](https://github.com/tensorflow/tensorflow/blob/master/third_party/py/python_configure.bzl#L84) -that can run both locally and remotely, and perform the necessary processing -that was previously done via `repository_ctx.symlink` as shown [here](https://github.com/tensorflow/tensorflow/blob/d1ba01f81d8fa1d0171ba9ce871599063d5c7eb9/third_party/gpus/cuda_configure.bzl#L730). diff --git a/8.1.1/remote/sandbox.mdx b/8.1.1/remote/sandbox.mdx deleted file mode 100644 index cfb9be4..0000000 --- a/8.1.1/remote/sandbox.mdx +++ /dev/null @@ -1,260 +0,0 @@ ---- -title: 'Troubleshooting Bazel Remote Execution with Docker Sandbox' ---- - - - -Bazel builds that succeed locally may fail when executed remotely due to -restrictions and requirements that do not affect local builds. The most common -causes of such failures are described in [Adapting Bazel Rules for Remote Execution](/remote/rules). - -This page describes how to identify and resolve the most common issues that -arise with remote execution using the Docker sandbox feature, which imposes -restrictions upon the build equal to those of remote execution. This allows you -to troubleshoot your build without the need for a remote execution service. - -The Docker sandbox feature mimics the restrictions of remote execution as -follows: - -* **Build actions execute in toolchain containers.** You can use the same - toolchain containers to run your build locally and remotely via a service - supporting containerized remote execution. - -* **No extraneous data crosses the container boundary.** Only explicitly - declared inputs and outputs enter and leave the container, and only after - the associated build action successfully completes. - -* **Each action executes in a fresh container.** A new, unique container is - created for each spawned build action. - -Note: Builds take noticeably more time to complete when the Docker sandbox -feature is enabled. This is normal. - -You can troubleshoot these issues using one of the following methods: - -* **[Troubleshooting natively.](#troubleshooting-natively)** With this method, - Bazel and its build actions run natively on your local machine. The Docker - sandbox feature imposes restrictions upon the build equal to those of remote - execution. However, this method will not detect local tools, states, and - data leaking into your build, which will cause problems with remote execution. - -* **[Troubleshooting in a Docker container.](#troubleshooting-docker-container)** - With this method, Bazel and its build actions run inside a Docker container, - which allows you to detect tools, states, and data leaking from the local - machine into the build in addition to imposing restrictions - equal to those of remote execution. This method provides insight into your - build even if portions of the build are failing. This method is experimental - and not officially supported. - -## Prerequisites - -Before you begin troubleshooting, do the following if you have not already done so: - -* Install Docker and configure the permissions required to run it. -* Install Bazel 0.14.1 or later. Earlier versions do not support the Docker - sandbox feature. 
-* Add the [bazel-toolchains](https://releases.bazel.build/bazel-toolchains.html)
-  repo, pinned to the latest release version, to your build's `WORKSPACE` file
-  as described [here](https://releases.bazel.build/bazel-toolchains.html).
-* Add flags to your `.bazelrc` file to enable the feature. Create the file in
-  the root directory of your Bazel project if it does not exist. The flags below
-  are a reference sample. Please see the latest
-  [`.bazelrc`](https://github.com/bazelbuild/bazel-toolchains/tree/master/bazelrc)
-  file in the bazel-toolchains repo and copy the values of the flags defined
-  there for config `docker-sandbox`.
-
-```
-# Docker Sandbox Mode
-build:docker-sandbox --host_javabase=<...>
-build:docker-sandbox --javabase=<...>
-build:docker-sandbox --crosstool_top=<...>
-build:docker-sandbox --experimental_docker_image=<...>
-build:docker-sandbox --spawn_strategy=docker --strategy=Javac=docker --genrule_strategy=docker
-build:docker-sandbox --define=EXECUTOR=remote
-build:docker-sandbox --experimental_docker_verbose
-build:docker-sandbox --experimental_enable_docker_sandbox
-```
-
-Note: The flags referenced in the `.bazelrc` file shown above are configured
-to run within the [`rbe-ubuntu16-04`](https://console.cloud.google.com/launcher/details/google/rbe-ubuntu16-04)
-container.
-
-If your rules require additional tools, do the following:
-
-1. Create a custom Docker container by installing tools using a [Dockerfile](https://docs.docker.com/engine/reference/builder/)
-   and [building](https://docs.docker.com/engine/reference/commandline/build/)
-   the image locally.
-
-2. Replace the value of the `--experimental_docker_image` flag above with the
-   name of your custom container image.
-
-
-## Troubleshooting natively
-
-This method executes Bazel and all of its build actions directly on the local
-machine and is a reliable way to confirm whether your build will succeed when
-executed remotely.
-
-However, with this method, locally installed tools, binaries, and data may leak
-into your build, especially if it uses [configure-style WORKSPACE rules](/remote/rules#manage-workspace-rules).
-Such leaks will cause problems with remote execution; to detect them, [troubleshoot in a Docker container](#troubleshooting-docker-container)
-in addition to troubleshooting natively.
-
-### Step 1: Run the build
-
-1. Add the `--config=docker-sandbox` flag to the Bazel command that executes
-   your build. For example:
-
-   ```posix-terminal
-   bazel --bazelrc=.bazelrc build --config=docker-sandbox <target>
-   ```
-
-2. Run the build and wait for it to complete. The build will run up to four
-   times slower than normal due to the Docker sandbox feature.
-
-You may encounter the following error:
-
-```none
-ERROR: 'docker' is an invalid value for docker spawn strategy.
-```
-
-If you do, run the build again with the `--experimental_docker_verbose` flag.
-This flag enables verbose error messages. This error is typically caused by a
-faulty Docker installation or lack of permissions to execute it under the
-current user account. See the [Docker documentation](https://docs.docker.com/install/linux/linux-postinstall/)
-for more information. If problems persist, skip ahead to [Troubleshooting in a Docker container](#troubleshooting-docker-container).
-
-### Step 2: Resolve detected issues
-
-The following are the most commonly encountered issues and their workarounds.
-
-* **A file, tool, binary, or resource referenced by the Bazel runfiles tree is
-  missing.** Confirm that all dependencies of the affected targets have been
-  [explicitly declared](/concepts/dependencies). See
-  [Managing implicit dependencies](/remote/rules#manage-dependencies)
-  for more information.
-
-* **A file, tool, binary, or resource referenced by an absolute path or the `PATH`
-  variable is missing.** Confirm that all required tools are installed within
-  the toolchain container and use [toolchain rules](/extending/toolchains) to properly
-  declare dependencies pointing to the missing resource. See
-  [Invoking build tools through toolchain rules](/remote/rules#invoking-build-tools-through-toolchain-rules)
-  for more information.
-
-* **A binary execution fails.** One of the build rules is referencing a binary
-  incompatible with the execution environment (the Docker container). See
-  [Managing platform-dependent binaries](/remote/rules#manage-binaries)
-  for more information. If you cannot resolve the issue, contact [bazel-discuss@google.com](mailto:bazel-discuss@google.com)
-  for help.
-
-* **A file from `@local_jdk` is missing or causing errors.** The Java binaries
-  on your local machine are leaking into the build while being incompatible with
-  it. Use [`java_toolchain`](/reference/be/java#java_toolchain)
-  in your rules and targets instead of `@local_jdk`. Contact [bazel-discuss@google.com](mailto:bazel-discuss@google.com) if you need further help.
-
-* **Other errors.** Contact [bazel-discuss@google.com](mailto:bazel-discuss@google.com) for help.
-
-## Troubleshooting in a Docker container
-
-With this method, Bazel runs inside a host Docker container, and Bazel's build
-actions execute inside individual toolchain containers spawned by the Docker
-sandbox feature. The sandbox spawns a brand new toolchain container for each
-build action and only one action executes in each toolchain container.
-
-This method provides more granular control of tools installed in the host
-environment. By separating the execution of the build from the execution of its
-build actions and keeping the installed tooling to a minimum, you can verify
-whether your build has any dependencies on the local execution environment.
-
-### Step 1: Build the container
-
-Note: The commands below are tailored specifically for a `debian:stretch` base.
-For other bases, modify them as necessary.
-
-1. Create a `Dockerfile` that creates the Docker container and installs Bazel
-   with a minimal set of build tools:
-
-   ```
-   FROM debian:stretch
-
-   RUN apt-get update && apt-get install -y apt-transport-https curl software-properties-common git gcc gnupg2 g++ openjdk-8-jdk-headless python-dev zip wget vim
-
-   RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add -
-
-   RUN add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian $(lsb_release -cs) stable"
-
-   RUN apt-get update && apt-get install -y docker-ce
-
-   RUN wget https://releases.bazel.build/<bazel-version>/release/bazel-<bazel-version>-installer-linux-x86_64.sh -O ./bazel-installer.sh && chmod 755 ./bazel-installer.sh
-
-   RUN ./bazel-installer.sh
-   ```
-
-2. Build the container as `bazel_container`:
-
-   ```posix-terminal
-   docker build -t bazel_container - < Dockerfile
-   ```
-
-### Step 2: Start the container
-
-Start the Docker container using the command shown below. In the command,
-substitute the path to the source code on your host that you want to build.
-
-```posix-terminal
-docker run -it \
-  -v /var/run/docker.sock:/var/run/docker.sock \
-  -v /tmp:/tmp \
-  -v <your source code directory>:/src \
-  -w /src \
-  bazel_container \
-  /bin/bash
-```
-
-This command runs the container as root, mapping the docker socket, and mounting
-the `/tmp` directory. This allows Bazel to spawn other Docker containers and to
-use directories under `/tmp` to share files with those containers. Your source
-code is available at `/src` inside the container.
-
-The command intentionally starts from a `debian:stretch` base container that
-includes binaries incompatible with the `rbe-ubuntu16-04` container used as a
-toolchain container. If binaries from the local environment are leaking into the
-toolchain container, they will cause build errors.
-
-### Step 3: Test the container
-
-Run the following commands from inside the Docker container to test it:
-
-```posix-terminal
-docker ps
-
-bazel version
-```
-
-### Step 4: Run the build
-
-Run the build as shown below. The output user is root so that it corresponds to
-a directory that is accessible with the same absolute path from inside the host
-container in which Bazel runs, from the toolchain containers spawned by the Docker
-sandbox feature in which Bazel's build actions are running, and from the local
-machine on which the host and action containers run.
-
-```posix-terminal
-bazel --output_user_root=/tmp/bazel_docker_root --bazelrc=.bazelrc \
-  build --config=docker-sandbox <target>
-```
-
-### Step 5: Resolve detected issues
-
-You can resolve build failures as follows:
-
-* If the build fails with an "out of disk space" error, you can increase this
-  limit by starting the host container with the flag `--memory=XX` where `XX`
-  is the allocated disk space in gigabytes. This is experimental and may
-  result in unpredictable behavior.
-
-* If the build fails during the analysis or loading phases, one or more of
-  your build rules declared in the WORKSPACE file are not compatible with
-  remote execution. See [Adapting Bazel Rules for Remote Execution](/remote/rules)
-  for possible causes and workarounds.
-
-* If the build fails for any other reason, see the troubleshooting steps in [Step 2: Resolve detected issues](#step-2-resolve-detected-issues).
diff --git a/8.1.1/remote/workspace.mdx b/8.1.1/remote/workspace.mdx
deleted file mode 100644
index ae0aea5..0000000
--- a/8.1.1/remote/workspace.mdx
+++ /dev/null
@@ -1,127 +0,0 @@
----
-title: 'Finding Non-Hermetic Behavior in WORKSPACE Rules'
----
-
-
-
-In the following, a host machine is the machine where Bazel runs.
-
-When using remote execution, the actual build and/or test steps are not
-happening on the host machine, but are instead sent off to the remote execution
-system. However, the steps involved in resolving workspace rules are happening
-on the host machine. If your workspace rules access information about the
-host machine for use during execution, your build is likely to break due to
-incompatibilities between the environments.
-
-As part of [adapting Bazel rules for remote
-execution](/remote/rules), you need to find such workspace rules
-and fix them. This page describes how to find potentially problematic workspace
-rules using the workspace log.
-
-
-## Finding non-hermetic rules
-
-[Workspace rules](/reference/be/workspace) allow the developer to add dependencies to
-external workspaces, but they are rich enough to allow arbitrary processing to
-happen in the process.
All related commands are happening locally and can be a
-potential source of non-hermeticity. Usually non-hermetic behavior is
-introduced through
-[`repository_ctx`](/rules/lib/builtins/repository_ctx) which allows interacting
-with the host machine.
-
-Starting with Bazel 0.18, you can get a log of some potentially non-hermetic
-actions by adding the flag `--experimental_workspace_rules_log_file=[PATH]` to
-your Bazel command. Here `[PATH]` is a filename under which the log will be
-created.
-
-Things to note:
-
-* the log captures the events as they are executed. If some steps are
-  cached, they will not show up in the log, so to get a full result, don't
-  forget to run `bazel clean --expunge` beforehand.
-
-* Sometimes functions might be re-executed, in which case the related
-  events will show up in the log multiple times.
-
-* Workspace rules currently only log Starlark events.
-
-  Note: These particular rules do not cause hermeticity concerns as long
-  as a hash is specified.
-
-To find what was executed during workspace initialization:
-
-1. Run `bazel clean --expunge`. This command will clean your local cache and
-   any cached repositories, ensuring that all initialization will be re-run.
-
-2. Add `--experimental_workspace_rules_log_file=/tmp/workspacelog` to your
-   Bazel command and run the build.
-
-   This produces a binary proto file listing messages of type
-   [WorkspaceEvent](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/bazel/debug/workspace_log.proto?q=WorkspaceEvent)
-
-3. Download the Bazel source code and navigate to the Bazel folder by using
-   the command below. You need the source code to be able to parse the
-   workspace log with the
-   [workspacelog parser](https://source.bazel.build/bazel/+/master:src/tools/workspacelog/).
-
-   ```posix-terminal
-   git clone https://github.com/bazelbuild/bazel.git
-
-   cd bazel
-   ```
-
-4. In the Bazel source code repo, convert the whole workspace log to text.
-
-   ```posix-terminal
-   bazel build src/tools/workspacelog:parser
-
-   bazel-bin/src/tools/workspacelog/parser --log_path=/tmp/workspacelog > /tmp/workspacelog.txt
-   ```
-
-5. The output may be quite verbose and include output from built-in Bazel
-   rules.
-
-   To exclude specific rules from the output, use the `--exclude_rule` option.
-   For example:
-
-   ```posix-terminal
-   bazel build src/tools/workspacelog:parser
-
-   bazel-bin/src/tools/workspacelog/parser --log_path=/tmp/workspacelog \
-       --exclude_rule "//external:local_config_cc" \
-       --exclude_rule "//external:dep" > /tmp/workspacelog.txt
-   ```
-
-6. Open `/tmp/workspacelog.txt` and check for unsafe operations.
-
-The log consists of
-[WorkspaceEvent](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/bazel/debug/workspace_log.proto?q=WorkspaceEvent)
-messages outlining certain potentially non-hermetic actions performed on a
-[`repository_ctx`](/rules/lib/builtins/repository_ctx).
-
-The actions that have been highlighted as potentially non-hermetic are as follows:
-
-* `execute`: executes an arbitrary command on the host environment. Check if
-  these may introduce any dependencies on the host environment.
-
-* `download`, `download_and_extract`: to ensure hermetic builds, make sure
-  that sha256 is specified
-
-* `file`, `template`: this is not non-hermetic in itself, but may be a mechanism
-  for introducing dependencies on the host environment into the repository.
- Ensure that you understand where the input comes from, and that it does not - depend on the host environment. - -* `os`: this is not non-hermetic in itself, but an easy way to get dependencies - on the host environment. A hermetic build would generally not call this. - In evaluating whether your usage is hermetic, keep in mind that this is - running on the host and not on the workers. Getting environment specifics - from the host is generally not a good idea for remote builds. - -* `symlink`: this is normally safe, but look for red flags. Any symlinks to - outside the repository or to an absolute path would cause problems on the - remote worker. If the symlink is created based on host machine properties - it would probably be problematic as well. - -* `which`: checking for programs installed on the host is usually problematic - since the workers may have different configurations. diff --git a/8.1.1/rules/bzl-style.mdx b/8.1.1/rules/bzl-style.mdx deleted file mode 100644 index 941028a..0000000 --- a/8.1.1/rules/bzl-style.mdx +++ /dev/null @@ -1,212 +0,0 @@ ---- -title: '.bzl style guide' ---- - - - -This page covers basic style guidelines for Starlark and also includes -information on macros and rules. - -[Starlark](/rules/language) is a -language that defines how software is built, and as such it is both a -programming and a configuration language. - -You will use Starlark to write `BUILD` files, macros, and build rules. Macros and -rules are essentially meta-languages - they define how `BUILD` files are written. -`BUILD` files are intended to be simple and repetitive. - -All software is read more often than it is written. This is especially true for -Starlark, as engineers read `BUILD` files to understand dependencies of their -targets and details of their builds. This reading will often happen in passing, -in a hurry, or in parallel to accomplishing some other task. Consequently, -simplicity and readability are very important so that users can parse and -comprehend `BUILD` files quickly. - -When a user opens a `BUILD` file, they quickly want to know the list of targets in -the file; or review the list of sources of that C++ library; or remove a -dependency from that Java binary. Each time you add a layer of abstraction, you -make it harder for a user to do these tasks. - -`BUILD` files are also analyzed and updated by many different tools. Tools may not -be able to edit your `BUILD` file if it uses abstractions. Keeping your `BUILD` -files simple will allow you to get better tooling. As a code base grows, it -becomes more and more frequent to do changes across many `BUILD` files in order to -update a library or do a cleanup. - -Important: Do not create a variable or macro just to avoid some amount of -repetition in `BUILD` files. Your `BUILD` file should be easily readable both by -developers and tools. The -[DRY](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself) principle doesn't -really apply here. - -## General advice - -* Use [Buildifier](https://github.com/bazelbuild/buildtools/tree/master/buildifier#linter) - as a formatter and linter. -* Follow [testing guidelines](/rules/testing). - -## Style - -### Python style - -When in doubt, follow the -[PEP 8 style guide](https://www.python.org/dev/peps/pep-0008/) where possible. -In particular, use four rather than two spaces for indentation to follow the -Python convention. - -Since -[Starlark is not Python](/rules/language#differences-with-python), -some aspects of Python style do not apply. 
For example, PEP 8 advises that
-comparisons to singletons be done with `is`, which is not an operator in
-Starlark.
-
-
-### Docstring
-
-Document files and functions using [docstrings](https://github.com/bazelbuild/buildtools/blob/master/WARNINGS.md#function-docstring).
-Use a docstring at the top of each `.bzl` file, and a docstring for each public
-function.
-
-### Document rules and aspects
-
-Rules and aspects, along with their attributes, as well as providers and their
-fields, should be documented using the `doc` argument.
-
-### Naming convention
-
-* Variables and function names use lowercase with words separated by
-  underscores (`[a-z][a-z0-9_]*`), such as `cc_library`.
-* Top-level private values start with one underscore. Bazel enforces that
-  private values cannot be used from other files. Local variables should not
-  use the underscore prefix.
-
-### Line length
-
-As in `BUILD` files, there is no strict line length limit as labels can be long.
-When possible, try to use at most 79 characters per line (following Python's
-style guide, [PEP 8](https://www.python.org/dev/peps/pep-0008/)). This guideline
-should not be enforced strictly: editors should display more than 80 columns,
-automated changes will frequently introduce longer lines, and humans shouldn't
-spend time splitting lines that are already readable.
-
-### Keyword arguments
-
-In keyword arguments, spaces around the equal sign are preferred:
-
-```python
-def fct(name, srcs):
-    filtered_srcs = my_filter(source = srcs)
-    native.cc_library(
-        name = name,
-        srcs = filtered_srcs,
-        testonly = True,
-    )
-```
-
-### Boolean values
-
-Prefer values `True` and `False` (rather than `1` and `0`) for boolean values
-(such as when using a boolean attribute in a rule).
-
-### Use print only for debugging
-
-Do not use the `print()` function in production code; it is only intended for
-debugging, and will spam all direct and indirect users of your `.bzl` file. The
-only exception is that you may submit code that uses `print()` if it is disabled
-by default and can only be enabled by editing the source -- for example, if all
-uses of `print()` are guarded by `if DEBUG:` where `DEBUG` is hardcoded to
-`False`. Be mindful of whether these statements are useful enough to justify
-their impact on readability.
-
-## Macros
-
-A macro is a function that instantiates one or more rules during the loading
-phase. In general, use rules whenever possible instead of macros. The build
-graph seen by the user is not the same as the one used by Bazel during the
-build - macros are expanded *before Bazel does any build graph analysis.*
-
-Because of this, when something goes wrong, the user will need to understand
-your macro's implementation to troubleshoot build problems. Additionally, `bazel
-query` results can be hard to interpret because targets shown in the results
-come from macro expansion. Finally, aspects are not aware of macros, so tooling
-depending on aspects (IDEs and others) might fail.
-
-A safe use for macros is for defining additional targets intended to be
-referenced directly at the Bazel CLI or in BUILD files: In that case, only the
-*end users* of those targets need to know about them, and any build problems
-introduced by macros are never far from their usage.
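As a minimal sketch of this safe pattern (the `integration_tests` macro and its naming scheme are illustrative, not part of the style guide):

```python
# A hypothetical macro whose generated targets are all meant to be
# referenced directly by end users at the CLI.
def integration_tests(name, srcs):
    """Defines one sh_test per source file plus a test_suite wrapping them."""
    tests = []
    for src in srcs:
        test_name = "%s_%s" % (name, src.replace(".sh", ""))
        native.sh_test(
            name = test_name,
            srcs = [src],
        )
        tests.append(":" + test_name)
    native.test_suite(
        name = name,
        tests = tests,
    )
```

Each generated `sh_test` here is intended to be invoked directly with `bazel test`, so none of these targets is a hidden implementation detail.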
-
-For macros that define generated targets (implementation details of the macro
-which are not supposed to be referred to at the CLI or depended on by targets
-not instantiated by that macro), follow these best practices:
-
-* A macro should take a `name` argument and define a target with that name.
-  That target becomes that macro's *main target*.
-* Generated targets, that is, all other targets defined by a macro, should:
-  * Have their names prefixed by `<name>` or `_<name>`. For example, using
-    `name = '%s_bar' % (name)`.
-  * Have restricted visibility (`//visibility:private`), and
-  * Have a `manual` tag to avoid expansion in wildcard targets (`:all`,
-    `...`, `:*`, etc).
-* The `name` should only be used to derive names of targets defined by the
-  macro, and not for anything else. For example, don't use the name to derive
-  a dependency or input file that is not generated by the macro itself.
-* All the targets created in the macro should be coupled in some way to the
-  main target.
-* Conventionally, `name` should be the first argument when defining a macro.
-* Keep the parameter names in the macro consistent. If a parameter is passed
-  as an attribute value to the main target, keep its name the same. If a macro
-  parameter serves the same purpose as a common rule attribute, such as
-  `deps`, name it as you would the attribute (see below).
-* When calling a macro, use only keyword arguments. This is consistent with
-  rules, and greatly improves readability.
-
-Engineers often write macros when the Starlark API of relevant rules is
-insufficient for their specific use case, regardless of whether the rule is
-defined within Bazel in native code, or in Starlark. If you're facing this
-problem, ask the rule author if they can extend the API to accomplish your
-goals.
-
-As a rule of thumb, the more macros resemble the rules, the better.
-
-See also [macros](/extending/macros#conventions).
-
-## Rules
-
-* Rules, aspects, and their attributes should use lower_case names ("snake
-  case").
-* Rule names are nouns that describe the main kind of artifact produced by the
-  rule, from the point of view of its dependencies (or for leaf rules, the
-  user). This is not necessarily a file suffix. For instance, a rule that
-  produces C++ artifacts meant to be used as Python extensions might be called
-  `py_extension`. For most languages, typical rules include:
-  * `*_library` - a compilation unit or "module".
-  * `*_binary` - a target producing an executable or a deployment unit.
-  * `*_test` - a test target. This can include multiple tests. Expect all
-    tests in a `*_test` target to be variations on the same theme, for
-    example, testing a single library.
-  * `*_import` - a target encapsulating a pre-compiled artifact, such as a
-    `.jar`, or a `.dll` that is used during compilation.
-* Use consistent names and types for attributes. Some generally applicable
-  attributes include:
-  * `srcs`: `label_list`, allowing files: source files, typically
-    human-authored.
-  * `deps`: `label_list`, typically *not* allowing files: compilation
-    dependencies.
-  * `data`: `label_list`, allowing files: data files, such as test data etc.
-  * `runtime_deps`: `label_list`: runtime dependencies that are not needed
-    for compilation.
-* For any attributes with non-obvious behavior (for example, string templates
-  with special substitutions, or tools that are invoked with specific
-  requirements), provide documentation using the `doc` keyword argument to the
-  attribute's declaration (`attr.label_list()` or similar).
-* Rule implementation functions should almost always be private functions - (named with a leading underscore). A common style is to give the - implementation function for `myrule` the name `_myrule_impl`. -* Pass information between your rules using a well-defined - [provider](/extending/rules#providers) interface. Declare and document provider - fields. -* Design your rule with extensibility in mind. Consider that other rules might - want to interact with your rule, access your providers, and reuse the - actions you create. -* Follow [performance guidelines](/rules/performance) in your rules. diff --git a/8.1.1/rules/challenges.mdx b/8.1.1/rules/challenges.mdx deleted file mode 100644 index 10ff737..0000000 --- a/8.1.1/rules/challenges.mdx +++ /dev/null @@ -1,223 +0,0 @@ ---- -title: 'Challenges of Writing Rules' ---- - - - -This page gives a high-level overview of the specific issues and challenges -of writing efficient Bazel rules. - -## Summary Requirements - -* Assumption: Aim for Correctness, Throughput, Ease of Use & Latency -* Assumption: Large Scale Repositories -* Assumption: BUILD-like Description Language -* Historic: Hard Separation between Loading, Analysis, and Execution is - Outdated, but still affects the API -* Intrinsic: Remote Execution and Caching are Hard -* Intrinsic: Using Change Information for Correct and Fast Incremental Builds - requires Unusual Coding Patterns -* Intrinsic: Avoiding Quadratic Time and Memory Consumption is Hard - -## Assumptions - -Here are some assumptions made about the build system, such as need for -correctness, ease of use, throughput, and large scale repositories. The -following sections address these assumptions and offer guidelines to ensure -rules are written in an effective manner. - -### Aim for correctness, throughput, ease of use & latency - -We assume that the build system needs to be first and foremost correct with -respect to incremental builds. For a given source tree, the output of the -same build should always be the same, regardless of what the output tree looks -like. In the first approximation, this means Bazel needs to know every single -input that goes into a given build step, such that it can rerun that step if any -of the inputs change. There are limits to how correct Bazel can get, as it leaks -some information such as date / time of the build, and ignores certain types of -changes such as changes to file attributes. [Sandboxing](/docs/sandboxing) -helps ensure correctness by preventing reads to undeclared input files. Besides -the intrinsic limits of the system, there are a few known correctness issues, -most of which are related to Fileset or the C++ rules, which are both hard -problems. We have long-term efforts to fix these. - -The second goal of the build system is to have high throughput; we are -permanently pushing the boundaries of what can be done within the current -machine allocation for a remote execution service. If the remote execution -service gets overloaded, nobody can get work done. - -Ease of use comes next. Of multiple correct approaches with the same (or -similar) footprint of the remote execution service, we choose the one that is -easier to use. - -Latency denotes the time it takes from starting a build to getting the intended -result, whether that is a test log from a passing or failing test, or an error -message that a `BUILD` file has a typo. 
- -Note that these goals often overlap; latency is as much a function of throughput -of the remote execution service as is correctness relevant for ease of use. - -### Large scale repositories - -The build system needs to operate at the scale of large repositories where large -scale means that it does not fit on a single hard drive, so it is impossible to -do a full checkout on virtually all developer machines. A medium-sized build -will need to read and parse tens of thousands of `BUILD` files, and evaluate -hundreds of thousands of globs. While it is theoretically possible to read all -`BUILD` files on a single machine, we have not yet been able to do so within a -reasonable amount of time and memory. As such, it is critical that `BUILD` files -can be loaded and parsed independently. - -### BUILD-like description language - -In this context, we assume a configuration language that is -roughly similar to `BUILD` files in declaration of library and binary rules -and their interdependencies. `BUILD` files can be read and parsed independently, -and we avoid even looking at source files whenever we can (except for -existence). - -## Historic - -There are differences between Bazel versions that cause challenges and some -of these are outlined in the following sections. - -### Hard separation between loading, analysis, and execution is outdated but still affects the API - -Technically, it is sufficient for a rule to know the input and output files of -an action just before the action is sent to remote execution. However, the -original Bazel code base had a strict separation of loading packages, then -analyzing rules using a configuration (command-line flags, essentially), and -only then running any actions. This distinction is still part of the rules API -today, even though the core of Bazel no longer requires it (more details below). - -That means that the rules API requires a declarative description of the rule -interface (what attributes it has, types of attributes). There are some -exceptions where the API allows custom code to run during the loading phase to -compute implicit names of output files and implicit values of attributes. For -example, a java_library rule named 'foo' implicitly generates an output named -'libfoo.jar', which can be referenced from other rules in the build graph. - -Furthermore, the analysis of a rule cannot read any source files or inspect the -output of an action; instead, it needs to generate a partial directed bipartite -graph of build steps and output file names that is only determined from the rule -itself and its dependencies. - -## Intrinsic - -There are some intrinsic properties that make writing rules challenging and -some of the most common ones are described in the following sections. - -### Remote execution and caching are hard - -Remote execution and caching improve build times in large repositories by -roughly two orders of magnitude compared to running the build on a single -machine. However, the scale at which it needs to perform is staggering: Google's -remote execution service is designed to handle a huge number of requests per -second, and the protocol carefully avoids unnecessary roundtrips as well as -unnecessary work on the service side. - -At this time, the protocol requires that the build system knows all inputs to a -given action ahead of time; the build system then computes a unique action -fingerprint, and asks the scheduler for a cache hit. 
If a cache hit is found,
-the scheduler replies with the digests of the output files; the files themselves
-are addressed by digest later on. However, this imposes restrictions on the
-Bazel rules, which need to declare all input files ahead of time.
-
-### Using change information for correct and fast incremental builds requires unusual coding patterns
-
-Above, we argued that in order to be correct, Bazel needs to know all the input
-files that go into a build step in order to detect whether that build step is
-still up-to-date. The same is true for package loading and rule analysis, and we
-have designed [Skyframe](/reference/skyframe) to handle this
-in general. Skyframe is a graph library and evaluation framework that takes a
-goal node (such as 'build //foo with these options'), and breaks it down into
-its constituent parts, which are then evaluated and combined to yield this
-result. As part of this process, Skyframe reads packages, analyzes rules, and
-executes actions.
-
-At each node, Skyframe tracks exactly which nodes any given node used to compute
-its own output, all the way from the goal node down to the input files (which
-are also Skyframe nodes). Having this graph explicitly represented in memory
-allows the build system to identify exactly which nodes are affected by a given
-change to an input file (including creation or deletion of an input file), doing
-the minimal amount of work to restore the output tree to its intended state.
-
-As part of this, each node performs a dependency discovery process. Each
-node can declare dependencies, and then use the contents of those dependencies
-to declare even further dependencies. In principle, this maps well to a
-thread-per-node model. However, medium-sized builds contain hundreds of
-thousands of Skyframe nodes, which isn't easily possible with current Java
-technology (and for historical reasons, we're currently tied to using Java, so
-no lightweight threads and no continuations).
-
-Instead, Bazel uses a fixed-size thread pool. However, that means that if a node
-declares a dependency that isn't available yet, we may have to abort that
-evaluation and restart it (possibly in another thread), when the dependency is
-available. This, in turn, means that nodes should not do this excessively; a
-node that declares N dependencies serially can potentially be restarted N times,
-costing O(N^2) time. Instead, we aim for up-front bulk declaration of
-dependencies, which sometimes requires reorganizing the code, or even splitting
-a node into multiple nodes to limit the number of restarts.
-
-Note that this technology isn't currently available in the rules API; instead,
-the rules API is still defined using the legacy concepts of loading, analysis,
-and execution phases. However, a fundamental restriction is that all accesses to
-other nodes have to go through the framework so that it can track the
-corresponding dependencies. Regardless of the language in which the build system
-is implemented or in which the rules are written (they don't have to be the
-same), rule authors must not use standard libraries or patterns that bypass
-Skyframe. For Java, that means avoiding java.io.File as well as any form of
-reflection, and any library that does either. Libraries that support dependency
-injection of these low-level interfaces still need to be set up correctly for
-Skyframe.
-
-This strongly suggests not exposing rule authors to a full language runtime
-in the first place.
The danger of accidental use of such APIs is just too big -
-several Bazel bugs in the past were caused by rules using unsafe APIs, even
-though the rules were written by the Bazel team or other domain experts.
-
-### Avoiding quadratic time and memory consumption is hard
-
-To make matters worse, apart from the requirements imposed by Skyframe, the
-historical constraints of using Java, and the outdatedness of the rules API,
-accidentally introducing quadratic time or memory consumption is a fundamental
-problem in any build system based on library and binary rules. There are two
-very common patterns that introduce quadratic memory consumption (and therefore
-quadratic time consumption).
-
-1. Chains of Library Rules
-
-Consider the case of a chain of library rules: A depends on B, which depends on
-C, and so on. Then, we want to compute some property over the transitive closure
-of these rules, such as the Java runtime classpath, or the C++ linker command
-for each library. Naively, we might take a standard list implementation;
-however, this already introduces quadratic memory consumption: the first library
-contains one entry on the classpath, the second two, the third three, and so
-on, for a total of 1+2+3+...+N = O(N^2) entries.
-
-2. Binary Rules Depending on the Same Library Rules
-
-Consider the case where a set of binaries depends on the same library
-rules — such as when you have a number of test rules that test the same
-library code. Let's say out of N rules, half the rules are binary rules, and
-the other half library rules. Now consider that each binary makes a copy of
-some property computed over the transitive closure of library rules, such as
-the Java runtime classpath, or the C++ linker command line. For example, it
-could expand the command line string representation of the C++ link action. N/2
-copies of N/2 elements is O(N^2) memory.
-
-#### Custom collections classes to avoid quadratic complexity
-
-Bazel is heavily affected by both of these scenarios, so we introduced a set of
-custom collection classes that effectively compress the information in memory by
-avoiding the copy at each step. Almost all of these data structures have set
-semantics, so we called it
-[depset](/rules/lib/depset)
-(also known as `NestedSet` in the internal implementation). The majority of
-changes to reduce Bazel's memory consumption over the past several years were
-changes to use depsets instead of whatever was previously used.
-
-Unfortunately, usage of depsets does not automatically solve all the issues;
-in particular, even just iterating over a depset in each rule re-introduces
-quadratic time consumption. Internally, `NestedSet` also has some helper methods
-to facilitate interoperability with normal collections classes; unfortunately,
-accidentally passing a NestedSet to one of these methods leads to copying
-behavior, and reintroduces quadratic memory consumption.
diff --git a/8.1.1/rules/deploying.mdx b/8.1.1/rules/deploying.mdx
deleted file mode 100644
index 3fe2c86..0000000
--- a/8.1.1/rules/deploying.mdx
+++ /dev/null
@@ -1,223 +0,0 @@
----
-title: 'Deploying Rules'
----
-
-
-
-This page is for rule writers who are planning to make their rules available
-to others.
-
-We recommend you start a new ruleset from the template repository:
-https://github.com/bazel-contrib/rules-template
-That template follows the recommendations below, includes API documentation
-generation, and sets up a CI/CD pipeline to make it trivial to distribute your
-ruleset.
-
-## Hosting and naming rules
-
-New rules should go into their own GitHub repository under your organization.
-Start a thread on [GitHub](https://github.com/bazelbuild/bazel/discussions)
-if you feel like your rules belong in the [bazelbuild](https://github.com/bazelbuild)
-organization.
-
-Repository names for Bazel rules are standardized on the following format:
-`$ORGANIZATION/rules_$NAME`.
-See [examples on GitHub](https://github.com/search?q=rules+bazel&type=Repositories).
-For consistency, you should follow this same format when publishing your Bazel rules.
-
-Make sure to use a descriptive GitHub repository description and `README.md`
-title, example:
-
-* Repository name: `bazelbuild/rules_go`
-* Repository description: *Go rules for Bazel*
-* Repository tags: `golang`, `bazel`
-* `README.md` header: *Go rules for [Bazel](https://bazel.build)*
-(note the link to https://bazel.build which will guide users who are unfamiliar
-with Bazel to the right place)
-
-Rules can be grouped either by language (such as Scala), runtime platform
-(such as Android), or framework (such as Spring).
-
-## Repository content
-
-Every rule repository should have a certain layout so that users can quickly
-understand new rules.
-
-For example, when writing new rules for the (make-believe)
-`mockascript` language, the rule repository would have the following structure:
-
-```
-/
-  LICENSE
-  README
-  MODULE.bazel
-  mockascript/
-    constraints/
-      BUILD
-    runfiles/
-      BUILD
-      runfiles.mocs
-    BUILD
-    defs.bzl
-  tests/
-    BUILD
-    some_test.sh
-    another_test.py
-  examples/
-    BUILD
-    bin.mocs
-    lib.mocs
-    test.mocs
-```
-
-### MODULE.bazel
-
-In the project's `MODULE.bazel`, you should define the name that users will use
-to reference your rules. If your rules belong to the
-[bazelbuild](https://github.com/bazelbuild) organization, you must use
-`rules_<lang>` (such as `rules_mockascript`). Otherwise, you should name your
-repository `<org>_rules_<lang>` (such as `build_stack_rules_proto`). Please
-start a thread on [GitHub](https://github.com/bazelbuild/bazel/discussions)
-if you feel like your rules should follow the convention for rules in the
-[bazelbuild](https://github.com/bazelbuild) organization.
-
-In the following sections, assume the repository belongs to the
-[bazelbuild](https://github.com/bazelbuild) organization.
-
-```
-module(name = "rules_mockascript")
-```
-
-### README
-
-At the top level, there should be a `README` that contains a brief description
-of your ruleset and the API users should expect.
-
-### Rules
-
-Oftentimes there will be multiple rules provided by your repository. Create a
-directory named by the language and provide an entry point - a `defs.bzl` file
-exporting all rules (also include a `BUILD` file so the directory is a package).
-For `rules_mockascript` that means there will be a directory named
-`mockascript`, and a `BUILD` file and a `defs.bzl` file inside:
-
-```
-/
-  mockascript/
-    BUILD
-    defs.bzl
-```
-
-### Constraints
-
-If your rule defines
-[toolchain](/extending/toolchains) rules,
-it's possible that you'll need to define custom `constraint_setting`s and/or
-`constraint_value`s. Put these into a `//<lang>/constraints` package. Your
-directory structure will look like this:
-
-```
-/
-  mockascript/
-    constraints/
-      BUILD
-    BUILD
-    defs.bzl
-```
-
-Please read
-[github.com/bazelbuild/platforms](https://github.com/bazelbuild/platforms)
-for best practices, and to see what constraints are already present, and
-consider contributing your constraints there if they are language independent.
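For illustration, such a constraints package might contain definitions like the following (a sketch only; the `mockascript_version` setting and its values are made up):

```python
# mockascript/constraints/BUILD (hypothetical contents)
constraint_setting(name = "mockascript_version")

constraint_value(
    name = "v1",
    constraint_setting = ":mockascript_version",
)

constraint_value(
    name = "v2",
    constraint_setting = ":mockascript_version",
)
```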
-Be mindful of introducing custom constraints: all users of your rules will
-use them to perform platform-specific logic in their `BUILD` files (for example,
-using [selects](/reference/be/functions#select)).
-With custom constraints, you define a language that the whole Bazel ecosystem
-will speak.
-
-### Runfiles library
-
-If your rule provides a standard library for accessing runfiles, it should be
-in the form of a library target located at `//<lang>/runfiles` (an abbreviation
-of `//<lang>/runfiles:runfiles`). User targets that need to access their data
-dependencies will typically add this target to their `deps` attribute.
-
-### Repository rules
-
-#### Dependencies
-
-Your rules might have external dependencies, which you'll need to specify in
-your MODULE.bazel file.
-
-#### Registering toolchains
-
-Your rules might also register toolchains, which you can also specify in the
-MODULE.bazel file.
-
-Note that in order to resolve toolchains in the analysis phase Bazel needs to
-analyze all `toolchain` targets that are registered. Bazel will not need to
-analyze all targets referenced by the `toolchain.toolchain` attribute. If in
-order to register toolchains you need to perform complex computation in the
-repository, consider splitting the repository with `toolchain` targets from the
-repository with `<lang>_toolchain` targets. The former will always be fetched,
-and the latter will only be fetched when the user actually needs to build
-`<lang>` code.
-
-
-#### Release snippet
-
-In your release announcement provide a snippet that your users can copy-paste
-into their `MODULE.bazel` file. This snippet in general will look as follows:
-
-```
-bazel_dep(name = "rules_<lang>", version = "<version>")
-```
-
-
-### Tests
-
-There should be tests that verify that the rules are working as expected. This
-can either be in the standard location for the language the rules are for or a
-`tests/` directory at the top level.
-
-### Examples (optional)
-
-It is useful for users to have an `examples/` directory that shows a couple
-of basic ways that the rules can be used.
-
-## CI/CD
-
-Many rulesets use GitHub Actions. See the configuration used in the [rules-template](https://github.com/bazel-contrib/rules-template/tree/main/.github/workflows) repo, which is simplified using a "reusable workflow" hosted in the bazel-contrib
-org. `ci.yaml` runs tests on each PR and `main` commit, and `release.yaml` runs anytime you push a tag to the repository.
-See comments in the rules-template repo for more information.
-
-If your repository is under the [bazelbuild organization](https://github.com/bazelbuild),
-you can [ask to add](https://github.com/bazelbuild/continuous-integration/issues/new?template=adding-your-project-to-bazel-ci.md&title=Request+to+add+new+project+%5BPROJECT_NAME%5D&labels=new-project)
-it to [ci.bazel.build](http://ci.bazel.build).
-
-## Documentation
-
-See the [Stardoc documentation](https://github.com/bazelbuild/stardoc) for
-instructions on how to comment your rules so that documentation can be generated
-automatically.
-
-The [rules-template docs/ folder](https://github.com/bazel-contrib/rules-template/tree/main/docs)
-shows a simple way to ensure the Markdown content in the `docs/` folder is always up-to-date
-as Starlark files are updated.
-
-## FAQs
-
-### Why can't we add our rule to the main Bazel GitHub repository?
-
-We want to decouple rules from Bazel releases as much as possible. It's clearer
-who owns individual rules, reducing the load on Bazel developers.
For our users,
-decoupling makes it easier to modify, upgrade, downgrade, and replace rules.
-Contributing to rules can be lighter weight than contributing to Bazel -
-depending on the rules - and can include full submit access to the corresponding
-GitHub repository. Getting submit access to Bazel itself is a much more involved
-process.
-
-The downside is a more complicated one-time installation process for our users:
-they have to add a dependency on your ruleset in their `MODULE.bazel` file.
-
-We used to have all of the rules in the Bazel repository (under
-`//tools/build_rules` or `//tools/build_defs`). We still have a couple of rules
-there, but we are working on moving the remaining rules out.
diff --git a/8.1.1/rules/errors/read-only-variable.mdx b/8.1.1/rules/errors/read-only-variable.mdx
deleted file mode 100644
index 2bfde65..0000000
--- a/8.1.1/rules/errors/read-only-variable.mdx
+++ /dev/null
@@ -1,30 +0,0 @@
----
-title: 'Error: Variable x is read only'
----
-
-
-
-A global variable cannot be reassigned. It will always point to the same object.
-However, its content might change, if the value is mutable (for example, the
-content of a list). Local variables don't have this restriction.
-
-```python
-a = [1, 2]
-
-a[1] = 3
-
-b = 3
-
-b = 4 # forbidden
-```
-
-`ERROR: /path/ext.bzl:7:1: Variable b is read only`
-
-You will get a similar error if you try to redefine a function (function
-overloading is not supported), for example:
-
-```python
-def foo(x): return x + 1
-
-def foo(x, y): return x + y # forbidden
-```
diff --git a/8.1.1/rules/faq.mdx b/8.1.1/rules/faq.mdx
deleted file mode 100644
index 5321f0b..0000000
--- a/8.1.1/rules/faq.mdx
+++ /dev/null
@@ -1,80 +0,0 @@
----
-title: 'Frequently Asked Questions'
----
-
-
-
-These are some common issues and questions with writing extensions.
-
-## Why is my file not produced / my action never executed?
-
-Bazel only executes the actions needed to produce the *requested* output files.
-
-* If the file you want has a label, you can request it directly:
-  `bazel build //pkg:myfile.txt`
-
-* If the file is in an output group of the target, you may need to specify that
-  output group on the command line:
-  `bazel build //pkg:mytarget --output_groups=foo`
-
-* If you want the file to be built automatically whenever your target is
-  mentioned on the command line, add it to your rule's default outputs by
-  returning a [`DefaultInfo`](lib/globals#DefaultInfo) provider.
-
-See the [Rules page](/extending/rules#requesting-output-files) for more information.
-
-## Why is my implementation function not executed?
-
-Bazel analyzes only the targets that are requested for the build. You should
-either name the target on the command line, or name something that depends on
-the target.
-
-## A file is missing when my action or binary is executed
-
-Make sure that 1) the file has been registered as an input to the action or
-binary, and 2) the script or tool being executed is accessing the file using the
-correct path.
-
-For actions, you declare inputs by passing them to the `ctx.actions.*` function
-that creates the action. The proper path for the file can be obtained using
-[`File.path`](lib/File#path).
-
-For binaries (the executable outputs run by a `bazel run` or `bazel test`
-command), you declare inputs by including them in the
-[runfiles](/extending/rules#runfiles). Instead of using the `path` field, use
-[`File.short_path`](lib/File#short_path), which is the file's path relative to
-the runfiles directory in which the binary executes.
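A minimal sketch covering both cases (the rule and attribute names below are hypothetical, not part of the Bazel API):

```python
def _example_impl(ctx):
    out = ctx.actions.declare_file(ctx.label.name + ".out")
    ctx.actions.run(
        outputs = [out],
        # Listing the file here is what registers it as an action input.
        inputs = [ctx.file.data],
        executable = ctx.executable.tool,
        # Actions address inputs by File.path.
        arguments = [ctx.file.data.path, out.path],
    )
    return [DefaultInfo(
        files = depset([out]),
        # Binaries started via `bazel run` locate the file by File.short_path.
        runfiles = ctx.runfiles(files = [ctx.file.data]),
    )]

example = rule(
    implementation = _example_impl,
    attrs = {
        "data": attr.label(allow_single_file = True),
        "tool": attr.label(executable = True, cfg = "exec"),
    },
)
```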
- -## How can I control which files are built by `bazel build //pkg:mytarget`? - -Use the [`DefaultInfo`](lib/globals#DefaultInfo) provider to -[set the default outputs](/extending/rules#requesting-output-files). - -## How can I run a program or do file I/O as part of my build? - -A tool can be declared as a target, just like any other part of your build, and -run during the execution phase to help build other targets. To create an action -that runs a tool, use [`ctx.actions.run`](lib/actions#run) and pass in the -tool as the `executable` parameter. - -During the loading and analysis phases, a tool *cannot* run, nor can you perform -file I/O. This means that tools and file contents (except the contents of BUILD -and .bzl files) cannot affect how the target and action graphs get created. - -## What if I need to access the same structured data both before and during the execution phase? - -You can format the structured data as a .bzl file. You can `load()` the file to -access it during the loading and analysis phases. You can pass it as an input or -runfile to actions and executables that need it during the execution phase. - -## How should I document Starlark code? - -For rules and rule attributes, you can pass a docstring literal (possibly -triple-quoted) to the `doc` parameter of `rule` or `attr.*()`. For helper -functions and macros, use a triple-quoted docstring literal following the format -given [here](https://github.com/bazelbuild/buildtools/blob/master/WARNINGS.md#function-docstring). -Rule implementation functions generally do not need their own docstring. - -Using string literals in the expected places makes it easier for automated -tooling to extract documentation. Feel free to use standard non-string comments -wherever it may help the reader of your code. diff --git a/8.1.1/rules/index.mdx b/8.1.1/rules/index.mdx deleted file mode 100644 index 2a6c3eb..0000000 --- a/8.1.1/rules/index.mdx +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: 'Rules' ---- - - - -The Bazel ecosystem has a growing and evolving set of rules to support popular -languages and packages. Much of Bazel's strength comes from the ability to -[define new rules](/extending/concepts) that can be used by others. - -This page describes the recommended, native, and non-native Bazel rules. - -## Recommended rules - -Here is a selection of recommended rules: - -* [Android](/docs/bazel-and-android) -* [C / C++](/docs/bazel-and-cpp) -* [Docker/OCI](https://github.com/bazel-contrib/rules_oci) -* [Go](https://github.com/bazelbuild/rules_go) -* [Haskell](https://github.com/tweag/rules_haskell) -* [Java](/docs/bazel-and-java) -* [JavaScript / NodeJS](https://github.com/bazelbuild/rules_nodejs) -* [Maven dependency management](https://github.com/bazelbuild/rules_jvm_external) -* [Objective-C](/docs/bazel-and-apple) -* [Package building](https://github.com/bazelbuild/rules_pkg) -* [Protocol Buffers](https://github.com/bazelbuild/rules_proto#protobuf-rules-for-bazel) -* [Python](https://github.com/bazelbuild/rules_python) -* [Rust](https://github.com/bazelbuild/rules_rust) -* [Scala](https://github.com/bazelbuild/rules_scala) -* [Shell](/reference/be/shell) -* [Webtesting](https://github.com/bazelbuild/rules_webtesting) (Webdriver) - -The repository [Skylib](https://github.com/bazelbuild/bazel-skylib) contains -additional functions that can be useful when writing new rules and new -macros. - -The rules above were reviewed and follow our -[requirements for recommended rules](/community/recommended-rules). 
-Contact the respective rule set's maintainers regarding issues and feature
-requests.
-
-To find more Bazel rules, use a search engine, take a look at
-[awesomebazel.com](https://awesomebazel.com/), or search on
-[GitHub](https://github.com/search?o=desc&q=bazel+rules&s=stars&type=Repositories).
-
-## Native rules that do not apply to a specific programming language
-
-Native rules are shipped with the Bazel binary and are always available in
-BUILD files without a `load` statement.
-
-* Extra actions
-  - [`extra_action`](/reference/be/extra-actions#extra_action)
-  - [`action_listener`](/reference/be/extra-actions#action_listener)
-* General
-  - [`filegroup`](/reference/be/general#filegroup)
-  - [`genquery`](/reference/be/general#genquery)
-  - [`test_suite`](/reference/be/general#test_suite)
-  - [`alias`](/reference/be/general#alias)
-  - [`config_setting`](/reference/be/general#config_setting)
-  - [`genrule`](/reference/be/general#genrule)
-* Platform
-  - [`constraint_setting`](/reference/be/platforms-and-toolchains#constraint_setting)
-  - [`constraint_value`](/reference/be/platforms-and-toolchains#constraint_value)
-  - [`platform`](/reference/be/platforms-and-toolchains#platform)
-  - [`toolchain`](/reference/be/platforms-and-toolchains#toolchain)
-  - [`toolchain_type`](/reference/be/platforms-and-toolchains#toolchain_type)
-* Workspace
-  - [`bind`](/reference/be/workspace#bind)
-  - [`local_repository`](/reference/be/workspace#local_repository)
-  - [`new_local_repository`](/reference/be/workspace#new_local_repository)
-  - [`xcode_config`](/reference/be/objective-c#xcode_config)
-  - [`xcode_version`](/reference/be/objective-c#xcode_version)
-
-## Embedded non-native rules
-
-Bazel also embeds additional rules written in [Starlark](/rules/language). Those can be loaded from
-the `@bazel_tools` built-in external repository.
-
-* Repository rules
-  - [`git_repository`](/rules/lib/repo/git#git_repository)
-  - [`http_archive`](/rules/lib/repo/http#http_archive)
-  - [`http_file`](/rules/lib/repo/http#http_file)
-  - [`http_jar`](/rules/lib/repo/http#http_jar)
-  - [Utility functions on patching](/rules/lib/repo/utils)
diff --git a/8.1.1/rules/legacy-macro-tutorial.mdx b/8.1.1/rules/legacy-macro-tutorial.mdx
deleted file mode 100644
index 44cdcfb..0000000
--- a/8.1.1/rules/legacy-macro-tutorial.mdx
+++ /dev/null
@@ -1,98 +0,0 @@
----
-title: 'Creating a Legacy Macro'
----
-
-
-
-IMPORTANT: This tutorial is for [*legacy macros*](/extending/legacy-macros). If
-you only need to support Bazel 8 or newer, we recommend using [symbolic
-macros](/extending/macros) instead; take a look at [Creating a Symbolic
-Macro](../macro-tutorial).
-
-Imagine that you need to run a tool as part of your build. For example, you
-may want to generate or preprocess a source file, or compress a binary. In this
-tutorial, you are going to create a legacy macro that resizes an image.
-
-Macros are suitable for simple tasks. If you want to do anything more
-complicated, for example add support for a new programming language, consider
-creating a [rule](/extending/rules). Rules give you more control and flexibility.
-
-The easiest way to create a macro that resizes an image is to use a `genrule`:
-
-```starlark
-genrule(
-    name = "logo_miniature",
-    srcs = ["logo.png"],
-    outs = ["small_logo.png"],
-    cmd = "convert $< -resize 100x100 $@",
-)
-
-cc_binary(
-    name = "my_app",
-    srcs = ["my_app.cc"],
-    data = [":logo_miniature"],
-)
-```
-
-If you need to resize more images, you may want to reuse the code.
To do that, -define a function in a separate `.bzl` file, and call the file `miniature.bzl`: - -```starlark -def miniature(name, src, size = "100x100", **kwargs): - """Create a miniature of the src image. - - The generated file is prefixed with 'small_'. - """ - native.genrule( - name = name, - srcs = [src], - # Note that the line below will fail if `src` is not a filename string - outs = ["small_" + src], - cmd = "convert $< -resize " + size + " $@", - **kwargs - ) -``` - -A few remarks: - - * By convention, legacy macros have a `name` argument, just like rules. - - * To document the behavior of a legacy macro, use a - [docstring](https://www.python.org/dev/peps/pep-0257/) as in Python. - - * To call a `genrule`, or any other native rule, prefix it with `native.`. - - * Use `**kwargs` to forward the extra arguments to the underlying `genrule` - (it works just like in - [Python](https://docs.python.org/3/tutorial/controlflow.html#keyword-arguments)). - This is useful so that a user can set standard attributes like - `visibility` or `tags`. - -Now, use the macro from the `BUILD` file: - -```starlark -load("//path/to:miniature.bzl", "miniature") - -miniature( - name = "logo_miniature", - src = "image.png", -) - -cc_binary( - name = "my_app", - srcs = ["my_app.cc"], - data = [":logo_miniature"], -) -``` - -And finally, a **warning note**: the macro assumes that `src` is a filename -string (otherwise, `outs = ["small_" + src]` will fail). So `src = "image.png"` -works; but what happens if the `BUILD` file instead used `src = -"//other/package:image.png"`, or even `src = select(...)`? - -You should make sure to declare such assumptions in your macro's documentation. -Unfortunately, legacy macros, especially large ones, tend to be fragile because -it can be hard to notice and document all such assumptions in your code – and, -of course, some users of the macro won't read the documentation. We recommend, -if possible, instead using [symbolic macros](/extending/macros), which have -built-in checks on attribute types. diff --git a/8.1.1/rules/macro-tutorial.mdx b/8.1.1/rules/macro-tutorial.mdx deleted file mode 100644 index 93825aa..0000000 --- a/8.1.1/rules/macro-tutorial.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: 'Creating a Symbolic Macro' --- - - - -IMPORTANT: This tutorial is for [*symbolic macros*](/extending/macros) – the new -macro system introduced in Bazel 8. If you need to support older Bazel versions, -you will want to write a [legacy macro](/extending/legacy-macros) instead; take -a look at [Creating a Legacy Macro](../legacy-macro-tutorial). - -Imagine that you need to run a tool as part of your build. For example, you -may want to generate or preprocess a source file, or compress a binary. In this -tutorial, you are going to create a symbolic macro that resizes an image. - -Macros are suitable for simple tasks. If you want to do anything more -complicated, for example add support for a new programming language, consider -creating a [rule](/extending/rules). Rules give you more control and flexibility. - -The easiest way to create a macro that resizes an image is to use a `genrule`: - -```starlark -genrule( - name = "logo_miniature", - srcs = ["logo.png"], - outs = ["small_logo.png"], - cmd = "convert $< -resize 100x100 $@", -) - -cc_binary( - name = "my_app", - srcs = ["my_app.cc"], - data = [":logo_miniature"], -) -``` - -If you need to resize more images, you may want to reuse the code. 
To do that, -define an *implementation function* and a *macro declaration* in a separate -`.bzl` file, and call the file `miniature.bzl`: - -```starlark -# Implementation function -def _miniature_impl(name, visibility, src, size, **kwargs): - native.genrule( - name = name, - visibility = visibility, - srcs = [src], - outs = [name + "_small_" + src.name], - cmd = "convert $< -resize " + size + " $@", - **kwargs, - ) - -# Macro declaration -miniature = macro( - doc = """Create a miniature of the src image. - - The generated file name will be prefixed with `name + "_small_"`. - """, - implementation = _miniature_impl, - # Inherit most of genrule's attributes (such as tags and testonly) - inherit_attrs = native.genrule, - attrs = { - "src": attr.label( - doc = "Image file", - allow_single_file = True, - # Non-configurable because our genrule's output filename is - # suffixed with src's name. (We want to suffix the output file with - # src's name because some tools that operate on image files expect - # the files to have the right file extension.) - configurable = False, - ), - "size": attr.string( - doc = "Output size in WxH format", - default = "100x100", - ), - # Do not allow callers of miniature() to set srcs, cmd, or outs - - # _miniature_impl overrides their values when calling native.genrule() - "srcs": None, - "cmd": None, - "outs": None, - }, -) -``` - -A few remarks: - - * Symbolic macro implementation functions must have `name` and `visibility` - parameters. They should be used for the macro's main target. - - * To document the behavior of a symbolic macro, use `doc` parameters for - `macro()` and its attributes. - - * To call a `genrule`, or any other native rule, use `native.`. - - * Use `**kwargs` to forward the extra inherited arguments to the underlying - `genrule` (it works just like in - [Python](https://docs.python.org/3/tutorial/controlflow.html#keyword-arguments)). - This is useful so that a user can set standard attributes like `tags` or - `testonly`. - -Now, use the macro from the `BUILD` file: - -```starlark -load("//path/to:miniature.bzl", "miniature") - -miniature( - name = "logo_miniature", - src = "image.png", -) - -cc_binary( - name = "my_app", - srcs = ["my_app.cc"], - data = [":logo_miniature"], -) -``` diff --git a/8.1.1/rules/performance.mdx b/8.1.1/rules/performance.mdx deleted file mode 100644 index 5870c0d..0000000 --- a/8.1.1/rules/performance.mdx +++ /dev/null @@ -1,302 +0,0 @@ ---- -title: 'Optimizing Performance' --- - - - -When writing rules, the most common performance pitfall is to traverse or copy -data that is accumulated from dependencies. When aggregated over the whole -build, these operations can easily take O(N^2) time or space. To avoid this, it -is crucial to understand how to use depsets effectively. - -This can be hard to get right, so Bazel also provides a memory profiler that -assists you in finding spots where you might have made a mistake. Be warned: -The cost of writing an inefficient rule may not be evident until it is in -widespread use. - -## Use depsets - -Whenever you are rolling up information from rule dependencies you should use -[depsets](lib/depset). Only use plain lists or dicts to publish information -local to the current rule. - -A depset represents information as a nested graph which enables sharing. - -Consider the following graph: - -``` -C -> B -> A -D ---^ -``` - -Each node publishes a single string. 
With depsets the data looks like this: - -``` -a = depset(direct=['a']) -b = depset(direct=['b'], transitive=[a]) -c = depset(direct=['c'], transitive=[b]) -d = depset(direct=['d'], transitive=[b]) -``` - -Note that each item is only mentioned once. With lists you would get this: - -``` -a = ['a'] -b = ['b', 'a'] -c = ['c', 'b', 'a'] -d = ['d', 'b', 'a'] -``` - -Note that in this case `'a'` is mentioned four times! With larger graphs this -problem will only get worse. - -Here is an example of a rule implementation that uses depsets correctly to -publish transitive information. Note that it is OK to publish rule-local -information using lists if you want since this is not O(N^2). - -``` -MyProvider = provider() - -def _impl(ctx): - my_things = ctx.attr.things - all_things = depset( - direct=my_things, - transitive=[dep[MyProvider].all_things for dep in ctx.attr.deps] - ) - ... - return [MyProvider( - my_things=my_things, # OK, a flat list of rule-local things only - all_things=all_things, # OK, a depset containing dependencies - )] -``` - -See the [depset overview](/extending/depsets) page for more information. - -### Avoid calling `depset.to_list()` - -You can coerce a depset to a flat list using -[`to_list()`](lib/depset#to_list), but doing so usually results in O(N^2) -cost. If at all possible, avoid any flattening of depsets except for debugging -purposes. - -A common misconception is that you can freely flatten depsets if you only do it -at top-level targets, such as an `_binary` rule, since then the cost is not -accumulated over each level of the build graph. But this is *still* O(N^2) when -you build a set of targets with overlapping dependencies. This happens when -building your tests `//foo/tests/...`, or when importing an IDE project. - -### Reduce the number of calls to `depset` - -Calling `depset` inside a loop is often a mistake. It can lead to depsets with -very deep nesting, which perform poorly. For example: - -```python -x = depset() -for i in inputs: - # Do not do that. - x = depset(transitive = [x, i.deps]) -``` - -This code can be replaced easily. First, collect the transitive depsets and -merge them all at once: - -```python -transitive = [] - -for i in inputs: - transitive.append(i.deps) - -x = depset(transitive = transitive) -``` - -This can sometimes be reduced using a list comprehension: - -```python -x = depset(transitive = [i.deps for i in inputs]) -``` - -## Use ctx.actions.args() for command lines - -When building command lines you should use [ctx.actions.args()](lib/Args). -This defers expansion of any depsets to the execution phase. - -Apart from being strictly faster, this will reduce the memory consumption of -your rules -- sometimes by 90% or more. - -Here are some tricks: - -* Pass depsets and lists directly as arguments, instead of flattening them -yourself. They will get expanded by `ctx.actions.args()` for you. -If you need any transformations on the depset contents, look at -[ctx.actions.args#add](lib/Args#add) to see if anything fits the bill. - -* Are you passing `File#path` as arguments? No need. Any -[File](lib/File) is automatically turned into its -[path](lib/File#path), deferred to expansion time. - -* Avoid constructing strings by concatenating them together. -The best string argument is a constant as its memory will be shared between -all instances of your rule. 
- -* If the args are too long for the command line, a `ctx.actions.args()` object -can be conditionally or unconditionally written to a param file using -[`ctx.actions.args#use_param_file`](lib/Args#use_param_file). This is -done behind the scenes when the action is executed. If you need to explicitly -control the params file you can write it manually using -[`ctx.actions.write`](lib/actions#write). - -Example: - -``` -def _impl(ctx): - ... - args = ctx.actions.args() - file = ctx.actions.declare_file(...) - files = depset(...) - - # Bad, constructs a full string "--foo=<file path>" for each rule instance - args.add("--foo=" + file.path) - - # Good, shares "--foo" among all rule instances, and defers file.path to later - # It will however pass ["--foo", <file path>] to the action command line, - # instead of ["--foo=<file path>"] - args.add("--foo", file) - - # Use format if you prefer ["--foo=<file path>"] to ["--foo", <file path>] - args.add(file, format = "--foo=%s") - - # Bad, makes a giant string of a whole depset - args.add(" ".join(["-I%s" % file.short_path for file in files])) - - # Good, only stores a reference to the depset - args.add_all(files, format_each="-I%s", map_each=_to_short_path) - -# Function passed to map_each above -def _to_short_path(f): - return f.short_path -``` - -## Transitive action inputs should be depsets - -When building an action using [ctx.actions.run](lib/actions#run), do not -forget that the `inputs` field accepts a depset. Use this whenever inputs are -collected from dependencies transitively. - -``` -inputs = depset(...) -ctx.actions.run( - inputs = inputs, # Do *not* turn inputs into a list - ... -) -``` - -## Hanging - -If Bazel appears to be hung, you can hit Ctrl-\ or send -Bazel a `SIGQUIT` signal (`kill -3 $(bazel info server_pid)`) to get a thread -dump in the file `$(bazel info output_base)/server/jvm.out`. - -Since you may not be able to run `bazel info` if bazel is hung, the -`output_base` directory is usually the parent of the `bazel-<workspace>` -symlink in your workspace directory. - -## Performance profiling - -The [JSON trace profile](/advanced/performance/json-trace-profile) can be very -useful to quickly understand what Bazel spent time on during the invocation. - -The [`--experimental_command_profile`](https://bazel.build/reference/command-line-reference#flag--experimental_command_profile) -flag may be used to capture Java Flight Recorder profiles of various kinds -(cpu time, wall time, memory allocations and lock contention). - -The [`--starlark_cpu_profile`](https://bazel.build/reference/command-line-reference#flag--starlark_cpu_profile) -flag may be used to write a pprof profile of CPU usage by all Starlark threads. - -## Memory profiling - -Bazel comes with a built-in memory profiler that can help you check your rule’s -memory use. If there is a problem you can dump the heap to find the -exact line of code that is causing the problem. - -### Enabling memory tracking - -You must pass these two startup flags to *every* Bazel invocation: - - ``` - STARTUP_FLAGS=\ - --host_jvm_args=-javaagent:<path to java-allocation-instrumenter-3.3.4.jar> \ - --host_jvm_args=-DRULE_MEMORY_TRACKER=1 - ``` -Note: You can download the allocation instrumenter jar file from [Maven Central -Repository][allocation-instrumenter-link]. - -[allocation-instrumenter-link]: https://repo1.maven.org/maven2/com/google/code/java-allocation-instrumenter/java-allocation-instrumenter/3.3.4 - -These start the server in memory tracking mode. If you forget these for even -one Bazel invocation, the server will restart and you will have to start over. 
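-
-To avoid retyping these flags (and accidentally restarting the server without
-them), a sketch of an alternative: put equivalent `startup` lines in your
-`.bazelrc`, where the jar path is an assumption to replace with wherever you
-downloaded the allocation instrumenter:
-
-```
-startup --host_jvm_args=-javaagent:/path/to/java-allocation-instrumenter-3.3.4.jar
-startup --host_jvm_args=-DRULE_MEMORY_TRACKER=1
-```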
- -### Using the Memory Tracker - -As an example, look at the target `foo` and see what it does. To only -run the analysis and not run the build execution phase, add the -`--nobuild` flag. - -``` -$ bazel $(STARTUP_FLAGS) build --nobuild //foo:foo -``` - -Next, see how much memory the whole Bazel instance consumes: - -``` -$ bazel $(STARTUP_FLAGS) info used-heap-size-after-gc -> 2594MB -``` - -Break it down by rule class by using `bazel dump --rules`: - -``` -$ bazel $(STARTUP_FLAGS) dump --rules -> - -RULE COUNT ACTIONS BYTES EACH -genrule 33,762 33,801 291,538,824 8,635 -config_setting 25,374 0 24,897,336 981 -filegroup 25,369 25,369 97,496,272 3,843 -cc_library 5,372 73,235 182,214,456 33,919 -proto_library 4,140 110,409 186,776,864 45,115 -android_library 2,621 36,921 218,504,848 83,366 -java_library 2,371 12,459 38,841,000 16,381 -_gen_source 719 2,157 9,195,312 12,789 -_check_proto_library_deps 719 668 1,835,288 2,552 -... (more output) -``` - -Look at where the memory is going by producing a `pprof` file -using `bazel dump --skylark_memory`: - -``` -$ bazel $(STARTUP_FLAGS) dump --skylark_memory=$HOME/prof.gz -> Dumping Starlark heap to: /usr/local/google/home/$USER/prof.gz -``` - -Use the `pprof` tool to investigate the heap. A good starting point is -getting a flame graph by using `pprof -flame $HOME/prof.gz`. - -Get `pprof` from [https://github.com/google/pprof](https://github.com/google/pprof). - -Get a text dump of the hottest call sites annotated with lines: - -``` -$ pprof -text -lines $HOME/prof.gz -> - flat flat% sum% cum cum% - 146.11MB 19.64% 19.64% 146.11MB 19.64% android_library :-1 - 113.02MB 15.19% 34.83% 113.02MB 15.19% genrule :-1 - 74.11MB 9.96% 44.80% 74.11MB 9.96% glob :-1 - 55.98MB 7.53% 52.32% 55.98MB 7.53% filegroup :-1 - 53.44MB 7.18% 59.51% 53.44MB 7.18% sh_test :-1 - 26.55MB 3.57% 63.07% 26.55MB 3.57% _generate_foo_files /foo/tc/tc.bzl:491 - 26.01MB 3.50% 66.57% 26.01MB 3.50% _build_foo_impl /foo/build_test.bzl:78 - 22.01MB 2.96% 69.53% 22.01MB 2.96% _build_foo_impl /foo/build_test.bzl:73 - ... (more output) -``` diff --git a/8.1.1/rules/rules-tutorial.mdx b/8.1.1/rules/rules-tutorial.mdx deleted file mode 100644 index 4c6698e..0000000 --- a/8.1.1/rules/rules-tutorial.mdx +++ /dev/null @@ -1,367 +0,0 @@ ---- -title: 'Rules Tutorial' ---- - - - - -[Starlark](https://github.com/bazelbuild/starlark) is a Python-like -configuration language originally developed for use in Bazel and since adopted -by other tools. Bazel's `BUILD` and `.bzl` files are written in a dialect of -Starlark properly known as the "Build Language", though it is often simply -referred to as "Starlark", especially when emphasizing that a feature is -expressed in the Build Language as opposed to being a built-in or "native" part -of Bazel. Bazel augments the core language with numerous build-related functions -such as `glob`, `genrule`, `java_binary`, and so on. - -See the -[Bazel](/start/) and [Starlark](/extending/concepts) documentation for -more details, and the -[Rules SIG template](https://github.com/bazel-contrib/rules-template) as a -starting point for new rulesets. - -## The empty rule - -To create your first rule, create the file `foo.bzl`: - -```python -def _foo_binary_impl(ctx): - pass - -foo_binary = rule( - implementation = _foo_binary_impl, -) -``` - -When you call the [`rule`](lib/globals#rule) function, you -must define a callback function. The logic will go there, but you -can leave the function empty for now. 
The [`ctx`](lib/ctx) argument -provides information about the target. - -You can load the rule and use it from a `BUILD` file. - -Create a `BUILD` file in the same directory: - -```python -load(":foo.bzl", "foo_binary") - -foo_binary(name = "bin") -``` - -Now, the target can be built: - -``` -$ bazel build bin -INFO: Analyzed target //:bin (2 packages loaded, 17 targets configured). -INFO: Found 1 target... -Target //:bin up-to-date (nothing to build) -``` - -Even though the rule does nothing, it already behaves like other rules: it has a -mandatory name and supports common attributes like `visibility`, `testonly`, and -`tags`. - -## Evaluation model - -Before going further, it's important to understand how the code is evaluated. - -Update `foo.bzl` with some print statements: - -```python -def _foo_binary_impl(ctx): - print("analyzing", ctx.label) - -foo_binary = rule( - implementation = _foo_binary_impl, -) - -print("bzl file evaluation") -``` - -and `BUILD`: - -```python -load(":foo.bzl", "foo_binary") - -print("BUILD file") -foo_binary(name = "bin1") -foo_binary(name = "bin2") -``` - -[`ctx.label`](lib/ctx#label) -corresponds to the label of the target being analyzed. The `ctx` object has -many useful fields and methods; you can find an exhaustive list in the -[API reference](lib/ctx). - -Query the code: - -``` -$ bazel query :all -DEBUG: /usr/home/bazel-codelab/foo.bzl:8:1: bzl file evaluation -DEBUG: /usr/home/bazel-codelab/BUILD:2:1: BUILD file -//:bin2 -//:bin1 -``` - -Make a few observations: - -* "bzl file evaluation" is printed first. Before evaluating the `BUILD` file, - Bazel evaluates all the files it loads. If multiple `BUILD` files load - `foo.bzl`, you would see only one occurrence of "bzl file evaluation" because - Bazel caches the result of the evaluation. -* The callback function `_foo_binary_impl` is not called. Bazel query loads - `BUILD` files, but doesn't analyze targets. - -To analyze the targets, use the [`cquery`](/query/cquery) ("configured -query") or the `build` command: - -``` -$ bazel build :all -DEBUG: /usr/home/bazel-codelab/foo.bzl:2:5: analyzing //:bin1 -DEBUG: /usr/home/bazel-codelab/foo.bzl:2:5: analyzing //:bin2 -INFO: Analyzed 2 targets (0 packages loaded, 0 targets configured). -INFO: Found 2 targets... -``` - -As you can see, `_foo_binary_impl` is now called twice - once for each target. - -Notice that neither "bzl file evaluation" nor "BUILD file" is printed again, -because the evaluation of `foo.bzl` is cached after the call to `bazel query`. -Bazel only emits `print` statements when they are actually executed. - -## Creating a file - -To make your rule more useful, update it to generate a file. First, declare the -file and give it a name. In this example, create a file with the same name as -the target: - -```python -ctx.actions.declare_file(ctx.label.name) -``` - -If you run `bazel build :all` now, you will get an error: - -``` -The following files have no generating action: -bin2 -``` - -Whenever you declare a file, you have to tell Bazel how to generate it by -creating an action. Use [`ctx.actions.write`](lib/actions#write) -to create a file with the given content. 
- -```python -def _foo_binary_impl(ctx): - out = ctx.actions.declare_file(ctx.label.name) - ctx.actions.write( - output = out, - content = "Hello\n", - ) -``` - -The code is valid, but it won't do anything: - -``` -$ bazel build bin1 -Target //:bin1 up-to-date (nothing to build) -``` - -The `ctx.actions.write` function registered an action, which taught Bazel -how to generate the file. But Bazel won't create the file until it is -actually requested. So the last thing to do is tell Bazel that the file -is an output of the rule, and not a temporary file used within the rule -implementation. - -```python -def _foo_binary_impl(ctx): - out = ctx.actions.declare_file(ctx.label.name) - ctx.actions.write( - output = out, - content = "Hello!\n", - ) - return [DefaultInfo(files = depset([out]))] -``` - -The `DefaultInfo` and `depset` functions are described later. For now, -assume that the last line is the way to choose the outputs of a rule. - -Now, run Bazel: - -``` -$ bazel build bin1 -INFO: Found 1 target... -Target //:bin1 up-to-date: - bazel-bin/bin1 - -$ cat bazel-bin/bin1 -Hello! -``` - -You have successfully generated a file! - -## Attributes - -To make the rule more useful, add new attributes using -[the `attr` module](lib/attr) and update the rule definition. - -Add a string attribute called `username`: - -```python -foo_binary = rule( - implementation = _foo_binary_impl, - attrs = { - "username": attr.string(), - }, -) -``` - -Next, set it in the `BUILD` file: - -```python -foo_binary( - name = "bin", - username = "Alice", -) -``` - -To access the value in the callback function, use `ctx.attr.username`. For -example: - -```python -def _foo_binary_impl(ctx): - out = ctx.actions.declare_file(ctx.label.name) - ctx.actions.write( - output = out, - content = "Hello {}!\n".format(ctx.attr.username), - ) - return [DefaultInfo(files = depset([out]))] -``` - -Note that you can make the attribute mandatory or set a default value. Look at -the documentation of [`attr.string`](lib/attr#string). -You may also use other types of attributes, such as [boolean](lib/attr#bool) -or [list of integers](lib/attr#int_list). - -## Dependencies - -Dependency attributes, such as [`attr.label`](lib/attr#label) -and [`attr.label_list`](lib/attr#label_list), -declare a dependency from the target that owns the attribute to the target whose -label appears in the attribute's value. This kind of attribute forms the basis -of the target graph. - -In the `BUILD` file, the target label appears as a string object, such as -`//pkg:name`. In the implementation function, the target will be accessible as a -[`Target`](lib/Target) object. For example, view the files returned -by the target using [`Target.files`](lib/Target#modules.Target.files). - -### Multiple files - -By default, only targets created by rules may appear as dependencies (such as a -`foo_library()` target). If you want the attribute to accept targets that are -input files (such as source files in the repository), you can do it with -`allow_files` and specify the list of accepted file extensions (or `True` to -allow any file extension): - -```python -"srcs": attr.label_list(allow_files = [".java"]), -``` - -The list of files can be accessed with `ctx.files.<attribute name>`. 
For -example, the list of files in the `srcs` attribute can be accessed through - -```python -ctx.files.srcs -``` - -### Single file - -If you need only one file, use `allow_single_file`: - -```python -"src": attr.label(allow_single_file = [".java"]) -``` - -This file is then accessible under `ctx.file.<attribute name>`: - -```python -ctx.file.src -``` - -## Create a file with a template - -You can create a rule that generates a .cc file based on a template. You could -also use `ctx.actions.write` to output a string constructed in the rule -implementation function, but this has two problems. First, as the template gets -bigger, it becomes more memory efficient to put it in a separate file and avoid -constructing large strings during the analysis phase. Second, using a separate -file is more convenient for the user. Instead, use -[`ctx.actions.expand_template`](lib/actions#expand_template), -which performs substitutions on a template file. - -Create a `template` attribute to declare a dependency on the template -file: - -```python -def _hello_world_impl(ctx): - out = ctx.actions.declare_file(ctx.label.name + ".cc") - ctx.actions.expand_template( - output = out, - template = ctx.file.template, - substitutions = {"{NAME}": ctx.attr.username}, - ) - return [DefaultInfo(files = depset([out]))] - -hello_world = rule( - implementation = _hello_world_impl, - attrs = { - "username": attr.string(default = "unknown person"), - "template": attr.label( - allow_single_file = [".cc.tpl"], - mandatory = True, - ), - }, -) -``` - -Users can use the rule like this: - -```python -hello_world( - name = "hello", - username = "Alice", - template = "file.cc.tpl", -) - -cc_binary( - name = "hello_bin", - srcs = [":hello"], -) -``` - -If you don't want to expose the template to the end-user and always use the -same one, you can set a default value and make the attribute private: - -```python - "_template": attr.label( - allow_single_file = True, - default = "file.cc.tpl", - ), -``` - -Attributes that start with an underscore are private and cannot be set in a -`BUILD` file. The template is now an _implicit dependency_: Every `hello_world` -target has a dependency on this file. Don't forget to make this file visible -to other packages by updating the `BUILD` file and using -[`exports_files`](/reference/be/functions#exports_files): - -```python -exports_files(["file.cc.tpl"]) -``` - -## Going further - -* Take a look at the [reference documentation for rules](/extending/rules#contents). -* Get familiar with [depsets](/extending/depsets). -* Check out the [examples repository](https://github.com/bazelbuild/examples/tree/master/rules) - which includes additional examples of rules. diff --git a/8.1.1/rules/testing.mdx b/8.1.1/rules/testing.mdx deleted file mode 100644 index 2996e08..0000000 --- a/8.1.1/rules/testing.mdx +++ /dev/null @@ -1,474 +0,0 @@ ---- -title: 'Testing' --- - - - -There are several different approaches to testing Starlark code in Bazel. This -page gathers the current best practices and frameworks by use case. - -## Testing rules - -[Skylib](https://github.com/bazelbuild/bazel-skylib) has a test framework called -[`unittest.bzl`](https://github.com/bazelbuild/bazel-skylib/blob/main/lib/unittest.bzl) -for checking the analysis-time behavior of rules, such as their actions and -providers. Such tests are called "analysis tests" and are currently the best -option for testing the inner workings of rules. - -Some caveats: - -* Test assertions occur within the build, not a separate test runner process. 
- Targets that are created by the test must be named such that they do not - collide with targets from other tests or from the build. An error that - occurs during the test is seen by Bazel as a build breakage rather than a - test failure. - -* It requires a fair amount of boilerplate to set up the rules under test and - the rules containing test assertions. This boilerplate may seem daunting at - first. It helps to [keep in mind](/extending/concepts#evaluation-model) that macros - are evaluated and targets generated during the loading phase, while rule - implementation functions don't run until later, during the analysis phase. - -* Analysis tests are intended to be fairly small and lightweight. Certain - features of the analysis testing framework are restricted to verifying - targets with a maximum number of transitive dependencies (currently 500). - This is due to performance implications of using these features with larger - tests. - -The basic principle is to define a testing rule that depends on the -rule-under-test. This gives the testing rule access to the rule-under-test's -providers. - -The testing rule's implementation function carries out assertions. If there are -any failures, these are not raised immediately by calling `fail()` (which would -trigger an analysis-time build error), but rather by storing the errors in a -generated script that fails at test execution time. - -See below for a minimal toy example, followed by an example that checks actions. - -### Minimal example - -`//mypkg/myrules.bzl`: - -```python -MyInfo = provider(fields = { - "val": "string value", - "out": "output File", -}) - -def _myrule_impl(ctx): - """Rule that just generates a file and returns a provider.""" - out = ctx.actions.declare_file(ctx.label.name + ".out") - ctx.actions.write(out, "abc") - return [MyInfo(val="some value", out=out)] - -myrule = rule( - implementation = _myrule_impl, -) -``` - -`//mypkg/myrules_test.bzl`: - - -```python -load("@bazel_skylib//lib:unittest.bzl", "asserts", "analysistest") -load(":myrules.bzl", "myrule", "MyInfo") - -# ==== Check the provider contents ==== - -def _provider_contents_test_impl(ctx): - env = analysistest.begin(ctx) - - target_under_test = analysistest.target_under_test(env) - # If preferred, could pass these values as "expected" and "actual" keyword - # arguments. - asserts.equals(env, "some value", target_under_test[MyInfo].val) - - # If you forget to return end(), you will get an error about an analysis - # test needing to return an instance of AnalysisTestResultInfo. - return analysistest.end(env) - -# Create the testing rule to wrap the test logic. This must be bound to a global -# variable, not called in a macro's body, since macros get evaluated at loading -# time but the rule gets evaluated later, at analysis time. Since this is a test -# rule, its name must end with "_test". -provider_contents_test = analysistest.make(_provider_contents_test_impl) - -# Macro to setup the test. -def _test_provider_contents(): - # Rule under test. Be sure to tag 'manual', as this target should not be - # built using `:all` except as a dependency of the test. - myrule(name = "provider_contents_subject", tags = ["manual"]) - # Testing rule. - provider_contents_test(name = "provider_contents_test", - target_under_test = ":provider_contents_subject") - # Note the target_under_test attribute is how the test rule depends on - # the real rule target. 
- -# Entry point from the BUILD file; macro for running each test case's macro and -declaring a test suite that wraps them together. -def myrules_test_suite(name): - # Call all test functions and wrap their targets in a suite. - _test_provider_contents() - # ... - - native.test_suite( - name = name, - tests = [ - ":provider_contents_test", - # ... - ], - ) -``` - -`//mypkg/BUILD`: - -```python -load(":myrules.bzl", "myrule") -load(":myrules_test.bzl", "myrules_test_suite") - -# Production use of the rule. -myrule( - name = "mytarget", -) - -# Call a macro that defines targets that perform the tests at analysis time, -# and that can be executed with "bazel test" to return the result. -myrules_test_suite(name = "myrules_test") -``` - -The test can be run with `bazel test //mypkg:myrules_test`. - -Aside from the initial `load()` statements, there are two main parts to the -file: - -* The tests themselves, each of which consists of 1) an analysis-time - implementation function for the testing rule, 2) a declaration of the - testing rule via `analysistest.make()`, and 3) a loading-time function - (macro) for declaring the rule-under-test (and its dependencies) and testing - rule. If the assertions do not change between test cases, 1) and 2) may be - shared by multiple test cases. - -* The test suite function, which calls the loading-time functions for each - test, and declares a `test_suite` target bundling all tests together. - -For consistency, follow the recommended naming convention: Let `foo` stand for -the part of the test name that describes what the test is checking -(`provider_contents` in the above example). For example, a JUnit test method -would be named `testFoo`. - -Then: - -* the macro which generates the test and target under test should be - named `_test_foo` (`_test_provider_contents`) - -* its test rule type should be named `foo_test` (`provider_contents_test`) - -* the label of the target of this rule type should be `foo_test` - (`provider_contents_test`) - -* the implementation function for the testing rule should be named - `_foo_test_impl` (`_provider_contents_test_impl`) - -* the labels of the targets of the rules under test and their dependencies - should be prefixed with `foo_` (`provider_contents_`) - -Note that the labels of all targets can conflict with other labels in the same -BUILD package, so it's helpful to use a unique name for the test. - -### Failure testing - -It may be useful to verify that a rule fails given certain inputs or in a -certain state. This can be done using the analysis test framework: - -The test rule created with `analysistest.make` should specify `expect_failure`: - -```python -failure_testing_test = analysistest.make( - _failure_testing_test_impl, - expect_failure = True, -) -``` - -The test rule implementation should make assertions on the nature of the failure -that took place (specifically, the failure message): - -```python -def _failure_testing_test_impl(ctx): - env = analysistest.begin(ctx) - asserts.expect_failure(env, "This rule should never work") - return analysistest.end(env) -``` - -Also make sure that your target under test is specifically tagged 'manual'. -Without this, building all targets in your package using `:all` will result in a -build of the intentionally-failing target and will exhibit a build failure. 
With -'manual', your target under test will build only if explicitly specified, or as -a dependency of a non-manual target (such as your test rule): - -```python -def _test_failure(): - myrule(name = "this_should_fail", tags = ["manual"]) - - failure_testing_test(name = "failure_testing_test", - target_under_test = ":this_should_fail") - -# Then call _test_failure() in the macro which generates the test suite and add -# ":failure_testing_test" to the suite's test targets. -``` - -### Verifying registered actions - -You may want to write tests which make assertions about the actions that your -rule registers, for example, using `ctx.actions.run()`. This can be done in your -analysis test rule implementation function. An example: - -```python -def _inspect_actions_test_impl(ctx): - env = analysistest.begin(ctx) - - target_under_test = analysistest.target_under_test(env) - actions = analysistest.target_actions(env) - asserts.equals(env, 1, len(actions)) - action_output = actions[0].outputs.to_list()[0] - asserts.equals( - env, target_under_test.label.name + ".out", action_output.basename) - return analysistest.end(env) -``` - -Note that `analysistest.target_actions(env)` returns a list of -[`Action`](lib/Action) objects which represent actions registered by the -target under test. - -### Verifying rule behavior under different flags - -You may want to verify your real rule behaves a certain way given certain build -flags. For example, your rule may behave differently if a user specifies: - -```shell -bazel build //mypkg:real_target -c opt -``` - -versus - -```shell -bazel build //mypkg:real_target -c dbg -``` - -At first glance, this could be done by testing the target under test using the -desired build flags: - -```shell -bazel test //mypkg:myrules_test -c opt -``` - -But then it becomes impossible for your test suite to simultaneously contain a -test which verifies the rule behavior under `-c opt` and another test which -verifies the rule behavior under `-c dbg`. Both tests would not be able to run -in the same build! - -This can be solved by specifying the desired build flags when defining the test -rule: - -```python -myrule_c_opt_test = analysistest.make( - _myrule_c_opt_test_impl, - config_settings = { - "//command_line_option:compilation_mode": "opt", - }, -) -``` - -Normally, a target under test is analyzed given the current build flags. -Specifying `config_settings` overrides the values of the specified command line -options. (Any unspecified options will retain their values from the actual -command line). - -In the specified `config_settings` dictionary, command line flags must be -prefixed with a special placeholder value `//command_line_option:`, as is shown -above. - - -## Validating artifacts - -The main ways to check that your generated files are correct are: - -* You can write a test script in shell, Python, or another language, and - create a target of the appropriate `*_test` rule type. - -* You can use a specialized rule for the kind of test you want to perform. - -### Using a test target - -The most straightforward way to validate an artifact is to write a script and -add a `*_test` target to your BUILD file. The specific artifacts you want to -check should be data dependencies of this target. If your validation logic is -reusable for multiple tests, it should be a script that takes command line -arguments that are controlled by the test target's `args` attribute. Here's an -example that validates that the output of `myrule` from above is `"abc"`. 
- -`//mypkg/myrule_validator.sh`: - -```shell -if [ "$(cat $1)" = "abc" ]; then - echo "Passed" - exit 0 -else - echo "Failed" - exit 1 -fi -``` - -`//mypkg/BUILD`: - -```python -... - -myrule( - name = "mytarget", -) - -... - -# Needed for each target whose artifacts are to be checked. -sh_test( - name = "validate_mytarget", - srcs = [":myrule_validator.sh"], - args = ["$(location :mytarget.out)"], - data = [":mytarget.out"], -) -``` - -### Using a custom rule - -A more complicated alternative is to write the shell script as a template that -gets instantiated by a new rule. This involves more indirection and Starlark -logic, but leads to cleaner BUILD files. As a side-benefit, any argument -preprocessing can be done in Starlark instead of the script, and the script is -slightly more self-documenting since it uses symbolic placeholders (for -substitutions) instead of numeric ones (for arguments). - -`//mypkg/myrule_validator.sh.template`: - -```shell -if [ "$(cat %TARGET%)" = "abc" ]; then - echo "Passed" - exit 0 -else - echo "Failed" - exit 1 -fi -``` - -`//mypkg/myrule_validation.bzl`: - -```python -def _myrule_validation_test_impl(ctx): - """Rule for instantiating myrule_validator.sh.template for a given target.""" - exe = ctx.outputs.executable - target = ctx.file.target - ctx.actions.expand_template(output = exe, - template = ctx.file._script, - is_executable = True, - substitutions = { - "%TARGET%": target.short_path, - }) - # This is needed to make sure the output file of myrule is visible to the - # resulting instantiated script. - return [DefaultInfo(runfiles=ctx.runfiles(files=[target]))] - -myrule_validation_test = rule( - implementation = _myrule_validation_test_impl, - attrs = {"target": attr.label(allow_single_file=True), - # You need an implicit dependency in order to access the template. - # A target could potentially override this attribute to modify - # the test logic. - "_script": attr.label(allow_single_file=True, - default=Label("//mypkg:myrule_validator"))}, - test = True, -) -``` - -`//mypkg/BUILD`: - -```python -... - -myrule( - name = "mytarget", -) - -... - -# Needed just once, to expose the template. Could have also used exports_files(), -# and made the _script attribute set allow_files=True. -filegroup( - name = "myrule_validator", - srcs = [":myrule_validator.sh.template"], -) - -# Needed for each target whose artifacts are to be checked. Notice that you no -# longer have to specify the output file name in a data attribute, or its -# $(location) expansion in an args attribute, or the label for the script -# (unless you want to override it). -myrule_validation_test( - name = "validate_mytarget", - target = ":mytarget", -) -``` - -Alternatively, instead of using a template expansion action, you could have -inlined the template into the .bzl file as a string and expanded it during the -analysis phase using the `str.format` method or `%`-formatting. - -## Testing Starlark utilities - -[Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`unittest.bzl`](https://github.com/bazelbuild/bazel-skylib/blob/main/lib/unittest.bzl) -framework can be used to test utility functions (that is, functions that are -neither macros nor rule implementations). Instead of using `unittest.bzl`'s -`analysistest` library, `unittest` may be used. For such test suites, the -convenience function `unittest.suite()` can be used to reduce boilerplate. 
- -`//mypkg/myhelpers.bzl`: - -```python -def myhelper(): - return "abc" -``` - -`//mypkg/myhelpers_test.bzl`: - - -```python -load("@bazel_skylib//lib:unittest.bzl", "asserts", "unittest") -load(":myhelpers.bzl", "myhelper") - -def _myhelper_test_impl(ctx): - env = unittest.begin(ctx) - asserts.equals(env, "abc", myhelper()) - return unittest.end(env) - -myhelper_test = unittest.make(_myhelper_test_impl) - -# No need for a test_myhelper() setup function. - -def myhelpers_test_suite(name): - # unittest.suite() takes care of instantiating the testing rules and creating - # a test_suite. - unittest.suite( - name, - myhelper_test, - # ... - ) -``` - -`//mypkg/BUILD`: - -```python -load(":myhelpers_test.bzl", "myhelpers_test_suite") - -myhelpers_test_suite(name = "myhelpers_tests") -``` - -For more examples, see Skylib's own [tests](https://github.com/bazelbuild/bazel-skylib/blob/main/tests/BUILD). diff --git a/8.1.1/rules/verbs-tutorial.mdx b/8.1.1/rules/verbs-tutorial.mdx deleted file mode 100644 index db7757e..0000000 --- a/8.1.1/rules/verbs-tutorial.mdx +++ /dev/null @@ -1,177 +0,0 @@ ---- -title: 'Using Macros to Create Custom Verbs' --- - - - -Day-to-day interaction with Bazel happens primarily through a few commands: -`build`, `test`, and `run`. At times, though, these can feel limited: you may -want to push packages to a repository, publish documentation for end-users, or -deploy an application with Kubernetes. But Bazel doesn't have a `publish` or -`deploy` command – where do these actions fit in? - -## The bazel run command - -Bazel's focus on hermeticity, reproducibility, and incrementality means the -`build` and `test` commands aren't helpful for the above tasks. These actions -may run in a sandbox, with limited network access, and aren't guaranteed to be -re-run with every `bazel build`. - -Instead, rely on `bazel run`: the workhorse for tasks that you *want* to have -side effects. Bazel users are accustomed to rules that create executables, and -rule authors can follow a common set of patterns to extend this to -"custom verbs". - -### In the wild: rules_k8s -For example, consider [`rules_k8s`](https://github.com/bazelbuild/rules_k8s), -the Kubernetes rules for Bazel. Suppose you have the following target: - -```python -# BUILD file in //application/k8s -k8s_object( - name = "staging", - kind = "deployment", - cluster = "testing", - template = "deployment.yaml", -) -``` - -The [`k8s_object` rule](https://github.com/bazelbuild/rules_k8s#usage) builds a -standard Kubernetes YAML file when `bazel build` is used on the `staging` -target. However, additional targets are also created by the `k8s_object` -macro with names like `staging.apply` and `:staging.delete`. These targets -build scripts that perform those actions, and when executed with `bazel run -staging.apply`, they behave like our own `bazel k8s-apply` or `bazel -k8s-delete` commands. - -### Another example: ts_api_guardian_test - -This pattern can also be seen in the Angular project. The -[`ts_api_guardian_test` macro](https://github.com/angular/angular/blob/16ac611a8410e6bcef8ffc779f488ca4fa102155/tools/ts-api-guardian/index.bzl#L22) -produces two targets. The first is a standard `nodejs_test` target which compares -some generated output against a "golden" file (that is, a file containing the -expected output). This can be built and run with a normal `bazel -test` invocation. 
In `angular-cli`, you can run [one such -target](https://github.com/angular/angular-cli/blob/e1269cb520871ee29b1a4eec6e6c0e4a94f0b5fc/etc/api/BUILD) -with `bazel test //etc/api:angular_devkit_core_api`. - -Over time, this golden file may need to be updated for legitimate reasons. -Updating this manually is tedious and error-prone, so this macro also provides -a `nodejs_binary` target that updates the golden file, instead of comparing -against it. Effectively, the same test script can be written to run in "verify" -or "accept" mode, based on how it's invoked. This follows the same pattern -you've learned already: there is no native `bazel test-accept` command, but the -same effect can be achieved with -`bazel run //etc/api:angular_devkit_core_api.accept`. - -This pattern can be quite powerful, and turns out to be quite common once you -learn to recognize it. - -## Adapting your own rules - -[Macros](/extending/macros) are the heart of this pattern. Macros are used like -rules, but they can create several targets. Typically, they will create a -target with the specified name which performs the primary build action: perhaps -it builds a normal binary, a Docker image, or an archive of source code. In -this pattern, additional targets are created to produce scripts performing side -effects based on the output of the primary target, like publishing the -resulting binary or updating the expected test output. - -To illustrate this, wrap an imaginary rule that generates a website with -[Sphinx](https://www.sphinx-doc.org) with a macro to create an additional -target that allows the user to publish it when ready. Consider the following -existing rule for generating a website with Sphinx: - -```python -_sphinx_site = rule( - implementation = _sphinx_impl, - attrs = {"srcs": attr.label_list(allow_files = [".rst"])}, -) -``` - -Next, consider a rule like the following, which builds a script that, when run, -publishes the generated pages: - -```python -_sphinx_publisher = rule( - implementation = _publish_impl, - attrs = { - "site": attr.label(), - "_publisher": attr.label( - default = "//internal/sphinx:publisher", - executable = True, - ), - }, - executable = True, -) -``` - -Finally, define the following symbolic macro (available in Bazel 8 or newer) to -create targets for both of the above rules together: - -```starlark -def _sphinx_site_impl(name, visibility, srcs, **kwargs): - # This creates the primary target, producing the Sphinx-generated HTML. We - # set `visibility = visibility` to make it visible to callers of the - # macro. - _sphinx_site(name = name, visibility = visibility, srcs = srcs, **kwargs) - # This creates the secondary target, which produces a script for publishing - # the site generated above. We don't want it to be visible to callers of - # our macro, so we omit visibility for it. - _sphinx_publisher(name = "%s.publish" % name, site = name, **kwargs) - -sphinx_site = macro( - implementation = _sphinx_site_impl, - attrs = {"srcs": attr.label_list(allow_files = [".rst"])}, - # Inherit common attributes like tags and testonly - inherit_attrs = "common", -) -``` - -Or, if you need to support Bazel releases older than Bazel 8, you would instead -define a legacy macro: - -```starlark -def sphinx_site(name, srcs = [], **kwargs): - # This creates the primary target, producing the Sphinx-generated HTML. - _sphinx_site(name = name, srcs = srcs, **kwargs) - # This creates the secondary target, which produces a script for publishing - # the site generated above. 
- _sphinx_publisher(name = "%s.publish" % name, site = name, **kwargs) -``` - -In the `BUILD` files, use the macro as though it just creates the primary -target: - -```python -sphinx_site( - name = "docs", - srcs = ["index.md", "providers.md"], -) -``` - -In this example, a "docs" target is created, just as though the macro were a -standard, single Bazel rule. When built, the rule generates some configuration -and runs Sphinx to produce an HTML site, ready for manual inspection. However, -an additional "docs.publish" target is also created, which builds a script for -publishing the site. Once you check the output of the primary target, you can -use `bazel run :docs.publish` to publish it for public consumption, just like -an imaginary `bazel publish` command. - -It's not immediately obvious what the implementation of the `_sphinx_publisher` -rule might look like. Often, actions like this write a _launcher_ shell script. -This method typically involves using -[`ctx.actions.expand_template`](lib/actions#expand_template) -to write a very simple shell script, in this case invoking the publisher binary -with a path to the output of the primary target. This way, the publisher -implementation can remain generic, the `_sphinx_site` rule can just produce -HTML, and this small script is all that's necessary to combine the two -together. - -In `rules_k8s`, this is indeed what `.apply` does: -[`expand_template`](https://github.com/bazelbuild/rules_k8s/blob/f10e7025df7651f47a76abf1db5ade1ffeb0c6ac/k8s/object.bzl#L213-L241) -writes a very simple Bash script, based on -[`apply.sh.tpl`](https://github.com/bazelbuild/rules_k8s/blob/f10e7025df7651f47a76abf1db5ade1ffeb0c6ac/k8s/apply.sh.tpl), -which runs `kubectl` with the output of the primary target. This script can -then be built and run with `bazel run :staging.apply`, effectively providing a -`k8s-apply` command for `k8s_object` targets. diff --git a/8.1.1/run/bazelrc.mdx b/8.1.1/run/bazelrc.mdx deleted file mode 100644 index 15f89c8..0000000 --- a/8.1.1/run/bazelrc.mdx +++ /dev/null @@ -1,260 +0,0 @@ ---- -title: 'Write bazelrc configuration files' --- - - - -Bazel accepts many options. Some options are varied frequently (for example, -`--subcommands`) while others stay the same across several builds (such as -`--package_path`). To avoid specifying these unchanged options for every build -(and other commands), you can specify options in a configuration file, called -`.bazelrc`. - -### Where are the `.bazelrc` files? - -Bazel looks for optional configuration files in the following locations, -in the order shown below. The options are interpreted in this order, so -options in later files can override a value from an earlier file if a -conflict arises. All options that control which of these files are loaded are -startup options, which means they must occur after `bazel` and -before the command (`build`, `test`, etc). - -1. **The system RC file**, unless `--nosystem_rc` is present. - - Path: - - - On Linux/macOS/Unixes: `/etc/bazel.bazelrc` - - On Windows: `%ProgramData%\bazel.bazelrc` - - It is not an error if this file does not exist. - - If another system-specified location is required, you must build a custom - Bazel binary, overriding the `BAZEL_SYSTEM_BAZELRC_PATH` value in - [`//src/main/cpp:option_processor`](https://github.com/bazelbuild/bazel/blob/0.28.0/src/main/cpp/BUILD#L141). - The system-specified location may contain environment variable references, - such as `${VAR_NAME}` on Unix or `%VAR_NAME%` on Windows. - -2. 
**The workspace RC file**, unless `--noworkspace_rc` is present. - - Path: `.bazelrc` in your workspace directory (next to the main - `MODULE.bazel` file). - - It is not an error if this file does not exist. - -3. **The home RC file**, unless `--nohome_rc` is present. - - Path: - - - On Linux/macOS/Unixes: `$HOME/.bazelrc` - - On Windows: `%USERPROFILE%\.bazelrc` if it exists, or `%HOME%/.bazelrc` - - It is not an error if this file does not exist. - -4. **The user-specified RC file**, if specified with - `--bazelrc=file` - - This flag is optional and can be specified multiple times. - - `/dev/null` indicates that all further `--bazelrc`s will be ignored, which - is useful to disable the search for a user rc file, such as in release - builds. - - For example: - - ``` - --bazelrc=x.rc --bazelrc=y.rc --bazelrc=/dev/null --bazelrc=z.rc - ``` - - - `x.rc` and `y.rc` are read. - - `z.rc` is ignored due to the prior `/dev/null`. - -In addition to this optional configuration file, Bazel looks for a global rc -file. For more details, see the [global bazelrc section](#global-bazelrc). - - -### `.bazelrc` syntax and semantics - -Like all UNIX "rc" files, the `.bazelrc` file is a text file with a line-based -grammar. Empty lines and lines starting with `#` (comments) are ignored. Each -line contains a sequence of words, which are tokenized according to the same -rules as the Bourne shell. - -#### Imports - -Lines that start with `import` or `try-import` are special: use these to load -other "rc" files. To specify a path that is relative to the workspace root, -write `import %workspace%/path/to/bazelrc`. - -The difference between `import` and `try-import` is that Bazel fails if the -`import`'ed file is missing (or can't be read), but not so for a `try-import`'ed -file. - -Import precedence: - -- Options in the imported file take precedence over options specified before - the import statement. -- Options specified after the import statement take precedence over the - options in the imported file. -- Options in files imported later take precedence over files imported earlier. - -#### Option defaults - -Most lines of a bazelrc define default option values. The first word on each -line specifies when these defaults are applied: - -- `startup`: startup options, which go before the command, and are described - in `bazel help startup_options`. -- `common`: options that should be applied to all Bazel commands that support - them. If a command does not support an option specified in this way, the - option is ignored so long as it is valid for *some* other Bazel command. - Note that this only applies to option names: If the current command accepts - an option with the specified name, but doesn't support the specified value, - it will fail. -- `always`: options that apply to all Bazel commands. If a command does not - support an option specified in this way, it will fail. -- _`command`_: Bazel command, such as `build` or `query`, to which the options - apply. These options also apply to all commands that inherit from the - specified command. (For example, `test` inherits from `build`.) - -Each of these lines may be used more than once, and the arguments that follow the -first word are combined as if they had appeared on a single line. (Users of CVS, -another tool with a "Swiss army knife" command-line interface, will find the -syntax similar to that of `.cvsrc`.) 
For example, the lines: - -```posix-terminal -build --test_tmpdir=/tmp/foo --verbose_failures - -build --test_tmpdir=/tmp/bar -``` - -are combined as: - -```posix-terminal -build --test_tmpdir=/tmp/foo --verbose_failures --test_tmpdir=/tmp/bar -``` - -so the effective flags are `--verbose_failures` and `--test_tmpdir=/tmp/bar`. - -Option precedence: - -- Options on the command line always take precedence over those in rc files. - For example, if an rc file says `build -c opt` but the command line flag is - `-c dbg`, the command line flag takes precedence. -- Within the rc file, precedence is governed by specificity: lines for a more - specific command take precedence over lines for a less specific command. - - Specificity is defined by inheritance. Some commands inherit options from - other commands, making the inheriting command more specific than the base - command. For example, `test` inherits from the `build` command, so all `bazel - build` flags are valid for `bazel test`, and all `build` lines apply also to - `bazel test` unless there's a `test` line for the same option. If the rc - file says: - - ```posix-terminal - test -c dbg --test_env=PATH - - build -c opt --verbose_failures - ``` - - then `bazel build //foo` will use `-c opt --verbose_failures`, and `bazel - test //foo` will use `--verbose_failures -c dbg --test_env=PATH`. - - The inheritance (specificity) graph is: - - * Every command inherits from `common` - * The following commands inherit from (and are more specific than) - `build`: `test`, `run`, `clean`, `mobile-install`, `info`, - `print_action`, `config`, `cquery`, and `aquery` - * `coverage`, `fetch`, and `vendor` inherit from `test` - -- Two lines specifying options for the same command at equal specificity are - parsed in the order in which they appear within the file. - -- Because this precedence rule does not match the file order, it helps - readability if you follow the precedence order within rc files: start with - `common` options at the top, and end with the most-specific commands at the - bottom of the file. This way, the order in which the options are read is the - same as the order in which they are applied, which is more intuitive. - -The arguments specified on a line of an rc file may include arguments that are -not options, such as the names of build targets, and so on. These, like the -options specified in the same files, have lower precedence than their siblings -on the command line, and are always prepended to the explicit list of -non-option arguments. - -#### `--config` - -In addition to setting option defaults, the rc file can be used to group options -and provide a shorthand for common groupings. This is done by adding a `:name` -suffix to the command. These options are ignored by default, but will be -included when the option `--config=name` is present, -either on the command line or in a `.bazelrc` file, recursively, even inside of -another config definition. The options specified by `command:name` will only be -expanded for applicable commands, in the precedence order described above. - -Note: Configs can be defined in any `.bazelrc` file, and all lines of -the form `command:name` (for applicable commands) will be expanded, across the -different rc files. In order to avoid name conflicts, we suggest that configs -defined in personal rc files start with an underscore (`_`) to avoid -unintentional name sharing. 
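-
-As a sketch (the config name and the flags grouped under it are illustrative),
-a definition for `--config=foo` could look like this:
-
-```posix-terminal
-build:foo --keep_going --verbose_failures
-test:foo --test_env=DEBUG=1
-```
-
-Running `bazel test --config=foo //some:target` would then pick up all three
-options, since `test` inherits from `build`.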
- -`--config=foo` expands to the options defined in -[the rc files](#bazelrc-file-locations) "in-place" so that the options -specified for the config have the same precedence that the `--config=foo` option -had. - -This syntax does not extend to the use of `startup` to set -[startup options](#option-defaults). Setting -`startup:config-name --some_startup_option` in the .bazelrc will be ignored. - -#### `--enable_platform_specific_config` - -Platform specific configs in the `.bazelrc` can be automatically enabled using -`--enable_platform_specific_config`. For example, if the host OS is Linux and -the `build` command is run, the `build:linux` configuration will be -automatically enabled. Supported OS identifiers are `linux`, `macos`, `windows`, -`freebsd`, and `openbsd`. Enabling this flag is equivalent to using -`--config=linux` on Linux, `--config=windows` on Windows, and so on. - -See [--enable_platform_specific_config](/reference/command-line-reference#flag--enable_platform_specific_config). - -#### Example - -Here's an example `~/.bazelrc` file: - -``` -# Bob's Bazel option defaults - -startup --host_jvm_args=-XX:-UseParallelGC -import /home/bobs_project/bazelrc -build --show_timestamps --keep_going --jobs 600 -build --color=yes -query --keep_going - -# Definition of --config=memcheck -build:memcheck --strip=never --test_timeout=3600 -``` - -### Other files governing Bazel's behavior - -#### `.bazelignore` - -You can specify directories within the workspace -that you want Bazel to ignore, such as related projects -that use other build systems. Place a file called -`.bazelignore` at the root of the workspace -and add the directories you want Bazel to ignore, one per -line. Entries are relative to the workspace root. - -### The global bazelrc file - -Bazel reads optional bazelrc files in this order: - -1. System rc-file located at `etc/bazel.bazelrc`. -2. Workspace rc-file located at `$workspace/tools/bazel.rc`. -3. Home rc-file located at `$HOME/.bazelrc` - -Each bazelrc file listed here has a corresponding flag which can be used to -disable them (e.g. `--nosystem_rc`, `--noworkspace_rc`, `--nohome_rc`). You can -also make Bazel ignore all bazelrcs by passing the `--ignore_all_rc_files` -startup option. diff --git a/8.1.1/run/client-server.mdx b/8.1.1/run/client-server.mdx deleted file mode 100644 index 1868635..0000000 --- a/8.1.1/run/client-server.mdx +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: 'Client/server implementation' ---- - - - -The Bazel system is implemented as a long-lived server process. This allows it -to perform many optimizations not possible with a batch-oriented implementation, -such as caching of BUILD files, dependency graphs, and other metadata from one -build to the next. This improves the speed of incremental builds, and allows -different commands, such as `build` and `query` to share the same cache of -loaded packages, making queries very fast. Each server can handle at most one -invocation at a time; further concurrent invocations will either block or -fail-fast (see `--block_for_lock`). - -When you run `bazel`, you're running the client. The client finds the server -based on the [output base](/run/scripts#output-base-option), which by default is -determined by the path of the base workspace directory and your userid, so if -you build in multiple workspaces, you'll have multiple output bases and thus -multiple Bazel server processes. 
Multiple users on the same workstation can -build concurrently in the same workspace because their output bases will differ -(different userids). - -If the client cannot find a running server instance, it starts a new one. It -does this by checking if the output base already exists, implying the blaze -archive has already been unpacked. Otherwise if the output base doesn't exist, -the client unzips the archive's files and sets their `mtime`s to a date 9 years -in the future. Once installed, the client confirms that the `mtime`s of the -unzipped files are equal to the far off date to ensure no installation tampering -has occurred. - -The server process will stop after a period of inactivity (3 hours, by default, -which can be modified using the startup option `--max_idle_secs`). For the most -part, the fact that there is a server running is invisible to the user, but -sometimes it helps to bear this in mind. For example, if you're running scripts -that perform a lot of automated builds in different directories, it's important -to ensure that you don't accumulate a lot of idle servers; you can do this by -explicitly shutting them down when you're finished with them, or by specifying -a short timeout period. - -The name of a Bazel server process appears in the output of `ps x` or `ps -e f` -as bazel(dirname), where _dirname_ is the basename of the -directory enclosing the root of your workspace directory. For example: - -```posix-terminal -ps -e f -16143 ? Sl 3:00 bazel(src-johndoe2) -server -Djava.library.path=... -``` - -This makes it easier to find out which server process belongs to a given -workspace. (Beware that with certain other options to `ps`, Bazel server -processes may be named just `java`.) Bazel servers can be stopped using the -[shutdown](/docs/user-manual#shutdown) command. - -When running `bazel`, the client first checks that the server is the appropriate -version; if not, the server is stopped and a new one started. This ensures that -the use of a long-running server process doesn't interfere with proper -versioning. diff --git a/8.1.1/run/scripts.mdx b/8.1.1/run/scripts.mdx deleted file mode 100644 index f267c90..0000000 --- a/8.1.1/run/scripts.mdx +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: 'Calling Bazel from scripts' ---- - - - -You can call Bazel from scripts to perform a build, run tests, or query -the dependency graph. Bazel has been designed to enable effective scripting, but -this section lists some details to bear in mind to make your scripts more -robust. - -### Choosing the output base - -The `--output_base` option controls where the Bazel process should write the -outputs of a build to, as well as various working files used internally by -Bazel, one of which is a lock that guards against concurrent mutation of the -output base by multiple Bazel processes. - -Choosing the correct output base directory for your script depends on several -factors. If you need to put the build outputs in a specific location, this will -dictate the output base you need to use. If you are making a "read only" call to -Bazel (such as `bazel query`), the locking factors will be more important. In -particular, if you need to run multiple instances of your script concurrently, -you should be mindful that each Blaze server process can handle at most one -invocation [at a time](/run/client-server#clientserver-implementation). 
-Depending on your situation, it may make sense for each instance of your script -to wait its turn, or it may make sense to use `--output_base` to run multiple -Blaze servers and use those. - -If you use the default output base value, you will be contending for the same -lock used by the user's interactive Bazel commands. If the user issues -long-running commands such as builds, your script will have to wait for those -commands to complete before it can continue. - -### Notes about server mode - -By default, Bazel uses a long-running [server process](/run/client-server) as an -optimization. When running Bazel in a script, don't forget to call `shutdown` -when you're finished with the server, or specify `--max_idle_secs=5` so that -idle servers shut themselves down promptly. - -### What exit code will I get? - -Bazel attempts to differentiate failures due to the source code under -consideration from external errors that prevent Bazel from executing properly. -Bazel execution can result in the following exit codes: - -**Exit Codes common to all commands:** - -- `0` - Success -- `2` - Command Line Problem, Bad or Illegal flags or command combination, or - Bad Environment Variables. Your command line must be modified. -- `8` - Build Interrupted but we terminated with an orderly shutdown. -- `9` - The server lock is held and `--noblock_for_lock` was passed. -- `32` - External Environment Failure not on this machine. - -- `33` - Bazel ran out of memory and crashed. You need to modify your command line. -- `34` - Reserved for Google-internal use. -- `35` - Reserved for Google-internal use. -- `36` - Local Environmental Issue, suspected permanent. -- `37` - Unhandled Exception / Internal Bazel Error. -- `38` - Transient error publishing results to the Build Event Service. -- `39` - Blobs required by Bazel are evicted from Remote Cache. -- `41-44` - Reserved for Google-internal use. -- `45` - Persistent error publishing results to the Build Event Service. -- `47` - Reserved for Google-internal use. -- `49` - Reserved for Google-internal use. - -**Return codes for commands `bazel build`, `bazel test`:** - -- `1` - Build failed. -- `3` - Build OK, but some tests failed or timed out. -- `4` - Build successful but no tests were found even though testing was - requested. - - -**For `bazel run`:** - -- `1` - Build failed. -- If the build succeeds but the executed subprocess returns a non-zero exit - code, it will be the exit code of the command as well. - -**For `bazel query`:** - -- `3` - Partial success, but the query encountered 1 or more errors in the - input BUILD file set and therefore the results of the operation are not 100% - reliable. This is likely due to a `--keep_going` option on the command line. -- `7` - Command failure. - -Future Bazel versions may add additional exit codes, replacing generic failure -exit code `1` with a different non-zero value with a particular meaning. -However, all non-zero exit values will always constitute an error. - - -### Reading the .bazelrc file - -By default, Bazel reads the [`.bazelrc` file](/run/bazelrc) from the base -workspace directory or the user's home directory. Whether or not this is -desirable is a choice for your script; if your script needs to be perfectly -hermetic (such as when doing release builds), you should disable reading the -`.bazelrc` file by using the option `--bazelrc=/dev/null`. If you want to perform -a build using the user's preferred settings, the default behavior is better.
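Putting these recommendations together, a hermetic wrapper script might look like the following sketch (the output base path and the `//foo:bar` target are placeholders, not part of any real project):

```posix-terminal
#!/bin/bash
# Ignore all rc files, use a private output base so the script does not
# contend for the interactive server's lock, and let the idle server
# shut itself down promptly. All three are startup options, so they go
# before the "build" command.
bazel --bazelrc=/dev/null \
      --output_base=/tmp/my_script_output_base \
      --max_idle_secs=5 \
      build //foo:bar
status=$?
# 0 is success; any non-zero exit code is an error (see the table above).
if [ "$status" -ne 0 ]; then
  echo "Bazel build failed with exit code $status" >&2
  exit "$status"
fi
```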
- -### Command log - -The Bazel output is also available in a command log file which you can find with -the following command: - -```posix-terminal -bazel info command_log -``` - -The command log file contains the interleaved stdout and stderr streams of the -most recent Bazel command. Note that running `bazel info` will overwrite the -contents of this file, since it then becomes the most recent Bazel command. -However, the location of the command log file will not change unless you change -the setting of the `--output_base` or `--output_user_root` options. - -### Parsing output - -The Bazel output is quite easy to parse for many purposes. Two options that may -be helpful for your script are `--noshow_progress` which suppresses progress -messages, and --show_result n, which controls whether or -not "build up-to-date" messages are printed; these messages may be parsed to -discover which targets were successfully built, and the location of the output -files they created. Be sure to specify a very large value of _n_ if you rely on -these messages. - -## Troubleshooting performance by profiling - -See the [Performance Profiling](/rules/performance#performance-profiling) section. diff --git a/8.1.1/start/android-app.mdx b/8.1.1/start/android-app.mdx deleted file mode 100644 index b0e6f1b..0000000 --- a/8.1.1/start/android-app.mdx +++ /dev/null @@ -1,391 +0,0 @@ ---- -title: 'Bazel Tutorial: Build an Android App' ---- - - -**Note:** There are known limitations on using Bazel for building Android apps. -Visit the Github [team-Android hotlist](https://github.com/bazelbuild/bazel/issues?q=is%3Aissue+is%3Aopen+label%3Ateam-Android) to see the list of known issues. While the Bazel team and Open Source Software (OSS) contributors work actively to address known issues, users should be aware that Android Studio does not officially support Bazel projects. - -This tutorial covers how to build a simple Android app using Bazel. - -Bazel supports building Android apps using the -[Android rules](/reference/be/android). - -This tutorial is intended for Windows, macOS and Linux users and does not -require experience with Bazel or Android app development. You do not need to -write any Android code in this tutorial. - -## What you'll learn - -In this tutorial you learn how to: - -* Set up your environment by installing Bazel and Android Studio, and - downloading the sample project. -* Set up a Bazel workspace that contains the source code - for the app and a `MODULE.bazel` file that identifies the top level of the - workspace directory. -* Update the `MODULE.bazel` file to contain references to the required - external dependencies, like the Android SDK. -* Create a `BUILD` file. -* Build the app with Bazel. -* Deploy and run the app on an Android emulator or physical device. - -## Before you begin - -### Install Bazel - -Before you begin the tutorial, install the following software: - -* **Bazel.** To install, follow the [installation instructions](/install). -* **Android Studio.** To install, follow the steps to [download Android - Studio](https://developer.android.com/sdk/index.html). - Execute the setup wizard to download the SDK and configure your environment. -* (Optional) **Git.** Use `git` to download the Android app project. - -### Get the sample project - -For the sample project, use a basic Android app project in -[Bazel's examples repository](https://github.com/bazelbuild/examples). 
- -This app has a single button that prints a greeting when clicked: - -![Button greeting](/docs/images/android_tutorial_app.png "Tutorial app button greeting") - -**Figure 1.** Android app button greeting. - -Clone the repository with `git` (or [download the ZIP file -directly](https://github.com/bazelbuild/examples/archive/master.zip)): - -```posix-terminal -git clone https://github.com/bazelbuild/examples -``` - -The sample project for this tutorial is in `examples/android/tutorial`. For -the rest of the tutorial, you will be executing commands in this directory. - -### Review the source files - -Take a look at the source files for the app. - -``` -. -├── README.md -└── src - └── main - ├── AndroidManifest.xml - └── java - └── com - └── example - └── bazel - ├── AndroidManifest.xml - ├── Greeter.java - ├── MainActivity.java - └── res - ├── layout - │ └── activity_main.xml - └── values - ├── colors.xml - └── strings.xml -``` - -The key files and directories are: - -| Name | Location | -| ----------------------- | ---------------------------------------------------------------------------------------- | -| Android manifest files | `src/main/AndroidManifest.xml` and `src/main/java/com/example/bazel/AndroidManifest.xml` | -| Android source files | `src/main/java/com/example/bazel/MainActivity.java` and `Greeter.java` | -| Resource file directory | `src/main/java/com/example/bazel/res/` | - - -## Build with Bazel - -### Set up the workspace - -A [workspace](/concepts/build-ref#workspace) is a directory that contains the -source files for one or more software projects, and has a `MODULE.bazel` file at -its root. - -The `MODULE.bazel` file may be empty or may contain references to [external -dependencies](/external/overview) required to build your project. - -First, run the following command to create an empty `MODULE.bazel` file: - -| OS | Command | -| ------------------------ | ----------------------------------- | -| Linux, macOS | `touch MODULE.bazel` | -| Windows (Command Prompt) | `type nul > MODULE.bazel` | -| Windows (PowerShell) | `New-Item MODULE.bazel -ItemType file` | - -### Running Bazel - -You can now check if Bazel is running correctly with the command: - -```posix-terminal -bazel info workspace -``` - -If Bazel prints the path of the current directory, you're good to go! If the -`MODULE.bazel` file does not exist, you may see an error message like: - -``` -ERROR: The 'info' command is only supported from within a workspace. -``` - -### Integrate with the Android SDK - -Bazel needs to run the Android SDK -[build tools](https://developer.android.com/tools/revisions/build-tools.html) -to build the app. This means that you need to add some information to your -`MODULE.bazel` file so that Bazel knows where to find them. - -Add the following line to your `MODULE.bazel` file: - -```python -bazel_dep(name = "rules_android", version = "0.5.1") -``` - -This will use the Android SDK at the path referenced by the `ANDROID_HOME` -environment variable, and automatically detect the highest API level and the -latest version of build tools installed within that location. - -You can set the `ANDROID_HOME` variable to the location of the Android SDK. Find -the path to the installed SDK using Android Studio's [SDK -Manager](https://developer.android.com/studio/intro/update#sdk-manager). 
-Assuming the SDK is installed to default locations, you can use the following -commands to set the `ANDROID_HOME` variable: - -| OS | Command | -| ------------------------ | --------------------------------------------------- | -| Linux | `export ANDROID_HOME=$HOME/Android/Sdk/` | -| macOS | `export ANDROID_HOME=$HOME/Library/Android/sdk` | -| Windows (Command Prompt) | `set ANDROID_HOME=%LOCALAPPDATA%\Android\Sdk` | -| Windows (PowerShell) | `$env:ANDROID_HOME="$env:LOCALAPPDATA\Android\Sdk"` | - -The above commands set the variable only for the current shell session. To make -them permanent, run the following commands: - -| OS | Command | -| ------------------------ | --------------------------------------------------- | -| Linux | `echo "export ANDROID_HOME=$HOME/Android/Sdk/" >> ~/.bashrc` | -| macOS | `echo "export ANDROID_HOME=$HOME/Library/Android/Sdk/" >> ~/.bashrc` | -| Windows (Command Prompt) | `setx ANDROID_HOME "%LOCALAPPDATA%\Android\Sdk"` | -| Windows (PowerShell) | `[System.Environment]::SetEnvironmentVariable('ANDROID_HOME', "$env:LOCALAPPDATA\Android\Sdk", [System.EnvironmentVariableTarget]::User)` | - - -**Optional:** If you want to compile native code into your Android app, you -also need to download the [Android -NDK](https://developer.android.com/ndk/downloads/index.html) -and use `rules_android_ndk` by adding the following line to your `MODULE.bazel` file: - -```python -bazel_dep(name = "rules_android_ndk", version = "0.1.2") -``` - - -For more information, read [Using the Android Native Development Kit with -Bazel](/docs/android-ndk). - -It's not necessary to set the API levels to the same value for the SDK and NDK. -[This page](https://developer.android.com/ndk/guides/stable_apis.html) -contains a map from Android releases to NDK-supported API levels. - -### Create a BUILD file - -A [`BUILD` file](/concepts/build-files) describes the relationship -between a set of build outputs, like compiled Android resources from `aapt` or -class files from `javac`, and their dependencies. These dependencies may be -source files (Java, C++) in your workspace or other build outputs. `BUILD` files -are written in a language called **Starlark**. - -`BUILD` files are part of a concept in Bazel known as the *package hierarchy*. -The package hierarchy is a logical structure that overlays the directory -structure in your workspace. Each [package](/concepts/build-ref#packages) is a -directory (and its subdirectories) that contains a related set of source files -and a `BUILD` file. The package also includes any subdirectories, excluding -those that contain their own `BUILD` file. The *package name* is the path to the -`BUILD` file relative to the `MODULE.bazel` file. - -Note that Bazel's package hierarchy is conceptually different from the Java -package hierarchy of your Android App directory where the `BUILD` file is -located, although the directories may be organized identically. - -For the simple Android app in this tutorial, the source files in `src/main/` -comprise a single Bazel package. A more complex project may have many nested -packages. - -#### Add an android_library rule - -A `BUILD` file contains several different types of declarations for Bazel. The -most important type is the -[build rule](/concepts/build-files#types-of-build-rules), which tells -Bazel how to build an intermediate or final software output from a set of source -files or other dependencies. 
Bazel provides two build rules, -[`android_library`](/reference/be/android#android_library) and -[`android_binary`](/reference/be/android#android_binary), that you can use to -build an Android app. - -For this tutorial, you'll first use the -`android_library` rule to tell Bazel to build an [Android library -module](http://developer.android.com/tools/projects/index.html#LibraryProjects) -from the app source code and resource files. You'll then use the -`android_binary` rule to tell Bazel how to build the Android application package. - -Create a new `BUILD` file in the `src/main/java/com/example/bazel` directory, -and declare a new `android_library` target: - -`src/main/java/com/example/bazel/BUILD`: - -```python -package( - default_visibility = ["//src:__subpackages__"], -) - -android_library( - name = "greeter_activity", - srcs = [ - "Greeter.java", - "MainActivity.java", - ], - manifest = "AndroidManifest.xml", - resource_files = glob(["res/**"]), -) -``` - -The `android_library` build rule contains a set of attributes that specify the -information that Bazel needs to build a library module from the source files. -Note also that the name of the rule is `greeter_activity`. You'll reference the -rule using this name as a dependency in the `android_binary` rule. - -#### Add an android_binary rule - -The [`android_binary`](/reference/be/android#android_binary) rule builds -the Android application package (`.apk` file) for your app. - -Create a new `BUILD` file in the `src/main/` directory, -and declare a new `android_binary` target: - -`src/main/BUILD`: - -```python -android_binary( - name = "app", - manifest = "AndroidManifest.xml", - deps = ["//src/main/java/com/example/bazel:greeter_activity"], -) -``` - -Here, the `deps` attribute references the output of the `greeter_activity` rule -you added to the `BUILD` file above. This means that when Bazel builds the -output of this rule, it checks first to see if the output of the -`greeter_activity` library rule has been built and is up-to-date. If not, Bazel -builds it and then uses that output to build the application package file. - -Now, save and close the file. - -### Build the app - -Try building the app! Run the following command to build the -`android_binary` target: - -```posix-terminal -bazel build //src/main:app -``` - -The [`build`](/docs/user-manual#build) subcommand instructs Bazel to build the -target that follows. The target is specified as the name of a build rule inside -a `BUILD` file, along with the package path relative to your workspace -directory. For this example, the target is `app` and the package path is -`//src/main/`. - -Note that you can sometimes omit the package path or target name, depending on -your current working directory at the command line and the name of the target. -For more details about target labels and paths, see [Labels](/concepts/labels). - -Bazel will start to build the sample app. During the build process, its output -will appear similar to the following: - -```bash -INFO: Analysed target //src/main:app (0 packages loaded, 0 targets configured). -INFO: Found 1 target... -Target //src/main:app up-to-date: - bazel-bin/src/main/app_deploy.jar - bazel-bin/src/main/app_unsigned.apk - bazel-bin/src/main/app.apk -``` - -#### Locate the build outputs - -Bazel puts the outputs of both intermediate and final build operations in a set -of per-user, per-workspace output directories.
These directories are symlinked -from the following locations at the top-level of the project directory, where -the `MODULE.bazel` file is: - -* `bazel-bin` stores binary executables and other runnable build outputs -* `bazel-genfiles` stores intermediary source files that are generated by - Bazel rules -* `bazel-out` stores other types of build outputs - -Bazel stores the Android `.apk` file generated using the `android_binary` rule -in the `bazel-bin/src/main` directory, where the subdirectory name `src/main` is -derived from the name of the Bazel package. - -At a command prompt, list the contents of this directory and find the `app.apk` -file: - -| OS | Command | -| ------------------------ | ------------------------ | -| Linux, macOS | `ls bazel-bin/src/main` | -| Windows (Command Prompt) | `dir bazel-bin\src\main` | -| Windows (PowerShell) | `ls bazel-bin\src\main` | - - -### Run the app - -You can now deploy the app to a connected Android device or emulator from the -command line using the [`bazel -mobile-install`](/docs/user-manual#mobile-install) command. This command uses -the Android Debug Bridge (`adb`) to communicate with the device. You must set up -your device to use `adb` following the instructions in [Android Debug -Bridge](http://developer.android.com/tools/help/adb.html) before deployment. You -can also choose to install the app on the Android emulator included in Android -Studio. Make sure the emulator is running before executing the command below. - -Enter the following: - -```posix-terminal -bazel mobile-install //src/main:app -``` - -Next, find and launch the "Bazel Tutorial App": - -![Bazel tutorial app](/docs/images/android_tutorial_before.png "Bazel tutorial app") - -**Figure 2.** Bazel tutorial app. - -**Congratulations! You have just installed your first Bazel-built Android app.** - -Note that the `mobile-install` subcommand also supports the -[`--incremental`](/docs/user-manual#mobile-install) flag that can be used to -deploy only those parts of the app that have changed since the last deployment. - -It also supports the `--start_app` flag to start the app immediately upon -installing it. - -## Further reading - -For more details, see these pages: - -* Open issues on [GitHub](https://github.com/bazelbuild/bazel/issues) -* More information on [mobile-install](/docs/mobile-install) -* Integrate external dependencies like AppCompat, Guava and JUnit from Maven - repositories using [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external) -* Run Robolectric tests with the [robolectric-bazel](https://github.com/robolectric/robolectric-bazel) - integration. -* Testing your app with [Android instrumentation tests](/docs/android-instrumentation-test) -* Integrating C and C++ code into your Android app with the [NDK](/docs/android-ndk) -* See more Bazel example projects of: - * [a Kotlin app](https://github.com/bazelbuild/rules_jvm_external/tree/master/examples/android_kotlin_app) - * [Robolectric testing](https://github.com/bazelbuild/rules_jvm_external/tree/master/examples/android_local_test) - * [Espresso testing](https://github.com/bazelbuild/rules_jvm_external/tree/master/examples/android_instrumentation_test) - -Happy building! diff --git a/8.1.1/start/cpp.mdx b/8.1.1/start/cpp.mdx deleted file mode 100644 index adb7c71..0000000 --- a/8.1.1/start/cpp.mdx +++ /dev/null @@ -1,411 +0,0 @@ ---- -title: 'Bazel Tutorial: Build a C++ Project' ---- - - - -## Introduction - -New to Bazel? You're in the right place. 
Follow this First Build tutorial for a -simplified introduction to using Bazel. This tutorial defines key terms as they -are used in Bazel's context and walks you through the basics of the Bazel -workflow. Starting with the tools you need, you will build and run three -projects with increasing complexity and learn how and why they get more complex. - -While Bazel is a [build system](https://bazel.build/basics/build-systems) that -supports multi-language builds, this tutorial uses a C++ project as an example -and provides the general guidelines and flow that apply to most languages. - -Estimated completion time: 30 minutes. - -### Prerequisites - -Start by [installing Bazel](https://bazel.build/install), if you haven't -already. This tutorial uses Git for source control, so for best results [install -Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) as well. - -Next, retrieve the sample project from Bazel's GitHub repository by running the -following in your command-line tool of choice: - -```posix-terminal -git clone https://github.com/bazelbuild/examples -``` - -The sample project for this tutorial is in the `examples/cpp-tutorial` -directory. - -Take a look at how it's structured: - -```none -examples -└── cpp-tutorial - ├──stage1 - │ ├── main - │ │ ├── BUILD - │ │ └── hello-world.cc - │ └── MODULE.bazel - ├──stage2 - │ ├── main - │ │ ├── BUILD - │ │ ├── hello-world.cc - │ │ ├── hello-greet.cc - │ │ └── hello-greet.h - │ └── MODULE.bazel - └──stage3 - ├── main - │ ├── BUILD - │ ├── hello-world.cc - │ ├── hello-greet.cc - │ └── hello-greet.h - ├── lib - │ ├── BUILD - │ ├── hello-time.cc - │ └── hello-time.h - └── MODULE.bazel -``` - -There are three sets of files, each set representing a stage in this tutorial. -In the first stage, you will build a single [target](https://bazel.build/reference/glossary#target) residing in a single [package](https://bazel.build/reference/glossary#package). In the second stage, you will -build both a binary and a library from a single package. In the third and final -stage, you will build a project with multiple packages and build it with -multiple targets. - -### Summary: Introduction - -By installing Bazel (and Git) and cloning the repository for this tutorial, you -have laid the foundation for your first build with Bazel. Continue to the next -section to define some terms and set up your -[workspace](https://bazel.build/reference/glossary#workspace). - -## Getting started - -Before you can build a project, you need to set up its workspace. A workspace -is a directory that holds your project's source files and Bazel's build outputs. -It also contains these significant files: - -* The `MODULE.bazel` file, which identifies the directory and its contents as - a Bazel workspace and lives at the root of the project's directory - structure. It's also where you specify your external dependencies. -* One or more [`BUILD` - files](https://bazel.build/reference/glossary#build-file), which tell Bazel - how to build different parts of the project. A directory within the - workspace that contains a `BUILD` file is a - [package](https://bazel.build/reference/glossary#package). (More on packages - later in this tutorial.) - -In future projects, to designate a directory as a Bazel workspace, create an -empty file named `MODULE.bazel` in that directory. For the purposes of this -tutorial, a `MODULE.bazel` file is already present in each stage.
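For reference, a minimal `MODULE.bazel` can be as small as a single `module` declaration; the name and version below are illustrative, and an entirely empty file also works for this tutorial:

```python
# A minimal MODULE.bazel; an empty file is also a valid workspace marker.
module(
    name = "cpp_tutorial",
    version = "0.0.1",
)
```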
- -### Understand the BUILD file - -A `BUILD` file contains several different types of instructions for Bazel. Each -`BUILD` file requires at least one -[rule](https://bazel.build/reference/glossary#rule) as a set of instructions, -which tells Bazel how to build the outputs you want, such as executable binaries -or libraries. Each instance of a build rule in the `BUILD` file is called a -[target](https://bazel.build/reference/glossary#target) and points to a specific -set of source files and -[dependencies](https://bazel.build/reference/glossary#dependency). A target can -also point to other targets. - -Take a look at the `BUILD` file in the `cpp-tutorial/stage1/main` directory: - -```bazel -cc_binary( - name = "hello-world", - srcs = ["hello-world.cc"], -) -``` - -In our example, the `hello-world` target instantiates Bazel's built-in -[`cc_binary` rule](https://bazel.build/reference/be/c-cpp#cc_binary). The rule -tells Bazel to build a self-contained executable binary from the -`hello-world.cc` source file with no dependencies. - -### Summary: getting started - -Now you are familiar with some key terms, and what they mean in the context of -this project and Bazel in general. In the next section, you will build and test -Stage 1 of the project. - -## Stage 1: single target, single package - -It's time to build the first part of the project. For a visual reference, the -structure of the Stage 1 section of the project is: - -```none -examples -└── cpp-tutorial - └──stage1 - ├── main - │ ├── BUILD - │ └── hello-world.cc - └── MODULE.bazel -``` - -Run the following to move to the `cpp-tutorial/stage1` directory: - -```posix-terminal -cd cpp-tutorial/stage1 -``` - -Next, run: - -```posix-terminal -bazel build //main:hello-world -``` - -In the target label, the `//main:` part is the location of the `BUILD` file -relative to the root of the workspace, and `hello-world` is the target name in -the `BUILD` file. - -Bazel produces something that looks like this: - -```none -INFO: Found 1 target... -Target //main:hello-world up-to-date: - bazel-bin/main/hello-world -INFO: Elapsed time: 2.267s, Critical Path: 0.25s -``` - -You just built your first Bazel target. Bazel places build outputs in the -`bazel-bin` directory at the root of the workspace. - -Now test your freshly built binary, which is: - -```posix-terminal -bazel-bin/main/hello-world -``` - -This results in a printed "`Hello world`" message. - -Here's the dependency graph of Stage 1: - -![Dependency graph for hello-world displays a single target with a single source -file.](/docs/images/cpp-tutorial-stage1.png "Dependency graph for hello-world -displays a single target with a single source file.") - -### Summary: stage 1 - -Now that you have completed your first build, you have a basic idea of how a -build is structured. In the next stage, you will add complexity by adding -another target. - -## Stage 2: multiple build targets - -While a single target is sufficient for small projects, you may want to split -larger projects into multiple targets and packages. This allows for fast -incremental builds – that is, Bazel only rebuilds what's changed – and speeds up -your builds by building multiple parts of a project at once. This stage of the -tutorial adds a target, and the next adds a package.
- -This is the directory you are working with for Stage 2: - -```none - ├──stage2 - │ ├── main - │ │ ├── BUILD - │ │ ├── hello-world.cc - │ │ ├── hello-greet.cc - │ │ └── hello-greet.h - │ └── MODULE.bazel -``` - -Take a look at the `BUILD` file in the `cpp-tutorial/stage2/main` directory: - -```bazel -cc_library( - name = "hello-greet", - srcs = ["hello-greet.cc"], - hdrs = ["hello-greet.h"], -) - -cc_binary( - name = "hello-world", - srcs = ["hello-world.cc"], - deps = [ - ":hello-greet", - ], -) -``` - -With this `BUILD` file, Bazel first builds the `hello-greet` library (using -Bazel's built-in [`cc_library` -rule](https://bazel.build/reference/be/c-cpp#cc_library)), then the -`hello-world` binary. The `deps` attribute in the `hello-world` target tells -Bazel that the `hello-greet` library is required to build the `hello-world` -binary. - -Before you can build this new version of the project, you need to change -directories, switching to the `cpp-tutorial/stage2` directory by running: - -```posix-terminal -cd ../stage2 -``` - -Now you can build the new binary using the following familiar command: - -```posix-terminal -bazel build //main:hello-world -``` - -Once again, Bazel produces something that looks like this: - -```none -INFO: Found 1 target... -Target //main:hello-world up-to-date: - bazel-bin/main/hello-world -INFO: Elapsed time: 2.399s, Critical Path: 0.30s -``` - -Now you can test your freshly built binary, which returns another "`Hello -world`": - -```posix-terminal -bazel-bin/main/hello-world -``` - -If you now modify `hello-greet.cc` and rebuild the project, Bazel only -recompiles that file. - -Looking at the dependency graph, you can see that `hello-world` depends on an -extra input named `hello-greet`: - -![Dependency graph for `hello-world` displays dependency changes after -modification to the file.](/docs/images/cpp-tutorial-stage2.png "Dependency -graph for `hello-world` displays dependency changes after modification to the -file.") - -### Summary: stage 2 - -You've now built the project with two targets. The `hello-world` target builds -one source file and depends on one other target (`//main:hello-greet`), which -builds two additional source files. In the next section, take it a step further -and add another package. - -## Stage 3: multiple packages - -This next stage adds another layer of complication and builds a project with -multiple packages. Take a look at the structure and contents of the -`cpp-tutorial/stage3` directory: - -```none -└──stage3 - ├── main - │ ├── BUILD - │ ├── hello-world.cc - │ ├── hello-greet.cc - │ └── hello-greet.h - ├── lib - │ ├── BUILD - │ ├── hello-time.cc - │ └── hello-time.h - └── MODULE.bazel -``` - -You can see that now there are two sub-directories, and each contains a `BUILD` -file. Therefore, to Bazel, the workspace now contains two packages: `lib` and -`main`. - -Take a look at the `lib/BUILD` file: - -```bazel -cc_library( - name = "hello-time", - srcs = ["hello-time.cc"], - hdrs = ["hello-time.h"], - visibility = ["//main:__pkg__"], -) -``` - -And at the `main/BUILD` file: - -```bazel -cc_library( - name = "hello-greet", - srcs = ["hello-greet.cc"], - hdrs = ["hello-greet.h"], -) - -cc_binary( - name = "hello-world", - srcs = ["hello-world.cc"], - deps = [ - ":hello-greet", - "//lib:hello-time", - ], -) -``` - -The `hello-world` target in the main package depends on the `hello-time` target -in the `lib` package (hence the target label `//lib:hello-time`) - Bazel knows -this through the `deps` attribute.
You can see this reflected in the dependency -graph: - -![Dependency graph for `hello-world` displays how the target in the main package -depends on the target in the `lib` -package.](/docs/images/cpp-tutorial-stage3.png "Dependency graph for -`hello-world` displays how the target in the main package depends on the target -in the `lib` package.") - -For the build to succeed, you make the `//lib:hello-time` target in `lib/BUILD` -explicitly visible to targets in `main/BUILD` using the visibility attribute. -This is because by default targets are only visible to other targets in the same -`BUILD` file. Bazel uses target visibility to prevent issues such as libraries -containing implementation details leaking into public APIs. - -Now build this final version of the project. Switch to the `cpp-tutorial/stage3` -directory by running: - -```posix-terminal -cd ../stage3 -``` - -Once again, run the following command: - -```posix-terminal -bazel build //main:hello-world -``` - -Bazel produces something that looks like this: - -```none -INFO: Found 1 target... -Target //main:hello-world up-to-date: - bazel-bin/main/hello-world -INFO: Elapsed time: 0.167s, Critical Path: 0.00s -``` - -Now test the last binary of this tutorial for a final `Hello world` message: - -```posix-terminal -bazel-bin/main/hello-world -``` - -### Summary: stage 3 - -You've now built the project as two packages with three targets and understand -the dependencies between them, which equips you to go forth and build future -projects with Bazel. In the next section, take a look at how to continue your -Bazel journey. - -## Next steps - -You've now completed your first basic build with Bazel, but this is just the -start. Here are some more resources to continue learning with Bazel: - -* To keep focusing on C++, read about common [C++ build use - cases](https://bazel.build/tutorials/cpp-use-cases). -* To get started with building other applications with Bazel, see the - tutorials for [Java](https://bazel.build/start/java), [Android - application](https://bazel.build/start/android-app), or [iOS - application](https://bazel.build/start/ios-app). -* To learn more about working with local and remote repositories, read about - [external dependencies](https://bazel.build/docs/external). -* To learn more about Bazel's other rules, see this [reference - guide](https://bazel.build/rules). - -Happy building! diff --git a/8.1.1/start/ios-app.mdx b/8.1.1/start/ios-app.mdx deleted file mode 100644 index 0b860ab..0000000 --- a/8.1.1/start/ios-app.mdx +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: 'Bazel Tutorial: Build an iOS App' ---- - - -This tutorial has been moved into the [bazelbuild/rules_apple](https://github.com/bazelbuild/rules_apple/blob/master/doc/tutorials/ios-app.md) repository. diff --git a/8.1.1/start/java.mdx b/8.1.1/start/java.mdx deleted file mode 100644 index b892917..0000000 --- a/8.1.1/start/java.mdx +++ /dev/null @@ -1,436 +0,0 @@ ---- -title: 'Bazel Tutorial: Build a Java Project' ---- - - - -This tutorial covers the basics of building Java applications with -Bazel. You will set up your workspace and build a simple Java project that -illustrates key Bazel concepts, such as targets and `BUILD` files. - -Estimated completion time: 30 minutes. 
- -## What you'll learn - -In this tutorial you learn how to: - -* Build a target -* Visualize the project's dependencies -* Split the project into multiple targets and packages -* Control target visibility across packages -* Reference targets through labels -* Deploy a target - -## Before you begin - -### Install Bazel - -To prepare for the tutorial, first [Install Bazel](/install) if -you don't have it installed already. - -### Install the JDK - -1. Install Java JDK (preferred version is 11, however versions between 8 and 15 are supported). - -2. Set the JAVA\_HOME environment variable to point to the JDK. - * On Linux/macOS: - - export JAVA_HOME="$(dirname $(dirname $(realpath $(which javac))))" - * On Windows: - 1. Open Control Panel. - 2. Go to "System and Security" > "System" > "Advanced System Settings" > "Advanced" tab > "Environment Variables..." . - 3. Under the "User variables" list (the one on the top), click "New...". - 4. In the "Variable name" field, enter `JAVA_HOME`. - 5. Click "Browse Directory...". - 6. Navigate to the JDK directory (for example `C:\Program Files\Java\jdk1.8.0_152`). - 7. Click "OK" on all dialog windows. - -### Get the sample project - -Retrieve the sample project from Bazel's GitHub repository: - -```posix-terminal -git clone https://github.com/bazelbuild/examples -``` - -The sample project for this tutorial is in the `examples/java-tutorial` -directory and is structured as follows: - -``` -java-tutorial -├── BUILD -├── src -│ └── main -│ └── java -│ └── com -│ └── example -│ ├── cmdline -│ │ ├── BUILD -│ │ └── Runner.java -│ ├── Greeting.java -│ └── ProjectRunner.java -└── MODULE.bazel -``` - -## Build with Bazel - -### Set up the workspace - -Before you can build a project, you need to set up its workspace. A workspace is -a directory that holds your project's source files and Bazel's build outputs. It -also contains files that Bazel recognizes as special: - -* The `MODULE.bazel` file, which identifies the directory and its contents as a - Bazel workspace and lives at the root of the project's directory structure, - -* One or more `BUILD` files, which tell Bazel how to build different parts of - the project. (A directory within the workspace that contains a `BUILD` file - is a *package*. You will learn about packages later in this tutorial.) - -To designate a directory as a Bazel workspace, create an empty file named -`MODULE.bazel` in that directory. - -When Bazel builds the project, all inputs and dependencies must be in the same -workspace. Files residing in different workspaces are independent of one -another unless linked, which is beyond the scope of this tutorial. - -### Understand the BUILD file - -A `BUILD` file contains several different types of instructions for Bazel. -The most important type is the *build rule*, which tells Bazel how to build the -desired outputs, such as executable binaries or libraries. Each instance -of a build rule in the `BUILD` file is called a *target* and points to a -specific set of source files and dependencies. A target can also point to other -targets. - -Take a look at the `java-tutorial/BUILD` file: - -```python -java_binary( - name = "ProjectRunner", - srcs = glob(["src/main/java/com/example/*.java"]), -) -``` - -In our example, the `ProjectRunner` target instantiates Bazel's built-in -[`java_binary` rule](/reference/be/java#java_binary). The rule tells Bazel to -build a `.jar` file and a wrapper shell script (both named after the target). 
- -The attributes in the target explicitly state its dependencies and options. -While the `name` attribute is mandatory, many are optional. For example, in the -`ProjectRunner` rule target, `name` is the name of the target, `srcs` specifies -the source files that Bazel uses to build the target, and `main_class` specifies -the class that contains the main method. (You may have noticed that our example -uses [glob](/reference/be/functions#glob) to pass a set of source files to Bazel -instead of listing them one by one.) - -### Build the project - -To build your sample project, navigate to the `java-tutorial` directory -and run: - -```posix-terminal -bazel build //:ProjectRunner -``` -In the target label, the `//` part is the location of the `BUILD` file -relative to the root of the workspace (in this case, the root itself), -and `ProjectRunner` is the target name in the `BUILD` file. (You will -learn about target labels in more detail at the end of this tutorial.) - -Bazel produces output similar to the following: - -```bash - INFO: Found 1 target... - Target //:ProjectRunner up-to-date: - bazel-bin/ProjectRunner.jar - bazel-bin/ProjectRunner - INFO: Elapsed time: 1.021s, Critical Path: 0.83s -``` - -Congratulations, you just built your first Bazel target! Bazel places build -outputs in the `bazel-bin` directory at the root of the workspace. Browse -through its contents to get an idea for Bazel's output structure. - -Now test your freshly built binary: - -```posix-terminal -bazel-bin/ProjectRunner -``` - -### Review the dependency graph - -Bazel requires build dependencies to be explicitly declared in BUILD files. -Bazel uses those statements to create the project's dependency graph, which -enables accurate incremental builds. - -To visualize the sample project's dependencies, you can generate a text -representation of the dependency graph by running this command at the -workspace root: - -```posix-terminal -bazel query --notool_deps --noimplicit_deps "deps(//:ProjectRunner)" --output graph -``` - -The above command tells Bazel to look for all dependencies for the target -`//:ProjectRunner` (excluding host and implicit dependencies) and format the -output as a graph. - -Then, paste the text into [GraphViz](http://www.webgraphviz.com/). - -As you can see, the project has a single target that builds two source files with -no additional dependencies: - -![Dependency graph of the target 'ProjectRunner'](/docs/images/tutorial_java_01.svg) - -After you've set up your workspace, built your project, and examined its -dependencies, you can add some complexity. - -## Refine your Bazel build - -While a single target is sufficient for small projects, you may want to split -larger projects into multiple targets and packages to allow for fast incremental -builds (that is, only rebuild what's changed) and to speed up your builds by -building multiple parts of a project at once. - -### Specify multiple build targets - -You can split the sample project build into two targets. Replace the contents of -the `java-tutorial/BUILD` file with the following: - -```python -java_binary( - name = "ProjectRunner", - srcs = ["src/main/java/com/example/ProjectRunner.java"], - main_class = "com.example.ProjectRunner", - deps = [":greeter"], -) - -java_library( - name = "greeter", - srcs = ["src/main/java/com/example/Greeting.java"], -) -``` - -With this configuration, Bazel first builds the `greeter` library, then the -`ProjectRunner` binary.
The `deps` attribute in `java_binary` tells Bazel that -the `greeter` library is required to build the `ProjectRunner` binary. - -To build this new version of the project, run the following command: - -```posix-terminal -bazel build //:ProjectRunner -``` - -Bazel produces output similar to the following: - -``` -INFO: Found 1 target... -Target //:ProjectRunner up-to-date: - bazel-bin/ProjectRunner.jar - bazel-bin/ProjectRunner -INFO: Elapsed time: 2.454s, Critical Path: 1.58s -``` - -Now test your freshly built binary: - -```posix-terminal -bazel-bin/ProjectRunner -``` - -If you now modify `ProjectRunner.java` and rebuild the project, Bazel only -recompiles that file. - -Looking at the dependency graph, you can see that `ProjectRunner` depends on the -same inputs as it did before, but the structure of the build is different: - -![Dependency graph of the target 'ProjectRunner' after adding a dependency]( -/docs/images/tutorial_java_02.svg) - -You've now built the project with two targets. The `ProjectRunner` target builds -one source file and depends on one other target (`:greeter`), which builds -one additional source file. - -### Use multiple packages - -Let’s now split the project into multiple packages. If you take a look at the -`src/main/java/com/example/cmdline` directory, you can see that it also contains -a `BUILD` file, plus some source files. Therefore, to Bazel, the workspace now -contains two packages, `//src/main/java/com/example/cmdline` and `//` (since -there is a `BUILD` file at the root of the workspace). - -Take a look at the `src/main/java/com/example/cmdline/BUILD` file: - -```python -java_binary( - name = "runner", - srcs = ["Runner.java"], - main_class = "com.example.cmdline.Runner", - deps = ["//:greeter"], -) -``` - -The `runner` target depends on the `greeter` target in the `//` package (hence -the target label `//:greeter`) - Bazel knows this through the `deps` attribute. -Take a look at the dependency graph: - -![Dependency graph of the target 'runner'](/docs/images/tutorial_java_03.svg) - -However, for the build to succeed, you must explicitly give the `runner` target -in `//src/main/java/com/example/cmdline/BUILD` visibility to targets in -`//BUILD` using the `visibility` attribute. This is because by default targets -are only visible to other targets in the same `BUILD` file. (Bazel uses target -visibility to prevent issues such as libraries containing implementation details -leaking into public APIs.) - -To do this, add the `visibility` attribute to the `greeter` target in -`java-tutorial/BUILD` as shown below: - -```python -java_library( - name = "greeter", - srcs = ["src/main/java/com/example/Greeting.java"], - visibility = ["//src/main/java/com/example/cmdline:__pkg__"], -) -``` - -Now you can build the new package by running the following command at the root -of the workspace: - -```posix-terminal -bazel build //src/main/java/com/example/cmdline:runner -``` - -Bazel produces output similar to the following: - -``` -INFO: Found 1 target... -Target //src/main/java/com/example/cmdline:runner up-to-date: - bazel-bin/src/main/java/com/example/cmdline/runner.jar - bazel-bin/src/main/java/com/example/cmdline/runner - INFO: Elapsed time: 1.576s, Critical Path: 0.81s -``` - -Now test your freshly built binary: - -```posix-terminal -./bazel-bin/src/main/java/com/example/cmdline/runner -``` - -You've now modified the project to build as two packages, each containing one -target, and understand the dependencies between them.
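As an aside, if every target in a package should share the same visibility, you can set a package-wide default instead of repeating the attribute on each target. A sketch using the same tutorial layout (this is an alternative to the per-target attribute shown above, not something the tutorial requires):

```python
# At the top of java-tutorial/BUILD: any target in this package that does
# not declare its own visibility becomes visible to the cmdline package.
package(default_visibility = ["//src/main/java/com/example/cmdline:__pkg__"])
```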
- - -## Use labels to reference targets - -In `BUILD` files and at the command line, Bazel uses target labels to reference -targets - for example, `//:ProjectRunner` or -`//src/main/java/com/example/cmdline:runner`. Their syntax is as follows: - -``` -//path/to/package:target-name -``` - -If the target is a rule target, then `path/to/package` is the path to the -directory containing the `BUILD` file, and `target-name` is what you named the -target in the `BUILD` file (the `name` attribute). If the target is a file -target, then `path/to/package` is the path to the root of the package, and -`target-name` is the name of the target file, including its full path. - -When referencing targets at the repository root, the package path is empty, -just use `//:target-name`. When referencing targets within the same `BUILD` -file, you can even skip the `//` workspace root identifier and just use -`:target-name`. - -For example, for targets in the `java-tutorial/BUILD` file, you did not have to -specify a package path, since the workspace root is itself a package (`//`), and -your two target labels were simply `//:ProjectRunner` and `//:greeter`. - -However, for targets in the `//src/main/java/com/example/cmdline/BUILD` file you -had to specify the full package path of `//src/main/java/com/example/cmdline` -and your target label was `//src/main/java/com/example/cmdline:runner`. - -## Package a Java target for deployment - -Let’s now package a Java target for deployment by building the binary with all -of its runtime dependencies. This lets you run the binary outside of your -development environment. - -As you remember, the [java_binary](/reference/be/java#java_binary) build rule -produces a `.jar` and a wrapper shell script. Take a look at the contents of -`runner.jar` using this command: - -```posix-terminal -jar tf bazel-bin/src/main/java/com/example/cmdline/runner.jar -``` - -The contents are: - -``` -META-INF/ -META-INF/MANIFEST.MF -com/ -com/example/ -com/example/cmdline/ -com/example/cmdline/Runner.class -``` -As you can see, `runner.jar` contains `Runner.class`, but not its dependency, -`Greeting.class`. The `runner` script that Bazel generates adds `greeter.jar` -to the classpath, so if you leave it like this, it will run locally, but it -won't run standalone on another machine. Fortunately, the `java_binary` rule -allows you to build a self-contained, deployable binary. To build it, append -`_deploy.jar` to the target name: - -```posix-terminal -bazel build //src/main/java/com/example/cmdline:runner_deploy.jar -``` - -Bazel produces output similar to the following: - -``` -INFO: Found 1 target... -Target //src/main/java/com/example/cmdline:runner_deploy.jar up-to-date: - bazel-bin/src/main/java/com/example/cmdline/runner_deploy.jar -INFO: Elapsed time: 1.700s, Critical Path: 0.23s -``` -You have just built `runner_deploy.jar`, which you can run standalone away from -your development environment since it contains the required runtime -dependencies. 
Take a look at the contents of this standalone JAR using the -same command as before: - -```posix-terminal -jar tf bazel-bin/src/main/java/com/example/cmdline/runner_deploy.jar -``` - -The contents include all of the necessary classes to run: - -``` -META-INF/ -META-INF/MANIFEST.MF -build-data.properties -com/ -com/example/ -com/example/cmdline/ -com/example/cmdline/Runner.class -com/example/Greeting.class -``` - -## Further reading - -For more details, see: - -* [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external) for - rules to manage transitive Maven dependencies. - -* [External Dependencies](/docs/external) to learn more about working with - local and remote repositories. - -* The [other rules](/rules) to learn more about Bazel. - -* The [C++ build tutorial](/start/cpp) to get started with building - C++ projects with Bazel. - -* The [Android application tutorial](/start/android-app) and - [iOS application tutorial](/start/ios-app) to get started with - building mobile applications for Android and iOS with Bazel. - -Happy building! diff --git a/8.1.1/tutorials/cpp-dependency.mdx b/8.1.1/tutorials/cpp-dependency.mdx deleted file mode 100644 index 194cc73..0000000 --- a/8.1.1/tutorials/cpp-dependency.mdx +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: 'Review the dependency graph' ---- - - - -A successful build has all of its dependencies explicitly stated in the `BUILD` -file. Bazel uses those statements to create the project's dependency graph, -which enables accurate incremental builds. - -To visualize the sample project's dependencies, you can generate a text -representation of the dependency graph by running this command at the -workspace root: - -``` -bazel query --notool_deps --noimplicit_deps "deps(//main:hello-world)" \ - --output graph -``` - -The above command tells Bazel to look for all dependencies for the target -`//main:hello-world` (excluding host and implicit dependencies) and format the -output as a graph. - -Then, paste the text into [GraphViz](http://www.webgraphviz.com/). - -On Ubuntu, you can view the graph locally by installing GraphViz and the xdot -Dot Viewer: - -``` -sudo apt update && sudo apt install graphviz xdot -``` - -Then you can generate and view the graph by piping the text output above -straight to xdot: - -``` -xdot <(bazel query --notool_deps --noimplicit_deps "deps(//main:hello-world)" \ - --output graph) -``` - -As you can see, the first stage of the sample project has a single target -that builds a single source file with no additional dependencies: - -![Dependency graph for 'hello-world'](/docs/images/cpp-tutorial-stage1.png "Dependency graph") - -**Figure 1.** Dependency graph for `hello-world` displays a single target with a single -source file. - -After you've set up your workspace, built your project, and examined its -dependencies, you can add some complexity. diff --git a/8.1.1/tutorials/cpp-labels.mdx b/8.1.1/tutorials/cpp-labels.mdx deleted file mode 100644 index 78d0dbc..0000000 --- a/8.1.1/tutorials/cpp-labels.mdx +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: 'Use labels to reference targets' ---- - - - -In `BUILD` files and at the command line, Bazel uses *labels* to reference -targets - for example, `//main:hello-world` or `//lib:hello-time`.
Their syntax -is: - -``` -//path/to/package:target-name -``` - -If the target is a rule target, then `path/to/package` is the path from the -workspace root (the directory containing the `MODULE.bazel` file) to the directory -containing the `BUILD` file, and `target-name` is what you named the target -in the `BUILD` file (the `name` attribute). If the target is a file target, -then `path/to/package` is the path to the root of the package, and -`target-name` is the name of the target file, including its full -path relative to the root of the package (the directory containing the -package's `BUILD` file). - -When referencing targets at the repository root, the package path is empty, -just use `//:target-name`. When referencing targets within the same `BUILD` -file, you can even skip the `//` workspace root identifier and just use -`:target-name`. diff --git a/8.1.1/tutorials/cpp-use-cases.mdx b/8.1.1/tutorials/cpp-use-cases.mdx deleted file mode 100644 index 6695cce..0000000 --- a/8.1.1/tutorials/cpp-use-cases.mdx +++ /dev/null @@ -1,180 +0,0 @@ ---- -title: 'Common C++ Build Use Cases' ---- - - - -Here you will find some of the most common use cases for building C++ projects -with Bazel. If you have not done so already, get started with building C++ -projects with Bazel by completing the tutorial -[Introduction to Bazel: Build a C++ Project](/start/cpp). - -For information on `cc_library` and `hdrs` header files, see -[cc_library](/reference/be/c-cpp#cc_library). - -## Including multiple files in a target - -You can include multiple files in a single target with -[glob](/reference/be/functions#glob). -For example: - -```python -cc_library( - name = "build-all-the-files", - srcs = glob(["*.cc"]), - hdrs = glob(["*.h"]), -) -``` - -With this target, Bazel will build all the `.cc` and `.h` files it finds in the -same directory as the `BUILD` file that contains this target (excluding -subdirectories). - -## Using transitive includes - -If a file includes a header, then any rule with that file as a source (that is, -having that file in the `srcs`, `hdrs`, or `textual_hdrs` attribute) should -depend on the included header's library rule. Conversely, only direct -dependencies need to be specified as dependencies. For example, suppose -`sandwich.h` includes `bread.h` and `bread.h` includes `flour.h`. `sandwich.h` -doesn't include `flour.h` (who wants flour in their sandwich?), so the `BUILD` -file would look like this: - -```python -cc_library( - name = "sandwich", - srcs = ["sandwich.cc"], - hdrs = ["sandwich.h"], - deps = [":bread"], -) - -cc_library( - name = "bread", - srcs = ["bread.cc"], - hdrs = ["bread.h"], - deps = [":flour"], -) - -cc_library( - name = "flour", - srcs = ["flour.cc"], - hdrs = ["flour.h"], -) -``` - -Here, the `sandwich` library depends on the `bread` library, which depends -on the `flour` library. - -## Adding include paths - -Sometimes you cannot (or do not want to) root include paths at the workspace -root. Existing libraries might already have an include directory that doesn't -match its path in your workspace. For example, suppose you have the following -directory structure: - -``` -└── my-project - ├── legacy - │   └── some_lib - │   ├── BUILD - │   ├── include - │   │   └── some_lib.h - │   └── some_lib.cc - └── MODULE.bazel -``` - -Bazel will expect `some_lib.h` to be included as -`legacy/some_lib/include/some_lib.h`, but suppose `some_lib.cc` includes -`"some_lib.h"`.
To make that include path valid, -`legacy/some_lib/BUILD` will need to specify that the `some_lib/include` -directory is an include directory: - -```python -cc_library( - name = "some_lib", - srcs = ["some_lib.cc"], - hdrs = ["include/some_lib.h"], - copts = ["-Ilegacy/some_lib/include"], -) -``` - -This is especially useful for external dependencies, as their header files -must otherwise be included with a `/` prefix. - -## Include external libraries - -Suppose you are using [Google Test](https://github.com/google/googletest). -You can add a dependency on it in the `MODULE.bazel` file to -download Google Test and make it available in your repository: - -```python -bazel_dep(name = "googletest", version = "1.15.2") -``` - -## Writing and running C++ tests - -For example, you could create a test `./test/hello-test.cc`, such as: - -```cpp -#include "gtest/gtest.h" -#include "main/hello-greet.h" - -TEST(HelloTest, GetGreet) { - EXPECT_EQ(get_greet("Bazel"), "Hello Bazel"); -} -``` - -Then create the `./test/BUILD` file for your tests: - -```python -cc_test( - name = "hello-test", - srcs = ["hello-test.cc"], - copts = [ - "-Iexternal/gtest/googletest/include", - "-Iexternal/gtest/googletest", - ], - deps = [ - "@googletest//:gtest_main", - "//main:hello-greet", - ], -) -``` - -To make `hello-greet` visible to `hello-test`, you must add -`"//test:__pkg__",` to the `visibility` attribute in `./main/BUILD`. - -Now you can use `bazel test` to run the test. - -``` -bazel test test:hello-test -``` - -This produces the following output: - -``` -INFO: Found 1 test target... -Target //test:hello-test up-to-date: - bazel-bin/test/hello-test -INFO: Elapsed time: 4.497s, Critical Path: 2.53s -//test:hello-test PASSED in 0.3s - -Executed 1 out of 1 tests: 1 test passes. -``` - - -## Adding dependencies on precompiled libraries - -If you want to use a library of which you only have a compiled version (for -example, headers and a `.so` file), wrap it in a `cc_library` rule: - -```python -cc_library( - name = "mylib", - srcs = ["mylib.so"], - hdrs = ["mylib.h"], -) -``` - -This way, other C++ targets in your workspace can depend on this rule. diff --git a/8.1.1/versions/index.mdx b/8.1.1/versions/index.mdx deleted file mode 100644 index 4290e57..0000000 --- a/8.1.1/versions/index.mdx +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: 'Documentation Versions' ---- - - - -The default documentation on this website represents the latest version at HEAD. -Each major and minor supported release will have a snapshot of the narrative and -reference documentation that follows the lifecycle of Bazel's version support. - -To see documentation for stable Bazel versions, use the "Versioned docs" -drop-down. - -To see documentation for older Bazel versions prior to Feb 2022, go to -[docs.bazel.build](https://docs.bazel.build/). diff --git a/8.2.1/about/faq.mdx b/8.2.1/about/faq.mdx deleted file mode 100644 index dd5be8a..0000000 --- a/8.2.1/about/faq.mdx +++ /dev/null @@ -1,209 +0,0 @@ ---- -title: 'FAQ' ---- - - - -If you have questions or need support, see [Getting Help](/help). - -## What is Bazel? - -Bazel is a tool that automates software builds and tests. Supported build tasks include running compilers and linkers to produce executable programs and libraries, and assembling deployable packages for Android, iOS and other target environments. Bazel is similar to other tools like Make, Ant, Gradle, Buck, Pants and Maven. - -## What is special about Bazel? - -Bazel was designed to fit the way software is developed at Google.
It has the following features: - -* Multi-language support: Bazel supports [many languages](/reference/be/overview), and can be extended to support arbitrary programming languages. -* High-level build language: Projects are described in the `BUILD` language, a concise text format that describes a project as sets of small interconnected libraries, binaries and tests. In contrast, with tools like Make, you have to describe individual files and compiler invocations. -* Multi-platform support: The same tool and the same `BUILD` files can be used to build software for different architectures, and even different platforms. At Google, we use Bazel to build everything from server applications running on systems in our data centers to client apps running on mobile phones. -* Reproducibility: In `BUILD` files, each library, test and binary must specify its direct dependencies completely. Bazel uses this dependency information to know what must be rebuilt when you make changes to a source file, and which tasks can run in parallel. This means that all builds are incremental and will always produce the same result. -* Scalable: Bazel can handle large builds; at Google, it is common for a server binary to have 100k source files, and builds where no files were changed take about 200ms. - -## Why doesn’t Google use...? - -* Make, Ninja: These tools give very exact control over what commands get invoked to build files, but it’s up to the user to write rules that are correct. - * Users interact with Bazel on a higher level. For example, Bazel has built-in rules for “Java test”, “C++ binary”, and notions such as “target platform” and “host platform”. These rules have been battle tested to be foolproof. -* Ant and Maven: Ant and Maven are primarily geared toward Java, while Bazel handles multiple languages. Bazel encourages subdividing codebases into smaller reusable units, and can rebuild only the ones that need rebuilding. This speeds up development when working with larger codebases. -* Gradle: Bazel configuration files are much more structured than Gradle’s, letting Bazel understand exactly what each action does. This allows for more parallelism and better reproducibility. -* Pants, Buck: Both tools were created and developed by ex-Googlers, at Twitter and Foursquare (Pants) and at Facebook (Buck). They have been modeled after Bazel, but their feature sets are different, so they aren’t viable alternatives for us. - -## Where did Bazel come from? - -Bazel is a flavor of the tool that Google uses to build its server software internally. It has expanded to build other software as well, like mobile apps (iOS, Android) that connect to our servers. - -## Did you rewrite your internal tool as open-source? Is it a fork? - -Bazel shares most of its code with the internal tool and its rules are used for millions of builds every day. - -## Why did Google build Bazel? - -A long time ago, Google built its software using large, generated Makefiles. These led to slow and unreliable builds, which began to interfere with our developers’ productivity and the company’s agility. Bazel was a way to solve these problems. - -## Does Bazel require a build cluster? - -Bazel runs build operations locally by default. However, Bazel can also connect to a build cluster for even faster builds and tests. See our documentation on [remote execution and caching](/remote/rbe) and [remote caching](/remote/caching) for further details. - -## How does the Google development process work?
- -For our server code base, we use the following development workflow: - -* All our server code is in a single, gigantic version control system. -* Everybody builds their software with Bazel. -* Different teams own different parts of the source tree, and make their components available as `BUILD` targets. -* Branching is primarily used for managing releases, so everybody develops their software at the head revision. - -Bazel is a cornerstone of this philosophy: since Bazel requires all dependencies to be fully specified, we can predict which programs and tests are affected by a change, and vet them before submission. - -More background on the development process at Google can be found on the [eng tools blog](http://google-engtools.blogspot.com/). - -## Why did you open up Bazel? - -Building software should be fun and easy. Slow and unpredictable builds take the fun out of programming. - -## Why would I want to use Bazel? - -* Bazel may give you faster build times because it can recompile only the files that need to be recompiled. Similarly, it can skip re-running tests that it knows haven’t changed. -* Bazel produces deterministic results. This eliminates skew between incremental and clean builds, laptop and CI system, etc. -* Bazel can build different client and server apps with the same tool from the same workspace. For example, you can change a client/server protocol in a single commit, and test that the updated mobile app works with the updated server, building both with the same tool, reaping all the aforementioned benefits of Bazel. - -## Can I see examples? - -Yes; see a [simple example](https://github.com/bazelbuild/bazel/blob/master/examples/cpp/BUILD) -or read the [Bazel source code](https://github.com/bazelbuild/bazel/blob/master/src/BUILD) for a more complex example. - - -## What is Bazel best at? - -Bazel shines at building and testing projects with the following properties: - -* Projects with a large codebase -* Projects written in (multiple) compiled languages -* Projects that deploy on multiple platforms -* Projects that have extensive tests - -## Where can I run Bazel? - -Bazel runs on Linux, macOS (OS X), and Windows. - -Porting to other UNIX platforms should be relatively easy, as long as a JDK is available for the platform. - -## What should I not use Bazel for? - -* Bazel tries to be smart about caching. This means that it is not good for running build operations whose outputs should not be cached. For example, the following steps should not be run from Bazel: - * A compilation step that fetches data from the internet. - * A test step that connects to the QA instance of your site. - * A deployment step that changes your site’s cloud configuration. -* If your build consists of a few long, sequential steps, Bazel may not be able to help much. You’ll get more speed by breaking long steps into smaller, discrete targets that Bazel can run in parallel. - -## How stable is Bazel’s feature set? - -The core features (C++, Java, and shell rules) have extensive use inside Google, so they are thoroughly tested and have very little churn. Similarly, we test new versions of Bazel across hundreds of thousands of targets every day to find regressions, and we release new versions multiple times every month. - -In short, except for features marked as experimental, Bazel should Just Work. Changes to non-experimental rules will be backward compatible. A more detailed list of feature support statuses can be found in our [support document](/contribute/support). 
- -## How stable is Bazel as a binary? - -Inside Google, we make sure that Bazel crashes are very rare. This should also hold for our open source codebase. - -## How can I start using Bazel? - -See [Getting Started](/start/). - -## Doesn’t Docker solve the reproducibility problems? - -With Docker you can easily create sandboxes with fixed OS releases, for example, Ubuntu 12.04, Fedora 21. This solves the problem of reproducibility for the system environment – that is, “which version of /usr/bin/c++ do I need?” - -Docker does not address reproducibility with regard to changes in the source code. Running Make with an imperfectly written Makefile inside a Docker container can still yield unpredictable results. - -Inside Google, we check tools into source control for reproducibility. In this way, we can vet changes to tools (“upgrade GCC to 4.6.1”) with the same mechanism as changes to base libraries (“fix bounds check in OpenSSL”). - -## Can I build binaries for deployment on Docker? - -With Bazel, you can build standalone, statically linked binaries in C/C++, and self-contained jar files for Java. These run with few dependencies on normal UNIX systems, and as such should be simple to install inside a Docker container. - -Bazel has conventions for structuring more complex programs, for example, a Java program that consumes a set of data files, or runs another program as a subprocess. It is possible to package up such environments as standalone archives, so they can be deployed on different systems, including Docker images. - -## Can I build Docker images with Bazel? - -Yes, you can use our [Docker rules](https://github.com/bazelbuild/rules_docker) to build reproducible Docker images. - -## Will Bazel make my builds reproducible automatically? - -For Java and C++ binaries, yes, assuming you do not change the toolchain. If you have build steps that involve custom recipes (for example, executing binaries through a shell script inside a rule), you will need to take some extra care: - -* Do not use dependencies that were not declared. Sandboxed execution (`--spawn_strategy=sandboxed`, only on Linux) can help find undeclared dependencies. -* Avoid storing timestamps and user-IDs in generated files. ZIP files and other archives are especially prone to this. -* Avoid connecting to the network. Sandboxed execution can help here too. -* Avoid processes that use random numbers; in particular, dictionary traversal is randomized in many programming languages. - -## Do you have binary releases? - -Yes, you can find the latest [release binaries](https://github.com/bazelbuild/bazel/releases/latest) and review our [release policy](/release/). - -## I use Eclipse/IntelliJ/Xcode. How does Bazel interoperate with IDEs? - -For IntelliJ, check out the [IntelliJ with Bazel plugin](https://ij.bazel.build/). - -For Xcode, check out [Tulsi](http://tulsi.bazel.build/). - -For Eclipse, check out [E4B plugin](https://github.com/bazelbuild/e4b). - -For other IDEs, check out the [blog post](https://blog.bazel.build/2016/06/10/ide-support.html) on how these plugins work. - -## I use Jenkins/CircleCI/TravisCI. How does Bazel interoperate with CI systems? - -Bazel returns a non-zero exit code if the build or test invocation fails, and this should be enough for basic CI integration. Since Bazel does not need clean builds for correctness, the CI system should not be configured to clean before starting a build/test run. - -Further details on exit codes are in the [User Manual](/docs/user-manual).
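For example, a minimal CI step could be as simple as the following sketch (the `//...` target pattern is illustrative; any build or test failure surfaces as a non-zero exit code that fails the job):

```
# Build and test everything; no `bazel clean` is needed beforehand,
# since Bazel's incremental state is safe to reuse across runs.
bazel test //...
```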
- -## What future features can we expect in Bazel? - -See our [Roadmaps](/about/roadmap). - -## Can I use Bazel for my INSERT LANGUAGE HERE project? - -Bazel is extensible. Anyone can add support for new languages. Many languages are supported: see the [build encyclopedia](/reference/be/overview) for a list of recommendations and [awesomebazel.com](https://awesomebazel.com/) for a more comprehensive list. - -If you would like to develop extensions or learn how they work, see the documentation for [extending Bazel](/extending/concepts). - -## Can I contribute to the Bazel code base? - -See our [contribution guidelines](/contribute/). - -## Why isn’t all development done in the open? - -We still have to refactor the interfaces between the public code in Bazel and our internal extensions frequently. This makes it hard to do much development in the open. - -## Are you done open sourcing Bazel? - -Open sourcing Bazel is a work-in-progress. In particular, we’re still working on open sourcing: - -* Many of our unit and integration tests (which should make contributing patches easier). -* Full IDE integration. - -Beyond code, we’d like to eventually have all code reviews, bug tracking, and design decisions happen publicly, with the Bazel community involved. We are not there yet, so some changes will simply appear in the Bazel repository without clear explanation. Despite this lack of transparency, we want to support external developers and collaborate. Thus, we are opening up the code, even though some of the development is still happening internal to Google. Please let us know if anything seems unclear or unjustified as we transition to an open model. - -## Are there parts of Bazel that will never be open sourced? - -Yes, some of the code base either integrates with Google-specific technology or we have been looking for an excuse to get rid of (or is some combination of the two). These parts of the code base are not available on GitHub and probably never will be. - -## How do I contact the team? - -We are reachable at bazel-discuss@googlegroups.com. - -## Where do I report bugs? - -Open an issue [on GitHub](https://github.com/bazelbuild/bazel/issues). - -## What’s up with the word “Blaze” in the codebase? - -This is an internal name for the tool. Please refer to Blaze as Bazel. - -## Why do other Google projects (Android, Chrome) use other build tools? - -Until the first (Alpha) release, Bazel was not available externally, so open source projects such as Chromium and Android could not use it. In addition, the original lack of Windows support was a problem for building Windows applications, such as Chrome. Since the project has matured and become more stable, the [Android Open Source Project](https://source.android.com/) is in the process of migrating to Bazel. - -## How do you pronounce “Bazel”? - -The same way as “basil” (the herb) in US English: “BAY-zel”. It rhymes with “hazel”. IPA: /ˈbeɪzˌəl/ diff --git a/8.2.1/about/intro.mdx b/8.2.1/about/intro.mdx deleted file mode 100644 index a531ac2..0000000 --- a/8.2.1/about/intro.mdx +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: 'Intro to Bazel' ---- - - - -Bazel is an open-source build and test tool similar to Make, Maven, and Gradle. -It uses a human-readable, high-level build language. Bazel supports projects in -multiple languages and builds outputs for multiple platforms. Bazel supports -large codebases across multiple repositories, and large numbers of users. 
- -## Benefits - -Bazel offers the following advantages: - -* **High-level build language.** Bazel uses an abstract, human-readable - language to describe the build properties of your project at a high - semantic level. Unlike other tools, Bazel operates on the *concepts* - of libraries, binaries, scripts, and data sets, shielding you from the - complexity of writing individual calls to tools such as compilers and - linkers. - -* **Bazel is fast and reliable.** Bazel caches all previously done work and - tracks changes to both file content and build commands. This way, Bazel - knows when something needs to be rebuilt, and rebuilds only that. To further - speed up your builds, you can set up your project to build in a highly - parallel and incremental fashion. - -* **Bazel is multi-platform.** Bazel runs on Linux, macOS, and Windows. Bazel - can build binaries and deployable packages for multiple platforms, including - desktop, server, and mobile, from the same project. - -* **Bazel scales.** Bazel maintains agility while handling builds with 100k+ - source files. It works with multiple repositories and user bases in the tens - of thousands. - -* **Bazel is extensible.** Many [languages](/rules) are - supported, and you can extend Bazel to support any other language or - framework. - -## Using Bazel - -To build or test a project with Bazel, you typically do the following: - -1. **Set up Bazel.** Download and [install Bazel](/install). - -2. **Set up a project [workspace](/concepts/build-ref#workspaces)**, which is a - directory where Bazel looks for build inputs and `BUILD` files, and where it - stores build outputs. - -3. **Write a `BUILD` file**, which tells Bazel what to build and how to - build it. - - You write your `BUILD` file by declaring build targets using - [Starlark](/rules/language), a domain-specific language. (See example - [here](https://github.com/bazelbuild/bazel/blob/master/examples/cpp/BUILD).) - - A build target specifies a set of input artifacts that Bazel will build plus - their dependencies, the build rule Bazel will use to build it, and options - that configure the build rule. - - A build rule specifies the build tools Bazel will use, such as compilers and - linkers, and their configurations. Bazel ships with a number of build rules - covering the most common artifact types in the supported languages on - supported platforms. - -4. **Run Bazel** from the [command line](/reference/command-line-reference). Bazel - places your outputs within the workspace. - -In addition to building, you can also use Bazel to run -[tests](/reference/test-encyclopedia) and [query](/query/guide) the build -to trace dependencies in your code. - -## Bazel build process - -When running a build or a test, Bazel does the following: - -1. **Loads** the `BUILD` files relevant to the target. - -2. **Analyzes** the inputs and their - [dependencies](/concepts/dependencies), applies the specified build - rules, and produces an [action](/extending/concepts#evaluation-model) - graph. - -3. **Executes** the build actions on the inputs until the final build outputs - are produced. - -Since all previous build work is cached, Bazel can identify and reuse cached -artifacts and only rebuild or retest what's changed. To further enforce -correctness, you can set up Bazel to run builds and tests -[hermetically](/basics/hermeticity) through sandboxing, minimizing skew -and maximizing [reproducibility](/run/build#correct-incremental-rebuilds).
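For instance, you can opt an invocation into sandboxed execution explicitly. A minimal sketch (the sandboxed strategy is only available on some platforms, such as Linux, and the target pattern is illustrative):

```
# Run each action in its own sandbox, which also helps surface
# undeclared dependencies.
bazel test --spawn_strategy=sandboxed //...
```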
- -### Action graph - -The action graph represents the build artifacts, the relationships between them, -and the build actions that Bazel will perform. Thanks to this graph, Bazel can -[track](/run/build#build-consistency) changes to -file content as well as changes to actions, such as build or test commands, and -know what build work has previously been done. The graph also enables you to -easily [trace dependencies](/query/guide) in your code. - -## Getting started tutorials - -To get started with Bazel, see [Getting Started](/start/) or jump -directly to the Bazel tutorials: - -* [Tutorial: Build a C++ Project](/start/cpp) -* [Tutorial: Build a Java Project](/start/java) -* [Tutorial: Build an Android Application](/start/android-app) -* [Tutorial: Build an iOS Application](/start/ios-app) diff --git a/8.2.1/about/roadmap.mdx b/8.2.1/about/roadmap.mdx deleted file mode 100644 index 2e18b78..0000000 --- a/8.2.1/about/roadmap.mdx +++ /dev/null @@ -1,147 +0,0 @@ ---- -title: 'Bazel roadmap' ---- - - - -## Overview - -As the Bazel project continues to evolve in response to your needs, we want to -share our 2024 update. - -This roadmap describes current initiatives and predictions for the future of -Bazel development, giving you visibility into current priorities and ongoing -projects. - -## Bazel 8.0 Release - -We plan to bring Bazel 8.0 [long term support -(LTS)](https://bazel.build/release/versioning) to you in late 2024. -The following features are planned to be implemented. - -### Bzlmod: external dependency management system - -[Bzlmod](https://bazel.build/docs/bzlmod) automatically resolves transitive -dependencies, allowing projects to scale while staying fast and -resource-efficient. - -With Bazel 8, we will disable WORKSPACE support by default (it will still be -possible to enable it using `--enable_workspace`); with Bazel 9 WORKSPACE -support will be removed. Starting with Bazel 7.1, you can set -`--noenable_workspace` to opt into the new behavior (see the sketch after the -list below). - -Bazel 8.0 will contain a number of enhancements to -[Bazel's external dependency management](https://docs.google.com/document/d/1moQfNcEIttsk6vYanNKIy3ZuK53hQUFq1b1r0rmsYVg/edit#heading=h.lgyp7ubwxmjc) -functionality, including: - -* The new flag `--enable_workspace` can be set to `false` to completely - disable WORKSPACE functionality. -* New directory watching API (see - [#21435](https://github.com/bazelbuild/bazel/pull/21435), shipped in Bazel - 7.1). -* Improved scheme for generating canonical repository names for better - cacheability of actions across dependency version updates - ([#21316](https://github.com/bazelbuild/bazel/pull/21316), shipped in Bazel - 7.1). -* An improved shared repository cache (see - [#12227](https://github.com/bazelbuild/bazel/issues/12227)). -* Vendor and offline mode support — allows users to run builds with - pre-downloaded dependencies (see - [#19563](https://github.com/bazelbuild/bazel/issues/19563)). -* Reduced merge conflicts in lock files - ([#20369](https://github.com/bazelbuild/bazel/issues/20369)). -* Segmented MODULE.bazel - ([#17880](https://github.com/bazelbuild/bazel/issues/17880)). -* Allow overriding module extension generated repository - ([#19301](https://github.com/bazelbuild/bazel/issues/19301)). -* Improved documentation (e.g. - [#18030](https://github.com/bazelbuild/bazel/issues/18030), - [#15821](https://github.com/bazelbuild/bazel/issues/15821)) and a migration - guide and migration tooling.
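As a minimal sketch of opting into the Bazel 8 default early (assuming Bazel 7.1 or newer, per the note above), a project can pin the flag in its `.bazelrc`:

```
# Disable legacy WORKSPACE resolution; resolve external deps via Bzlmod only.
common --noenable_workspace
```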
- -### Remote execution improvements - -* Add support for asynchronous execution, speeding up remote execution by - increased parallelism with flag `--jobs`. -* Make it easier to debug cache misses by a new compact execution log, - reducing its size by 100x and its runtime overhead significantly (see - [#18643](https://github.com/bazelbuild/bazel/issues/18643)). -* Implement garbage collection for the disk cache (see - [#5139](https://github.com/bazelbuild/bazel/issues/5139)). -* Implement remote output service to allow lazy downloading of arbitrary build - outputs (see - [#20933](https://github.com/bazelbuild/bazel/discussions/20933)). - -### Migration of Android, C++, Java, Python, and Proto rules - -Complete migration of Android, C++, Java, and Python rulesets to dedicated -repositories and decoupling them from the Bazel releases. This effort allows -Bazel users and rule authors to - -* Update rules independently of Bazel. -* Update and customize rules as needed. - -The new location of the rulesets is going to be `bazelbuild/rules_android`, -`rules_cc`, `rules_java`, `rules_python` and `google/protobuf`. `rules_proto` is -going to be deprecated. - -Bazel 8 will provide a temporary migration flag that will automatically use the -rulesets that were previously part of the binary from their repositories. All -the users of those rulesets are expected to eventually depend on their -repositories and load them similarly to other rulesets that were never part of -Bazel. - -Bazel 8 will also improve on the existing extending rules and subrule APIs and -mark them as non-experimental. - -### Starlark improvements - -* Symbolic Macros are a new way of writing macros that is friendlier to - `BUILD` users, macro authors, and tooling. Compared to legacy macros, which - Bazel has only limited insight into, symbolic macros help users avoid common - pitfalls and enforce best practices. -* Package finalizers are a proposed feature for adding first-class support for - custom package validation logic. They are intended to help us deprecate - `native.existing_rules()`. - -### Configurability - -* Output path mapping continues to stabilize: promising better remote cache - performance and build speed for rule designers who use transitions. -* Automatically set build flags suitable for a given `--platforms`. -* Define project-supported flag combinations and automatically build targets - with default flags without having to set bazelrcs. -* Don't redo build analysis every time build flags change. - -### Project Skyfocus - minimize retained data structures - -Bazel holds a lot of state in RAM for fast incremental builds. However, -developers often change a small subset of the source files (e.g. almost never -one of the external dependencies). With Skyfocus, Bazel will provide an -experimental way to drop unnecessary incremental state and reduce Bazel's memory -footprint, while still providing the same fast incremental build experience. - -The initial scope aims to improve the retained heap metric only. Peak heap -reduction is a possibility, but not included in the initial scope. - -### Misc - -* Mobile install v3, a simpler and better maintained approach to incrementally - deploy Android applications. -* Garbage collection for repository caches and Bazel's `install_base`. -* Reduced sandboxing overhead. - -### Bazel-JetBrains* IntelliJ IDEA support - -Incremental IntelliJ plugin updates to support the latest JetBrains plugin -release. - -*This roadmap snapshots targets, and should not be taken as guarantees. 
-Priorities are subject to change in response to developer and customer -feedback, or new market opportunities.* - -*To be notified of new features — including updates to this roadmap — join the -[Google Group](https://groups.google.com/g/bazel-discuss) community.* - -*Copyright © 2022 JetBrains s.r.o. JetBrains and IntelliJ are registered trademarks of JetBrains s.r.o diff --git a/8.2.1/about/vision.mdx b/8.2.1/about/vision.mdx deleted file mode 100644 index da0ed02..0000000 --- a/8.2.1/about/vision.mdx +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: 'Bazel Vision' ---- - - - -Any software developer can efficiently build, test, and package -any project, of any size or complexity, with tooling that's easy to adopt and -extend. - -* **Engineers can take build fundamentals for granted.** Software developers - focus on the creative process of authoring code because the mechanical - process of build and test is solved. When customizing the build system to - support new languages or unique organizational needs, users focus on the - aspects of extensibility that are unique to their use case, without having - to reinvent the basic plumbing. - -* **Engineers can easily contribute to any project.** A developer who wants to - start working on a new project can simply clone the project and run the - build. There's no need for local configuration - it just works. With - cross-platform remote execution, they can work on any machine anywhere and - fully test their changes against all platforms the project targets. - Engineers can quickly configure the build for a new project or incrementally - migrate an existing build. - -* **Projects can scale to any size codebase, any size team.** Fast, - incremental testing allows teams to fully validate every change before it is - committed. This remains true even as repos grow, projects span multiple - repos, and multiple languages are introduced. Infrastructure does not force - developers to trade test coverage for build speed. - -**We believe Bazel has the potential to fulfill this vision.** - -Bazel was built from the ground up to enable builds that are reproducible (a -given set of inputs will always produce the same outputs) and portable (a build -can be run on any machine without affecting the output). - -These characteristics support safe incrementality (rebuilding only changed -inputs doesn't introduce the risk of corruption) and distributability (build -actions are isolated and can be offloaded). By minimizing the work needed to do -a correct build and parallelizing that work across multiple cores and remote -systems, Bazel can make any build fast. - -Bazel's abstraction layer — instructions specific to languages, platforms, and -toolchains implemented in a simple extensibility language — allows it to be -easily applied to any context. - -## Bazel core competencies - -1. Bazel supports **multi-language, multi-platform** builds and tests. You can - run a single command to build and test your entire source tree, no matter - which combination of languages and platforms you target. -1. Bazel builds are **fast and correct**. Every build and test run is - incremental, on your developers' machines and on CI. -1. Bazel provides a **uniform, extensible language** to define builds for any - language or platform. -1. Bazel allows your builds **to scale** by connecting to remote execution and - caching services. -1. Bazel works across **all major development platforms** (Linux, MacOS, and - Windows). -1. 
We accept that adopting Bazel requires effort, but **gradual adoption** is - possible. Bazel interfaces with de-facto standard tools for a given - language/platform. - -## Serving language communities - -Software engineering evolves in the context of language communities — typically, -self-organizing groups of people who use common tools and practices. - -To be of use to members of a language community, high-quality Bazel rules must be -available that integrate with the workflows and conventions of that community. - -Bazel is committed to being extensible and open, and to supporting good rulesets for -any language. - -### Requirements of a good ruleset - -1. The rules need to support efficient **building and testing** for the - language, including code coverage. -1. The rules need to **interface with a widely-used "package manager"** for the - language (such as Maven for Java), and support incremental migration paths - from other widely-used build systems. -1. The rules need to be **extensible and interoperable**, following - ["Bazel sandwich"](https://github.com/bazelbuild/bazel-website/blob/master/designs/_posts/2016-08-04-extensibility-for-native-rules.md) - principles. -1. The rules need to be **remote-execution ready**. In practice, this means - **configurable using the [toolchains](/extending/toolchains) mechanism**. -1. The rules (and Bazel) need to interface with a **widely-used IDE** for the - language, if there is one. -1. The rules need to have **thorough, usable documentation**, with introductory - material for new users and comprehensive docs for expert users. - -Each of these items is essential and only together do they deliver on Bazel's -competencies for their particular ecosystem. - -They are also, by and large, sufficient - once all are fulfilled, Bazel fully -delivers its value to members of that language community. diff --git a/8.2.1/about/why.mdx b/8.2.1/about/why.mdx deleted file mode 100644 index 97cfa36..0000000 --- a/8.2.1/about/why.mdx +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: 'Why Bazel?' ---- - - - -Bazel is a [fast](#fast), [correct](#correct), and [extensible](#extensible) -build tool with [integrated testing](#integrated-testing) that supports multiple -[languages](#multi-language), [repositories](#multi-repository), and -[platforms](#multi-platform) in an industry-leading [ecosystem](#ecosystem). - -## Bazel is fast - -Bazel knows exactly what input files each build command needs, avoiding -unnecessary work by re-running only when the set of input files has -changed between each build. -It runs build commands with as much parallelism as possible, either within the -same computer or on [remote build nodes](/remote/rbe). If the structure of the build -allows for it, it can run thousands of build or test commands at the same time. - -This is supported by multiple caching layers - in memory, on disk, and on the -remote build farm, if available. At Google, we routinely achieve cache hit rates -north of 99%. - -## Bazel is correct - -Bazel ensures that your binaries are built *only* from your own -source code. Bazel actions run in individual sandboxes and Bazel tracks -every input file of the build, only and always re-running build -commands when it needs to. This keeps your binaries up-to-date so that the -[same source code always results in the same binary](/basics/hermeticity), bit -by bit. - -Say goodbye to endless `make clean` invocations and to chasing phantom bugs -that were in fact resolved in source code that never got built.
- -## Bazel is extensible - -Harness the full power of Bazel by writing your own rules and macros to -customize Bazel for your specific needs across a wide range of projects. - -Bazel rules are written in [Starlark](/rules/language), our -in-house programming language that's a subset of Python. Starlark makes -rule-writing accessible to most developers, while also creating rules that can -be used across the ecosystem. - -## Integrated testing - -Bazel's [integrated test runner](/docs/user-manual#running-tests) -knows which tests need to be re-run, and runs only those, using remote execution -(if available) to run them in parallel. Detect flakes early by using remote -execution to quickly run a test thousands of times. - -Bazel [provides facilities](/remote/bep) to upload test results to a central -location, thereby facilitating efficient communication of test outcomes, be it -on CI or by individual developers. - -## Multi-language support - -Bazel supports many common programming languages including C++, Java, -Kotlin, Python, Go, and Rust. You can build multiple binaries (for example, -backend, web UI and mobile app) in the same Bazel invocation without being -constrained to one language's idiomatic build tool. - -## Multi-repository support - -Bazel can [gather source code from multiple locations](/external/overview): you -don't need to vendor your dependencies (but you can!); instead, you can point -Bazel to the location of your source code or prebuilt artifacts (e.g. a git -repository or Maven Central), and it takes care of the rest. - -## Multi-platform support - -Bazel can simultaneously build projects for multiple platforms including Linux, -macOS, Windows, and Android. It also provides powerful -[cross-compilation capabilities](/extending/platforms) to build code for one -platform while running the build on another. - -## Wide ecosystem - -[Industry leaders](/community/users) love Bazel, building a large -community of developers who use and contribute to Bazel. Find tools, services, -and documentation, including [consulting and SaaS offerings](/community/experts), -that you can use with Bazel. Explore extensions like support for programming languages in -our [open source software repositories](/rules). diff --git a/8.2.1/advanced/performance/build-performance-breakdown.mdx b/8.2.1/advanced/performance/build-performance-breakdown.mdx deleted file mode 100644 index 477e757..0000000 --- a/8.2.1/advanced/performance/build-performance-breakdown.mdx +++ /dev/null @@ -1,235 +0,0 @@ ---- -title: 'Breaking down build performance' ---- - - - -Bazel is complex and does a lot of different things over the course of a build, -some of which can have an impact on build performance. This page attempts to map -some of these Bazel concepts to their implications on build performance. While -not extensive, we have included some examples of how to detect build performance -issues through [extracting metrics](/configure/build-performance-metrics) -and what you can do to fix them. With this, we hope you can apply these concepts -when investigating build performance regressions. - -### Clean vs Incremental builds - -A clean build is one that builds everything from scratch, while an incremental -build reuses some already completed work. - -We suggest looking at clean and incremental builds separately, especially when -you are collecting / aggregating metrics that are dependent on the state of -Bazel’s caches (for example -[build request size metrics](#deterministic-build-metrics-as-a-proxy-for-build-performance) -).
They also represent two different user experiences. As compared to starting -a clean build from scratch (which takes longer due to a cold cache), incremental -builds happen far more frequently as developers iterate on code (typically -faster since the cache is usually already warm). - -You can use the `CumulativeMetrics.num_analyses` field in the BEP to classify -builds. If `num_analyses <= 1`, it is a clean build; otherwise, we can broadly -categorize it as likely being an incremental build - the user could have switched -to different flags or different targets, causing an effectively clean build. Any -more rigorous definition of incrementality will likely have to come in the form -of a heuristic, for example looking at the number of packages loaded -(`PackageMetrics.packages_loaded`). - -### Deterministic build metrics as a proxy for build performance - -Measuring build performance can be difficult due to the non-deterministic nature -of certain metrics (for example Bazel’s CPU time or queue times on a remote -cluster). As such, it can be useful to use deterministic metrics as a proxy for -the amount of work done by Bazel, which in turn affects its performance. - -The size of a build request can have a significant implication on build -performance. A larger build could represent more work in analyzing and -constructing the build graphs. Organic growth of builds comes naturally with -development, as more dependencies are added or created, and builds thus grow in -complexity and become more expensive to build. - -We can slice this problem into the various build phases, and use the following -metrics as proxy metrics for work done at each phase: - -1. `PackageMetrics.packages_loaded`: the number of packages successfully loaded. - A regression here represents more work that needs to be done to read and parse - each additional BUILD file in the loading phase. - - This is often due to the addition of dependencies and having to load their - transitive closure. - - Use [query](/query/quickstart) / [cquery](/query/cquery) to find - where new dependencies might have been added. - -2. `TargetMetrics.targets_configured`: representing the number of targets and - aspects configured in the build. A regression represents more work in - constructing and traversing the configured target graph. - - This is often due to the addition of dependencies and having to construct - the graph of their transitive closure. - - Use [cquery](/query/cquery) to find where new - dependencies might have been added. - -3. `ActionSummary.actions_created`: represents the actions created in the build, - and a regression represents more work in constructing the action graph. Note - that this also includes unused actions that might not have been executed. - - Use [aquery](/query/aquery) for debugging regressions; - we suggest starting with - [`--output=summary`](/reference/command-line-reference#flag--output) - before further drilling down with - [`--skyframe_state`](/reference/command-line-reference#flag--skyframe_state). - -4. `ActionSummary.actions_executed`: the number of actions executed, a - regression directly represents more work in executing these actions. - - The [BEP](/remote/bep) writes out the action statistics - `ActionData` that shows the most executed action types.
By default, it - collects the top 20 action types, but you can pass the - [`--experimental_record_metrics_for_all_mnemonics`](/reference/command-line-reference#flag--experimental_record_metrics_for_all_mnemonics) - flag to collect this data for all action types that were executed. - - This should help you figure out what additional kinds of actions were executed. - -5. `BuildGraphSummary.outputArtifactCount`: the number of artifacts created by - the executed actions. - - If the number of actions executed did not increase, then it is likely that - a rule implementation was changed. - - -These metrics are all affected by the state of the local cache; hence, you will -want to ensure that the builds you extract these metrics from are -**clean builds**. - -We have noted that a regression in any of these metrics can be accompanied by -regressions in wall time, CPU time, and memory usage. - -### Usage of local resources - -Bazel consumes a variety of resources on your local machine (both for analyzing -the build graph and driving the execution, and for running local actions). This -can affect the performance and availability of your machine in performing the -build, and also other tasks. - -#### Time spent - -Perhaps the metrics most susceptible to noise (and the ones that can vary greatly from build -to build) are the time metrics - in particular, wall time, CPU time, and system time. You can -use [bazel-bench](https://github.com/bazelbuild/bazel-bench) to get -a benchmark for these metrics, and with a sufficient number of `--runs`, you can -increase the statistical significance of your measurement. - -- **Wall time** is the real world time elapsed. - - If _only_ wall time regresses, we suggest collecting a - [JSON trace profile](/advanced/performance/json-trace-profile) and looking - for differences. Otherwise, it would likely be more efficient to - investigate other regressed metrics as they could have affected the wall - time. - -- **CPU time** is the time spent by the CPU executing user code. - - If the CPU time regresses across two project commits, we suggest collecting - a Starlark CPU profile. You should probably also use `--nobuild` to - restrict the build to the analysis phase since that is where most of the - CPU heavy work is done. - -- **System time** is the time spent by the CPU in the kernel. - - If system time regresses, it is mostly correlated with I/O when Bazel reads - files from your file system. - -#### System-wide load profiling - -Using the -[`--experimental_collect_load_average_in_profiler`](https://github.com/bazelbuild/bazel/blob/6.0.0/src/main/java/com/google/devtools/build/lib/runtime/CommonCommandOptions.java#L306-L312) -flag introduced in Bazel 6.0, the -[JSON trace profiler](/advanced/performance/json-trace-profile) collects the -system load average during the invocation. - -![Profile that includes system load average](/docs/images/json-trace-profile-system-load-average.png "Profile that includes system load average") - -**Figure 1.** Profile that includes system load average. - -A high load during a Bazel invocation can be an indication that Bazel schedules -too many local actions in parallel for your machine. You might want to look into -adjusting -[`--local_cpu_resources`](/reference/command-line-reference#flag--local_cpu_resources) -and [`--local_ram_resources`](/reference/command-line-reference#flag--local_ram_resources), -especially in container environments (at least until -[#16512](https://github.com/bazelbuild/bazel/pull/16512) is merged).
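For example, in a container that is only granted a slice of the host machine, you might cap Bazel's local scheduling to match. A sketch with illustrative values (`--local_ram_resources` is in MB):

```
# Cap local action scheduling to the container's actual allotment,
# here assumed to be 4 CPUs and 8 GB of RAM.
bazel build --local_cpu_resources=4 --local_ram_resources=8192 //...
```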
- - -#### Monitoring Bazel memory usage - -There are two main sources for Bazel’s memory usage: Bazel `info` and the -[BEP](/remote/bep). - -- `bazel info used-heap-size-after-gc`: The amount of used memory in bytes after - a call to `System.gc()`. - - [Bazel bench](https://github.com/bazelbuild/bazel-bench) - provides benchmarks for this metric as well. - - Additionally, there are `peak-heap-size`, `max-heap-size`, `used-heap-size` - and `committed-heap-size` (see - [documentation](/docs/user-manual#configuration-independent-data)), but these are - less relevant. - -- [BEP](/remote/bep)’s - `MemoryMetrics.peak_post_gc_heap_size`: Size of the peak JVM heap size in - bytes post GC (requires setting - [`--memory_profile`](/reference/command-line-reference#flag--memory_profile) - that attempts to force a full GC). - -A regression in memory usage is usually a result of a regression in -[build request size metrics](#deterministic_build_metrics_as_a_proxy_for_build_performance), -which are often due to the addition of dependencies or a change in the rule -implementation. - -To analyze Bazel’s memory footprint on a more granular level, we recommend using -the [built-in memory profiler](/rules/performance#memory-profiling) -for rules. - -#### Memory profiling of persistent workers - -While [persistent workers](/remote/persistent) can help to speed up builds -significantly (especially for interpreted languages), their memory footprint can -be problematic. Bazel collects metrics on its workers; in particular, the -`WorkerMetrics.WorkerStats.worker_memory_in_kb` field tells you how much memory -workers use (by mnemonic). - -The [JSON trace profiler](/advanced/performance/json-trace-profile) also -collects persistent worker memory usage during the invocation by passing in the -[`--experimental_collect_worker_data_in_profiler`](https://github.com/bazelbuild/bazel/blob/6.0.0/src/main/java/com/google/devtools/build/lib/runtime/CommonCommandOptions.java#L314-L320) -flag (new in Bazel 6.0). - -![Profile that includes workers memory usage](/docs/images/json-trace-profile-workers-memory-usage.png "Profile that includes workers memory usage") - -**Figure 2.** Profile that includes workers memory usage. - -Lowering the value of -[`--worker_max_instances`](/reference/command-line-reference#flag--worker_max_instances) -(default 4) might help to reduce -the amount of memory used by persistent workers. We are actively working on -making Bazel’s resource manager and scheduler smarter so that such fine tuning -will be required less often in the future. - -### Monitoring network traffic for remote builds - -In remote execution, Bazel downloads artifacts that were built as a result of -executing actions. As such, your network bandwidth can affect the performance -of your build. - -If you are using remote execution for your builds, you might want to consider -monitoring the network traffic during the invocation using the -`NetworkMetrics.SystemNetworkStats` proto from the [BEP](/remote/bep) -(requires passing `--experimental_collect_system_network_usage`). - -Furthermore, [JSON trace profiles](/advanced/performance/json-trace-profile) -allow you to view system-wide network usage throughout the course of the build -by passing the `--experimental_collect_system_network_usage` flag (new in Bazel -6.0). - -![Profile that includes system-wide network usage](/docs/images/json-trace-profile-network-usage.png "Profile that includes system-wide network usage") - -**Figure 3.** Profile that includes system-wide network usage.
- -A high but rather flat network usage when using remote execution might indicate -that the network is the bottleneck in your build; if you are not using it already, -consider turning on Build without the Bytes by passing -[`--remote_download_minimal`](/reference/command-line-reference#flag--remote_download_minimal). -This will speed up your builds by avoiding the download of unnecessary intermediate artifacts. - -Another option is to configure a local -[disk cache](/reference/command-line-reference#flag--disk_cache) to save on -download bandwidth. diff --git a/8.2.1/advanced/performance/build-performance-metrics.mdx b/8.2.1/advanced/performance/build-performance-metrics.mdx deleted file mode 100644 index 8391ea8..0000000 --- a/8.2.1/advanced/performance/build-performance-metrics.mdx +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: 'Extracting build performance metrics' ---- - - - -Probably every Bazel user has experienced builds that were slow or slower than -anticipated. Improving the performance of individual builds has particular value -for targets with significant impact, such as: - -1. Core developer targets that are frequently iterated on and (re)built. - -2. Common libraries widely depended upon by other targets. - -3. A representative target from a class of targets (e.g. custom rules), where - diagnosing and fixing issues in one build might help to resolve issues at a - larger scale. - -An important step to improving the performance of builds is to understand where -resources are spent. This page lists different metrics you can collect. -[Breaking down build performance](/configure/build-performance-breakdown) showcases -how you can use these metrics to detect and fix build performance issues. - -There are a few main ways to extract metrics from your Bazel builds, namely: - -## Build Event Protocol (BEP) - -Bazel outputs a variety of protocol buffers -[`build_event_stream.proto`](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto) -through the [Build Event Protocol (BEP)](/remote/bep), which -can be aggregated by a backend specified by you. Depending on your use cases, -you might decide to aggregate the metrics in various ways, but here we will go -over some concepts and proto fields that would be useful in general to consider. - -## Bazel’s query / cquery / aquery commands - -Bazel provides three different query modes ([query](/query/quickstart), -[cquery](/query/cquery) and [aquery](/query/aquery)) that allow users -to query the target graph, configured target graph, and action graph, -respectively. The query language provides a -[suite of functions](/query/language#functions) usable across the different -query modes, allowing you to customize your queries according to your needs. - -## JSON Trace Profiles - -For every build-like Bazel invocation, Bazel writes a trace profile in JSON -format. The [JSON trace profile](/advanced/performance/json-trace-profile) can -be very useful to quickly understand what Bazel spent time on during the -invocation. - -## Execution Log - -The [execution log](/remote/cache-remote) can help you to troubleshoot and fix -missing remote cache hits due to machine and environment differences or -non-deterministic actions.
If you pass the flag -[`--experimental_execution_log_spawn_metrics`](/reference/command-line-reference#flag--experimental_execution_log_spawn_metrics) -(available from Bazel 5.2), it will also contain detailed spawn metrics, both for -locally and remotely executed actions. You can use these metrics, for example, to -make comparisons between local and remote machine performance or to find out -which part of the spawn execution is consistently slower than expected (for -example due to queuing). - -## Execution Graph Log - -While the JSON trace profile contains the critical path information, sometimes -you need additional information on the dependency graph of the executed actions. -Starting with Bazel 6.0, you can pass the flags -`--experimental_execution_graph_log` and -`--experimental_execution_graph_log_dep_type=all` to write out a log about the -executed actions and their inter-dependencies. - -This information can be used to understand the drag that is added by a node on -the critical path. The drag is the amount of time that can potentially be saved -by removing a particular node from the execution graph. - -The data helps you predict the impact of changes to the build and action graph -before you actually do them. - -## Benchmarking with bazel-bench - -[Bazel bench](https://github.com/bazelbuild/bazel-bench) is a -benchmarking tool for Git projects to benchmark build performance in the -following cases: - -* **Project benchmark:** Benchmarking two git commits against each other at a - single Bazel version. Used to detect regressions in your build (often through - the addition of dependencies). - -* **Bazel benchmark:** Benchmarking two versions of Bazel against each other at - a single git commit. Used to detect regressions within Bazel itself (if you - happen to maintain / fork Bazel). - -Benchmarks monitor wall time, CPU time, system time, and Bazel’s retained -heap size. - -It is also recommended to run Bazel bench on dedicated, physical machines that -are not running other processes so as to reduce sources of variability. diff --git a/8.2.1/advanced/performance/iteration-speed.mdx b/8.2.1/advanced/performance/iteration-speed.mdx deleted file mode 100644 index 2bbf839..0000000 --- a/8.2.1/advanced/performance/iteration-speed.mdx +++ /dev/null @@ -1,93 +0,0 @@ ---- -title: 'Optimize Iteration Speed' ---- - - - -This page describes how to optimize Bazel's build performance when running Bazel -repeatedly. - -## Bazel's Runtime State - -A Bazel invocation involves several interacting parts. - -* The `bazel` command line interface (CLI) is the user-facing front-end tool - and receives commands from the user. - -* The CLI tool starts a [*Bazel server*](https://bazel.build/run/client-server) - for each distinct [output base](https://bazel.build/remote/output-directories). - The Bazel server is generally persistent, but will shut down after some idle - time so as to not waste resources. - -* The Bazel server performs the loading and analysis steps for a given command - (`build`, `run`, `cquery`, etc.), in which it constructs the necessary parts - of the build graph in memory. The resulting data structures are retained in - the Bazel server as part of the *analysis cache*. - -* The Bazel server can also perform the action execution, or it can send - actions off for remote execution if it is set up to do so. The results of - action executions are also cached, namely in the *action cache* (or - *execution cache*, which may be either local or remote, and it may be shared - among Bazel servers).
-
-* The result of the Bazel invocation is made available in the output tree.
-
-## Running Bazel Iteratively
-
-In a typical developer workflow, it is common to build (or run) a piece of code
-repeatedly, often at a very high frequency (e.g. to resolve some compilation
-error or investigate a failing test). In this situation, it is important that
-repeated invocations of `bazel` have as little overhead as possible relative to
-the underlying, repeated action (e.g. invoking a compiler, or executing a test).
-
-With this in mind, we take another look at Bazel's runtime state:
-
-The analysis cache is a critical piece of data. A significant amount of time can
-be spent just on the loading and analysis phases of a cold run (i.e. a run just
-after the Bazel server was started or when the analysis cache was discarded).
-For a single, successful cold build (e.g. for a production release) this cost is
-bearable, but for repeatedly building the same target it is important that this
-cost be amortized and not repeated on each invocation.
-
-The analysis cache is rather volatile. First off, it is part of the in-process
-state of the Bazel server, so losing the server loses the cache. But the cache
-is also *invalidated* very easily: for example, many `bazel` command line flags
-cause the cache to be discarded. This is because many flags affect the build
-graph (e.g. because of
-[configurable attributes](https://bazel.build/configure/attributes)). Some flag
-changes can also cause the Bazel server to be restarted (e.g. changing
-[startup options](https://bazel.build/docs/user-manual#startup-options)).
-
-A good execution cache is also valuable for build performance. An execution
-cache can be kept locally
-[on disk](https://bazel.build/remote/caching#disk-cache), or
-[remotely](https://bazel.build/remote/caching). The cache can be shared among
-Bazel servers, and indeed among developers.
-
-## Avoid discarding the analysis cache
-
-Bazel will print a warning if either the analysis cache was discarded or the
-server was restarted. Either of these should be avoided during iterative use:
-
-* Be mindful of changing `bazel` flags in the middle of an iterative
-  workflow. For example, mixing a `bazel build -c opt` with a `bazel cquery`
-  causes each command to discard the analysis cache of the other. In general,
-  try to use a fixed set of flags for the duration of a particular workflow.
-
-* Losing the Bazel server loses the analysis cache. The Bazel server has a
-  [configurable](https://bazel.build/docs/user-manual#max-idle-secs) idle
-  time, after which it shuts down. You can configure this time via your
-  bazelrc file to suit your needs. The server is also restarted when startup
-  flags change, so, again, avoid changing those flags if possible.
-
-* Beware that the Bazel server is killed if you press
-  Ctrl-C repeatedly while Bazel is running. It is tempting to try to save time
-  by interrupting a running build that is no longer needed, but only press
-  Ctrl-C once to request a graceful end of the current invocation.
-
-* If you want to use multiple sets of flags from the same workspace, you can
-  use multiple, distinct output bases, switched with the `--output_base`
-  flag. Each output base gets its own Bazel server (see the sketch after this
-  list).
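A minimal sketch of those last two points, with an illustrative idle timeout and
hypothetical output base paths:

```sh
# .bazelrc: keep an idle Bazel server alive for 3 hours instead of the default.
startup --max_idle_secs=10800
```

```sh
# Give each flag set its own output base (and therefore its own server and
# analysis cache), so the two commands stop evicting each other's cache.
bazel --output_base="$HOME/.cache/bazel-opt" build -c opt //pkg:target
bazel --output_base="$HOME/.cache/bazel-dbg" build -c dbg //pkg:target
```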
-
-To make this condition an error rather than a warning, you can use the
-`--noallow_analysis_cache_discard` flag (introduced in Bazel 6.4.0).
diff --git a/8.2.1/advanced/performance/json-trace-profile.mdx b/8.2.1/advanced/performance/json-trace-profile.mdx
deleted file mode 100644
index 56e278c..0000000
--- a/8.2.1/advanced/performance/json-trace-profile.mdx
+++ /dev/null
@@ -1,200 +0,0 @@
----
-title: 'JSON Trace Profile'
----
-
-
-
-The JSON trace profile can be very useful to quickly understand what Bazel spent
-time on during the invocation.
-
-By default, for all build-like commands and query, Bazel writes a profile into
-the output base named `command-$INVOCATION_ID.profile.gz`, where
-`$INVOCATION_ID` is the invocation identifier of the command. Bazel also creates
-a symlink called `command.profile.gz` in the output base that points to the profile
-of the latest command. You can configure whether a profile is written with the
-[`--generate_json_trace_profile`](/reference/command-line-reference#flag--generate_json_trace_profile)
-flag, and the location it is written to with the
-[`--profile`](/docs/user-manual#profile) flag. Locations ending with `.gz` are
-compressed with GZIP. Bazel keeps the last 5 profiles, configurable by
-[`--profiles_to_retain`](/reference/command-line-reference#flag--profiles_to_retain),
-in the output base by default for post-build analysis. Explicitly passing a
-profile path with `--profile` disables automatic garbage collection.
-
-## Tools
-
-You can load this profile into `chrome://tracing` or analyze and
-post-process it with other tools.
-
-### `chrome://tracing`
-
-To visualize the profile, open `chrome://tracing` in a Chrome browser tab,
-click "Load" and pick the (potentially compressed) profile file. For more
-detailed results, click the boxes in the lower left corner.
-
-Example profile:
-
-![Example profile](/docs/images/json-trace-profile.png "Example profile")
-
-**Figure 1.** Example profile.
-
-You can use these keyboard controls to navigate:
-
-* Press `1` for "select" mode. In this mode, you can select
-  particular boxes to inspect the event details (see lower left corner).
-  Select multiple events to get a summary and aggregated statistics.
-* Press `2` for "pan" mode. Then drag the mouse to move the view. You
-  can also use `a`/`d` to move left/right.
-* Press `3` for "zoom" mode. Then drag the mouse to zoom. You can
-  also use `w`/`s` to zoom in/out.
-* Press `4` for "timing" mode where you can measure the distance
-  between two events.
-* Press `?` to learn about all controls.
-
-### `bazel analyze-profile`
-
-The Bazel subcommand [`analyze-profile`](/docs/user-manual#analyze-profile)
-consumes a profile format and prints cumulative statistics for
-different task types for each build phase and an analysis of the critical path.
-
-For example, the commands
-
-```
-$ bazel build --profile=/tmp/profile.gz //path/to:target
-...
-$ bazel analyze-profile /tmp/profile.gz
-```
-
-may yield output of this form:
-
-```
-INFO: Profile created on Tue Jun 16 08:59:40 CEST 2020, build ID: 0589419c-738b-4676-a374-18f7bbc7ac23, output base: /home/johndoe/.cache/bazel/_bazel_johndoe/d8eb7a85967b22409442664d380222c0
-
-=== PHASE SUMMARY INFORMATION ===
-
-Total launch phase time         1.070 s   12.95%
-Total init phase time           0.299 s    3.62%
-Total loading phase time        0.878 s   10.64%
-Total analysis phase time       1.319 s   15.98%
-Total preparation phase time    0.047 s    0.57%
-Total execution phase time      4.629 s   56.05%
-Total finish phase time         0.014 s    0.18%
------------------------------------------------
-Total run time                  8.260 s  100.00%
-
-Critical path (4.245 s):
-       Time Percentage   Description
-    8.85 ms    0.21%   _Ccompiler_Udeps for @local_config_cc// compiler_deps
-    3.839 s   90.44%   action 'Compiling external/com_google_protobuf/src/google/protobuf/compiler/php/php_generator.cc [for host]'
-     270 ms    6.36%   action 'Linking external/com_google_protobuf/protoc [for host]'
-    0.25 ms    0.01%   runfiles for @com_google_protobuf// protoc
-     126 ms    2.97%   action 'ProtoCompile external/com_google_protobuf/python/google/protobuf/compiler/plugin_pb2.py'
-    0.96 ms    0.02%   runfiles for //tools/aquery_differ aquery_differ
-```
-
-### Bazel Invocation Analyzer
-
-The open-source
-[Bazel Invocation Analyzer](https://github.com/EngFlow/bazel_invocation_analyzer)
-consumes a profile format and prints suggestions on how to improve
-the build’s performance. This analysis can be performed using its CLI or on
-[https://analyzer.engflow.com](https://analyzer.engflow.com).
-
-### `jq`
-
-`jq` is like `sed` for JSON data. An example usage of `jq` to extract all
-durations of the sandbox creation step in local action execution:
-
-```
-$ zcat $(../bazel-6.0.0rc1-linux-x86_64 info output_base)/command.profile.gz | jq '.traceEvents | .[] | select(.name == "sandbox.createFileSystem") | .dur'
-6378
-7247
-11850
-13756
-6555
-7445
-8487
-15520
-[...]
-```
-
-## Profile information
-
-The profile contains multiple rows. Usually the bulk of rows represent Bazel
-threads and their corresponding events, but some special rows are also included.
-
-The special rows included depend on the version of Bazel invoked when the
-profile was created, and may be customized by different flags.
-
-Figure 1 shows a profile created with Bazel v5.3.1 and includes these rows:
-
-* `action count`: Displays how many concurrent actions were in flight. Click
-  on it to see the actual value. Should go up to the value of
-  [`--jobs`](/reference/command-line-reference#flag--jobs) in clean
-  builds.
-* `CPU usage (Bazel)`: For each second of the build, displays the amount of
-  CPU that was used by Bazel (a value of 1 equals one core being 100% busy).
-* `Critical Path`: Displays one block for each action on the critical path.
-* `Main Thread`: Bazel’s main thread. Useful to get a high-level picture of
-  what Bazel is doing, for example "Launch Blaze", "evaluateTargetPatterns",
-  and "runAnalysisPhase".
-* `Garbage Collector`: Displays minor and major Garbage Collection (GC)
-  pauses.
-
-## Common performance issues
-
-When analyzing performance profiles, look for:
-
-* Slower than expected analysis phase (`runAnalysisPhase`), especially on
-  incremental builds. This can be a sign of a poor rule implementation, for
-  example one that flattens depsets. Package loading can be slowed down by an
-  excessive number of targets, complex macros, or recursive globs.
-* Individual slow actions, especially those on the critical path. It might be
-  possible to split large actions into multiple smaller actions or reduce the
-  set of (transitive) dependencies to speed them up. Also check for an unusually
-  high non-`PROCESS_TIME` (such as `REMOTE_SETUP` or `FETCH`).
-* Bottlenecks, that is, a small number of threads are busy while all others are
-  idle, waiting for the result (see around 22s and 29s in Figure 1).
-  Optimizing this will most likely require touching the rule implementations
-  or Bazel itself to introduce more parallelism. This can also happen when
-  there is an unusual amount of GC.
-
-## Profile file format
-
-The top-level object contains metadata (`otherData`) and the actual tracing data
-(`traceEvents`). The metadata contains extra info, for example the invocation ID
-and date of the Bazel invocation.
-
-Example:
-
-```json
-{
-  "otherData": {
-    "build_id": "101bff9a-7243-4c1a-8503-9dc6ae4c3b05",
-    "date": "Wed Oct 26 08:22:35 CEST 2022",
-    "profile_finish_ts": "1677666095162000",
-    "output_base": "/usr/local/google/_bazel_johndoe/573d4be77eaa72b91a3dfaa497bf8cd0"
-  },
-  "traceEvents": [
-    {"name":"thread_name","ph":"M","pid":1,"tid":0,"args":{"name":"Critical Path"}},
-    ...
-    {"cat":"build phase marker","name":"Launch Blaze","ph":"X","ts":-1306000,"dur":1306000,"pid":1,"tid":21},
-    ...
-    {"cat":"package creation","name":"foo","ph":"X","ts":2685358,"dur":784,"pid":1,"tid":246},
-    ...
-    {"name":"thread_name","ph":"M","pid":1,"tid":11,"args":{"name":"Garbage Collector"}},
-    {"cat":"gc notification","name":"minor GC","ph":"X","ts":825986,"dur":11000,"pid":1,"tid":11},
-    ...
-    {"cat":"action processing","name":"Compiling foo/bar.c","ph":"X","ts":54413389,"dur":357594,"pid":1,"args":{"mnemonic":"CppCompile"},"tid":341},
-  ]
-}
-```
-
-Timestamps (`ts`) and durations (`dur`) in the trace events are given in
-microseconds. The category (`cat`) is one of the enum values of `ProfilerTask`.
-Note that some events are merged together if they are very short and close to
-each other; pass
-[`--noslim_profile`](/reference/command-line-reference#flag--slim_profile)
-if you would like to prevent event merging.
-
-See also the
-[Chrome Trace Event Format Specification](https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview).
diff --git a/8.2.1/advanced/performance/memory.mdx b/8.2.1/advanced/performance/memory.mdx
deleted file mode 100644
index 844e691..0000000
--- a/8.2.1/advanced/performance/memory.mdx
+++ /dev/null
@@ -1,97 +0,0 @@
----
-title: 'Optimize Memory'
----
-
-
-
-This page describes how to limit and reduce the memory Bazel uses.
-
-## Running Bazel with Limited RAM
-
-In certain situations, you may want Bazel to use minimal memory. You can set the
-maximum heap via the startup flag
-[`--host_jvm_args`](/docs/user-manual#host-jvm-args),
-like `--host_jvm_args=-Xmx2g`.
-
-### Trade incremental build speeds for memory
-
-If your builds are too big, Bazel may throw an `OutOfMemoryError` (OOM) when
-it doesn't have enough memory. You can make Bazel use less memory, at the cost
-of slower incremental builds, by passing the following command flags:
-[`--discard_analysis_cache`](/docs/user-manual#discard-analysis-cache),
-[`--nokeep_state_after_build`](/reference/command-line-reference#flag--keep_state_after_build),
-and
-[`--notrack_incremental_state`](/reference/command-line-reference#flag--track_incremental_state).
-
-These flags will minimize the memory that Bazel uses in a build, at the cost of
-making future builds slower than a standard incremental build would be.
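For example, a memory-constrained invocation might combine all three flags
(the target label here is hypothetical):

```sh
# Minimize Bazel's retained memory; later incremental builds will be slower.
bazel build //pkg:target \
  --discard_analysis_cache \
  --nokeep_state_after_build \
  --notrack_incremental_state
```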
-
-You can also pass any one of these flags individually:
-
- * `--discard_analysis_cache` will reduce the memory used during execution (not
-analysis). Incremental builds will not have to redo package loading, but will
-have to redo analysis and execution (although the on-disk action cache can
-prevent most re-execution).
- * `--notrack_incremental_state` will not store any edges in Bazel's internal
-  dependency graph, so that it is unusable for incremental builds. The next build
-  will discard that data, but it is preserved until then, for internal debugging,
-  unless `--nokeep_state_after_build` is specified.
- * `--nokeep_state_after_build` will discard all data after the build, so that
-  incremental builds have to build from scratch (except for the on-disk action
-  cache). Alone, it does not affect the high-water mark of the current build.
-
-### Trade build flexibility for memory with Skyfocus (Experimental)
-
-If you want to make Bazel use less memory *and* retain incremental build speeds,
-you can tell Bazel the working set of files that you will be modifying, and
-Bazel will only keep state needed to correctly incrementally rebuild changes to
-those files. This feature is called **Skyfocus**.
-
-To use Skyfocus, pass the `--experimental_enable_skyfocus` flag:
-
-```sh
-bazel build //pkg:target --experimental_enable_skyfocus
-```
-
-By default, the working set will be the set of files next to the target being
-built. In the example, all files in `//pkg` will be kept in the working set, and
-changes to files outside of the working set will be disallowed, until you issue
-`bazel clean` or restart the Bazel server.
-
-If you want to specify an exact set of files or directories, use the
-`--experimental_working_set` flag, like so:
-
-```sh
-bazel build //pkg:target --experimental_enable_skyfocus \
-  --experimental_working_set=path/to/another/dir,path/to/tests/dir
-```
-
-You can also pass `--experimental_skyfocus_dump_post_gc_stats` to show the
-memory reduction amount.
-
-Putting it all together, you should see something like this:
-
-```none
-$ bazel test //pkg:target //tests/... --experimental_enable_skyfocus --experimental_working_set dir1,dir2,dir3/subdir --experimental_skyfocus_dump_post_gc_stats
-INFO: --experimental_enable_skyfocus is enabled. Blaze will reclaim memory not needed to build the working set. Run 'blaze dump --skyframe=working_set' to show the working set, after this command.
-WARNING: Changes outside of the working set will cause a build error.
-INFO: Analyzed 149 targets (4533 packages loaded, 169438 targets configured).
-INFO: Found 25 targets and 124 test targets...
-INFO: Updated working set successfully.
-INFO: Focusing on 334 roots, 3 leafs... (use --experimental_skyfocus_dump_keys to show them)
-INFO: Heap: 1237MB -> 676MB (-45.31%)
-INFO: Elapsed time: 192.670s ...
-INFO: Build completed successfully, 62303 total actions
-```
-
-For this example, using Skyfocus allowed Bazel to drop 561MB (45%) of memory,
-and incremental builds handling changes to files under `dir1`, `dir2`, and
-`dir3/subdir` retain their fast speeds, with the tradeoff that Bazel cannot
-rebuild changed files outside of these directories.
-
-## Memory Profiling
-
-Bazel comes with a built-in memory profiler that can help you check your rule’s
-memory use. Read more about this process on the
-[Memory Profiling section](/rules/performance#memory-profiling) of our
-documentation on how to improve the performance of custom rules.
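If you just want a quick before-and-after view of the server's heap without a
full profile, a minimal sketch (the target label is hypothetical):

```sh
# Print the heap currently used by the Bazel server's JVM.
bazel info used-heap-size

# Write a memory profile during the build for later analysis.
bazel build //pkg:target --memory_profile=/tmp/memory.profile
```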
diff --git a/8.2.1/basics/artifact-based-builds.mdx b/8.2.1/basics/artifact-based-builds.mdx
deleted file mode 100644
index 79f3514..0000000
--- a/8.2.1/basics/artifact-based-builds.mdx
+++ /dev/null
@@ -1,279 +0,0 @@
----
-title: 'Artifact-Based Build Systems'
----
-
-
-
-This page covers artifact-based build systems and the philosophy behind their
-creation. Bazel is an artifact-based build system. While task-based build
-systems are a good step above build scripts, they give too much power to
-individual engineers by letting them define their own tasks.
-
-Artifact-based build systems have a small number of tasks defined by the system
-that engineers can configure in a limited way. Engineers still tell the system
-**what** to build, but the build system determines **how** to build it. As with
-task-based build systems, artifact-based build systems, such as Bazel, still
-have buildfiles, but the contents of those buildfiles are very different. Rather
-than being an imperative set of commands in a Turing-complete scripting language
-describing how to produce an output, buildfiles in Bazel are a declarative
-manifest describing a set of artifacts to build, their dependencies, and a
-limited set of options that affect how they’re built. When engineers run `bazel`
-on the command line, they specify a set of targets to build (the **what**), and
-Bazel is responsible for configuring, running, and scheduling the compilation
-steps (the **how**). Because the build system now has full control over what
-tools to run when, it can make much stronger guarantees that allow it to be far
-more efficient while still guaranteeing correctness.
-
-## A functional perspective
-
-It’s easy to make an analogy between artifact-based build systems and functional
-programming. Traditional imperative programming languages (such as Java, C, and
-Python) specify lists of statements to be executed one after another, in the
-same way that task-based build systems let programmers define a series of steps
-to execute. Functional programming languages (such as Haskell and ML), in
-contrast, are structured more like a series of mathematical equations. In
-functional languages, the programmer describes a computation to perform, but
-leaves the details of when and exactly how that computation is executed to the
-compiler.
-
-This maps to the idea of declaring a manifest in an artifact-based build system
-and letting the system figure out how to execute the build. Many problems can't
-be easily expressed using functional programming, but the ones that can be
-expressed this way benefit greatly from it: the language is often able to
-trivially parallelize such programs and make strong guarantees about their
-correctness that would be impossible in an imperative language. The easiest
-problems to express using functional programming are the ones that simply
-involve transforming one piece of data into another using a series of rules or
-functions. And that’s exactly what a build system is: the whole system is
-effectively a mathematical function that takes source files (and tools like the
-compiler) as inputs and produces binaries as outputs. So, it’s not surprising
-that it works well to base a build system around the tenets of functional
-programming.
-
-## Understanding artifact-based build systems
-
-Google's build system, Blaze, was the first artifact-based build system. Bazel
-is the open-sourced version of Blaze.
-
-Here’s what a buildfile (normally named `BUILD`) looks like in Bazel:
-
-```python
-java_binary(
-    name = "MyBinary",
-    srcs = ["MyBinary.java"],
-    deps = [
-        ":mylib",
-    ],
-)
-java_library(
-    name = "mylib",
-    srcs = ["MyLibrary.java", "MyHelper.java"],
-    visibility = ["//java/com/example/myproduct:__subpackages__"],
-    deps = [
-        "//java/com/example/common",
-        "//java/com/example/myproduct/otherlib",
-    ],
-)
-```
-
-In Bazel, `BUILD` files define targets—the two types of targets here are
-`java_binary` and `java_library`. Every target corresponds to an artifact that
-can be created by the system: binary targets produce binaries that can be
-executed directly, and library targets produce libraries that can be used by
-binaries or other libraries. Every target has:
-
-* `name`: how the target is referenced on the command line and by other
-  targets
-* `srcs`: the source files to be compiled to create the artifact for the target
-* `deps`: other targets that must be built before this target and linked into
-  it
-
-Dependencies can either be within the same package (such as `MyBinary`’s
-dependency on `:mylib`) or on a different package in the same source hierarchy
-(such as `mylib`’s dependency on `//java/com/example/common`).
-
-As with task-based build systems, you perform builds using Bazel’s command-line
-tool. To build the `MyBinary` target, you run `bazel build :MyBinary`. After
-entering that command for the first time in a clean repository, Bazel:
-
-1. Parses every `BUILD` file in the workspace to create a graph of dependencies
-   among artifacts.
-1. Uses the graph to determine the transitive dependencies of `MyBinary`; that
-   is, every target that `MyBinary` depends on and every target that those
-   targets depend on, recursively.
-1. Builds each of those dependencies, in order. Bazel starts by building each
-   target that has no other dependencies and keeps track of which dependencies
-   still need to be built for each target. As soon as all of a target’s
-   dependencies are built, Bazel starts building that target. This process
-   continues until every one of `MyBinary`’s transitive dependencies has been
-   built.
-1. Builds `MyBinary` to produce a final executable binary that links in all of
-   the dependencies that were built in step 3.
-
-Fundamentally, it might not seem like what’s happening here is that much
-different from what happens when using a task-based build system. Indeed, the
-end result is the same binary, and the process for producing it involved
-analyzing a bunch of steps to find dependencies among them, and then running
-those steps in order. But there are critical differences. The first one appears
-in step 3: because Bazel knows that each target only produces a Java library, it
-knows that all it has to do is run the Java compiler rather than an arbitrary
-user-defined script, so it knows that it’s safe to run these steps in parallel.
-This can produce an order of magnitude performance improvement over building
-targets one at a time on a multicore machine, and is only possible because the
-artifact-based approach leaves the build system in charge of its own execution
-strategy so that it can make stronger guarantees about parallelism.
-
-The benefits extend beyond parallelism, though. The next thing that this
-approach gives us becomes apparent when the developer types `bazel
-build :MyBinary` a second time without making any changes: Bazel exits in less
-than a second with a message saying that the target is up to date. This is
-possible due to the functional programming paradigm we talked about
-earlier—Bazel knows that each target is the result only of running a Java
-compiler, and it knows that the output from the Java compiler depends only on
-its inputs, so as long as the inputs haven’t changed, the output can be reused.
-And this analysis works at every level; if `MyBinary.java` changes, Bazel knows
-to rebuild `MyBinary` but reuse `mylib`. If a source file for
-`//java/com/example/common` changes, Bazel knows to rebuild that library,
-`mylib`, and `MyBinary`, but reuse `//java/com/example/myproduct/otherlib`.
-Because Bazel knows about the properties of the tools it runs at every step,
-it’s able to rebuild only the minimum set of artifacts each time while
-guaranteeing that it won’t produce stale builds.
-
-Reframing the build process in terms of artifacts rather than tasks is subtle
-but powerful. By reducing the flexibility exposed to the programmer, the build
-system can know more about what is being done at every step of the build. It can
-use this knowledge to make the build far more efficient by parallelizing build
-processes and reusing their outputs. But this is really just the first step, and
-these building blocks of parallelism and reuse form the basis for a distributed
-and highly scalable build system.
-
-## Other nifty Bazel tricks
-
-Artifact-based build systems fundamentally solve the problems with parallelism
-and reuse that are inherent in task-based build systems. But there are still a
-few problems that came up earlier that we haven’t addressed. Bazel has clever
-ways of solving each of these, and we should discuss them before moving on.
-
-### Tools as dependencies
-
-One problem we ran into earlier was that builds depended on the tools installed
-on our machine, and reproducing builds across systems could be difficult due to
-different tool versions or locations. The problem becomes even more difficult
-when your project uses languages that require different tools based on which
-platform they’re being built on or compiled for (such as Windows versus Linux),
-and each of those platforms requires a slightly different set of tools to do the
-same job.
-
-Bazel solves the first part of this problem by treating tools as dependencies of
-each target. Every `java_library` in the workspace implicitly depends on a Java
-compiler, which defaults to a well-known compiler. Whenever Bazel builds a
-`java_library`, it checks to make sure that the specified compiler is available
-at a known location. Just like any other dependency, if the Java compiler
-changes, every artifact that depends on it is rebuilt.
-
-Bazel solves the second part of the problem, platform independence, by setting
-[build configurations](/run/build#build-config-cross-compilation). Rather than
-targets depending directly on their tools, they depend on types of configurations:
-
-* **Host configuration**: building tools that run during the build
-* **Target configuration**: building the binary you ultimately requested
-
-### Extending the build system
-
-Bazel comes with targets for several popular programming languages out of the
-box, but engineers will always want to do more—part of the benefit of task-based
-systems is their flexibility in supporting any kind of build process, and it
-would be better not to give that up in an artifact-based build system.
-Fortunately, Bazel allows its supported target types to be extended by
-[adding custom rules](/extending/rules).
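To make this concrete, here is a minimal sketch of what such a custom rule can
look like (the rule and file names are hypothetical, not a published ruleset):

```python
# concat.bzl: a toy rule that concatenates its input files into one output.
def _concat_impl(ctx):
    out = ctx.actions.declare_file(ctx.label.name + ".txt")
    ctx.actions.run_shell(
        inputs = ctx.files.srcs,
        outputs = [out],
        command = "cat {} > {}".format(
            " ".join([f.path for f in ctx.files.srcs]),
            out.path,
        ),
    )
    return [DefaultInfo(files = depset([out]))]

concat = rule(
    implementation = _concat_impl,
    attrs = {
        "srcs": attr.label_list(allow_files = True),
    },
)
```

A `BUILD` file can then `load(":concat.bzl", "concat")` and declare `concat`
targets just like the built-in rules shown earlier.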
- -To define a rule in Bazel, the rule author declares the inputs that the rule -requires (in the form of attributes passed in the `BUILD` file) and the fixed -set of outputs that the rule produces. The author also defines the actions that -will be generated by that rule. Each action declares its inputs and outputs, -runs a particular executable or writes a particular string to a file, and can be -connected to other actions via its inputs and outputs. This means that actions -are the lowest-level composable unit in the build system—an action can do -whatever it wants so long as it uses only its declared inputs and outputs, and -Bazel takes care of scheduling actions and caching their results as appropriate. - -The system isn’t foolproof given that there’s no way to stop an action developer -from doing something like introducing a nondeterministic process as part of -their action. But this doesn’t happen very often in practice, and pushing the -possibilities for abuse all the way down to the action level greatly decreases -opportunities for errors. Rules supporting many common languages and tools are -widely available online, and most projects will never need to define their own -rules. Even for those that do, rule definitions only need to be defined in one -central place in the repository, meaning most engineers will be able to use -those rules without ever having to worry about their implementation. - -### Isolating the environment - -Actions sound like they might run into the same problems as tasks in other -systems—isn’t it still possible to write actions that both write to the same -file and end up conflicting with one another? Actually, Bazel makes these -conflicts impossible by using _[sandboxing](/docs/sandboxing)_. On supported -systems, every action is isolated from every other action via a filesystem -sandbox. Effectively, each action can see only a restricted view of the -filesystem that includes the inputs it has declared and any outputs it has -produced. This is enforced by systems such as LXC on Linux, the same technology -behind Docker. This means that it’s impossible for actions to conflict with one -another because they are unable to read any files they don’t declare, and any -files that they write but don’t declare will be thrown away when the action -finishes. Bazel also uses sandboxes to restrict actions from communicating via -the network. - -### Making external dependencies deterministic - -There’s still one problem remaining: build systems often need to download -dependencies (whether tools or libraries) from external sources rather than -directly building them. This can be seen in the example via the -`@com_google_common_guava_guava//jar` dependency, which downloads a `JAR` file -from Maven. - -Depending on files outside of the current workspace is risky. Those files could -change at any time, potentially requiring the build system to constantly check -whether they’re fresh. If a remote file changes without a corresponding change -in the workspace source code, it can also lead to unreproducible builds—a build -might work one day and fail the next for no obvious reason due to an unnoticed -dependency change. Finally, an external dependency can introduce a huge security -risk when it is owned by a third party: if an attacker is able to infiltrate -that third-party server, they can replace the dependency file with something of -their own design, potentially giving them full control over your build -environment and its output. 
- -The fundamental problem is that we want the build system to be aware of these -files without having to check them into source control. Updating a dependency -should be a conscious choice, but that choice should be made once in a central -place rather than managed by individual engineers or automatically by the -system. This is because even with a “Live at Head” model, we still want builds -to be deterministic, which implies that if you check out a commit from last -week, you should see your dependencies as they were then rather than as they are -now. - -Bazel and some other build systems address this problem by requiring a -workspacewide manifest file that lists a _cryptographic hash_ for every external -dependency in the workspace. The hash is a concise way to uniquely represent the -file without checking the entire file into source control. Whenever a new -external dependency is referenced from a workspace, that dependency’s hash is -added to the manifest, either manually or automatically. When Bazel runs a -build, it checks the actual hash of its cached dependency against the expected -hash defined in the manifest and redownloads the file only if the hash differs. - -If the artifact we download has a different hash than the one declared in the -manifest, the build will fail unless the hash in the manifest is updated. This -can be done automatically, but that change must be approved and checked into -source control before the build will accept the new dependency. This means that -there’s always a record of when a dependency was updated, and an external -dependency can’t change without a corresponding change in the workspace source. -It also means that, when checking out an older version of the source code, the -build is guaranteed to use the same dependencies that it was using at the point -when that version was checked in (or else it will fail if those dependencies are -no longer available). - -Of course, it can still be a problem if a remote server becomes unavailable or -starts serving corrupt data—this can cause all of your builds to begin failing -if you don’t have another copy of that dependency available. To avoid this -problem, we recommend that, for any nontrivial project, you mirror all of its -dependencies onto servers or services that you trust and control. Otherwise you -will always be at the mercy of a third party for your build system’s -availability, even if the checked-in hashes guarantee its security. diff --git a/8.2.1/basics/build-systems.mdx b/8.2.1/basics/build-systems.mdx deleted file mode 100644 index b3c6338..0000000 --- a/8.2.1/basics/build-systems.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: 'Why a Build System?' ---- - - - -This page discusses what build systems are, what they do, why you should use a -build system, and why compilers and build scripts aren't the best choice as your -organization starts to scale. It's intended for developers who don't have much -experience with a build system. - -## What is a build system? - -Fundamentally, all build systems have a straightforward purpose: they transform -the source code written by engineers into executable binaries that can be read -by machines. Build systems aren't just for human-authored code; they also allow -machines to create builds automatically, whether for testing or for releases to -production. In an organization with thousands of engineers, it's common that -most builds are triggered automatically rather than directly by engineers. - -### Can't I just use a compiler? 
- -The need for a build system might not be immediately obvious. Most engineers -don't use a build system while learning to code: most start by invoking tools -like `gcc` or `javac` directly from the command line, or the equivalent in an -integrated development environment (IDE). As long as all the source code is in -the same directory, a command like this works fine: - -```posix-terminal -javac *.java -``` - -This instructs the Java compiler to take every Java source file in the current -directory and turn it into a binary class file. In the simplest case, this is -all you need. - -However, as soon as code expands, the complications begin. `javac` is smart -enough to look in subdirectories of the current directory to find code to -import. But it has no way of finding code stored in _other parts_ of the -filesystem (perhaps a library shared by several projects). It also only knows -how to build Java code. Large systems often involve different pieces written in -a variety of programming languages with webs of dependencies among those pieces, -meaning no compiler for a single language can possibly build the entire system. - -Once you're dealing with code from multiple languages or multiple compilation -units, building code is no longer a one-step process. Now you must evaluate what -your code depends on and build those pieces in the proper order, possibly using -a different set of tools for each piece. If any dependencies change, you must -repeat this process to avoid depending on stale binaries. For a codebase of even -moderate size, this process quickly becomes tedious and error-prone. - -The compiler also doesn’t know anything about how to handle external -dependencies, such as third-party `JAR` files in Java. Without a build system, -you could manage this by downloading the dependency from the internet, sticking -it in a `lib` folder on the hard drive, and configuring the compiler to read -libraries from that directory. Over time, it's difficult to maintain the -updates, versions, and source of these external dependencies. - -### What about shell scripts? - -Suppose that your hobby project starts out simple enough that you can build it -using just a compiler, but you begin running into some of the problems described -previously. Maybe you still don’t think you need a build system and can automate -away the tedious parts using some simple shell scripts that take care of -building things in the correct order. This helps out for a while, but pretty -soon you start running into even more problems: - -* It becomes tedious. As your system grows more complex, you begin spending - almost as much time working on your build scripts as on real code. Debugging - shell scripts is painful, with more and more hacks being layered on top of - one another. - -* It’s slow. To make sure you weren’t accidentally relying on stale libraries, - you have your build script build every dependency in order every time you - run it. You think about adding some logic to detect which parts need to be - rebuilt, but that sounds awfully complex and error prone for a script. Or - you think about specifying which parts need to be rebuilt each time, but - then you’re back to square one. - -* Good news: it’s time for a release! Better go figure out all the arguments - you need to pass to the jar command to make your final build. And remember - how to upload it and push it out to the central repository. And build and - push the documentation updates, and send out a notification to users. 
Hmm, - maybe this calls for another script... - -* Disaster! Your hard drive crashes, and now you need to recreate your entire - system. You were smart enough to keep all of your source files in version - control, but what about those libraries you downloaded? Can you find them - all again and make sure they were the same version as when you first - downloaded them? Your scripts probably depended on particular tools being - installed in particular places—can you restore that same environment so that - the scripts work again? What about all those environment variables you set a - long time ago to get the compiler working just right and then forgot about? - -* Despite the problems, your project is successful enough that you’re able to - begin hiring more engineers. Now you realize that it doesn’t take a disaster - for the previous problems to arise—you need to go through the same painful - bootstrapping process every time a new developer joins your team. And - despite your best efforts, there are still small differences in each - person’s system. Frequently, what works on one person’s machine doesn’t work - on another’s, and each time it takes a few hours of debugging tool paths or - library versions to figure out where the difference is. - -* You decide that you need to automate your build system. In theory, this is - as simple as getting a new computer and setting it up to run your build - script every night using cron. You still need to go through the painful - setup process, but now you don’t have the benefit of a human brain being - able to detect and resolve minor problems. Now, every morning when you get - in, you see that last night’s build failed because yesterday a developer - made a change that worked on their system but didn’t work on the automated - build system. Each time it’s a simple fix, but it happens so often that you - end up spending a lot of time each day discovering and applying these simple - fixes. - -* Builds become slower and slower as the project grows. One day, while waiting - for a build to complete, you gaze mournfully at the idle desktop of your - coworker, who is on vacation, and wish there were a way to take advantage of - all that wasted computational power. - -You’ve run into a classic problem of scale. For a single developer working on at -most a couple hundred lines of code for at most a week or two (which might have -been the entire experience thus far of a junior developer who just graduated -university), a compiler is all you need. Scripts can maybe take you a little bit -farther. But as soon as you need to coordinate across multiple developers and -their machines, even a perfect build script isn’t enough because it becomes very -difficult to account for the minor differences in those machines. At this point, -this simple approach breaks down and it’s time to invest in a real build system. diff --git a/8.2.1/basics/dependencies.mdx b/8.2.1/basics/dependencies.mdx deleted file mode 100644 index 1d3bf8f..0000000 --- a/8.2.1/basics/dependencies.mdx +++ /dev/null @@ -1,301 +0,0 @@ ---- -title: 'Dependency Management' ---- - - - -In looking through the previous pages, one theme repeats over and over: managing -your own code is fairly straightforward, but managing its dependencies is much -more difficult. 
There are all sorts of dependencies: sometimes there’s a -dependency on a task (such as “push the documentation before I mark a release as -complete”), and sometimes there’s a dependency on an artifact (such as “I need -to have the latest version of the computer vision library to build my code”). -Sometimes, you have internal dependencies on another part of your codebase, and -sometimes you have external dependencies on code or data owned by another team -(either in your organization or a third party). But in any case, the idea of “I -need that before I can have this” is something that recurs repeatedly in the -design of build systems, and managing dependencies is perhaps the most -fundamental job of a build system. - -## Dealing with Modules and Dependencies - -Projects that use artifact-based build systems like Bazel are broken into a set -of modules, with modules expressing dependencies on one another via `BUILD` -files. Proper organization of these modules and dependencies can have a huge -effect on both the performance of the build system and how much work it takes to -maintain. - -## Using Fine-Grained Modules and the 1:1:1 Rule - -The first question that comes up when structuring an artifact-based build is -deciding how much functionality an individual module should encompass. In Bazel, -a _module_ is represented by a target specifying a buildable unit like a -`java_library` or a `go_binary`. At one extreme, the entire project could be -contained in a single module by putting one `BUILD` file at the root and -recursively globbing together all of that project’s source files. At the other -extreme, nearly every source file could be made into its own module, effectively -requiring each file to list in a `BUILD` file every other file it depends on. - -Most projects fall somewhere between these extremes, and the choice involves a -trade-off between performance and maintainability. Using a single module for the -entire project might mean that you never need to touch the `BUILD` file except -when adding an external dependency, but it means that the build system must -always build the entire project all at once. This means that it won’t be able to -parallelize or distribute parts of the build, nor will it be able to cache parts -that it’s already built. One-module-per-file is the opposite: the build system -has the maximum flexibility in caching and scheduling steps of the build, but -engineers need to expend more effort maintaining lists of dependencies whenever -they change which files reference which. - -Though the exact granularity varies by language (and often even within -language), Google tends to favor significantly smaller modules than one might -typically write in a task-based build system. A typical production binary at -Google often depends on tens of thousands of targets, and even a moderate-sized -team can own several hundred targets within its codebase. For languages like -Java that have a strong built-in notion of packaging, each directory usually -contains a single package, target, and `BUILD` file (Pants, another build system -based on Bazel, calls this the 1:1:1 rule). Languages with weaker packaging -conventions frequently define multiple targets per `BUILD` file. - -The benefits of smaller build targets really begin to show at scale because they -lead to faster distributed builds and a less frequent need to rebuild targets. 
-The advantages become even more compelling after testing enters the picture, as -finer-grained targets mean that the build system can be much smarter about -running only a limited subset of tests that could be affected by any given -change. Because Google believes in the systemic benefits of using smaller -targets, we’ve made some strides in mitigating the downside by investing in -tooling to automatically manage `BUILD` files to avoid burdening developers. - -Some of these tools, such as `buildifier` and `buildozer`, are available with -Bazel in the [`buildtools` -directory](https://github.com/bazelbuild/buildtools). - -## Minimizing Module Visibility - -Bazel and other build systems allow each target to specify a visibility — a -property that determines which other targets may depend on it. A private target -can only be referenced within its own `BUILD` file. A target may grant broader -visibility to the targets of an explicitly defined list of `BUILD` files, or, in -the case of public visibility, to every target in the workspace. - -As with most programming languages, it is usually best to minimize visibility as -much as possible. Generally, teams at Google will make targets public only if -those targets represent widely used libraries available to any team at Google. -Teams that require others to coordinate with them before using their code will -maintain an allowlist of customer targets as their target’s visibility. Each -team’s internal implementation targets will be restricted to only directories -owned by the team, and most `BUILD` files will have only one target that isn’t -private. - -## Managing Dependencies - -Modules need to be able to refer to one another. The downside of breaking a -codebase into fine-grained modules is that you need to manage the dependencies -among those modules (though tools can help automate this). Expressing these -dependencies usually ends up being the bulk of the content in a `BUILD` file. - -### Internal dependencies - -In a large project broken into fine-grained modules, most dependencies are -likely to be internal; that is, on another target defined and built in the same -source repository. Internal dependencies differ from external dependencies in -that they are built from source rather than downloaded as a prebuilt artifact -while running the build. This also means that there’s no notion of “version” for -internal dependencies—a target and all of its internal dependencies are always -built at the same commit/revision in the repository. One issue that should be -handled carefully with regard to internal dependencies is how to treat -transitive dependencies (Figure 1). Suppose target A depends on target B, which -depends on a common library target C. Should target A be able to use classes -defined in target C? - -[![Transitive -dependencies](/images/transitive-dependencies.png)](/images/transitive-dependencies.png) - -**Figure 1**. Transitive dependencies - -As far as the underlying tools are concerned, there’s no problem with this; both -B and C will be linked into target A when it is built, so any symbols defined in -C are known to A. Bazel allowed this for many years, but as Google grew, we -began to see problems. Suppose that B was refactored such that it no longer -needed to depend on C. If B’s dependency on C was then removed, A and any other -target that used C via a dependency on B would break. Effectively, a target’s -dependencies became part of its public contract and could never be safely -changed. 
This meant that dependencies accumulated over time and builds at Google
-started to slow down.
-
-Google eventually solved this issue by introducing a “strict transitive
-dependency mode” in Bazel. In this mode, Bazel detects whether a target tries to
-reference a symbol without depending on it directly and, if so, fails with an
-error and a shell command that can be used to automatically insert the
-dependency. Rolling this change out across Google’s entire codebase and
-refactoring every one of our millions of build targets to explicitly list their
-dependencies was a multiyear effort, but it was well worth it. Our builds are
-now much faster given that targets have fewer unnecessary dependencies, and
-engineers are empowered to remove dependencies they don’t need without worrying
-about breaking targets that depend on them.
-
-As usual, enforcing strict transitive dependencies involved a trade-off. It made
-build files more verbose, as frequently used libraries now need to be listed
-explicitly in many places rather than pulled in incidentally, and engineers
-needed to spend more effort adding dependencies to `BUILD` files. We’ve since
-developed tools that reduce this toil by automatically detecting many missing
-dependencies and adding them to `BUILD` files without any developer
-intervention. But even without such tools, we’ve found the trade-off to be well
-worth it as the codebase scales: explicitly adding a dependency to a `BUILD`
-file is a one-time cost, but dealing with implicit transitive dependencies can
-cause ongoing problems as long as the build target exists. Bazel [enforces
-strict transitive
-dependencies](https://blog.bazel.build/2017/06/28/sjd-unused_deps.html)
-on Java code by default.
-
-### External dependencies
-
-If a dependency isn’t internal, it must be external. External dependencies are
-those on artifacts that are built and stored outside of the build system. The
-dependency is imported directly from an artifact repository (typically accessed
-over the internet) and used as-is rather than being built from source. One of
-the biggest differences between external and internal dependencies is that
-external dependencies have versions, and those versions exist independently of
-the project’s source code.
-
-### Automatic versus manual dependency management
-
-Build systems can allow the versions of external dependencies to be managed
-either manually or automatically. When managed manually, the buildfile
-explicitly lists the version it wants to download from the artifact repository,
-often using a [semantic version string](https://semver.org/) such
-as `1.1.4`. When managed automatically, the source file specifies a range of
-acceptable versions, and the build system always downloads the latest one. For
-example, Gradle allows a dependency version to be declared as “1.+” to specify
-that any minor or patch version of a dependency is acceptable so long as the
-major version is 1.
-
-Automatically managed dependencies can be convenient for small projects, but
-they’re usually a recipe for disaster on projects of nontrivial size or that are
-being worked on by more than one engineer. The problem with automatically
-managed dependencies is that you have no control over when the version is
-updated. There’s no way to guarantee that external parties won’t make breaking
-updates (even when they claim to use semantic versioning), so a build that
-worked one day might be broken the next with no easy way to detect what changed
-or to roll it back to a working state. Even if the build doesn’t break, there
-can be subtle behavior or performance changes that are impossible to track down.
-
-In contrast, because manually managed dependencies require a change in source
-control, they can be easily discovered and rolled back, and it’s possible to
-check out an older version of the repository to build with older dependencies.
-Bazel requires that versions of all dependencies be specified manually. At even
-moderate scales, the overhead of manual version management is well worth it for
-the stability it provides.
-
-### The One-Version Rule
-
-Different versions of a library are usually represented by different artifacts,
-so in theory there’s no reason that different versions of the same external
-dependency couldn’t both be declared in the build system under different names.
-That way, each target could choose which version of the dependency it wanted to
-use. This causes a lot of problems in practice, so Google enforces a strict
-[One-Version
-Rule](https://opensource.google/docs/thirdparty/oneversion/) for
-all third-party dependencies in our codebase.
-
-The biggest problem with allowing multiple versions is the diamond dependency
-issue. Suppose that target A depends on target B and on v1 of an external
-library. If target B is later refactored to add a dependency on v2 of the same
-external library, target A will break because it now depends implicitly on two
-different versions of the same library. Effectively, it’s never safe to add a
-new dependency from a target to any third-party library with multiple versions,
-because any of that target’s users could already be depending on a different
-version. Following the One-Version Rule makes this conflict impossible—if a
-target adds a dependency on a third-party library, any existing dependencies
-will already be on that same version, so they can happily coexist.
-
-### Transitive external dependencies
-
-Dealing with the transitive dependencies of an external dependency can be
-particularly difficult. Many artifact repositories, such as Maven Central, allow
-artifacts to specify dependencies on particular versions of other artifacts in
-the repository. Build tools like Maven or Gradle often recursively download each
-transitive dependency by default, meaning that adding a single dependency in
-your project could potentially cause dozens of artifacts to be downloaded in
-total.
-
-This is very convenient: when adding a dependency on a new library, it would be
-a big pain to have to track down each of that library’s transitive dependencies
-and add them all manually. But there’s also a huge downside: because different
-libraries can depend on different versions of the same third-party library, this
-strategy necessarily violates the One-Version Rule and leads to the diamond
-dependency problem. If your target depends on two external libraries that use
-different versions of the same dependency, there’s no telling which one you’ll
-get. This also means that updating an external dependency could cause seemingly
-unrelated failures throughout the codebase if the new version begins pulling in
-conflicting versions of some of its dependencies.
-
-Bazel did not originally download transitive dependencies automatically. It used
-to employ a `WORKSPACE` file that required all transitive dependencies to be
-listed, which led to a lot of pain when managing external dependencies. Bazel
-has since added support for automatic transitive external dependency management
-in the form of the `MODULE.bazel` file.
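For illustration, a minimal `MODULE.bazel` might look like this (the module
name and the dependency versions are hypothetical):

```python
# MODULE.bazel: declare only direct dependencies; Bazel resolves the
# transitive closure from the module registry.
module(name = "my_project", version = "1.0")

bazel_dep(name = "rules_java", version = "7.4.0")
bazel_dep(name = "protobuf", version = "23.1")
```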
See [external dependency -overview](/external/overview) for more details. - -Yet again, the choice here is one between convenience and scalability. Small -projects might prefer not having to worry about managing transitive dependencies -themselves and might be able to get away with using automatic transitive -dependencies. This strategy becomes less and less appealing as the organization -and codebase grows, and conflicts and unexpected results become more and more -frequent. At larger scales, the cost of manually managing dependencies is much -less than the cost of dealing with issues caused by automatic dependency -management. - -### Caching build results using external dependencies - -External dependencies are most often provided by third parties that release -stable versions of libraries, perhaps without providing source code. Some -organizations might also choose to make some of their own code available as -artifacts, allowing other pieces of code to depend on them as third-party rather -than internal dependencies. This can theoretically speed up builds if artifacts -are slow to build but quick to download. - -However, this also introduces a lot of overhead and complexity: someone needs to -be responsible for building each of those artifacts and uploading them to the -artifact repository, and clients need to ensure that they stay up to date with -the latest version. Debugging also becomes much more difficult because different -parts of the system will have been built from different points in the -repository, and there is no longer a consistent view of the source tree. - -A better way to solve the problem of artifacts taking a long time to build is to -use a build system that supports remote caching, as described earlier. Such a -build system saves the resulting artifacts from every build to a location that -is shared across engineers, so if a developer depends on an artifact that was -recently built by someone else, the build system automatically downloads it -instead of building it. This provides all of the performance benefits of -depending directly on artifacts while still ensuring that builds are as -consistent as if they were always built from the same source. This is the -strategy used internally by Google, and Bazel can be configured to use a remote -cache. - -### Security and reliability of external dependencies - -Depending on artifacts from third-party sources is inherently risky. There’s an -availability risk if the third-party source (such as an artifact repository) -goes down, because your entire build might grind to a halt if it’s unable to -download an external dependency. There’s also a security risk: if the -third-party system is compromised by an attacker, the attacker could replace the -referenced artifact with one of their own design, allowing them to inject -arbitrary code into your build. Both problems can be mitigated by mirroring any -artifacts you depend on onto servers you control and blocking your build system -from accessing third-party artifact repositories like Maven Central. The -trade-off is that these mirrors take effort and resources to maintain, so the -choice of whether to use them often depends on the scale of the project. The -security issue can also be completely prevented with little overhead by -requiring the hash of each third-party artifact to be specified in the source -repository, causing the build to fail if the artifact is tampered with. Another -alternative that completely sidesteps the issue is to vendor your project’s -dependencies. 
When a project vendors its dependencies, it checks them into -source control alongside the project’s source code, either as source or as -binaries. This effectively means that all of the project’s external dependencies -are converted to internal dependencies. Google uses this approach internally, -checking every third-party library referenced throughout Google into a -`third_party` directory at the root of Google’s source tree. However, this works -at Google only because Google’s source control system is custom built to handle -an extremely large monorepo, so vendoring might not be an option for all -organizations. diff --git a/8.2.1/basics/distributed-builds.mdx b/8.2.1/basics/distributed-builds.mdx deleted file mode 100644 index c32f44f..0000000 --- a/8.2.1/basics/distributed-builds.mdx +++ /dev/null @@ -1,137 +0,0 @@ ---- -title: 'Distributed Builds' ---- - - - -When you have a large codebase, chains of dependencies can become very deep. -Even simple binaries can often depend on tens of thousands of build targets. At -this scale, it’s simply impossible to complete a build in a reasonable amount -of time on a single machine: no build system can get around the fundamental -laws of physics imposed on a machine’s hardware. The only way to make this work -is with a build system that supports distributed builds wherein the units of -work being done by the system are spread across an arbitrary and scalable -number of machines. Assuming we’ve broken the system’s work into small enough -units (more on this later), this would allow us to complete any build of any -size as quickly as we’re willing to pay for. This scalability is the holy grail -we’ve been working toward by defining an artifact-based build system. - -## Remote caching - -The simplest type of distributed build is one that only leverages _remote -caching_, which is shown in Figure 1. - -[![Distributed build with remote caching](/images/distributed-build-remote-cache.png)](/images/distributed-build-remote-cache.png) - -**Figure 1**. A distributed build showing remote caching - -Every system that performs builds, including both developer workstations and -continuous integration systems, shares a reference to a common remote cache -service. This service might be a fast and local short-term storage system like -Redis or a cloud service like Google Cloud Storage. Whenever a user needs to -build an artifact, whether directly or as a dependency, the system first checks -with the remote cache to see if that artifact already exists there. If so, it -can download the artifact instead of building it. If not, the system builds the -artifact itself and uploads the result back to the cache. This means that -low-level dependencies that don’t change very often can be built once and shared -across users rather than having to be rebuilt by each user. At Google, many -artifacts are served from a cache rather than built from scratch, vastly -reducing the cost of running our build system. - -For a remote caching system to work, the build system must guarantee that builds -are completely reproducible. That is, for any build target, it must be possible -to determine the set of inputs to that target such that the same set of inputs -will produce exactly the same output on any machine. This is the only way to -ensure that the results of downloading an artifact are the same as the results -of building it oneself. 
Note that this requires that each artifact in the cache -be keyed on both its target and a hash of its inputs—that way, different -engineers could make different modifications to the same target at the same -time, and the remote cache would store all of the resulting artifacts and serve -them appropriately without conflict. - -Of course, for there to be any benefit from a remote cache, downloading an -artifact needs to be faster than building it. This is not always the case, -especially if the cache server is far from the machine doing the build. Google’s -network and build system is carefully tuned to be able to quickly share build -results. - -## Remote execution - -Remote caching isn’t a true distributed build. If the cache is lost or if you -make a low-level change that requires everything to be rebuilt, you still need -to perform the entire build locally on your machine. The true goal is to support -remote execution, in which the actual work of doing the build can be spread -across any number of workers. Figure 2 depicts a remote execution system. - -[![Remote execution system](/images/remote-execution-system.png)](/images/remote-execution-system.png) - -**Figure 2**. A remote execution system - -The build tool running on each user’s machine (where users are either human -engineers or automated build systems) sends requests to a central build master. -The build master breaks the requests into their component actions and schedules -the execution of those actions over a scalable pool of workers. Each worker -performs the actions asked of it with the inputs specified by the user and -writes out the resulting artifacts. These artifacts are shared across the other -machines executing actions that require them until the final output can be -produced and sent to the user. - -The trickiest part of implementing such a system is managing the communication -between the workers, the master, and the user’s local machine. Workers might -depend on intermediate artifacts produced by other workers, and the final output -needs to be sent back to the user’s local machine. To do this, we can build on -top of the distributed cache described previously by having each worker write -its results to and read its dependencies from the cache. The master blocks -workers from proceeding until everything they depend on has finished, in which -case they’ll be able to read their inputs from the cache. The final product is -also cached, allowing the local machine to download it. Note that we also need a -separate means of exporting the local changes in the user’s source tree so that -workers can apply those changes before building. - -For this to work, all of the parts of the artifact-based build systems described -earlier need to come together. Build environments must be completely -self-describing so that we can spin up workers without human intervention. Build -processes themselves must be completely self-contained because each step might -be executed on a different machine. Outputs must be completely deterministic so -that each worker can trust the results it receives from other workers. Such -guarantees are extremely difficult for a task-based system to provide, which -makes it nigh-impossible to build a reliable remote execution system on top of -one. - -## Distributed builds at Google - -Since 2008, Google has been using a distributed build system that employs both -remote caching and remote execution, which is illustrated in Figure 3. 
- -[![High-level build system](/images/high-level-build-system.png)](/images/high-level-build-system.png) - -**Figure 3**. Google’s distributed build system - -Google’s remote cache is called ObjFS. It consists of a backend that stores -build outputs in Bigtables distributed throughout our fleet of production -machines and a frontend FUSE daemon named objfsd that runs on each developer’s -machine. The FUSE daemon allows engineers to browse build outputs as if they -were normal files stored on the workstation, but with the file content -downloaded on-demand only for the few files that are directly requested by the -user. Serving file contents on-demand greatly reduces both network and disk -usage, and the system is able to build twice as fast compared to when we stored -all build output on the developer’s local disk. - -Google’s remote execution system is called Forge. A Forge client in Blaze -(Bazel's internal equivalent) called -the Distributor sends requests for each action to a job running in our -datacenters called the Scheduler. The Scheduler maintains a cache of action -results, allowing it to return a response immediately if the action has already -been created by any other user of the system. If not, it places the action into -a queue. A large pool of Executor jobs continually read actions from this queue, -execute them, and store the results directly in the ObjFS Bigtables. These -results are available to the executors for future actions, or to be downloaded -by the end user via objfsd. - -The end result is a system that scales to efficiently support all builds -performed at Google. And the scale of Google’s builds is truly massive: Google -runs millions of builds executing millions of test cases and producing petabytes -of build outputs from billions of lines of source code every day. Not only does -such a system let our engineers build complex codebases quickly, it also allows -us to implement a huge number of automated tools and systems that rely on our -build. diff --git a/8.2.1/basics/hermeticity.mdx b/8.2.1/basics/hermeticity.mdx deleted file mode 100644 index 282aad8..0000000 --- a/8.2.1/basics/hermeticity.mdx +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: 'Hermeticity' ---- - - - -This page covers hermeticity, the benefits of using hermetic builds, and -strategies for identifying non-hermetic behavior in your builds. - -## Overview - -When given the same input source code and product configuration, a hermetic -build system always returns the same output by isolating the build from changes -to the host system. - -In order to isolate the build, hermetic builds are insensitive to libraries and -other software installed on the local or remote host machine. They depend on -specific versions of build tools, such as compilers, and dependencies, such as -libraries. This makes the build process self-contained as it doesn't rely on -services external to the build environment. - -The two important aspects of hermeticity are: - -* **Isolation**: Hermetic build systems treat tools as source code. They - download copies of tools and manage their storage and use inside managed file - trees. This creates isolation between the host machine and local user, - including installed versions of languages. -* **Source identity**: Hermetic build systems try to ensure the sameness of - inputs. Code repositories, such as Git, identify sets of code mutations with a - unique hash code. Hermetic build systems use this hash to identify changes to - the build's input. 
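To make the source-identity point concrete, here is a sketch of a dependency pinned by content hash rather than by version label alone (the URL and hash are placeholders, not a real release):

```python
# WORKSPACE-style repo declaration -- illustrative only.
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

http_archive(
    name = "some_library",
    urls = ["https://example.com/some_library-1.2.3.tar.gz"],  # placeholder URL
    strip_prefix = "some_library-1.2.3",
    # If the downloaded bytes ever differ from this hash, the build fails,
    # so the input's identity is pinned, not merely its version string.
    sha256 = "0000000000000000000000000000000000000000000000000000000000000000",
)
```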
## Benefits

The major benefits of hermetic builds are:

* **Speed**: The output of an action can be cached, and the action need not be run again unless inputs change.
* **Parallel execution**: For given input and output, the build system can construct a graph of all actions to calculate efficient and parallel execution. The build system loads the rules, calculates an action graph, and hashes inputs to look up in the cache.
* **Multiple builds**: You can perform multiple hermetic builds on the same machine, each build using different tools and versions.
* **Reproducibility**: Hermetic builds are good for troubleshooting because you know the exact conditions that produced the build.

## Identifying non-hermeticity

If you are preparing to switch to Bazel, migration is easier if you improve your existing builds' hermeticity in advance. Some common sources of non-hermeticity in builds are:

* Arbitrary processing in `.mk` files
* Actions or tooling that create files non-deterministically, usually involving build IDs or timestamps
* System binaries that differ across hosts (such as `/usr/bin` binaries, absolute paths, system C++ compilers for native C++ rules autoconfiguration)
* Writing to the source tree during the build. This prevents the same source tree from being used for another target. The first build writes to the source tree, fixing the source tree for target A. Then trying to build target B may fail.

## Troubleshooting non-hermetic builds

Starting with local execution, issues that affect local cache hits reveal non-hermetic actions.

* Ensure null sequential builds: If you run `make` and get a successful build, running the build again should not rebuild any targets. If you run each build step twice or on different systems, and a hash of the file contents differs between runs, the build is not reproducible.
* Run steps to [debug local cache hits](/remote/cache-remote#troubleshooting-cache-hits) from a variety of potential client machines to ensure that you catch any cases of the client environment leaking into the actions.
* Execute a build within a Docker container that contains nothing but the checked-out source tree and an explicit list of host tools. Build breakages and error messages will catch implicit system dependencies.
* Discover and fix hermeticity problems using [remote execution rules](/remote/rules#overview).
* Enable strict [sandboxing](/docs/sandboxing) at the per-action level, since actions in a build can be stateful and affect the build or the output.
* [Workspace rules](/remote/workspace) allow developers to add dependencies to external workspaces, but they are rich enough to allow arbitrary processing to happen in the process. You can get a log of some potentially non-hermetic actions in Bazel workspace rules by adding the flag `--experimental_workspace_rules_log_file=PATH` to your Bazel command.

Note: Make your build fully hermetic when mixing remote and local execution, using Bazel’s “dynamic strategy” functionality. Running Bazel inside the remote Docker container will enable the build to execute the same in both environments.
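As a minimal illustration of the timestamp problem listed above, here is a sketch of a non-hermetic action next to a hermetic rewrite (the targets are hypothetical, not taken from a real project):

```python
# BUILD -- illustrative only.
# Non-hermetic: the output embeds the current time, so it differs on every
# run and can never be reliably cached or compared across machines.
genrule(
    name = "version_stamp_nonhermetic",
    outs = ["stamp_nonhermetic.txt"],
    cmd = "echo \"built at $$(date)\" > $@",
)

# Hermetic: the output is derived only from declared inputs, so identical
# sources always produce byte-identical results.
genrule(
    name = "version_stamp_hermetic",
    srcs = ["version.txt"],
    outs = ["stamp_hermetic.txt"],
    cmd = "cat $(location version.txt) > $@",
)
```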
- -## Hermeticity with Bazel - -For more information about how other projects have had success using hermetic -builds with Bazel, see these BazelCon talks: - -* [Building Real-time Systems with Bazel](https://www.youtube.com/watch?v=t_3bckhV_YI) (SpaceX) -* [Bazel Remote Execution and Remote Caching](https://www.youtube.com/watch?v=_bPyEbAyC0s) (Uber and TwoSigma) -* [Faster Builds With Remote Execution and Caching](https://www.youtube.com/watch?v=MyuJRUwT5LI) -* [Fusing Bazel: Faster Incremental Builds](https://www.youtube.com/watch?v=rQd9Zd1ONOw) -* [Remote Execution vs Local Execution](https://www.youtube.com/watch?v=C8wHmIln--g) -* [Improving the Usability of Remote Caching](https://www.youtube.com/watch?v=u5m7V3ZRHLA) (IBM) -* [Building Self Driving Cars with Bazel](https://www.youtube.com/watch?v=Gh4SJuYUoQI&list=PLxNYxgaZ8Rsf-7g43Z8LyXct9ax6egdSj&index=4&t=0s) (BMW) -* [Building Self Driving Cars with Bazel + Q&A](https://www.youtube.com/watch?v=fjfFe98LTm8&list=PLxNYxgaZ8Rsf-7g43Z8LyXct9ax6egdSj&index=29) (GM Cruise) diff --git a/8.2.1/basics/index.mdx b/8.2.1/basics/index.mdx deleted file mode 100644 index f3c833f..0000000 --- a/8.2.1/basics/index.mdx +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: 'Build Basics' ---- - - - -A build system is one of the most important parts of an engineering organization -because each developer interacts with it potentially dozens or hundreds of times -per day. A fully featured build system is necessary to enable developer -productivity as an organization scales. For individual developers, it's -straightforward to just compile your code and so a build system might seem -excessive. But at a larger scale, having a build system helps with managing -shared dependencies, such as relying on another part of the code base, or an -external resource, such as a library. Build systems help to make sure that you -have everything you need to build your code before it starts building. Build -systems also increase velocity when they're set up to help engineers share -resources and results. - -This section covers some history and basics of building and build systems, -including design decisions that went into making Bazel. If you're -familiar with artifact-based build systems, such as Bazel, Buck, and Pants, you -can skip this section, but it's a helpful overview to understand why -artifact-based build systems are excellent at enabling scale. - -Note: Much of this section's content comes from the _Build Systems and -Build Philosophy_ chapter of the -[_Software Engineering at Google_ book](https://abseil.io/resources/swe-book/html/ch18.html). -Thank you to the original author, Erik Kuefler, for allowing its reuse and -modification here! - -* **[Why a Build System?](/basics/build-systems)** - - If you haven't used a build system before, start here. This page covers why - you should use a build system, and why compilers and build scripts aren't - the best choice once your organization starts to scale beyond a few - developers. - -* **[Task-Based Build Systems](/basics/task-based-builds)** - - This page discusses task-based build systems (such as Make, Maven, and - Gradle) and some of their challenges. - -* **[Artifact-Based Build Systems](/basics/artifact-based-builds)** - - This page discusses artifact-based build systems in response to the pain - points of task-based build systems. - -* **[Distributed Builds](/basics/distributed-builds)** - - This page covers distributed builds, or builds that are executed outside of - your local machine. 
This requires more robust infrastructure to share resources and build results (and is where the true wizardry happens!)

* **[Dependency Management](/basics/dependencies)**

  This page covers some complications of dependencies at a large scale and strategies to counteract those complications.

diff --git a/8.2.1/basics/task-based-builds.mdx b/8.2.1/basics/task-based-builds.mdx
deleted file mode 100644
index 9dd3f8c..0000000
--- a/8.2.1/basics/task-based-builds.mdx
+++ /dev/null
@@ -1,216 +0,0 @@

---
title: 'Task-Based Build Systems'
---

This page covers task-based build systems, how they work, and some of the complications that can occur with task-based systems. After shell scripts, task-based build systems are the next logical evolution of building.

## Understanding task-based build systems

In a task-based build system, the fundamental unit of work is the task. Each task is a script that can execute any sort of logic, and tasks specify other tasks as dependencies that must run before them. Most major build systems in use today, such as Ant, Maven, Gradle, Grunt, and Rake, are task based. Instead of shell scripts, most modern build systems require engineers to create build files that describe how to perform the build.

Take this example from the [Ant manual](https://ant.apache.org/manual/using.html):

```xml
<project name="MyProject" default="dist" basedir=".">
  <description>
    simple example build file
  </description>
  <!-- set global properties for this build -->
  <property name="src" location="src"/>
  <property name="build" location="build"/>
  <property name="dist" location="dist"/>

  <target name="init">
    <!-- Create the time stamp -->
    <tstamp/>
    <!-- Create the build directory structure used by compile -->
    <mkdir dir="${build}"/>
  </target>
  <target name="compile" depends="init"
      description="compile the source">
    <!-- Compile the Java code from ${src} into ${build} -->
    <javac srcdir="${src}" destdir="${build}"/>
  </target>
  <target name="dist" depends="compile"
      description="generate the distribution">
    <!-- Create the distribution directory -->
    <mkdir dir="${dist}/lib"/>
    <!-- Put everything in ${build} into the MyProject-${DSTAMP}.jar file -->
    <jar jarfile="${dist}/lib/MyProject-${DSTAMP}.jar" basedir="${build}"/>
  </target>
  <target name="clean"
      description="clean up">
    <!-- Delete the ${build} and ${dist} directory trees -->
    <delete dir="${build}"/>
    <delete dir="${dist}"/>
  </target>
</project>
```

The buildfile is written in XML and defines some simple metadata about the build along with a list of tasks (the `<target>` tags in the XML). (Ant uses the word _target_ to represent a _task_, and it uses the word _task_ to refer to _commands_.) Each task executes a list of possible commands defined by Ant, which here include creating and deleting directories, running `javac`, and creating a JAR file. This set of commands can be extended by user-provided plug-ins to cover any sort of logic. Each task can also define the tasks it depends on via the depends attribute. These dependencies form an acyclic graph, as seen in Figure 1.

[![Acyclic graph showing dependencies](/images/task-dependencies.png)](/images/task-dependencies.png)

Figure 1. An acyclic graph showing dependencies

Users perform builds by providing tasks to Ant’s command-line tool. For example, when a user types `ant dist`, Ant takes the following steps:

1. Loads a file named `build.xml` in the current directory and parses it to create the graph structure shown in Figure 1.
1. Looks for the task named `dist` that was provided on the command line and discovers that it has a dependency on the task named `compile`.
1. Looks for the task named `compile` and discovers that it has a dependency on the task named `init`.
1. Looks for the task named `init` and discovers that it has no dependencies.
1. Executes the commands defined in the `init` task.
1. Executes the commands defined in the `compile` task given that all of that task’s dependencies have been run.
1. Executes the commands defined in the `dist` task given that all of that task’s dependencies have been run.

In the end, the code executed by Ant when running the `dist` task is equivalent to the following shell script:

```posix-terminal
./createTimestamp.sh

mkdir build/

javac src/* -d build/

mkdir -p dist/lib/

jar cf dist/lib/MyProject-$(date --iso-8601).jar build/*
```

When the syntax is stripped away, the buildfile and the build script actually aren’t too different.
But we’ve already gained a lot by doing this. We can -create new buildfiles in other directories and link them together. We can easily -add new tasks that depend on existing tasks in arbitrary and complex ways. We -need only pass the name of a single task to the `ant` command-line tool, and it -determines everything that needs to be run. - -Ant is an old piece of software, originally released in 2000. Other tools like -Maven and Gradle have improved on Ant in the intervening years and essentially -replaced it by adding features like automatic management of external -dependencies and a cleaner syntax without any XML. But the nature of these newer -systems remains the same: they allow engineers to write build scripts in a -principled and modular way as tasks and provide tools for executing those tasks -and managing dependencies among them. - -## The dark side of task-based build systems - -Because these tools essentially let engineers define any script as a task, they -are extremely powerful, allowing you to do pretty much anything you can imagine -with them. But that power comes with drawbacks, and task-based build systems can -become difficult to work with as their build scripts grow more complex. The -problem with such systems is that they actually end up giving _too much power to -engineers and not enough power to the system_. Because the system has no idea -what the scripts are doing, performance suffers, as it must be very conservative -in how it schedules and executes build steps. And there’s no way for the system -to confirm that each script is doing what it should, so scripts tend to grow in -complexity and end up being another thing that needs debugging. - -### Difficulty of parallelizing build steps - -Modern development workstations are quite powerful, with multiple cores that are -capable of executing several build steps in parallel. But task-based systems are -often unable to parallelize task execution even when it seems like they should -be able to. Suppose that task A depends on tasks B and C. Because tasks B and C -have no dependency on each other, is it safe to run them at the same time so -that the system can more quickly get to task A? Maybe, if they don’t touch any -of the same resources. But maybe not—perhaps both use the same file to track -their statuses and running them at the same time causes a conflict. There’s no -way in general for the system to know, so either it has to risk these conflicts -(leading to rare but very difficult-to-debug build problems), or it has to -restrict the entire build to running on a single thread in a single process. -This can be a huge waste of a powerful developer machine, and it completely -rules out the possibility of distributing the build across multiple machines. - -### Difficulty performing incremental builds - -A good build system allows engineers to perform reliable incremental builds such -that a small change doesn’t require the entire codebase to be rebuilt from -scratch. This is especially important if the build system is slow and unable to -parallelize build steps for the aforementioned reasons. But unfortunately, -task-based build systems struggle here, too. Because tasks can do anything, -there’s no way in general to check whether they’ve already been done. Many tasks -simply take a set of source files and run a compiler to create a set of -binaries; thus, they don’t need to be rerun if the underlying source files -haven’t changed. 
But without additional information, the system can’t say this -for sure—maybe the task downloads a file that could have changed, or maybe it -writes a timestamp that could be different on each run. To guarantee -correctness, the system typically must rerun every task during each build. Some -build systems try to enable incremental builds by letting engineers specify the -conditions under which a task needs to be rerun. Sometimes this is feasible, but -often it’s a much trickier problem than it appears. For example, in languages -like C++ that allow files to be included directly by other files, it’s -impossible to determine the entire set of files that must be watched for changes -without parsing the input sources. Engineers often end up taking shortcuts, and -these shortcuts can lead to rare and frustrating problems where a task result is -reused even when it shouldn’t be. When this happens frequently, engineers get -into the habit of running clean before every build to get a fresh state, -completely defeating the purpose of having an incremental build in the first -place. Figuring out when a task needs to be rerun is surprisingly subtle, and is -a job better handled by machines than humans. - -### Difficulty maintaining and debugging scripts - -Finally, the build scripts imposed by task-based build systems are often just -difficult to work with. Though they often receive less scrutiny, build scripts -are code just like the system being built, and are easy places for bugs to hide. -Here are some examples of bugs that are very common when working with a -task-based build system: - -* Task A depends on task B to produce a particular file as output. The owner - of task B doesn’t realize that other tasks rely on it, so they change it to - produce output in a different location. This can’t be detected until someone - tries to run task A and finds that it fails. -* Task A depends on task B, which depends on task C, which is producing a - particular file as output that’s needed by task A. The owner of task B - decides that it doesn’t need to depend on task C any more, which causes task - A to fail even though task B doesn’t care about task C at all! -* The developer of a new task accidentally makes an assumption about the - machine running the task, such as the location of a tool or the value of - particular environment variables. The task works on their machine, but fails - whenever another developer tries it. -* A task contains a nondeterministic component, such as downloading a file - from the internet or adding a timestamp to a build. Now, people get - potentially different results each time they run the build, meaning that - engineers won’t always be able to reproduce and fix one another’s failures - or failures that occur on an automated build system. -* Tasks with multiple dependencies can create race conditions. If task A - depends on both task B and task C, and task B and C both modify the same - file, task A gets a different result depending on which one of tasks B and C - finishes first. - -There’s no general-purpose way to solve these performance, correctness, or -maintainability problems within the task-based framework laid out here. So long -as engineers can write arbitrary code that runs during the build, the system -can’t have enough information to always be able to run builds quickly and -correctly. 
To solve the problem, we need to take some power out of the hands of -engineers and put it back in the hands of the system and reconceptualize the -role of the system not as running tasks, but as producing artifacts. - -This approach led to the creation of artifact-based build systems, like Blaze -and Bazel. diff --git a/8.2.1/brand/index.mdx b/8.2.1/brand/index.mdx deleted file mode 100644 index 2a21cd4..0000000 --- a/8.2.1/brand/index.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: 'Bazel Brand Guidelines' ---- - - - -The Bazel trademark and logo ("Bazel Trademarks") are trademarks of Google, and -are treated separately from the copyright or patent license grants contained in -the Apache-licensed Bazel repositories on GitHub. Any use of the Bazel -Trademarks other than those permitted in these guidelines must be approved in -advance. - -## Purpose of the Brand Guidelines - -These guidelines exist to ensure that the Bazel project can share its technology -under open source licenses while making sure that the "Bazel" brand is protected -as a meaningful source identifier in a way that's consistent with trademark law. -By adhering to these guidelines, you help to promote the freedom to use and -develop high-quality Bazel technology. - -## Acceptable Uses - -Given the open nature of Bazel, you may use the Bazel trademark to refer to the -project without prior written permission. Examples of these approved references -include the following: - -* To refer to the Bazel Project itself; -* To link to bazel.build; -* To refer to unmodified source code or other files shared by the Bazel - repositories on GitHub; -* In blog posts, news articles, or educational materials about Bazel; -* To accurately identify that your design or implementation is based on, is - for use with, or is compatible with Bazel technology. - -Examples: - -* \[Your Product\] for Bazel -* \[Your Product\] is compatible with Bazel -* \[XYZ\] Conference for Bazel Users - -## General Guidelines - -* The Bazel name may never be used or registered in a manner that would cause - confusion as to Google's sponsorship, affiliation, or endorsement. -* Don't use the Bazel name as part of your company name, product name, domain - name, or social media profile. -* Other than as permitted by these guidelines, the Bazel name should not be - combined with other trademarks, terms, or source identifiers. -* Don't remove, distort or alter any element of the Bazel Trademarks. That - includes modifying the Bazel Trademark, for example, through hyphenation, - combination or abbreviation. Do not shorten, abbreviate, or create acronyms - out of the Bazel Trademarks. -* Don't display the word Bazel using any different stylization, color, or font - from the surrounding text. -* Don't use the term Bazel as a verb or use it in possessive form. -* Don't use the Bazel logo on any website, product UI, or promotional - materials without prior written permission from - [product@bazel.build](mailto:product@bazel.build). - -## Usage for Events and Community Groups - -The Bazel word mark may be used referentially in events, community groups, or -other gatherings related to the Bazel build system, but it may not be used in a -manner that implies official status or endorsement. - -Examples of appropriate naming conventions are: - -* \[XYZ\] Bazel User Group -* Bazel Community Day at \[XYZ\] -* \[XYZ\] Conference for Bazel Users - -where \[XYZ\] represents the location and optionally other wordings. 
- -Any naming convention that may imply official status or endorsement requires -review for approval from [product@bazel.build](mailto:product@bazel.build). - -Examples of naming conventions that require prior written permission: - -* BazelCon -* Bazel Conference - -## Contact Us - -Please do not hesitate to contact us at -[product@bazel.build](mailto:product@bazel.build) if you are unsure whether your -intended use of the Bazel Trademarks is in compliance with these guidelines, or -to ask for permission to use the Bazel Trademarks, clearly describing the -intended usage and duration. diff --git a/8.2.1/build/share-variables.mdx b/8.2.1/build/share-variables.mdx deleted file mode 100644 index b248034..0000000 --- a/8.2.1/build/share-variables.mdx +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: 'Sharing Variables' ---- - - - -`BUILD` files are intended to be simple and declarative. They will typically -consist of a series of target declarations. As your code base and your `BUILD` -files get larger, you will probably notice some duplication, such as: - -``` python -cc_library( - name = "foo", - copts = ["-DVERSION=5"], - srcs = ["foo.cc"], -) - -cc_library( - name = "bar", - copts = ["-DVERSION=5"], - srcs = ["bar.cc"], - deps = [":foo"], -) -``` - -Code duplication in `BUILD` files is usually fine. This can make the file more -readable: each declaration can be read and understood without any context. This -is important, not only for humans, but also for external tools. For example, a -tool might be able to read and update `BUILD` files to add missing dependencies. -Code refactoring and code reuse might prevent this kind of automated -modification. - -If it is useful to share values (for example, if values must be kept in sync), -you can introduce a variable: - -``` python -COPTS = ["-DVERSION=5"] - -cc_library( - name = "foo", - copts = COPTS, - srcs = ["foo.cc"], -) - -cc_library( - name = "bar", - copts = COPTS, - srcs = ["bar.cc"], - deps = [":foo"], -) -``` - -Multiple declarations now use the value `COPTS`. By convention, use uppercase -letters to name global constants. - -## Sharing variables across multiple BUILD files - -If you need to share a value across multiple `BUILD` files, you have to put it -in a `.bzl` file. `.bzl` files contain definitions (variables and functions) -that can be used in `BUILD` files. - -In `path/to/variables.bzl`, write: - -``` python -COPTS = ["-DVERSION=5"] -``` - -Then, you can update your `BUILD` files to access the variable: - -``` python -load("//path/to:variables.bzl", "COPTS") - -cc_library( - name = "foo", - copts = COPTS, - srcs = ["foo.cc"], -) - -cc_library( - name = "bar", - copts = COPTS, - srcs = ["bar.cc"], - deps = [":foo"], -) -``` diff --git a/8.2.1/build/style-guide.mdx b/8.2.1/build/style-guide.mdx deleted file mode 100644 index 19a5216..0000000 --- a/8.2.1/build/style-guide.mdx +++ /dev/null @@ -1,232 +0,0 @@ ---- -title: 'BUILD Style Guide' ---- - - - -`BUILD` file formatting follows the same approach as Go, where a standardized -tool takes care of most formatting issues. -[Buildifier](https://github.com/bazelbuild/buildifier) is a tool that parses and -emits the source code in a standard style. Every `BUILD` file is therefore -formatted in the same automated way, which makes formatting a non-issue during -code reviews. It also makes it easier for tools to understand, edit, and -generate `BUILD` files. - -`BUILD` file formatting must match the output of `buildifier`. 
## Formatting example

```python
# Test code implementing the Foo controller.
package(default_testonly = True)

py_test(
    name = "foo_test",
    srcs = glob(["*.py"]),
    data = [
        "//data/production/foo:startfoo",
        "//foo",
        "//third_party/java/jdk:jdk-k8",
    ],
    flaky = True,
    deps = [
        ":check_bar_lib",
        ":foo_data_check",
        ":pick_foo_port",
        "//pyglib",
        "//testing/pybase",
    ],
)
```

## File structure

**Recommendation**: Use the following order (every element is optional):

* Package description (a comment)

* All `load()` statements

* The `package()` function

* Calls to rules and macros

Buildifier makes a distinction between a standalone comment and a comment attached to an element. If a comment is not attached to a specific element, use an empty line after it. The distinction is important when doing automated changes (for example, to keep or remove a comment when deleting a rule).

```python
# Standalone comment (such as to make a section in a file)

# Comment for the cc_library below
cc_library(name = "cc")
```

## References to targets in the current package

Files should be referred to by their paths relative to the package directory (without ever using up-references, such as `..`). Generated files should be prefixed with "`:`" to indicate that they are not sources. Source files should not be prefixed with `:`. Rules should be prefixed with `:`. For example, assuming `x.cc` is a source file:

```python
cc_library(
    name = "lib",
    srcs = ["x.cc"],
    hdrs = [":gen_header"],
)

genrule(
    name = "gen_header",
    srcs = [],
    outs = ["x.h"],
    cmd = "echo 'int x();' > $@",
)
```

## Target naming

Target names should be descriptive. If a target contains one source file, the target should generally have a name derived from that source (for example, a `cc_library` for `chat.cc` could be named `chat`, or a `java_library` for `DirectMessage.java` could be named `direct_message`).

The eponymous target for a package (the target with the same name as the containing directory) should provide the functionality described by the directory name. If there is no such target, do not create an eponymous target.

Prefer using the short name when referring to an eponymous target (`//x` instead of `//x:x`). If you are in the same package, prefer the local reference (`:x` instead of `//x`).

Avoid using "reserved" target names which have special meaning. This includes `all`, `__pkg__`, and `__subpackages__`; these names have special semantics and can cause confusion and unexpected behaviors when they are used.

In the absence of a prevailing team convention, these are some non-binding recommendations that are broadly used at Google:

* In general, use ["snake_case"](https://en.wikipedia.org/wiki/Snake_case)
    * For a `java_library` with one `src` this means using a name that is not the same as the filename without the extension
    * For Java `*_binary` and `*_test` rules, use ["Upper CamelCase"](https://en.wikipedia.org/wiki/Camel_case). This allows for the target name to match one of the `src`s. For `java_test`, this makes it possible for the `test_class` attribute to be inferred from the name of the target.
* If there are multiple variants of a particular target then add a suffix to disambiguate (such as
`:foo_dev`, `:foo_prod` or `:bar_x86`, `:bar_x64`)
* Suffix `_test` targets with `_test`, `_unittest`, `Test`, or `Tests`
* Avoid meaningless suffixes like `_lib` or `_library` (unless necessary to avoid conflicts between a `_library` target and its corresponding `_binary`)
* For proto-related targets:
    * `proto_library` targets should have names ending in `_proto`
    * Language-specific `*_proto_library` rules should match the underlying proto but replace `_proto` with a language-specific suffix such as:
        * **`cc_proto_library`**: `_cc_proto`
        * **`java_proto_library`**: `_java_proto`
        * **`java_lite_proto_library`**: `_java_proto_lite`

## Visibility

Visibility should be scoped as tightly as possible, while still allowing access by tests and reverse dependencies. Use `__pkg__` and `__subpackages__` as appropriate.

Avoid setting package `default_visibility` to `//visibility:public`. `//visibility:public` should be individually set only for targets in the project's public API. These could be libraries that are designed to be depended on by external projects or binaries that could be used by an external project's build process.

## Dependencies

Dependencies should be restricted to direct dependencies (dependencies needed by the sources listed in the rule). Do not list transitive dependencies.

Package-local dependencies should be listed first and referred to in a way compatible with the [References to targets in the current package](#targets-current-package) section above (not by their absolute package name).

Prefer to list dependencies directly, as a single list. Putting the "common" dependencies of several targets into a variable reduces maintainability, makes it impossible for tools to change the dependencies of a target, and can lead to unused dependencies.

## Globs

Indicate "no targets" with `[]`. Do not use a glob that matches nothing: it is more error-prone and less obvious than an empty list.

### Recursive

Do not use recursive globs to match source files (for example, `glob(["**/*.java"])`).

Recursive globs make `BUILD` files difficult to reason about because they skip subdirectories containing `BUILD` files.

Recursive globs are generally less efficient than having a `BUILD` file per directory with a dependency graph defined between them, as this enables better remote caching and parallelism.

It is good practice to author a `BUILD` file in each directory and define a dependency graph between them.

### Non-recursive

Non-recursive globs are generally acceptable.

## Other conventions

* Use uppercase and underscores to declare constants (such as `GLOBAL_CONSTANT`), use lowercase and underscores to declare variables (such as `my_variable`).

* Labels should never be split, even if they are longer than 79 characters. Labels should be string literals whenever possible. *Rationale*: It makes find and replace easy. It also improves readability.

* The value of the name attribute should be a literal constant string (except in macros). *Rationale*: External tools use the name attribute to refer to a rule. They need to find rules without having to interpret code.

* When setting boolean-type attributes, use boolean values, not integer values. For legacy reasons, rules still convert integers to booleans as needed, but this is discouraged. *Rationale*: `flaky = 1` could be misread as saying "deflake this target by rerunning it once". `flaky = True` unambiguously says "this test is flaky".
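A short sketch pulling several of these conventions together (a hypothetical target, for illustration only):

```python
# A constant uses uppercase and underscores; a literal name and double-quoted
# strings keep the target discoverable by external tools.
DEFAULT_TIMEOUT = "moderate"

py_test(
    name = "chat_test",        # literal string, so tools can find the rule
    srcs = ["chat_test.py"],
    timeout = DEFAULT_TIMEOUT,
    flaky = True,              # boolean, not the legacy integer 1
)
```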
## Differences with Python style guide

Although compatibility with the [Python style guide](https://www.python.org/dev/peps/pep-0008/) is a goal, there are a few differences:

* No strict line length limit. Long comments and long strings are often split to 79 columns, but it is not required. It should not be enforced in code reviews or presubmit scripts. *Rationale*: Labels can be long and exceed this limit. It is common for `BUILD` files to be generated or edited by tools, which does not go well with a line length limit.

* Implicit string concatenation is not supported. Use the `+` operator. *Rationale*: `BUILD` files contain many string lists. It is easy to forget a comma, which leads to a completely different result. This has created many bugs in the past. [See also this discussion.](https://lwn.net/Articles/551438/)

* Use spaces around the `=` sign for keyword arguments in rules. *Rationale*: Named arguments are much more frequent than in Python and are always on a separate line. Spaces improve readability. This convention has been around for a long time, and it is not worth modifying all existing `BUILD` files.

* By default, use double quotation marks for strings. *Rationale*: This is not specified in the Python style guide, but it recommends consistency. So we decided to use only double-quoted strings. Many languages use double-quotes for string literals.

* Use a single blank line between two top-level definitions. *Rationale*: The structure of a `BUILD` file is not like a typical Python file. It has only top-level statements. Using a single blank line makes `BUILD` files shorter.

diff --git a/8.2.1/community/recommended-rules.mdx b/8.2.1/community/recommended-rules.mdx
deleted file mode 100644
index 86daa05..0000000
--- a/8.2.1/community/recommended-rules.mdx
+++ /dev/null
@@ -1,54 +0,0 @@

---
title: 'Recommended Rules'
---

In the documentation, we provide a list of [recommended rules](/rules).

This is a set of high-quality rules, which will provide a good experience to our users. We make a distinction between the supported rules and the hundreds of rules you can find on the Internet.

## Nomination

If a ruleset meets the requirements below, a rule maintainer can nominate it to be part of the _recommended rules_ by filing a [GitHub issue](https://github.com/bazelbuild/bazel/).

After a review by the [Bazel core team](/contribute/policy), it will be recommended on the Bazel website.

## Requirements for the rule maintainers

* The ruleset provides an important feature, useful to a large number of Bazel users (for example, support for a widely popular language).
* The ruleset is well maintained. There must be at least two active maintainers.
* The ruleset is well documented, with examples, and easy to use.
* The ruleset follows the best practices and is performant (see [the performance guide](/rules/performance)).
* The ruleset has sufficient test coverage.
* The ruleset is tested on [BuildKite](https://github.com/bazelbuild/continuous-integration/blob/master/buildkite/README.md) with the latest version of Bazel. Tests should always pass (when used as a presubmit check).
* The ruleset is also tested with the upcoming incompatible changes. Breakages should be fixed within two weeks. Migration issues should be reported to the Bazel team quickly.

## Requirements for Bazel developers

* Recommended rules are frequently tested with Bazel at head (at least once a day).
* No change in Bazel may break a recommended rule (with the default set of flags). If it happens, the change should be fixed or rolled back.

## Demotion

If there is a concern that a particular ruleset is no longer meeting the requirements, a [GitHub issue](https://github.com/bazelbuild/bazel/) should be filed.

Rule maintainers will be contacted and need to respond within 2 weeks. Based on the outcome, the Bazel core team might make a decision to demote the ruleset.

diff --git a/8.2.1/community/remote-execution-services.mdx b/8.2.1/community/remote-execution-services.mdx
deleted file mode 100644
index bede2b8..0000000
--- a/8.2.1/community/remote-execution-services.mdx
+++ /dev/null
@@ -1,28 +0,0 @@

---
title: 'Remote Execution Services'
---

Use the following services to run Bazel with remote execution:

* Manual

    * Use the [gRPC protocol](https://github.com/bazelbuild/remote-apis) directly to create your own remote execution service.

* Self-service

    * [Buildbarn](https://github.com/buildbarn)
    * [Buildfarm](https://github.com/bazelbuild/bazel-buildfarm)
    * [BuildGrid](https://gitlab.com/BuildGrid/buildgrid)
    * [NativeLink](https://github.com/TraceMachina/nativelink)

* Commercial

    * [Aspect Build](https://www.aspect.build/) – Self-hosted remote cache and remote execution services.
    * [Bitrise](https://bitrise.io/why/features/mobile-build-caching-for-better-build-test-performance) – Providing the world's leading mobile-first CI/CD and remote build caching platform.
    * [BuildBuddy](https://www.buildbuddy.io) – Remote build execution, caching, and results UI.
    * [EngFlow Remote Execution](https://www.engflow.com) – Remote execution and remote caching service with Build and Test UI. Can be self-hosted or hosted.

diff --git a/8.2.1/community/roadmaps-starlark.mdx b/8.2.1/community/roadmaps-starlark.mdx
deleted file mode 100644
index 5ce476d..0000000
--- a/8.2.1/community/roadmaps-starlark.mdx
+++ /dev/null
@@ -1,91 +0,0 @@

---
title: 'Starlark Roadmap'
---

*Last verified: 2020-04-21* ([update history](https://github.com/bazelbuild/bazel-website/commits/master/roadmaps/starlark.md))

*Point of contact:* [laurentlb](https://github.com/laurentlb)

## Goal

Our goal is to make Bazel more extensible. Users should be able to easily implement their own rules, and support new languages and tools. We want to improve the experience of writing and maintaining those rules.

We focus on two areas:

* Make the language and API simple, yet powerful.
* Provide better tooling for reading, writing, updating, debugging, and testing the code.

## Q2 2020

Build health and best practices:

* P0. Discourage macros without a name, and ensure the name is a unique string literal. This work is focused on the Google codebase, but may impact tooling available publicly.
* P0. Make Buildozer commands reliable with regard to selects and variables.
* P1. Make Buildifier remove duplicates in lists that we don’t sort because of comments.
* P1. Update the Buildifier linter to recommend inlining trivial expressions.
* P2. Study use cases for native.existing_rule[s]() and propose alternatives.
* P2. Study use cases for the prelude file and propose alternatives.

Performance:

* P1. Optimize the Starlark interpreter using flat environments and bytecode compilation.

Technical debt reduction:

* P0. Add ability to port native symbols to Starlark underneath @bazel_tools.
* P1.
Delete obsolete flags (some of them are still used at Google, so we need to clean the codebase first): `incompatible_always_check_depset_elements`, `incompatible_disable_deprecated_attr_params`, `incompatible_no_support_tools_in_action_inputs`, `incompatible_new_actions_api`.
* P1. Ensure the following flags can be flipped in Bazel 4.0: `incompatible_disable_depset_items`, `incompatible_no_implicit_file_export`, `incompatible_run_shell_command_string`, `incompatible_restrict_string_escapes`.
* P1. Finish lib.syntax work (API cleanup, separation from Bazel).
* P2. Reduce by 50% the build+test latency of a trivial edit to Bazel’s Java packages.

Community:

* `rules_python` is active and well-maintained by the community.
* Continuous support for rules_jvm_external (no outstanding pull requests, issue triage, making releases).
* Maintain Bazel documentation infrastructure: centralize and canonicalize CSS styles across bazel-website, bazel-blog, and docs.
* Bazel docs: add CI tests for the end-to-end doc site build to prevent regressions.

## Q1 2020

Build health and best practices:

* Allow targets to track their macro call stack, for exporting via `bazel query`
* Implement `--incompatible_no_implicit_file_export`
* Remove the deprecated depset APIs (#5817, #10313, #9017).
* Add a cross-file analyzer in Buildifier, and implement a check for deprecated functions.

Performance:

* Make Bazel’s own Java-based tests 2x faster.
* Implement a Starlark CPU profiler.

Technical debt reduction:

* Remove 8 incompatible flags (after flipping them).
* Finish lib.syntax cleanup work (break dependencies).
* Starlark optimization: flat environment, bytecode compilation
* Delete all serialization from the analysis phase, if possible
* Make a plan for simplifying/optimizing lib.packages

Community:

* Publish a Glossary containing definitions for all the Bazel-specific terms

diff --git a/8.2.1/community/sig.mdx b/8.2.1/community/sig.mdx
deleted file mode 100644
index ae5f918..0000000
--- a/8.2.1/community/sig.mdx
+++ /dev/null
@@ -1,158 +0,0 @@

---
title: 'Bazel Special Interest Groups'
---

Bazel hosts Special Interest Groups (SIGs) to focus collaboration on particular areas and to support communication and coordination between [Bazel owners, maintainers, and contributors](/contribute/policy). This policy applies to [`bazelbuild`](http://github.com/bazelbuild).

SIGs do their work in public. The ideal scope for a SIG covers a well-defined domain, where the majority of participation is from the community. SIGs may focus on community-maintained repositories in `bazelbuild` (such as language rules) or focus on areas of code in the Bazel repository (such as Remote Execution).

While not all SIGs will have the same level of energy, breadth of scope, or governance models, there should be sufficient evidence that there are community members willing to engage and contribute should the interest group be established. Before joining, review the group's work, and then get in touch with the SIG leader. Membership policies vary on a per-SIG basis.

See the complete list of [Bazel SIGs](https://github.com/bazelbuild/community/tree/main/sigs).

### Non-goals: What a SIG is not

SIGs are intended to facilitate collaboration on shared work.
A SIG is therefore:

- *Not a support forum:* a mailing list and a SIG are not the same thing
- *Not immediately required:* early on in a project's life, you may not know if you have shared work or collaborators
- *Not free labor:* energy is required to grow and coordinate the work collaboratively

Bazel Owners take a conservative approach to SIG creation—thanks to the ease of starting projects on GitHub, there are many avenues where collaboration can happen without the need for a SIG.

## SIG lifecycle

This section covers how to create a SIG.

### Research and consultation

To propose a new SIG group, first gather evidence for approval, as specified below. Some possible avenues to consider are:

- A well-defined problem or set of problems the group would solve
- Consultation with community members who would benefit, assessing both the benefit and their willingness to commit
- For existing projects, evidence from issues and PRs that contributors care about the topic
- Potential goals for the group to achieve
- Resource requirements of running the group

Even if the need for a SIG seems self-evident, the research and consultation is still important to the success of the group.

### Create the new group

The new group should follow the below process for chartering. In particular, it must demonstrate:

- A clear purpose and benefit to Bazel (either around a sub-project or application area)
- Two or more contributors willing to act as group leads, existence of other contributors, and evidence of demand for the group
- Each group needs to use at least one publicly accessible mailing list. A SIG may reuse one of the public lists, such as [bazel-discuss](https://groups.google.com/g/bazel-discuss), ask for a list for @bazel.build, or create their own list
- Resources the SIG initially requires (usually, a mailing list and regular video calls)
- SIGs can serve documents and files from their directory in [`bazelbuild/community`](https://github.com/bazelbuild/community) or from their own repository in the [`bazelbuild`](https://github.com/bazelbuild) GitHub organization. SIGs may link to external resources if they choose to organize their work outside of the `bazelbuild` GitHub organization
- Bazel Owners approve or reject SIG applications and consult other stakeholders as necessary

Before entering the formal parts of the process, you should consult with the Bazel product team, at product@bazel.build. Most SIGs require conversation and iteration before approval.

The formal request for the new group is done by submitting a charter as a PR to [`bazelbuild/community`](https://github.com/bazelbuild/community), and including the request in the comments on the PR following the template below. On approval, the PR for the group is merged and the required resources created.

### Template Request for New SIG

To request a new SIG, use the template in the community repo: [SIG-request-template.md](https://github.com/bazelbuild/community/blob/main/governance/SIG-request-template.md).

### Chartering

To establish a group, you need a charter and must follow the Bazel [code of conduct](https://github.com/bazelbuild/bazel/blob/HEAD/CODE_OF_CONDUCT.md). Archives of the group will be public. Membership may either be open to all without approval, or available on request, pending approval of the group administrator.

The charter must nominate an administrator.
As well as an administrator, the group must include at least one person as lead (these may be the same person), who serves as point of contact for coordination as required with the Bazel product team.

Group creators must post their charter to the group mailing list. The community repository in the Bazel GitHub organization archives such documents and policies. As groups evolve their practices and conventions, they should update their charters within the relevant part of the community repository.

### Collaboration and inclusion

While not mandated, the group is encouraged to conduct meetings over scheduled conference calls or chat channels. Any such meetings should be advertised on the mailing list, and notes posted to the mailing list afterwards. Regular meetings help drive accountability and progress in a SIG.

Bazel product team members may proactively monitor the group and encourage discussion and action as appropriate.

### Launch a SIG

Required activities:

- Notify Bazel general discussion groups ([bazel-discuss](https://groups.google.com/g/bazel-discuss), [bazel-dev](https://groups.google.com/g/bazel-dev)).

Optional activities:

- Create a blog post for the Bazel blog

### Health and termination of SIGs

The Bazel owners make a best effort to ensure the health of SIGs. Bazel owners occasionally request the SIG lead to report on the SIG's work, to inform the broader Bazel community of the group's activity.

If a SIG no longer has a useful purpose or interested community, it may be archived and cease operation. The Bazel product team reserves the right to archive such inactive SIGs to maintain the overall health of the project, though it is a less preferable outcome. A SIG may also opt to disband if it recognizes it has reached the end of its useful life.

## Note

*This content has been adopted from Tensorflow’s [SIG playbook](https://www.tensorflow.org/community/sig_playbook) with modifications.*

diff --git a/8.2.1/community/update.mdx b/8.2.1/community/update.mdx
deleted file mode 100644
index be0e07d..0000000
--- a/8.2.1/community/update.mdx
+++ /dev/null
@@ -1,18 +0,0 @@

---
title: 'Community updates'
---

Join Bazel developer relations engineers for the monthly community update livestream, or catch up on past ones.

Title | Date | Description | Speakers
-------- | -------- | -------- | --------
[Roadmap Introduction](https://www.youtube.com/watch?v=gYrZDl7K9JM) | 5/19/2022 | The inaugural Bazel Community Update, introducing the community to some of Google's Bazel leadership to talk about the general state of the project and its upcoming roadmap | Sven Tiffe, Tony Aiuto, Radhika Advani
[Hands-On with Bzlmod](https://www.youtube.com/watch?v=MuW5XNcFukE) | 6/23/2022 | This month, we're joined by Google engineers Yun Peng and Xudong Yang to talk about Bzlmod, the new dependency system that is expected to go GA later this year. We'll cover the motivation behind the change, the new capabilities it brings to the table, and walk through some examples of it in action. | Yun Peng, Xudong Yang
[Extending Gazelle to generate BUILD files](https://www.youtube.com/watch?v=E1-U7EAfhXw) | 7/21/2022 | This month we're joined by Son Luong Ngoc who will be showing the Gazelle language extension system. We'll briefly touch on how it works under the covers, existing extensions, and how to go about writing your own extensions to ease the migration to Bazel.
[Using Bazel for JavaScript Projects](https://www.youtube.com/watch?v=RIfYqX0JJYk) | 8/18/2022 | In this update, Alex Eagle joins us to talk about running JavaScript build tooling under Bazel. We'll look at a couple of examples: a Vue.js frontend and Nest backend. We'll cover the migration to newer rules_js provided by Aspect, and study how the tooling allows for fetching third-party dependencies and resolving them in the Node.js runtime. | Alex Eagle
-[Like Peanut Butter & Jelly: Integrating Bazel with JetBrains IntelliJ](https://www.youtube.com/watch?v=wMrua-W-LC4) | 9/15/2022 | Bazel is awesome. IntelliJ is awesome. Naturally, they are more awesome together. Bazel IntelliJ plugin gurus Mai Hussien from Google and Justin Kaeser from JetBrains join us this month to give a live demo and walkthrough of the plugin's capabilities. Both new and experienced plugin users are welcome to come with questions. | Mai Hussien, Justin Kaeser
-[Bazel at scale for surgical robots](https://www.youtube.com/watch?v=kCs1xa45yjM) | 10/27/2022 | What do you do when CMake CI runs for four hours? Join Guillaume Maudoux of Tweag to learn about how they migrated large, embedded robotic applications to Bazel. Topics include configuring toolchains for cross compilation, improving CI performance, managing third-party dependencies, and creating a positive developer experience — everything needed to ensure that Bazel lives up to “{Fast, Correct} — Choose Two”. | Guillaume Maudoux
-[The Ghosts of Bazel Past, Present, and Future](https://www.youtube.com/watch?v=uRjSghJQlsw) | 12/22/2022 | For our special holiday Community Update and last of 2022, I'll be joined by Google's Sven Tiffe and Radhika Advani where we'll be visited by the ghosts of Bazel Past (2022 year in review), Present (Bazel 6.0 release), and Future (what to expect in 2023). | Sven Tiffe, Radhika Advani
diff --git a/8.2.1/concepts/build-ref.mdx b/8.2.1/concepts/build-ref.mdx
deleted file mode 100644
index e8839d4..0000000
--- a/8.2.1/concepts/build-ref.mdx
+++ /dev/null
@@ -1,105 +0,0 @@
----
-title: 'Repositories, workspaces, packages, and targets'
----
-
-
-
-Bazel builds software from source code organized in directory trees called
-repositories. A defined set of repositories comprises the workspace. Source
-files in repositories are organized in a nested hierarchy of packages, where
-each package is a directory that contains a set of related source files and one
-`BUILD` file. The `BUILD` file specifies what software outputs can be built from
-the source.
-
-## Repositories
-
-Source files used in a Bazel build are organized in _repositories_ (often
-shortened to _repos_). A repo is a directory tree with a boundary marker file at
-its root; such a boundary marker file could be `MODULE.bazel`, `REPO.bazel`, or
-in legacy contexts, `WORKSPACE` or `WORKSPACE.bazel`.
-
-The repo in which the current Bazel command is being run is called the _main
-repo_. Other (external) repos are defined by _repo rules_; see [external
-dependencies overview](/external/overview) for more information.
-
-## Workspace
-
-A _workspace_ is the environment shared by all Bazel commands run from the same
-main repo. It encompasses the main repo and the set of all defined external
-repos.
-
-Note that historically the concepts of "repository" and "workspace" have been
-conflated; the term "workspace" has often been used to refer to the main
-repository, and sometimes even used as a synonym of "repository".
- -## Packages - -The primary unit of code organization in a repository is the _package_. A -package is a collection of related files and a specification of how they can be -used to produce output artifacts. - -A package is defined as a directory containing a -[`BUILD` file](/concepts/build-files) named either `BUILD` or `BUILD.bazel`. A -package includes all files in its directory, plus all subdirectories beneath it, -except those which themselves contain a `BUILD` file. From this definition, no -file or directory may be a part of two different packages. - -For example, in the following directory tree there are two packages, `my/app`, -and the subpackage `my/app/tests`. Note that `my/app/data` is not a package, but -a directory belonging to package `my/app`. - -``` -src/my/app/BUILD -src/my/app/app.cc -src/my/app/data/input.txt -src/my/app/tests/BUILD -src/my/app/tests/test.cc -``` - -## Targets - -A package is a container of _targets_, which are defined in the package's -`BUILD` file. Most targets are one of two principal kinds, _files_ and _rules_. - -Files are further divided into two kinds. _Source files_ are usually written by -the efforts of people, and checked in to the repository. _Generated files_, -sometimes called derived files or output files, are not checked in, but are -generated from source files. - -The second kind of target is declared with a _rule_. Each rule instance -specifies the relationship between a set of input and a set of output files. The -inputs to a rule may be source files, but they also may be the outputs of other -rules. - -Whether the input to a rule is a source file or a generated file is in most -cases immaterial; what matters is only the contents of that file. This fact -makes it easy to replace a complex source file with a generated file produced by -a rule, such as happens when the burden of manually maintaining a highly -structured file becomes too tiresome, and someone writes a program to derive it. -No change is required to the consumers of that file. Conversely, a generated -file may easily be replaced by a source file with only local changes. - -The inputs to a rule may also include _other rules_. The precise meaning of such -relationships is often quite complex and language- or rule-dependent, but -intuitively it is simple: a C++ library rule A might have another C++ library -rule B for an input. The effect of this dependency is that B's header files are -available to A during compilation, B's symbols are available to A during -linking, and B's runtime data is available to A during execution. - -An invariant of all rules is that the files generated by a rule always belong to -the same package as the rule itself; it is not possible to generate files into -another package. It is not uncommon for a rule's inputs to come from another -package, though. - -Package groups are sets of packages whose purpose is to limit accessibility of -certain rules. Package groups are defined by the `package_group` function. They -have three properties: the list of packages they contain, their name, and other -package groups they include. The only allowed ways to refer to them are from the -`visibility` attribute of rules or from the `default_visibility` attribute of -the `package` function; they do not generate or consume files. For more -information, refer to the [`package_group` -documentation](/reference/be/functions#package_group). 
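-
-As an illustration, here is a minimal sketch (package and target names are
-hypothetical) of a `package_group` being defined and then consumed from a
-rule's `visibility` attribute:
-
-```starlark
-package_group(
-    name = "legacy_clients",
-    packages = ["//oldproject/..."],
-)
-
-# This group contains its own packages plus everything in :legacy_clients.
-package_group(
-    name = "my_clients",
-    packages = ["//myproject/..."],
-    includes = [":legacy_clients"],
-)
-
-# Only packages in :my_clients (and this package) may depend on this target.
-cc_library(
-    name = "mylib",
-    srcs = ["mylib.cc"],
-    visibility = [":my_clients"],
-)
-```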
-
diff --git a/8.2.1/concepts/platforms.mdx b/8.2.1/concepts/platforms.mdx
deleted file mode 100644
index e560ea4..0000000
--- a/8.2.1/concepts/platforms.mdx
+++ /dev/null
@@ -1,429 +0,0 @@
----
-title: 'Migrating to Platforms'
----
-
-
-
-Bazel has sophisticated [support](#background) for modeling
-[platforms][Platforms] and [toolchains][Toolchains] for multi-architecture and
-cross-compiled builds.
-
-This page summarizes the state of this support.
-
-Key Point: Bazel's platform and toolchain APIs are available today. Not all
-languages support them. Use these APIs with your project if you can. Bazel is
-migrating all major languages so eventually all builds will be platform-based.
-
-See also:
-
-* [Platforms][Platforms]
-* [Toolchains][Toolchains]
-* [Background][Background]
-
-## Status
-
-### C++
-
-C++ rules use platforms to select toolchains when
-`--incompatible_enable_cc_toolchain_resolution` is set.
-
-This means you can configure a C++ project with:
-
-```posix-terminal
-bazel build //:my_cpp_project --platforms=//:myplatform
-```
-
-instead of the legacy:
-
-```posix-terminal
-bazel build //:my_cpp_project --cpu=... --crosstool_top=... --compiler=...
-```
-
-This will be enabled by default in Bazel 7.0 ([#7260](https://github.com/bazelbuild/bazel/issues/7260)).
-
-To test your C++ project with platforms, see
-[Migrating Your Project](#migrating-your-project) and
-[Configuring C++ toolchains].
-
-### Java
-
-Java rules use platforms to select toolchains.
-
-This replaces the legacy flags `--java_toolchain`, `--host_java_toolchain`,
-`--javabase`, and `--host_javabase`.
-
-See [Java and Bazel](/docs/bazel-and-java) for details.
-
-### Android
-
-Android rules use platforms to select toolchains when
-`--incompatible_enable_android_toolchain_resolution` is set.
-
-This means you can configure an Android project with:
-
-```posix-terminal
-bazel build //:my_android_project --android_platforms=//:my_android_platform
-```
-
-instead of with legacy flags like `--android_crosstool_top`, `--android_cpu`,
-and `--fat_apk_cpu`.
-
-This will be enabled by default in Bazel 7.0 ([#16285](https://github.com/bazelbuild/bazel/issues/16285)).
-
-To test your Android project with platforms, see
-[Migrating Your Project](#migrating-your-project).
-
-### Apple
-
-[Apple rules] do not support platforms and are not yet scheduled
-for support.
-
-You can still use platform APIs with Apple builds (for example, when building
-with a mixture of Apple rules and pure C++) with [platform
-mappings](#platform-mappings).
-
-### Other languages
-
-* [Go rules] fully support platforms.
-* [Rust rules] fully support platforms.
-
-If you own a language rule set, see [Migrating your rule set] for adding
-support.
-
-## Background
-
-*Platforms* and *toolchains* were introduced to standardize how software
-projects target different architectures and cross-compile.
-
-This was
-[inspired][Inspiration]
-by the observation that language maintainers were already doing this in ad
-hoc, incompatible ways. For example, C++ rules used `--cpu` and
-`--crosstool_top` to declare a target CPU and toolchain. Neither of these
-correctly models a "platform". This produced awkward and incorrect builds.
-
-Java, Android, and other languages evolved their own flags for similar purposes,
-none of which interoperated with each other. This made cross-language builds
-confusing and complicated.
-
-Bazel is intended for large, multi-language, multi-platform projects. 
This -demands more principled support for these concepts, including a clear -standard API. - -### Need for migration - -Upgrading to the new API requires two efforts: releasing the API and upgrading -rule logic to use it. - -The first is done but the second is ongoing. This consists of ensuring -language-specific platforms and toolchains are defined, language logic reads -toolchains through the new API instead of old flags like `--crosstool_top`, and -`config_setting`s select on the new API instead of old flags. - -This work is straightforward but requires a distinct effort for each language, -plus fair warning for project owners to test against upcoming changes. - -This is why this is an ongoing migration. - -### Goal - -This migration is complete when all projects build with the form: - -```posix-terminal -bazel build //:myproject --platforms=//:myplatform -``` - -This implies: - -1. Your project's rules choose the right toolchains for `//:myplatform`. -1. Your project's dependencies choose the right toolchains for `//:myplatform`. -1. `//:myplatform` references -[common declarations][Common Platform Declarations] -of `CPU`, `OS`, and other generic, language-independent properties -1. All relevant [`select()`s][select()] properly match `//:myplatform`. -1. `//:myplatform` is defined in a clear, accessible place: in your project's -repo if the platform is unique to your project, or some common place all -consuming projects can find it - -Old flags like `--cpu`, `--crosstool_top`, and `--fat_apk_cpu` will be -deprecated and removed as soon as it's safe to do so. - -Ultimately, this will be the *sole* way to configure architectures. - - -## Migrating your project - -If you build with languages that support platforms, your build should already -work with an invocation like: - -```posix-terminal -bazel build //:myproject --platforms=//:myplatform -``` - -See [Status](#status) and your language's documentation for precise details. - -If a language requires a flag to enable platform support, you also need to set -that flag. See [Status](#status) for details. - -For your project to build, you need to check the following: - -1. `//:myplatform` must exist. It's generally the project owner's responsibility - to define platforms because different projects target different machines. - See [Default platforms](#default-platforms). - -1. The toolchains you want to use must exist. If using stock toolchains, the - language owners should include instructions for how to register them. If - writing your own custom toolchains, you need to [register](https://bazel.build/extending/toolchains#registering-building-toolchains) them in your - `MODULE.bazel` file or with [`--extra_toolchains`](https://bazel.build/reference/command-line-reference#flag--extra_toolchains). - -1. `select()`s and [configuration transitions][Starlark transitions] must - resolve properly. See [select()](#select) and [Transitions](#transitions). - -1. If your build mixes languages that do and don't support platforms, you may - need platform mappings to help the legacy languages work with the new API. - See [Platform mappings](#platform-mappings) for details. - -If you still have problems, [reach out](#questions) for support. - -### Default platforms - -Project owners should define explicit -[platforms][Defining Constraints and Platforms] to describe the architectures -they want to build for. These are then triggered with `--platforms`. - -When `--platforms` isn't set, Bazel defaults to a `platform` representing the -local build machine. 
This is auto-generated at `@platforms//host` (aliased as
-`@bazel_tools//tools:host_platform`)
-so there's no need to explicitly define it. It maps the local machine's `OS`
-and `CPU` with `constraint_value`s declared in
-[`@platforms`](https://github.com/bazelbuild/platforms).
-
-### `select()`
-
-Projects can [`select()`][select()] on
-[`constraint_value` targets][constraint_value Rule] but not complete
-platforms. This is intentional so `select()` supports as wide a variety of
-machines as possible. A library with `ARM`-specific sources should support *all*
-`ARM`-powered machines unless there's reason to be more specific.
-
-To select on one or more `constraint_value`s, use:
-
-```python
-config_setting(
-    name = "is_arm",
-    constraint_values = [
-        "@platforms//cpu:arm",
-    ],
-)
-```
-
-This is equivalent to traditionally selecting on `--cpu`:
-
-```python
-config_setting(
-    name = "is_arm",
-    values = {
-        "cpu": "arm",
-    },
-)
-```
-
-More details [here][select() Platforms].
-
-`select`s on `--cpu`, `--crosstool_top`, etc. don't understand `--platforms`.
-When migrating your project to platforms, you must either convert them to
-`constraint_values` or use [platform mappings](#platform-mappings) to support
-both styles during migration.
-
-### Transitions
-
-[Starlark transitions][Starlark transitions] change
-flags down parts of your build graph. If your project uses a transition that
-sets `--cpu`, `--crosstool_top`, or other legacy flags, rules that read
-`--platforms` won't see these changes.
-
-When migrating your project to platforms, you must either convert changes like
-`return { "//command_line_option:cpu": "arm" }` to `return {
-"//command_line_option:platforms": "//:my_arm_platform" }` or use [platform
-mappings](#platform-mappings) to support both styles during the migration
-window.
-
-## Migrating your rule set
-
-If you own a rule set and want to support platforms, you need to:
-
-1. Have rule logic resolve toolchains with the toolchain API. See
-   [toolchain API][Toolchains] (`ctx.toolchains`).
-
-1. Optional: define an `--incompatible_enable_platforms_for_my_language` flag so
-   rule logic alternately resolves toolchains through the new API or old flags
-   like `--crosstool_top` during migration testing.
-
-1. Define the relevant properties that make up platform components. See
-   [Common platform properties](#common-platform-properties)
-
-1. Define standard toolchains and make them accessible to users through your
-   rule's registration instructions ([details](https://bazel.build/extending/toolchains#registering-building-toolchains))
-
-1. Ensure [`select()`s](#select) and
-   [configuration transitions](#transitions) support platforms. This is the
-   biggest challenge. It's particularly challenging for multi-language projects
-   (which may fail if *all* languages can't read `--platforms`).
-
-If you need to mix with rules that don't support platforms, you may need
-[platform mappings](#platform-mappings) to bridge the gap.
-
-### Common platform properties
-
-Common, cross-language platform properties like `OS` and `CPU` should be
-declared in [`@platforms`](https://github.com/bazelbuild/platforms).
-This encourages sharing, standardization, and cross-language compatibility.
-
-Properties unique to your rules should be declared in your rule's repo. This
-lets you maintain clear ownership over the specific concepts your rules are
-responsible for.
-
-If your rules use custom-purpose OSes or CPUs, these should be declared in your
-rule's repo rather than in
-[`@platforms`](https://github.com/bazelbuild/platforms).
-
-## Platform mappings
-
-*Platform mappings* is a temporary API that lets platform-aware logic mix with
-legacy logic in the same build. This is a blunt tool that's only intended to
-smooth incompatibilities with different migration timeframes.
-
-Caution: Only use this if necessary, and expect to eventually eliminate it.
-
-A platform mapping is a map from a `platform()` to a
-corresponding set of legacy flags, or the reverse. For example:
-
-```python
-platforms:
-  # Maps "--platforms=//platforms:ios" to "--ios_multi_cpus=x86_64 --apple_platform_type=ios".
-  //platforms:ios
-    --ios_multi_cpus=x86_64
-    --apple_platform_type=ios
-
-flags:
-  # Maps "--ios_multi_cpus=x86_64 --apple_platform_type=ios" to "--platforms=//platforms:ios".
-  --ios_multi_cpus=x86_64
-  --apple_platform_type=ios
-    //platforms:ios
-
-  # Maps "--cpu=darwin_x86_64 --apple_platform_type=macos" to "--platforms=//platforms:macos".
-  --cpu=darwin_x86_64
-  --apple_platform_type=macos
-    //platforms:macos
-```
-
-Bazel uses this to guarantee all settings, both platform-based and
-legacy, are consistently applied throughout the build, including through
-[transitions](#transitions).
-
-By default Bazel reads mappings from the `platform_mappings` file in your
-workspace root. You can also set
-`--platform_mappings=//:my_custom_mapping`.
-
-See the [platform mappings design] for details.
-
-## API review
-
-A [`platform`][platform Rule] is a collection of
-[`constraint_value` targets][constraint_value Rule]:
-
-```python
-platform(
-    name = "myplatform",
-    constraint_values = [
-        "@platforms//os:linux",
-        "@platforms//cpu:arm",
-    ],
-)
-```
-
-A [`constraint_value`][constraint_value Rule] is a machine
-property. Values of the same "kind" are grouped under a common
-[`constraint_setting`][constraint_setting Rule]:
-
-```python
-constraint_setting(name = "os")
-constraint_value(
-    name = "linux",
-    constraint_setting = ":os",
-)
-constraint_value(
-    name = "mac",
-    constraint_setting = ":os",
-)
-```
-
-A [`toolchain`][Toolchains] is a [Starlark rule][Starlark rule]. Its
-attributes declare a language's tools (like `compiler =
-"//mytoolchain:custom_gcc"`). Its [providers][Starlark Provider] pass
-this information to rules that need to build with these tools.
-
-Toolchains declare the `constraint_value`s of machines they can
-[target][target_compatible_with Attribute]
-(`target_compatible_with = ["@platforms//os:linux"]`) and machines their tools can
-[run on][exec_compatible_with Attribute]
-(`exec_compatible_with = ["@platforms//os:mac"]`).
-
-When building `$ bazel build //:myproject --platforms=//:myplatform`, Bazel
-automatically selects a toolchain that can run on the build machine and
-build binaries for `//:myplatform`. This is known as *toolchain resolution*.
-
-The set of available toolchains can be registered in the `MODULE.bazel` file
-with [`register_toolchains`][register_toolchains Function] or at the
-command line with [`--extra_toolchains`][extra_toolchains Flag].
-
-For more information see [here][Toolchains].
-
-## Questions
-
-For general support and questions about the migration timeline, contact
-[bazel-discuss] or the owners of the appropriate rules.
-
-For discussions on the design and evolution of the platform/toolchain APIs,
-contact [bazel-dev].
- -## See also - -* [Configurable Builds - Part 1] -* [Platforms] -* [Toolchains] -* [Bazel Platforms Cookbook] -* [Platforms examples] -* [Example C++ toolchain] - -[Android Rules]: /docs/bazel-and-android -[Apple Rules]: https://github.com/bazelbuild/rules_apple -[Background]: #background -[Bazel platforms Cookbook]: https://docs.google.com/document/d/1UZaVcL08wePB41ATZHcxQV4Pu1YfA1RvvWm8FbZHuW8/ -[bazel-dev]: https://groups.google.com/forum/#!forum/bazel-dev -[bazel-discuss]: https://groups.google.com/forum/#!forum/bazel-discuss -[Common Platform Declarations]: https://github.com/bazelbuild/platforms -[constraint_setting Rule]: /reference/be/platforms-and-toolchains#constraint_setting -[constraint_value Rule]: /reference/be/platforms-and-toolchains#constraint_value -[Configurable Builds - Part 1]: https://blog.bazel.build/2019/02/11/configurable-builds-part-1.html -[Configuring C++ toolchains]: /tutorials/ccp-toolchain-config -[Defining Constraints and Platforms]: /extending/platforms#constraints-platforms -[Example C++ toolchain]: https://github.com/gregestren/snippets/tree/master/custom_cc_toolchain_with_platforms -[exec_compatible_with Attribute]: /reference/be/platforms-and-toolchains#toolchain.exec_compatible_with -[extra_toolchains Flag]: /reference/command-line-reference#flag--extra_toolchains -[Go Rules]: https://github.com/bazelbuild/rules_go -[Inspiration]: https://blog.bazel.build/2019/02/11/configurable-builds-part-1.html -[Migrating your rule set]: #migrating-your-rule-set -[Platforms]: /extending/platforms -[Platforms examples]: https://github.com/hlopko/bazel_platforms_examples -[platform mappings design]: https://docs.google.com/document/d/1Vg_tPgiZbSrvXcJ403vZVAGlsWhH9BUDrAxMOYnO0Ls/edit -[platform Rule]: /reference/be/platforms-and-toolchains#platform -[register_toolchains Function]: /rules/lib/globals/module#register_toolchains -[Rust rules]: https://github.com/bazelbuild/rules_rust -[select()]: /docs/configurable-attributes -[select() Platforms]: /docs/configurable-attributes#platforms -[Starlark provider]: /extending/rules#providers -[Starlark rule]: /extending/rules -[Starlark transitions]: /extending/config#user-defined-transitions -[target_compatible_with Attribute]: /reference/be/platforms-and-toolchains#toolchain.target_compatible_with -[Toolchains]: /extending/toolchains diff --git a/8.2.1/concepts/visibility.mdx b/8.2.1/concepts/visibility.mdx deleted file mode 100644 index cb7441d..0000000 --- a/8.2.1/concepts/visibility.mdx +++ /dev/null @@ -1,610 +0,0 @@ ---- -title: 'Visibility' ---- - - - -This page covers Bazel's two visibility systems: -[target visibility](#target-visibility) and [load visibility](#load-visibility). - -Both types of visibility help other developers distinguish between your -library's public API and its implementation details, and help enforce structure -as your workspace grows. You can also use visibility when deprecating a public -API to allow current users while denying new ones. - -## Target visibility - -**Target visibility** controls who may depend on your target — that is, who may -use your target's label inside an attribute such as `deps`. A target will fail -to build during the [analysis](/reference/glossary#analysis-phase) phase if it -violates the visibility of one of its dependencies. - -Generally, a target `A` is visible to a target `B` if they are in the same -location, or if `A` grants visibility to `B`'s location. 
In the absence of
-[symbolic macros](/extending/macros), the term "location" can be simplified
-to just "package"; see [below](#symbolic-macros) for more on symbolic macros.
-
-Visibility is specified by listing allowed packages. Allowing a package does
-not necessarily mean that its subpackages are also allowed. For more details on
-packages and subpackages, see [Concepts and terminology](/concepts/build-ref).
-
-For prototyping, you can disable target visibility enforcement by setting the
-flag `--check_visibility=false`. This shouldn't be done for production usage in
-submitted code.
-
-The primary way to control visibility is with a rule's
-[`visibility`](/reference/be/common-definitions#common.visibility) attribute.
-The following subsections describe the attribute's format, how to apply it to
-various kinds of targets, and the interaction between the visibility system and
-symbolic macros.
-
-### Visibility specifications
-
-All rule targets have a `visibility` attribute that takes a list of labels. Each
-label has one of the following forms. With the exception of the last form, these
-are just syntactic placeholders that don't correspond to any actual target.
-
-* `"//visibility:public"`: Grants access to all packages.
-
-* `"//visibility:private"`: Does not grant any additional access; only targets
-  in this location's package can use this target.
-
-* `"//foo/bar:__pkg__"`: Grants access to `//foo/bar` (but not its
-  subpackages).
-
-* `"//foo/bar:__subpackages__"`: Grants access to `//foo/bar` and all of its
-  direct and indirect subpackages.
-
-* `"//some_pkg:my_package_group"`: Grants access to all of the packages that
-  are part of the given [`package_group`](/reference/be/functions#package_group).
-
-  * Package groups use a
-    [different syntax](/reference/be/functions#package_group.packages) for
-    specifying packages. Within a package group, the forms
-    `"//foo/bar:__pkg__"` and `"//foo/bar:__subpackages__"` are respectively
-    replaced by `"//foo/bar"` and `"//foo/bar/..."`. Likewise,
-    `"//visibility:public"` and `"//visibility:private"` are just `"public"`
-    and `"private"`.
-
-For example, if `//some/package:mytarget` has its `visibility` set to
-`[":__subpackages__", "//tests:__pkg__"]`, then it could be used by any target
-that is part of the `//some/package/...` source tree, as well as targets
-declared in `//tests/BUILD`, but not by targets defined in
-`//tests/integration/BUILD`.
-
-**Best practice:** To make several targets visible to the same set
-of packages, use a `package_group` instead of repeating the list in each
-target's `visibility` attribute. This increases readability and prevents the
-lists from getting out of sync.
-
-**Best practice:** When granting visibility to another team's project, prefer
-`__subpackages__` over `__pkg__` to avoid needless visibility churn as that
-project evolves and adds new subpackages.
-
-Note: The `visibility` attribute may not specify non-`package_group` targets.
-Doing so triggers a "Label does not refer to a package group" or "Cycle in
-dependency graph" error.
-
-### Rule target visibility
-
-A rule target's visibility is determined by taking its `visibility` attribute
--- or a suitable default if not given -- and appending the location where the
-target was declared.
-For targets not declared in a symbolic macro, if the package specifies a
-[`default_visibility`](/reference/be/functions#package.default_visibility),
-this default is used; for all other packages and for targets declared in a
-symbolic macro, the default is just `["//visibility:private"]`.
-
-```starlark
-# //mypkg/BUILD
-
-package(default_visibility = ["//friend:__pkg__"])
-
-cc_library(
-    name = "t1",
-    ...
-    # No visibility explicitly specified.
-    # Effective visibility is ["//friend:__pkg__", "//mypkg:__pkg__"].
-    # If no default_visibility were given in package(...), the visibility would
-    # instead default to ["//visibility:private"], and the effective visibility
-    # would be ["//mypkg:__pkg__"].
-)
-
-cc_library(
-    name = "t2",
-    ...
-    visibility = [":clients"],
-    # Effective visibility is ["//mypkg:clients", "//mypkg:__pkg__"], which will
-    # expand to ["//another_friend:__subpackages__", "//mypkg:__pkg__"].
-)
-
-cc_library(
-    name = "t3",
-    ...
-    visibility = ["//visibility:private"],
-    # Effective visibility is ["//mypkg:__pkg__"].
-)
-
-package_group(
-    name = "clients",
-    packages = ["//another_friend/..."],
-)
-```
-
-**Best practice:** Avoid setting `default_visibility` to public. It may be
-convenient for prototyping or in small codebases, but the risk of inadvertently
-creating public targets increases as the codebase grows. It's better to be
-explicit about which targets are part of a package's public interface.
-
-### Generated file target visibility
-
-A generated file target has the same visibility as the rule target that
-generates it.
-
-```starlark
-# //mypkg/BUILD
-
-java_binary(
-    name = "foo",
-    ...
-    visibility = ["//friend:__pkg__"],
-)
-```
-
-```starlark
-# //friend/BUILD
-
-some_rule(
-    name = "bar",
-    deps = [
-        # Allowed directly by visibility of foo.
-        "//mypkg:foo",
-        # Also allowed. The java_binary's "_deploy.jar" implicit output file
-        # target has the same visibility as the rule target itself.
-        "//mypkg:foo_deploy.jar",
-    ]
-    ...
-)
-```
-
-### Source file target visibility
-
-Source file targets can either be explicitly declared using
-[`exports_files`](/reference/be/functions#exports_files), or implicitly created
-by referring to their filename in a label attribute of a rule (outside of a
-symbolic macro). As with rule targets, the location of the call to
-`exports_files`, or the `BUILD` file that referred to the input file, is always
-automatically appended to the file's visibility.
-
-Files declared by `exports_files` can have their visibility set by the
-`visibility` parameter to that function. If this parameter is not given, the
-visibility is public.
-
-Note: `exports_files` may not be used to override the visibility of a generated
-file.
-
-For files that do not appear in a call to `exports_files`, the visibility
-depends on the value of the flag
-[`--incompatible_no_implicit_file_export`](https://github.com/bazelbuild/bazel/issues/10225):
-
-* If the flag is true, the visibility is private.
-
-* Else, the legacy behavior applies: The visibility is the same as the
-  `BUILD` file's `default_visibility`, or private if a default visibility is
-  not specified.
-
-Avoid relying on the legacy behavior. Always write an `exports_files`
-declaration whenever a source file target needs non-private visibility.
-
-**Best practice:** When possible, prefer to expose a rule target rather than a
-source file. For example, instead of calling `exports_files` on a `.java` file,
-wrap the file in a non-private `java_library` target.
-Generally, rule targets should only directly reference source files that live
-in the same package.
-
-#### Example
-
-File `//frobber/data/BUILD`:
-
-```starlark
-exports_files(["readme.txt"])
-```
-
-File `//frobber/bin/BUILD`:
-
-```starlark
-cc_binary(
-    name = "my-program",
-    data = ["//frobber/data:readme.txt"],
-)
-```
-
-### Config setting visibility
-
-Historically, Bazel has not enforced visibility for
-[`config_setting`](/reference/be/general#config_setting) targets that are
-referenced in the keys of a [`select()`](/reference/be/functions#select). There
-are two flags to remove this legacy behavior:
-
-* [`--incompatible_enforce_config_setting_visibility`](https://github.com/bazelbuild/bazel/issues/12932)
-  enables visibility checking for these targets. To assist with migration, it
-  also causes any `config_setting` that does not specify a `visibility` to be
-  considered public (regardless of package-level `default_visibility`).
-
-* [`--incompatible_config_setting_private_default_visibility`](https://github.com/bazelbuild/bazel/issues/12933)
-  causes `config_setting`s that do not specify a `visibility` to respect the
-  package's `default_visibility` and to fall back on private visibility, just
-  like any other rule target. It is a no-op if
-  `--incompatible_enforce_config_setting_visibility` is not set.
-
-Avoid relying on the legacy behavior. Any `config_setting` that is intended to
-be used outside the current package should have an explicit `visibility`, if the
-package does not already specify a suitable `default_visibility`.
-
-### Package group target visibility
-
-`package_group` targets do not have a `visibility` attribute. They are always
-publicly visible.
-
-### Visibility of implicit dependencies
-
-Some rules have [implicit dependencies](/extending/rules#private_attributes_and_implicit_dependencies) —
-dependencies that are not spelled out in a `BUILD` file but are inherent to
-every instance of that rule. For example, a `cc_library` rule might create an
-implicit dependency from each of its rule targets to an executable target
-representing a C++ compiler.
-
-The visibility of such an implicit dependency is checked with respect to the
-package containing the `.bzl` file in which the rule (or aspect) is defined. In
-our example, the C++ compiler could be private so long as it lives in the same
-package as the definition of the `cc_library` rule. As a fallback, if the
-implicit dependency is not visible from the definition, it is checked with
-respect to the `cc_library` target.
-
-If you want to restrict the usage of a rule to certain packages, use
-[load visibility](#load-visibility) instead.
-
-### Visibility and symbolic macros
-
-This section describes how the visibility system interacts with
-[symbolic macros](/extending/macros).
-
-#### Locations within symbolic macros
-
-A key detail of the visibility system is how we determine the location of a
-declaration. For targets that are not declared in a symbolic macro, the location
-is just the package where the target lives -- the package of the `BUILD` file.
-But for targets created in a symbolic macro, the location is the package
-containing the `.bzl` file where the macro's definition (the
-`my_macro = macro(...)` statement) appears. When a target is created inside
-multiple nested macros, it is always the innermost symbolic macro's definition
-that is used.
-
-The same system is used to determine what location to check against a given
-dependency's visibility.
-If the consuming target was created inside a macro, we look at the innermost
-macro's definition rather than the package the consuming target lives in.
-
-This means that all macros whose code is defined in the same package are
-automatically "friends" with one another. Any target directly created by a macro
-defined in `//lib:defs.bzl` can be seen from any other macro defined in `//lib`,
-regardless of what packages the macros are actually instantiated in. Likewise,
-they can see, and can be seen by, targets declared directly in `//lib/BUILD` and
-its legacy macros. Conversely, targets that live in the same package cannot
-necessarily see one another if at least one of them is created by a symbolic
-macro.
-
-Within a symbolic macro's implementation function, the `visibility` parameter
-has the effective value of the macro's `visibility` attribute after appending
-the location where the macro was called. The standard way for a macro to export
-one of its targets to its caller is to forward this value along to the target's
-declaration, as in `some_rule(..., visibility = visibility)`. Targets that omit
-this attribute won't be visible to the caller of the macro unless the caller
-happens to be in the same package as the macro definition. This behavior
-composes, in the sense that a chain of nested calls to submacros may each pass
-`visibility = visibility`, re-exporting the inner macro's exported targets to
-the caller at each level, without exposing any of the macros' implementation
-details.
-
-#### Delegating privileges to a submacro
-
-The visibility model has a special feature to allow a macro to delegate its
-permissions to a submacro. This is important for factoring and composing macros.
-
-Suppose you have a macro `my_macro` that creates a dependency edge using a rule
-`some_library` from another package:
-
-```starlark
-# //macro/defs.bzl
-load("//lib:defs.bzl", "some_library")
-
-def _impl(name, visibility, ...):
-    ...
-    native.genrule(
-        name = name + "_dependency",
-        ...
-    )
-    some_library(
-        name = name + "_consumer",
-        deps = [name + "_dependency"],
-        ...
-    )
-
-my_macro = macro(implementation = _impl, ...)
-```
-
-```starlark
-# //pkg/BUILD
-
-load("//macro:defs.bzl", "my_macro")
-
-my_macro(name = "foo", ...)
-```
-
-The `//pkg:foo_dependency` target has no `visibility` specified, so it is only
-visible within `//macro`, which works fine for the consuming target. Now, what
-happens if the author of `//lib` refactors `some_library` to instead be
-implemented using a macro?
-
-```starlark
-# //lib:defs.bzl
-
-def _impl(name, visibility, deps, ...):
-    some_rule(
-        # Main target, exported.
-        name = name,
-        visibility = visibility,
-        deps = deps,
-        ...)
-
-some_library = macro(implementation = _impl, ...)
-```
-
-With this change, `//pkg:foo_consumer`'s location is now `//lib` rather than
-`//macro`, so its usage of `//pkg:foo_dependency` violates the dependency's
-visibility. The author of `my_macro` can't be expected to pass
-`visibility = ["//lib"]` to the declaration of the dependency just to work
-around this implementation detail.
-
-For this reason, when a dependency of a target is also an attribute value of the
-macro that declared the target, we check the dependency's visibility against the
-location of the macro instead of the location of the consuming target.
- -In this example, to validate whether `//pkg:foo_consumer` can see -`//pkg:foo_dependency`, we see that `//pkg:foo_dependency` was also passed as an -input to the call to `some_library` inside of `my_macro`, and instead check the -dependency's visibility against the location of this call, `//macro`. - -This process can repeat recursively, as long as a target or macro declaration is -inside of another symbolic macro taking the dependency's label in one of its -label-typed attributes. - -Note: Visibility delegation does not work for labels that were not passed into -the macro, such as labels derived by string manipulation. - -#### Finalizers - -Targets declared in a rule finalizer (a symbolic macro with `finalizer = True`), -in addition to seeing targets following the usual symbolic macro visibility -rules, can *also* see all targets which are visible to the finalizer target's -package. - -In other words, if you migrate a `native.existing_rules()`-based legacy macro to -a finalizer, the targets declared by the finalizer will still be able to see -their old dependencies. - -It is possible to define targets that a finalizer can introspect using -`native.existing_rules()`, but which it cannot use as dependencies under the -visibility system. For example, if a macro-defined target is not visible to its -own package or to the finalizer macro's definition, and is not delegated to the -finalizer, the finalizer cannot see such a target. Note, however, that a -`native.existing_rules()`-based legacy macro will also be unable to see such a -target. - -## Load visibility - -**Load visibility** controls whether a `.bzl` file may be loaded from other -`BUILD` or `.bzl` files outside the current package. - -In the same way that target visibility protects source code that is encapsulated -by targets, load visibility protects build logic that is encapsulated by `.bzl` -files. For instance, a `BUILD` file author might wish to factor some repetitive -target declarations into a macro in a `.bzl` file. Without the protection of -load visibility, they might find their macro reused by other collaborators in -the same workspace, so that modifying the macro breaks other teams' builds. - -Note that a `.bzl` file may or may not have a corresponding source file target. -If it does, there is no guarantee that the load visibility and the target -visibility coincide. That is, the same `BUILD` file might be able to load the -`.bzl` file but not list it in the `srcs` of a [`filegroup`](/reference/be/general#filegroup), -or vice versa. This can sometimes cause problems for rules that wish to consume -`.bzl` files as source code, such as for documentation generation or testing. - -For prototyping, you may disable load visibility enforcement by setting -`--check_bzl_visibility=false`. As with `--check_visibility=false`, this should -not be done for submitted code. - -Load visibility is available as of Bazel 6.0. - -### Declaring load visibility - -To set the load visibility of a `.bzl` file, call the -[`visibility()`](/rules/lib/globals/bzl#visibility) function from within the file. -The argument to `visibility()` is a list of package specifications, just like -the [`packages`](/reference/be/functions#package_group.packages) attribute of -`package_group`. However, `visibility()` does not accept negative package -specifications. - -The call to `visibility()` must only occur once per file, at the top level (not -inside a function), and ideally immediately following the `load()` statements. 
-
-Unlike target visibility, the default load visibility is always public. Files
-that do not call `visibility()` are always loadable from anywhere in the
-workspace. It is a good idea to add `visibility("private")` to the top of any
-new `.bzl` file that is not specifically intended for use outside the package.
-
-### Example
-
-```starlark
-# //mylib/internal_defs.bzl
-
-# Available to subpackages and to mylib's tests.
-visibility(["//mylib/...", "//tests/mylib/..."])
-
-def helper(...):
-    ...
-```
-
-```starlark
-# //mylib/rules.bzl
-
-load(":internal_defs.bzl", "helper")
-# Set visibility explicitly, even though public is the default.
-# Note the [] can be omitted when there's only one entry.
-visibility("public")
-
-myrule = rule(
-    ...
-)
-```
-
-```starlark
-# //someclient/BUILD
-
-load("//mylib:rules.bzl", "myrule")          # ok
-load("//mylib:internal_defs.bzl", "helper")  # error
-
-...
-```
-
-### Load visibility practices
-
-This section describes tips for managing load visibility declarations.
-
-#### Factoring visibilities
-
-When multiple `.bzl` files should have the same visibility, it can be helpful to
-factor their package specifications into a common list. For example:
-
-```starlark
-# //mylib/internal_defs.bzl
-
-visibility("private")
-
-clients = [
-    "//foo",
-    "//bar/baz/...",
-    ...
-]
-```
-
-```starlark
-# //mylib/feature_A.bzl
-
-load(":internal_defs.bzl", "clients")
-visibility(clients)
-
-...
-```
-
-```starlark
-# //mylib/feature_B.bzl
-
-load(":internal_defs.bzl", "clients")
-visibility(clients)
-
-...
-```
-
-This helps prevent accidental skew between the various `.bzl` files'
-visibilities. It is also more readable when the `clients` list is large.
-
-#### Composing visibilities
-
-Sometimes a `.bzl` file might need to be visible to an allowlist that is
-composed of multiple smaller allowlists. This is analogous to how a
-`package_group` can incorporate other `package_group`s via its
-[`includes`](/reference/be/functions#package_group.includes) attribute.
-
-Suppose you are deprecating a widely used macro. You want it to be visible only
-to existing users and to the packages owned by your own team. You might write:
-
-```starlark
-# //mylib/macros.bzl
-
-load(":internal_defs.bzl", "our_packages")
-load("//some_big_client:defs.bzl", "their_remaining_uses")
-
-# List concatenation. Duplicates are fine.
-visibility(our_packages + their_remaining_uses)
-```
-
-#### Deduplicating with package groups
-
-Unlike target visibility, you cannot define a load visibility in terms of a
-`package_group`. If you want to reuse the same allowlist for both target
-visibility and load visibility, it's best to move the list of package
-specifications into a `.bzl` file, where both kinds of declarations may refer to
-it. Building off the example in [Factoring visibilities](#factoring-visibilities)
-above, you might write:
-
-```starlark
-# //mylib/BUILD
-
-load(":internal_defs.bzl", "clients")
-
-package_group(
-    name = "my_pkg_grp",
-    packages = clients,
-)
-```
-
-This only works if the list does not contain any negative package
-specifications.
-
-#### Protecting individual symbols
-
-Any Starlark symbol whose name begins with an underscore cannot be loaded from
-another file. This makes it easy to create private symbols, but does not allow
-you to share these symbols with a limited set of trusted files. On the other
-hand, load visibility gives you control over what other packages may see your
-`.bzl` file, but does not allow you to prevent any non-underscored symbol from
-being loaded.
-
-Luckily, you can combine these two features to get fine-grained control.
-
-```starlark
-# //mylib/internal_defs.bzl
-
-# Can't be public, because internal_helper shouldn't be exposed to the world.
-visibility("private")
-
-# Can't be underscore-prefixed, because this is
-# needed by other .bzl files in mylib.
-def internal_helper(...):
-    ...
-
-def public_util(...):
-    ...
-```
-
-```starlark
-# //mylib/defs.bzl
-
-load(":internal_defs.bzl", "internal_helper", _public_util="public_util")
-visibility("public")
-
-# internal_helper, as a loaded symbol, is available for use in this file but
-# can't be imported by clients who load this file.
-...
-
-# Re-export public_util from this file by assigning it to a global variable.
-# We needed to import it under a different name ("_public_util") in order for
-# this assignment to be legal.
-public_util = _public_util
-```
-
-#### bzl-visibility Buildifier lint
-
-There is a [Buildifier lint](https://github.com/bazelbuild/buildtools/blob/master/WARNINGS.md#bzl-visibility)
-that provides a warning if users load a file from a directory named `internal`
-or `private`, when the user's file is not itself underneath the parent of that
-directory. This lint predates the load visibility feature and is unnecessary in
-workspaces where `.bzl` files declare visibilities.
diff --git a/8.2.1/configure/attributes.mdx b/8.2.1/configure/attributes.mdx
deleted file mode 100644
index 7bc3f41..0000000
--- a/8.2.1/configure/attributes.mdx
+++ /dev/null
@@ -1,1097 +0,0 @@
----
-title: 'Configurable Build Attributes'
----
-
-
-
-**_Configurable attributes_**, commonly known as [`select()`](
-/reference/be/functions#select), is a Bazel feature that lets users toggle the
-values of build rule attributes at the command line.
-
-This can be used, for example, for a multiplatform library that automatically
-chooses the appropriate implementation for the architecture, or for a
-feature-configurable binary that can be customized at build time.
-
-## Example
-
-```python
-# myapp/BUILD
-
-cc_binary(
-    name = "mybinary",
-    srcs = ["main.cc"],
-    deps = select({
-        ":arm_build": [":arm_lib"],
-        ":x86_debug_build": [":x86_dev_lib"],
-        "//conditions:default": [":generic_lib"],
-    }),
-)
-
-config_setting(
-    name = "arm_build",
-    values = {"cpu": "arm"},
-)
-
-config_setting(
-    name = "x86_debug_build",
-    values = {
-        "cpu": "x86",
-        "compilation_mode": "dbg",
-    },
-)
-```
-
-This declares a `cc_binary` that "chooses" its deps based on the flags at the
-command line. Specifically, `deps` becomes:
-
-Command | deps =
--------- | --------
-`bazel build //myapp:mybinary --cpu=arm` | `[":arm_lib"]`
-`bazel build //myapp:mybinary -c dbg --cpu=x86` | `[":x86_dev_lib"]`
-`bazel build //myapp:mybinary --cpu=ppc` | `[":generic_lib"]`
-`bazel build //myapp:mybinary -c dbg --cpu=ppc` | `[":generic_lib"]`
-
-`select()` serves as a placeholder for a value that will be chosen based on
-*configuration conditions*, which are labels referencing [`config_setting`](/reference/be/general#config_setting)
-targets. By using `select()` in a configurable attribute, the attribute
-effectively adopts different values when different conditions hold.
-
-Matches must be unambiguous: if multiple conditions match, then either
-
-* They all resolve to the same value. For example, when running on Linux x86,
-  `{"@platforms//os:linux": "Hello", "@platforms//cpu:x86_64": "Hello"}` is
-  unambiguous because both branches resolve to `"Hello"`.
-* One's `values` is a strict superset of all others'. For example,
-  `values = {"cpu": "x86", "compilation_mode": "dbg"}` is an unambiguous
-  specialization of `values = {"cpu": "x86"}`.
-
-The built-in condition [`//conditions:default`](#default-condition) automatically matches when
-nothing else does.
-
-While this example uses `deps`, `select()` works just as well on `srcs`,
-`resources`, `cmd`, and most other attributes. Only a small number of attributes
-are *non-configurable*, and these are clearly annotated. For example,
-`config_setting`'s own
-[`values`](/reference/be/general#config_setting.values) attribute is non-configurable.
-
-## `select()` and dependencies
-
-Certain attributes change the build parameters for all transitive dependencies
-under a target. For example, `genrule`'s `tools` changes `--cpu` to the CPU of
-the machine running Bazel (which, thanks to cross-compilation, may be different
-than the CPU the target is built for). This is known as a
-[configuration transition](/reference/glossary#transition).
-
-Given
-
-```python
-# myapp/BUILD
-
-config_setting(
-    name = "arm_cpu",
-    values = {"cpu": "arm"},
-)
-
-config_setting(
-    name = "x86_cpu",
-    values = {"cpu": "x86"},
-)
-
-genrule(
-    name = "my_genrule",
-    srcs = select({
-        ":arm_cpu": ["g_arm.src"],
-        ":x86_cpu": ["g_x86.src"],
-    }),
-    tools = select({
-        ":arm_cpu": [":tool1"],
-        ":x86_cpu": [":tool2"],
-    }),
-)
-
-cc_binary(
-    name = "tool1",
-    srcs = select({
-        ":arm_cpu": ["armtool.cc"],
-        ":x86_cpu": ["x86tool.cc"],
-    }),
-)
-```
-
-running
-
-```sh
-$ bazel build //myapp:my_genrule --cpu=arm
-```
-
-on an `x86` developer machine binds the build to `g_arm.src`, `tool1`, and
-`x86tool.cc`. Both of the `select`s attached to `my_genrule` use `my_genrule`'s
-build parameters, which include `--cpu=arm`. The `tools` attribute changes
-`--cpu` to `x86` for `tool1` and its transitive dependencies. The `select` on
-`tool1` uses `tool1`'s build parameters, which include `--cpu=x86`.
-
-## Configuration conditions
-
-Each key in a configurable attribute is a label reference to a
-[`config_setting`](/reference/be/general#config_setting) or
-[`constraint_value`](/reference/be/platforms-and-toolchains#constraint_value).
-
-`config_setting` is just a collection of
-expected command line flag settings. By encapsulating these in a target, it's
-easy to maintain "standard" conditions users can reference from multiple places.
-
-`constraint_value` provides support for [multi-platform behavior](#platforms).
-
-### Built-in flags
-
-Flags like `--cpu` are built into Bazel: the build tool natively understands
-them for all builds in all projects. 
These are specified with -[`config_setting`](/reference/be/general#config_setting)'s -[`values`](/reference/be/general#config_setting.values) attribute: - -```python -config_setting( - name = "meaningful_condition_name", - values = { - "flag1": "value1", - "flag2": "value2", - ... - }, -) -``` - -`flagN` is a flag name (without `--`, so `"cpu"` instead of `"--cpu"`). `valueN` -is the expected value for that flag. `:meaningful_condition_name` matches if -*every* entry in `values` matches. Order is irrelevant. - -`valueN` is parsed as if it was set on the command line. This means: - -* `values = { "compilation_mode": "opt" }` matches `bazel build -c opt` -* `values = { "force_pic": "true" }` matches `bazel build --force_pic=1` -* `values = { "force_pic": "0" }` matches `bazel build --noforce_pic` - -`config_setting` only supports flags that affect target behavior. For example, -[`--show_progress`](/docs/user-manual#show-progress) isn't allowed because -it only affects how Bazel reports progress to the user. Targets can't use that -flag to construct their results. The exact set of supported flags isn't -documented. In practice, most flags that "make sense" work. - -### Custom flags - -You can model your own project-specific flags with -[Starlark build settings][BuildSettings]. Unlike built-in flags, these are -defined as build targets, so Bazel references them with target labels. - -These are triggered with [`config_setting`](/reference/be/general#config_setting)'s -[`flag_values`](/reference/be/general#config_setting.flag_values) -attribute: - -```python -config_setting( - name = "meaningful_condition_name", - flag_values = { - "//myflags:flag1": "value1", - "//myflags:flag2": "value2", - ... - }, -) -``` - -Behavior is the same as for [built-in flags](#built-in-flags). See [here](https://github.com/bazelbuild/examples/tree/HEAD/configurations/select_on_build_setting) -for a working example. - -[`--define`](/reference/command-line-reference#flag--define) -is an alternative legacy syntax for custom flags (for example -`--define foo=bar`). This can be expressed either in the -[values](/reference/be/general#config_setting.values) attribute -(`values = {"define": "foo=bar"}`) or the -[define_values](/reference/be/general#config_setting.define_values) attribute -(`define_values = {"foo": "bar"}`). `--define` is only supported for backwards -compatibility. Prefer Starlark build settings whenever possible. - -`values`, `flag_values`, and `define_values` evaluate independently. The -`config_setting` matches if all values across all of them match. - -## The default condition - -The built-in condition `//conditions:default` matches when no other condition -matches. - -Because of the "exactly one match" rule, a configurable attribute with no match -and no default condition emits a `"no matching conditions"` error. This can -protect against silent failures from unexpected settings: - -```python -# myapp/BUILD - -config_setting( - name = "x86_cpu", - values = {"cpu": "x86"}, -) - -cc_library( - name = "x86_only_lib", - srcs = select({ - ":x86_cpu": ["lib.cc"], - }), -) -``` - -```sh -$ bazel build //myapp:x86_only_lib --cpu=arm -ERROR: Configurable attribute "srcs" doesn't match this configuration (would -a default condition help?). -Conditions checked: - //myapp:x86_cpu -``` - -For even clearer errors, you can set custom messages with `select()`'s -[`no_match_error`](#custom-error-messages) attribute. 
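-
-For instance, a minimal sketch of how adding a `//conditions:default` branch to
-the example above would make the `--cpu=arm` build succeed instead of erroring
-out (the fallback file name is hypothetical):
-
-```python
-# myapp/BUILD
-
-cc_library(
-    name = "x86_only_lib",
-    srcs = select({
-        ":x86_cpu": ["lib.cc"],
-        # Fallback sources used when no other condition matches,
-        # such as under --cpu=arm.
-        "//conditions:default": ["lib_generic.cc"],
-    }),
-)
-```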
-
-## Platforms
-
-While the ability to specify multiple flags on the command line provides
-flexibility, it can also be burdensome to individually set each one every time
-you want to build a target.
-[Platforms](/extending/platforms)
-let you consolidate these into simple bundles.
-
-```python
-# myapp/BUILD
-
-sh_binary(
-    name = "my_rocks",
-    srcs = select({
-        ":basalt": ["pyroxene.sh"],
-        ":marble": ["calcite.sh"],
-        "//conditions:default": ["feldspar.sh"],
-    }),
-)
-
-config_setting(
-    name = "basalt",
-    constraint_values = [
-        ":black",
-        ":igneous",
-    ],
-)
-
-config_setting(
-    name = "marble",
-    constraint_values = [
-        ":white",
-        ":metamorphic",
-    ],
-)
-
-# constraint_setting acts as an enum type, and constraint_value as an enum value.
-constraint_setting(name = "color")
-constraint_value(name = "black", constraint_setting = "color")
-constraint_value(name = "white", constraint_setting = "color")
-constraint_setting(name = "texture")
-constraint_value(name = "smooth", constraint_setting = "texture")
-constraint_setting(name = "type")
-constraint_value(name = "igneous", constraint_setting = "type")
-constraint_value(name = "metamorphic", constraint_setting = "type")
-
-platform(
-    name = "basalt_platform",
-    constraint_values = [
-        ":black",
-        ":igneous",
-    ],
-)
-
-platform(
-    name = "marble_platform",
-    constraint_values = [
-        ":white",
-        ":smooth",
-        ":metamorphic",
-    ],
-)
-```
-
-The platform can be specified on the command line. It activates the
-`config_setting`s that contain a subset of the platform's `constraint_values`,
-allowing those `config_setting`s to match in `select()` expressions.
-
-For example, in order to set the `srcs` attribute of `my_rocks` to `calcite.sh`,
-you can simply run
-
-```sh
-bazel build //myapp:my_rocks --platforms=//myapp:marble_platform
-```
-
-Without platforms, this might look something like
-
-```sh
-bazel build //myapp:my_rocks --define color=white --define texture=smooth --define type=metamorphic
-```
-
-`select()` can also directly read `constraint_value`s:
-
-```python
-constraint_setting(name = "type")
-constraint_value(name = "igneous", constraint_setting = "type")
-constraint_value(name = "metamorphic", constraint_setting = "type")
-sh_binary(
-    name = "my_rocks",
-    srcs = select({
-        ":igneous": ["igneous.sh"],
-        ":metamorphic": ["metamorphic.sh"],
-    }),
-)
-```
-
-This saves the need for boilerplate `config_setting`s when you only need to
-check against single values.
-
-Platforms are still under development. See the
-[documentation](/concepts/platforms) for details.
-
-## Combining `select()`s
-
-`select` can appear multiple times in the same attribute:
-
-```python
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"] +
-           select({
-               ":armeabi_mode": ["armeabi_src.sh"],
-               ":x86_mode": ["x86_src.sh"],
-           }) +
-           select({
-               ":opt_mode": ["opt_extras.sh"],
-               ":dbg_mode": ["dbg_extras.sh"],
-           }),
-)
-```
-
-Note: Some restrictions apply to what can be combined in the `select`s' values:
-
- - Duplicate labels can appear in different paths of the same `select`.
- - Duplicate labels can *not* appear within the same path of a `select`.
- - Duplicate labels can *not* appear across multiple combined `select`s (no
-   matter what path).
-
-`select` cannot appear inside another `select`. If you need to nest `selects`
-and your attribute takes other targets as values, use an intermediate target:
-
-```python
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"],
-    deps = select({
-        ":armeabi_mode": [":armeabi_lib"],
-        ...
- }), -) - -sh_library( - name = "armeabi_lib", - srcs = select({ - ":opt_mode": ["armeabi_with_opt.sh"], - ... - }), -) -``` - -If you need a `select` to match when multiple conditions match, consider [AND -chaining](#and-chaining). - -## OR chaining - -Consider the following: - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1": [":standard_lib"], - ":config2": [":standard_lib"], - ":config3": [":standard_lib"], - ":config4": [":special_lib"], - }), -) -``` - -Most conditions evaluate to the same dep. But this syntax is hard to read and -maintain. It would be nice to not have to repeat `[":standard_lib"]` multiple -times. - -One option is to predefine the value as a BUILD variable: - -```python -STANDARD_DEP = [":standard_lib"] - -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1": STANDARD_DEP, - ":config2": STANDARD_DEP, - ":config3": STANDARD_DEP, - ":config4": [":special_lib"], - }), -) -``` - -This makes it easier to manage the dependency. But it still causes unnecessary -duplication. - -For more direct support, use one of the following: - -### `selects.with_or` - -The -[with_or](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectswith_or) -macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md) -module supports `OR`ing conditions directly inside a `select`: - -```python -load("@bazel_skylib//lib:selects.bzl", "selects") -``` - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = selects.with_or({ - (":config1", ":config2", ":config3"): [":standard_lib"], - ":config4": [":special_lib"], - }), -) -``` - -### `selects.config_setting_group` - - -The -[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group) -macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md) -module supports `OR`ing multiple `config_setting`s: - -```python -load("@bazel_skylib//lib:selects.bzl", "selects") -``` - - -```python -config_setting( - name = "config1", - values = {"cpu": "arm"}, -) -config_setting( - name = "config2", - values = {"compilation_mode": "dbg"}, -) -selects.config_setting_group( - name = "config1_or_2", - match_any = [":config1", ":config2"], -) -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1_or_2": [":standard_lib"], - "//conditions:default": [":other_lib"], - }), -) -``` - -Unlike `selects.with_or`, different targets can share `:config1_or_2` across -different attributes. - -It's an error for multiple conditions to match unless one is an unambiguous -"specialization" of the others or they all resolve to the same value. See [here](#configurable-build-example) for details. 
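-
-To illustrate the sharing mentioned above, here's a sketch of a second,
-hypothetical target reusing `:config1_or_2` in a different attribute (the
-`sh_test` and its files are illustrative):
-
-```python
-sh_test(
-    name = "my_test",
-    srcs = ["my_test.sh"],
-    # The same OR group can drive a different attribute on a different target.
-    data = select({
-        ":config1_or_2": [":standard_lib"],
-        "//conditions:default": [":other_lib"],
-    }),
-)
-```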
-
-## AND chaining
-
-If you need a `select` branch to match when multiple conditions match, use the
-[Skylib](https://github.com/bazelbuild/bazel-skylib) macro
-[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group):
-
-```python
-load("@bazel_skylib//lib:selects.bzl", "selects")
-
-config_setting(
-    name = "config1",
-    values = {"cpu": "arm"},
-)
-config_setting(
-    name = "config2",
-    values = {"compilation_mode": "dbg"},
-)
-selects.config_setting_group(
-    name = "config1_and_2",
-    match_all = [":config1", ":config2"],
-)
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"],
-    deps = select({
-        ":config1_and_2": [":standard_lib"],
-        "//conditions:default": [":other_lib"],
-    }),
-)
-```
-
-Unlike OR chaining, existing `config_setting`s can't be directly `AND`ed
-inside a `select`. You have to explicitly wrap them in a `config_setting_group`.
-
-## Custom error messages
-
-By default, when no condition matches, the target the `select()` is attached to
-fails with the error:
-
-```sh
-ERROR: Configurable attribute "deps" doesn't match this configuration (would
-a default condition help?).
-Conditions checked:
-  //tools/cc_target_os:darwin
-  //tools/cc_target_os:android
-```
-
-This can be customized with the [`no_match_error`](/reference/be/functions#select)
-attribute:
-
-```python
-cc_library(
-    name = "my_lib",
-    deps = select(
-        {
-            "//tools/cc_target_os:android": [":android_deps"],
-            "//tools/cc_target_os:windows": [":windows_deps"],
-        },
-        no_match_error = "Please build with an Android or Windows toolchain",
-    ),
-)
-```
-
-```sh
-$ bazel build //myapp:my_lib
-ERROR: Configurable attribute "deps" doesn't match this configuration: Please
-build with an Android or Windows toolchain
-```
-
-## Rules compatibility
-
-Rule implementations receive the *resolved values* of configurable
-attributes. For example, given:
-
-```python
-# myapp/BUILD
-
-some_rule(
-    name = "my_target",
-    some_attr = select({
-        ":foo_mode": [":foo"],
-        ":bar_mode": [":bar"],
-    }),
-)
-```
-
-```sh
-$ bazel build //myapp:my_target --define mode=foo
-```
-
-Rule implementation code sees `ctx.attr.some_attr` as `[":foo"]`.
-
-Macros can accept `select()` clauses and pass them through to native
-rules. But *they cannot directly manipulate them*. For example, there's no way
-for a macro to convert
-
-```python
-select({"foo": "val"}, ...)
-```
-
-to
-
-```python
-select({"foo": "val_with_suffix"}, ...)
-```
-
-This is for two reasons.
-
-First, macros that need to know which path a `select` will choose *cannot work*
-because macros are evaluated in Bazel's [loading phase](/run/build#loading),
-which occurs before flag values are known.
-This is a core Bazel design restriction that's unlikely to change any time soon.
-
-Second, macros that just need to iterate over *all* `select` paths, while
-technically feasible, lack a coherent UI. Further design is necessary to change
-this.
-
-## Bazel query and cquery
-
-Bazel [`query`](/query/guide) operates over Bazel's
-[loading phase](/reference/glossary#loading-phase).
-This means it doesn't know what command line flags a target uses since those
-flags aren't evaluated until later in the build (in the
-[analysis phase](/reference/glossary#analysis-phase)).
-So it can't determine which `select()` branches are chosen.
-
-Bazel [`cquery`](/query/cquery) operates after Bazel's analysis phase, so it has
-all this information and can accurately resolve `select()`s.
-
-Consider:
-
-```python
-load("@bazel_skylib//rules:common_settings.bzl", "string_flag")
-```
-```python
-# myapp/BUILD
-
-string_flag(
-    name = "dog_type",
-    build_setting_default = "cat",
-)
-
-cc_library(
-    name = "my_lib",
-    deps = select({
-        ":long": [":foo_dep"],
-        ":short": [":bar_dep"],
-    }),
-)
-
-config_setting(
-    name = "long",
-    flag_values = {":dog_type": "dachshund"},
-)
-
-config_setting(
-    name = "short",
-    flag_values = {":dog_type": "pug"},
-)
-```
-
-`query` overapproximates `:my_lib`'s dependencies:
-
-```sh
-$ bazel query 'deps(//myapp:my_lib)'
-//myapp:my_lib
-//myapp:foo_dep
-//myapp:bar_dep
-```
-
-while `cquery` shows its exact dependencies:
-
-```sh
-$ bazel cquery 'deps(//myapp:my_lib)' --//myapp:dog_type=pug
-//myapp:my_lib
-//myapp:bar_dep
-```
-
-## FAQ
-
-### Why doesn't select() work in macros?
-
-select() *does* work in rules! See [Rules compatibility](#rules-compatibility) for
-details.
-
-What this question usually means is that select() doesn't work in *macros*,
-which are different from *rules*. See the
-documentation on [rules](/extending/rules) and [macros](/extending/macros)
-to understand the difference.
-Here's an end-to-end example:
-
-Define a rule and macro:
-
-```python
-# myapp/defs.bzl
-
-# Rule implementation: when an attribute is read, all select()s have already
-# been resolved. So it looks like a plain old attribute just like any other.
-def _impl(ctx):
-    name = ctx.attr.name
-    allcaps = ctx.attr.my_config_string.upper()  # This works fine on all values.
-    print("My name is " + name + " with custom message: " + allcaps)
-
-# Rule declaration:
-my_custom_bazel_rule = rule(
-    implementation = _impl,
-    attrs = {"my_config_string": attr.string()},
-)
-
-# Macro declaration:
-def my_custom_bazel_macro(name, my_config_string):
-    allcaps = my_config_string.upper()  # This line won't work with select(s).
-    print("My name is " + name + " with custom message: " + allcaps)
-```
-
-Instantiate the rule and macro:
-
-```python
-# myapp/BUILD
-
-load("//myapp:defs.bzl", "my_custom_bazel_rule")
-load("//myapp:defs.bzl", "my_custom_bazel_macro")
-
-my_custom_bazel_rule(
-    name = "happy_rule",
-    my_config_string = select({
-        "//third_party/bazel_platforms/cpu:x86_32": "first string",
-        "//third_party/bazel_platforms/cpu:ppc": "second string",
-    }),
-)
-
-my_custom_bazel_macro(
-    name = "happy_macro",
-    my_config_string = "fixed string",
-)
-
-my_custom_bazel_macro(
-    name = "sad_macro",
-    my_config_string = select({
-        "//third_party/bazel_platforms/cpu:x86_32": "first string",
-        "//third_party/bazel_platforms/cpu:ppc": "other string",
-    }),
-)
-```
-
-Building fails because `sad_macro` can't process the `select()`:
-
-```sh
-$ bazel build //myapp:all
-ERROR: /myworkspace/myapp/BUILD:17:1: Traceback (most recent call last):
-File "/myworkspace/myapp/BUILD", line 17
-my_custom_bazel_macro(name = "sad_macro", my_config_stri..."}))
-File "/myworkspace/myapp/defs.bzl", line 4, in my_custom_bazel_macro
-my_config_string.upper()
-type 'select' has no method upper().
-ERROR: error loading package 'myapp': Package 'myapp' contains errors.
-```
-
-Building succeeds when you comment out `sad_macro`:
-
-```sh
-# Comment out sad_macro so it doesn't mess up the build.
-$ bazel build //myapp:all
-DEBUG: /myworkspace/myapp/defs.bzl:5:3: My name is happy_macro with custom message: FIXED STRING.
-DEBUG: /myworkspace/myapp/defs.bzl:15:3: My name is happy_rule with custom message: FIRST STRING.
-```
-
-This is impossible to change because *by definition* macros are evaluated before
-Bazel reads the build's command line flags. That means there isn't enough
-information to evaluate select()s.
-
-Macros can, however, pass `select()`s as opaque blobs to rules:
-
-```python
-# myapp/defs.bzl
-
-def my_custom_bazel_macro(name, my_config_string):
-    print("Invoking macro " + name)
-    my_custom_bazel_rule(
-        name = name + "_as_target",
-        my_config_string = my_config_string,
-    )
-```
-
-```sh
-$ bazel build //myapp:sad_macro_less_sad
-DEBUG: /myworkspace/myapp/defs.bzl:23:3: Invoking macro sad_macro_less_sad.
-DEBUG: /myworkspace/myapp/defs.bzl:15:3: My name is sad_macro_less_sad with custom message: FIRST STRING.
-```
-
-### Why does select() always return true?
-
-Because *macros* (but not rules) by definition
-[can't evaluate `select()`s](#faq-select-macro), any attempt to do so
-usually produces an error:
-
-```sh
-ERROR: /myworkspace/myapp/BUILD:17:1: Traceback (most recent call last):
-File "/myworkspace/myapp/BUILD", line 17
-my_custom_bazel_macro(name = "sad_macro", my_config_stri..."}))
-File "/myworkspace/myapp/defs.bzl", line 4, in my_custom_bazel_macro
-my_config_string.upper()
-type 'select' has no method upper().
-```
-
-Booleans are a special case that fail silently, so you should be particularly
-vigilant with them:
-
-```sh
-$ cat myapp/defs.bzl
-def my_boolean_macro(boolval):
-  print("TRUE" if boolval else "FALSE")
-
-$ cat myapp/BUILD
-load("//myapp:defs.bzl", "my_boolean_macro")
-my_boolean_macro(
-    boolval = select({
-        "//third_party/bazel_platforms/cpu:x86_32": True,
-        "//third_party/bazel_platforms/cpu:ppc": False,
-    }),
-)
-
-$ bazel build //myapp:all --cpu=x86
-DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE.
-$ bazel build //myapp:all --cpu=ppc
-DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE.
-```
-
-This happens because macros don't understand the contents of `select()`.
-So what they're really evaluating is the `select()` object itself. According to
-[Pythonic](https://docs.python.org/release/2.5.2/lib/truth.html) design
-standards, all objects aside from a very small number of exceptions
-automatically return true.
-
-### Can I read select() like a dict?
-
-Macros [can't](#faq-select-macro) evaluate `select()`s because macros evaluate before
-Bazel knows what the build's command line parameters are. Can they at least read
-the `select()`'s dictionary to, for example, add a suffix to each value?
-
-Conceptually this is possible, but it isn't yet a Bazel feature.
-What you *can* do today is prepare a straight dictionary, then feed it into a
-`select()`:
-
-```sh
-$ cat myapp/defs.bzl
-def selecty_genrule(name, select_cmd):
-    for key in select_cmd.keys():
-        select_cmd[key] += " WITH SUFFIX"
-    native.genrule(
-        name = name,
-        outs = [name + ".out"],
-        srcs = [],
-        cmd = "echo " +
-              select(select_cmd | {"//conditions:default": "default"}) +
-              " > $@",
-    )
-
-$ cat myapp/BUILD
-selecty_genrule(
-    name = "selecty",
-    select_cmd = {
-        "//third_party/bazel_platforms/cpu:x86_32": "x86 mode",
-    },
-)
-
-$ bazel build //myapp:selecty --cpu=x86 && cat bazel-genfiles/myapp/selecty.out
-x86 mode WITH SUFFIX
-```
-
-If you'd like to support both `select()` and native types, you can do this:
-
-```sh
-$ cat myapp/defs.bzl
-def selecty_genrule(name, select_cmd):
-    cmd_suffix = ""
-    if type(select_cmd) == "string":
-        cmd_suffix = select_cmd + " WITH SUFFIX"
-    elif type(select_cmd) == "dict":
-        for key in select_cmd.keys():
-            select_cmd[key] += " WITH SUFFIX"
-        cmd_suffix = select(select_cmd | {"//conditions:default": "default"})
-
-    native.genrule(
-        name = name,
-        outs = [name + ".out"],
-        srcs = [],
-        cmd = "echo " + cmd_suffix + " > $@",
-    )
-```
-
-### Why doesn't select() work with bind()?
-
-First of all, do not use `bind()`. It is deprecated in favor of `alias()`.
-
-The technical answer is that [`bind()`](/reference/be/workspace#bind) is a repo
-rule, not a BUILD rule.
-
-Repo rules do not have a specific configuration, and aren't evaluated in
-the same way as BUILD rules. Therefore, a `select()` in a `bind()` can't
-actually evaluate to any specific branch.
-
-Instead, you should use [`alias()`](/reference/be/general#alias), with a `select()` in
-the `actual` attribute, to perform this type of run-time determination. This
-works correctly, since `alias()` is a BUILD rule, and is evaluated with a
-specific configuration.
-
-```sh
-$ cat WORKSPACE
-workspace(name = "myapp")
-bind(name = "openssl", actual = "//:ssl")
-http_archive(name = "alternative", ...)
-http_archive(name = "boringssl", ...)
-
-$ cat BUILD
-config_setting(
-    name = "alt_ssl",
-    define_values = {
-        "ssl_library": "alternative",
-    },
-)
-
-alias(
-    name = "ssl",
-    actual = select({
-        "//:alt_ssl": "@alternative//:ssl",
-        "//conditions:default": "@boringssl//:ssl",
-    }),
-)
-```
-
-With this setup, you can pass `--define ssl_library=alternative`, and any target
-that depends on either `//:ssl` or `//external:ssl` will see the alternative
-located at `@alternative//:ssl`.
-
-But really, stop using `bind()`.
-
-### Why doesn't my select() choose what I expect?
-
-If `//myapp:foo` has a `select()` that doesn't choose the condition you expect,
-use [cquery](/query/cquery) and `bazel config` to debug:
-
-If `//myapp:foo` is the top-level target you're building, run:
-
-```sh
-$ bazel cquery //myapp:foo
-//myapp:foo (12e23b9a2b534a)
-```
-
-If you're building some other target `//bar` that depends on
-`//myapp:foo` somewhere in its subgraph, run:
-
-```sh
-$ bazel cquery 'somepath(//bar, //myapp:foo)'
-//bar:bar (3ag3193fee94a2)
-//bar:intermediate_dep (12e23b9a2b534a)
-//myapp:foo (12e23b9a2b534a)
-```
-
-The `(12e23b9a2b534a)` next to `//myapp:foo` is a *hash* of the
-configuration that resolves `//myapp:foo`'s `select()`. You can inspect its
-values with `bazel config`:
-
-```sh
-$ bazel config 12e23b9a2b534a
-BuildConfigurationValue 12e23b9a2b534a
-Fragment com.google.devtools.build.lib.analysis.config.CoreOptions {
-  cpu: darwin
-  compilation_mode: fastbuild
-  ...
-}
-Fragment com.google.devtools.build.lib.rules.cpp.CppOptions {
-  linkopt: [-Dfoo=bar]
-  ...
-}
-...
-```
-
-Then compare this output against the settings expected by each `config_setting`.
-
-`//myapp:foo` may exist in different configurations in the same build. See the
-[cquery docs](/query/cquery) for guidance on using `somepath` to get the right
-one.
-
-Caution: To prevent restarting the Bazel server, invoke `bazel config` with the
-same command line flags as the `bazel cquery`. The `config` command relies on
-the configuration nodes from the still-running server of the previous command.
-
-### Why doesn't `select()` work with platforms?
-
-Bazel doesn't support configurable attributes checking whether a given platform
-is the target platform because the semantics are unclear.
-
-For example:
-
-```py
-platform(
-    name = "x86_linux_platform",
-    constraint_values = [
-        "@platforms//cpu:x86",
-        "@platforms//os:linux",
-    ],
-)
-
-cc_library(
-    name = "lib",
-    srcs = [...],
-    linkopts = select({
-        ":x86_linux_platform": ["--enable_x86_optimizations"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-In this `BUILD` file, which `select()` branch should be used if the target
-platform has both the `@platforms//cpu:x86` and `@platforms//os:linux`
-constraints, but is **not** the `:x86_linux_platform` defined here? The author
-of the `BUILD` file and the user who defined the separate platform may have
-different ideas.
-
-#### What should I do instead?
-
-Instead, define a `config_setting` that matches **any** platform with
-these constraints:
-
-```py
-config_setting(
-    name = "is_x86_linux",
-    constraint_values = [
-        "@platforms//cpu:x86",
-        "@platforms//os:linux",
-    ],
-)
-
-cc_library(
-    name = "lib",
-    srcs = [...],
-    linkopts = select({
-        ":is_x86_linux": ["--enable_x86_optimizations"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-This approach defines specific semantics, making it clearer to users what
-platforms meet the desired conditions.
-
-#### What if I really, really want to `select` on the platform?
-
-If your build requirements specifically require checking the platform, you
-can flip the value of the `--platforms` flag in a `config_setting`:
-
-```py
-config_setting(
-    name = "is_specific_x86_linux_platform",
-    values = {
-        "platforms": "//package:x86_linux_platform",
-    },
-)
-
-cc_library(
-    name = "lib",
-    srcs = [...],
-    linkopts = select({
-        ":is_specific_x86_linux_platform": ["--enable_x86_optimizations"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-The Bazel team doesn't endorse doing this; it overly constrains your build and
-confuses users when the expected condition does not match.
-
-[BuildSettings]: /extending/config#user-defined-build-settings
diff --git a/8.2.1/configure/best-practices.mdx b/8.2.1/configure/best-practices.mdx
deleted file mode 100644
index abef72e..0000000
--- a/8.2.1/configure/best-practices.mdx
+++ /dev/null
@@ -1,88 +0,0 @@
----
-title: 'Best Practices'
----
-
-
-
-This page assumes you are familiar with Bazel and provides guidelines and
-advice on structuring your projects to take full advantage of Bazel's features.
-
-The overall goals are:
-
-- To use fine-grained dependencies to allow parallelism and incrementality.
-- To keep dependencies well-encapsulated.
-- To make code well-structured and testable.
-- To create a build configuration that is easy to understand and maintain.
-
-These guidelines are not requirements: few projects will be able to adhere to
-all of them.
-As the man page for lint says, "A special reward will be presented
-to the first person to produce a real program that produces no errors with
-strict checking." However, incorporating as many of these principles as possible
-should make a project more readable, less error-prone, and faster to build.
-
-This page uses the requirement levels described in
-[this RFC](https://www.ietf.org/rfc/rfc2119.txt).
-
-## Running builds and tests
-
-A project should always be able to run `bazel build //...` and
-`bazel test //...` successfully on its stable branch. Targets that are necessary
-but do not build under certain circumstances (for example, targets that require
-specific build flags, don't build on a certain platform, or require license
-agreements) should be tagged as specifically as possible (for example,
-"`requires-osx`"). This tagging allows targets to be filtered at a more
-fine-grained level than the "manual" tag and allows someone inspecting the
-`BUILD` file to understand what a target's restrictions are.
-
-## Third-party dependencies
-
-You may declare third-party dependencies:
-
-* Either declare them as remote repositories in the `MODULE.bazel` file.
-* Or put them in a directory called `third_party/` under your workspace directory.
-
-## Depending on binaries
-
-Everything should be built from source whenever possible. Generally this means
-that, instead of depending on a library `some-library.so`, you'd create a
-`BUILD` file and build `some-library.so` from its sources, then depend on that
-target.
-
-Always building from source ensures that a build is not using a library that
-was built with incompatible flags or a different architecture. There are also
-some features like coverage, static analysis, or dynamic analysis that only
-work on the source.
-
-## Versioning
-
-Prefer building all code from head whenever possible. When versions must be
-used, avoid including the version in the target name (for example, `//guava`,
-not `//guava-20.0`). This naming makes the library easier to update (only one
-target needs to be updated). It's also more resilient to diamond dependency
-issues: if one library depends on `guava-19.0` and one depends on `guava-20.0`,
-you could end up with a library that tries to depend on two different versions.
-If you create an alias to point both targets to one `guava` library, then the
-`BUILD` files are misleading.
-
-## Using the `.bazelrc` file
-
-For project-specific options, use the configuration file
-`workspace/.bazelrc` (see [bazelrc format](/run/bazelrc)).
-
-If you want to support per-user options for your project that you **do not**
-want to check into source control, include the line:
-
-```
-try-import %workspace%/user.bazelrc
-```
-(or any other file name) in your `workspace/.bazelrc`
-and add `user.bazelrc` to your `.gitignore`.
-
-## Packages
-
-Every directory that contains buildable files should be a package. If a `BUILD`
-file refers to files in subdirectories (such as `srcs = ["a/b/C.java"]`) it's
-a sign that a `BUILD` file should be added to that subdirectory. The longer
-this structure exists, the more likely circular dependencies will be
-inadvertently created, a target's scope will creep, and an increasing number
-of reverse dependencies will have to be updated.
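-
-For example, instead of writing `srcs = ["a/b/C.java"]` in the top-level
-package, a sketch of the preferred layout gives `a/b` its own package (the
-`java_library` target below is illustrative):
-
-```python
-# a/b/BUILD
-java_library(
-    name = "b",
-    srcs = glob(["*.java"]),
-    # Expose the library only to the package that needs it.
-    visibility = ["//a:__pkg__"],
-)
-```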
diff --git a/8.2.1/configure/coverage.mdx b/8.2.1/configure/coverage.mdx
deleted file mode 100644
index 9a50db0..0000000
--- a/8.2.1/configure/coverage.mdx
+++ /dev/null
@@ -1,130 +0,0 @@
----
-title: 'Code coverage with Bazel'
---- 
-
-
-
-Bazel features a `coverage` sub-command to produce code coverage
-reports on repositories that can be tested with `bazel coverage`. Due
-to the idiosyncrasies of the various language ecosystems, it is not
-always trivial to make this work for a given project.
-
-This page documents the general process for creating and viewing
-coverage reports, and also features some language-specific notes for
-languages whose configuration is well-known. It is best read by first
-reading [the general section](#creating-a-coverage-report), and then
-reading about the requirements for a specific language. Note also the
-[remote execution section](#remote-execution), which requires some
-additional considerations.
-
-While a lot of customization is possible, this document focuses on
-producing and consuming [`lcov`][lcov] reports, which is currently the
-most well-supported route.
-
-## Creating a coverage report
-
-### Preparation
-
-The basic workflow for creating coverage reports requires the
-following:
-
-- A basic repository with test targets
-- A toolchain with the language-specific code coverage tools installed
-- A correct "instrumentation" configuration
-
-The first two are language-specific and mostly straightforward;
-however, the latter can be more difficult for complex projects.
-
-"Instrumentation" in this case refers to the coverage tools that are
-used for a specific target. Bazel allows turning this on for a
-specific subset of files using the
-[`--instrumentation_filter`](/reference/command-line-reference#flag--instrumentation_filter)
-flag, which specifies a filter for targets that are tested with the
-instrumentation enabled. To enable instrumentation for tests, the
-[`--instrument_test_targets`](/reference/command-line-reference#flag--instrument_test_targets)
-flag is required.
-
-By default, Bazel tries to match the target package(s), and prints the
-relevant filter as an `INFO` message.
-
-### Running coverage
-
-To produce a coverage report, use [`bazel coverage
---combined_report=lcov
-[target]`](/reference/command-line-reference#coverage). This runs the
-tests for the target, generating coverage reports in the lcov format
-for each file.
-
-Once finished, Bazel runs an action that collects all the produced
-coverage files, and merges them into one, which is then finally
-created under `$(bazel info
-output_path)/_coverage/_coverage_report.dat`.
-
-Coverage reports are also produced if tests fail, though note that
-this does not extend to the failed tests - only passing tests are
-reported.
-
-### Viewing coverage
-
-The coverage report is only output in the non-human-readable `lcov`
-format. From this, we can use the `genhtml` utility (part of [the lcov
-project][lcov]) to produce a report that can be viewed in a web
-browser:
-
-```console
-genhtml --branch-coverage --output genhtml "$(bazel info output_path)/_coverage/_coverage_report.dat"
-```
-
-Note that `genhtml` reads the source code as well, to annotate missing
-coverage in these files. For this to work, it is expected that
-`genhtml` is executed in the root of the Bazel project.
-
-To view the result, simply open the `index.html` file produced in the
-`genhtml` directory in any web browser.
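-
-Putting the pieces together, an end-to-end run for a hypothetical
-`//foo:foo_test` target (the target and filter below are illustrative)
-might look like:
-
-```console
-bazel coverage \
-    --combined_report=lcov \
-    --instrumentation_filter=//foo \
-    --instrument_test_targets \
-    //foo:foo_test
-genhtml --branch-coverage --output genhtml \
-    "$(bazel info output_path)/_coverage/_coverage_report.dat"
-```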
-
-For further help and information about the `genhtml` tool, or the
-`lcov` coverage format, see [the lcov project][lcov].
-
-## Remote execution
-
-Running with remote test execution currently has a few caveats:
-
-- The report combination action cannot yet run remotely. This is
-  because Bazel does not consider the coverage output files as part of
-  its graph (see [this issue][remote_report_issue]), and can therefore
-  not correctly treat them as inputs to the combination action. To
-  work around this, use `--strategy=CoverageReport=local`.
-
-  Note: It may be necessary to specify something like
-  `--strategy=CoverageReport=local,remote` instead, if Bazel is set
-  up to try `local,remote`, due to how Bazel resolves strategies.
-- `--remote_download_minimal` and similar flags can also not be used
-  as a consequence of the former.
-- Bazel will currently fail to create coverage information if tests
-  have been cached previously. To work around this,
-  `--nocache_test_results` can be set specifically for coverage runs,
-  although this of course incurs a heavy cost in terms of test times.
-- `--experimental_split_coverage_postprocessing` and
-  `--experimental_fetch_all_coverage_outputs`
-
-  Usually coverage is run as part of the test action, and so by
-  default, we don't get all coverage back as outputs of the remote
-  execution. These flags override the default and obtain the coverage
-  data. See [this issue][split_coverage_issue] for more details.
-
-## Language-specific configuration
-
-### Java
-
-Java should work out-of-the-box with the default configuration. The
-[bazel toolchains][bazel_toolchains] contain everything necessary for
-remote execution, as well, including JUnit.
-
-### Python
-
-See the [`rules_python` coverage docs](https://github.com/bazelbuild/rules_python/blob/main/docs/sphinx/coverage.md)
-for additional steps needed to enable coverage support in Python.
-
-[lcov]: https://github.com/linux-test-project/lcov
-[bazel_toolchains]: https://github.com/bazelbuild/bazel-toolchains
-[remote_report_issue]: https://github.com/bazelbuild/bazel/issues/4685
-[split_coverage_issue]: https://github.com/bazelbuild/bazel/issues/4685
diff --git a/8.2.1/contribute/breaking-changes.mdx b/8.2.1/contribute/breaking-changes.mdx
deleted file mode 100644
index 5dda1b9..0000000
--- a/8.2.1/contribute/breaking-changes.mdx
+++ /dev/null
@@ -1,147 +0,0 @@
----
-title: 'Guide for rolling out breaking changes'
----
-
-
-
-It is inevitable that we will make breaking changes to Bazel. We will have to
-change our designs and fix the things that do not quite work. However, we need
-to make sure that the community and the Bazel ecosystem can follow along. To
-that end, the Bazel project has adopted a
-[backward compatibility policy](/release/backward-compatibility).
-This document describes the process for Bazel contributors to make a breaking
-change in Bazel to adhere to this policy.
-
-1. Follow the [design document policy](/contribute/design-documents).
-
-1. [File a GitHub issue.](#github-issue)
-
-1. [Implement the change.](#implementation)
-
-1. [Update labels.](#labels)
-
-1. [Update repositories.](#update-repos)
-
-1. [Flip the incompatible flag.](#flip-flag)
-
-## GitHub issue
-
-[File a GitHub issue](https://github.com/bazelbuild/bazel/issues)
-in the Bazel repository.
-[See example.](https://github.com/bazelbuild/bazel/issues/6611)
-
-We recommend that:
-
-* The title starts with the name of the flag (the flag name will start with
-  `incompatible_`).
-
-* You add the label
-  [`incompatible-change`](https://github.com/bazelbuild/bazel/labels/incompatible-change).
-
-* The description contains a description of the change and a link to relevant
-  design documents.
-
-* The description contains a migration recipe, to explain to users how they
-  should update their code. Ideally, when the change is mechanical, include a
-  link to a migration tool.
-
-* The description includes an example of the error message users will get if
-  they don't migrate. This will make the GitHub issue more discoverable from
-  search engines. Make sure that the error message is helpful and actionable.
-  When possible, the error message should include the name of the incompatible
-  flag.
-
-For the migration tool, consider contributing to
-[Buildifier](https://github.com/bazelbuild/buildtools/blob/master/buildifier/README.md).
-It is able to apply automated fixes to `BUILD`, `WORKSPACE`, and `.bzl` files.
-It may also report warnings.
-
-## Implementation
-
-Create a new flag in Bazel. The default value must be false. The help text
-should contain the URL of the GitHub issue. As the flag name starts with
-`incompatible_`, it needs metadata tags:
-
-```java
-    metadataTags = {
-        OptionMetadataTag.INCOMPATIBLE_CHANGE,
-    },
-```
-
-In the commit description, add a brief summary of the flag.
-Also add [`RELNOTES:`](release-notes.md) in the following form:
-`RELNOTES: --incompatible_name_of_flag has been added. See #xyz for details`
-
-The commit should also update the relevant documentation, so that there is no
-window of commits in which the code is inconsistent with the docs. Since our
-documentation is versioned, changes to the docs will not be inadvertently
-released prematurely.
-
-## Labels
-
-Once the commit is merged and the incompatible change is ready to be adopted, add the label
-[`migration-ready`](https://github.com/bazelbuild/bazel/labels/migration-ready)
-to the GitHub issue.
-
-If a problem is found with the flag and users are not expected to migrate yet,
-remove the `migration-ready` label.
-
-If you plan to flip the flag in the next major release, add the label
-`breaking-change-X.0` to the issue.
-
-## Updating repositories
-
-Bazel CI tests a list of important projects at
-[Bazel@HEAD + Downstream](https://buildkite.com/bazel/bazel-at-head-plus-downstream). Most of them are often
-dependencies of other Bazel projects, therefore it's important to migrate them to unblock the migration for the broader community. To monitor the migration status of those projects, you can use the [`bazelisk-plus-incompatible-flags` pipeline](https://buildkite.com/bazel/bazelisk-plus-incompatible-flags).
-Check how this pipeline works [here](https://github.com/bazelbuild/continuous-integration/tree/master/buildkite#checking-incompatible-changes-status-for-downstream-projects).
-
-Our dev support team monitors the [`migration-ready`](https://github.com/bazelbuild/bazel/labels/migration-ready) label. Once you add this label to the GitHub issue, they will handle the following:
-
-1. Create a comment in the GitHub issue to track the list of failures and downstream projects that need to be migrated ([see example](https://github.com/bazelbuild/bazel/issues/17032#issuecomment-1353077469))
-
-1. File GitHub issues to notify the owners of every downstream project broken by your incompatible change ([see example](https://github.com/bazelbuild/intellij/issues/4208))
-
-1. Follow up to make sure all issues are addressed before the target release date.
-
-Migrating projects in the downstream pipeline is NOT entirely the responsibility of the incompatible change author, but you can do the following to accelerate the migration and make life easier for both Bazel users and the Bazel Green Team.
-
-1. Send PRs to fix downstream projects.
-
-1. Reach out to the Bazel community for help on migration (e.g. [Bazel Rules Authors SIG](https://bazel-contrib.github.io/SIG-rules-authors/)).
-
-## Flipping the flag
-
-Before flipping the default value of the flag to true, please make sure that:
-
-* Core repositories in the ecosystem are migrated.
-
-  On the [`bazelisk-plus-incompatible-flags` pipeline](https://buildkite.com/bazel/bazelisk-plus-incompatible-flags),
-  the flag should appear under `The following flags didn't break any passing Bazel team owned/co-owned projects`.
-
-* All issues in the checklist are marked as fixed/closed.
-
-* User concerns and questions have been resolved.
-
-When the flag is ready to flip in Bazel, but blocked on internal migration at Google, please consider setting the flag value to false in the internal `blazerc` file to unblock the flag flip. By doing this, we can ensure Bazel users depend on the new behaviour by default as early as possible.
-
-When changing the flag default to true, please:
-
-* Use `RELNOTES[INC]` in the commit description, with the
-  following format:
-  `RELNOTES[INC]: --incompatible_name_of_flag is flipped to true. See #xyz for
-  details`
-  You can include additional information in the rest of the commit description.
-* Use `Fixes #xyz` in the description, so that the GitHub issue gets closed
-  when the commit is merged.
-* Review and update documentation if needed.
-* File a new issue `#abc` to track the removal of the flag.
-
-## Removing the flag
-
-After the flag is flipped at HEAD, it should be removed from Bazel eventually.
-When you plan to remove the incompatible flag:
-
-* Consider leaving more time for users to migrate if it's a major incompatible change.
-  Ideally, the flag should be available in at least one major release.
-* For the commit that removes the flag, use `Fixes #abc` in the description
-  so that the GitHub issue gets closed when the commit is merged.
diff --git a/8.2.1/contribute/codebase.mdx b/8.2.1/contribute/codebase.mdx
deleted file mode 100644
index 8a13611..0000000
--- a/8.2.1/contribute/codebase.mdx
+++ /dev/null
@@ -1,1670 +0,0 @@
----
-title: 'The Bazel codebase'
----
-
-
-
-This document is a description of the codebase and how Bazel is structured. It
-is intended for people willing to contribute to Bazel, not for end-users.
-
-## Introduction
-
-The codebase of Bazel is large (~350 KLOC production code and ~260 KLOC test
-code) and no one is familiar with the whole landscape: everyone knows their
-particular valley very well, but few know what lies over the hills in every
-direction.
-
-In order for people midway upon the journey not to find themselves within a
-forest dark with the straightforward pathway being lost, this document tries to
-give an overview of the codebase so that it's easier to get started with
-working on it.
-
-The public version of the source code of Bazel lives on GitHub at
-[github.com/bazelbuild/bazel](http://github.com/bazelbuild/bazel). This is not
-the "source of truth"; it's derived from a Google-internal source tree that
-contains additional functionality that is not useful outside Google. The
-long-term goal is to make GitHub the source of truth.
-
-Contributions are accepted through the regular GitHub pull request mechanism,
-and manually imported by a Googler into the internal source tree, then
-re-exported back out to GitHub.
-
-## Client/server architecture
-
-The bulk of Bazel resides in a server process that stays in RAM between builds.
-This allows Bazel to maintain state between builds.
-
-This is why the Bazel command line has two kinds of options: startup and
-command. In a command line like this:
-
-```
-    bazel --host_jvm_args=-Xmx8G build -c opt //foo:bar
-```
-
-Some options (`--host_jvm_args=`) are before the name of the command to be run
-and some are after (`-c opt`); the former kind is called a "startup option" and
-affects the server process as a whole, whereas the latter kind, the "command
-option", only affects a single command.
-
-Each server instance has a single associated workspace (collection of source
-trees known as "repositories") and each workspace usually has a single active
-server instance. This can be circumvented by specifying a custom output base
-(see the "Directory layout" section for more information).
-
-Bazel is distributed as a single ELF executable that is also a valid .zip file.
-When you type `bazel`, the above ELF executable implemented in C++ (the
-"client") gets control. It sets up an appropriate server process using the
-following steps:
-
-1. Checks whether it has already extracted itself. If not, it does that. This
-   is where the implementation of the server comes from.
-2. Checks whether there is an active server instance that works: it is running,
-   it has the right startup options and uses the right workspace directory. It
-   finds the running server by looking at the directory `$OUTPUT_BASE/server`
-   where there is a lock file with the port the server is listening on.
-3. If needed, kills the old server process.
-4. If needed, starts up a new server process.
-
-After a suitable server process is ready, the command that needs to be run is
-communicated to it over a gRPC interface, then the output of Bazel is piped back
-to the terminal. Only one command can be running at the same time. This is
-implemented using an elaborate locking mechanism with parts in C++ and parts in
-Java. There is some infrastructure for running multiple commands in parallel,
-since the inability to run `bazel version` in parallel with another command
-is somewhat embarrassing. The main blocker is the life cycle of `BlazeModule`s
-and some state in `BlazeRuntime`.
-
-At the end of a command, the Bazel server transmits the exit code the client
-should return. An interesting wrinkle is the implementation of `bazel run`: the
-job of this command is to run something Bazel just built, but it can't do that
-from the server process because it doesn't have a terminal. So instead it tells
-the client what binary it should `exec()` and with what arguments.
-
-When one presses Ctrl-C, the client translates it to a Cancel call on the gRPC
-connection, which tries to terminate the command as soon as possible. After the
-third Ctrl-C, the client sends a SIGKILL to the server instead.
-
-The source code of the client is under `src/main/cpp` and the protocol used to
-communicate with the server is in `src/main/protobuf/command_server.proto`.
-
-The main entry point of the server is `BlazeRuntime.main()` and the gRPC calls
-from the client are handled by `GrpcServerImpl.run()`.
-
-## Directory layout
-
-Bazel creates a somewhat complicated set of directories during a build.
-A full description is available in
-[Output directory layout](/remote/output-directories).
-
-The "main repo" is the source tree Bazel is run in. It usually corresponds to
-something you checked out from source control. The root of this directory is
-known as the "workspace root".
-
-Bazel puts all of its data under the "output user root". This is usually
-`$HOME/.cache/bazel/_bazel_${USER}`, but can be overridden using the
-`--output_user_root` startup option.
-
-The "install base" is where Bazel is extracted to. This is done automatically
-and each Bazel version gets a subdirectory based on its checksum under the
-install base. It's at `$OUTPUT_USER_ROOT/install` by default and can be changed
-using the `--install_base` command line option.
-
-The "output base" is the place where the Bazel instance attached to a specific
-workspace writes to. Each output base has at most one Bazel server instance
-running at any time. It's usually at `$OUTPUT_USER_ROOT/`. It can be changed
-using the `--output_base` startup option,
-which is, among other things, useful for getting around the limitation that only
-one Bazel instance can be running in any workspace at any given time.
-
-The output directory contains, among other things:
-
-* The fetched external repositories at `$OUTPUT_BASE/external`.
-* The exec root, a directory that contains symlinks to all the source
-  code for the current build. It's located at `$OUTPUT_BASE/execroot`. During
-  the build, the working directory is `$EXECROOT/`. We are planning to change
-  this to `$EXECROOT`, although it's a long-term plan because it's a very
-  incompatible change.
-* Files built during the build.
-
-## The process of executing a command
-
-Once the Bazel server gets control and is informed about a command it needs to
-execute, the following sequence of events happens:
-
-1. `BlazeCommandDispatcher` is informed about the new request. It decides
-   whether the command needs a workspace to run in (almost every command except
-   for ones that don't have anything to do with source code, such as version or
-   help) and whether another command is running.
-
-2. The right command is found. Each command must implement the interface
-   `BlazeCommand` and must have the `@Command` annotation (this is a bit of an
-   antipattern; it would be nice if all the metadata a command needs were
-   described by methods on `BlazeCommand`).
-
-3. The command line options are parsed. Each command has different command line
-   options, which are described in the `@Command` annotation.
-
-4. An event bus is created. The event bus is a stream for events that happen
-   during the build. Some of these are exported to outside of Bazel under the
-   aegis of the Build Event Protocol in order to tell the world how the build
-   goes.
-
-5. The command gets control. The most interesting commands are those that run a
-   build: build, test, run, coverage and so on; this functionality is
-   implemented by `BuildTool`.
-
-6. The set of target patterns on the command line is parsed and wildcards like
-   `//pkg:all` and `//pkg/...` are resolved. This is implemented in
-   `AnalysisPhaseRunner.evaluateTargetPatterns()` and reified in Skyframe as
-   `TargetPatternPhaseValue`.
-
-7. The loading/analysis phase is run to produce the action graph (a directed
-   acyclic graph of commands that need to be executed for the build).
-
-8. The execution phase is run. This means every action required to build the
-   top-level targets that are requested is run.
-
-## Command line options
-
-The command line options for a Bazel invocation are described in an
-`OptionsParsingResult` object, which in turn contains a map from "option
-classes" to the values of the options. An "option class" is a subclass of
-`OptionsBase` and groups command line options together that are related to each
-other. For example:
-
-1. Options related to a programming language (`CppOptions` or `JavaOptions`).
-   These should be a subclass of `FragmentOptions` and are eventually wrapped
-   into a `BuildOptions` object.
-2. Options related to the way Bazel executes actions (`ExecutionOptions`)
-
-These options are designed to be consumed in the analysis phase (either
-through `RuleContext.getFragment()` in Java or `ctx.fragments` in Starlark).
-Some of them (for example, whether to do C++ include scanning or not) are read
-in the execution phase, but that always requires explicit plumbing since
-`BuildConfiguration` is not available then. For more information, see the
-section "Configurations".
-
-**WARNING:** We like to pretend that `OptionsBase` instances are immutable and
-use them that way (such as part of `SkyKey`s). This is not the case, and
-modifying them is a really good way to break Bazel in subtle ways that are hard
-to debug. Unfortunately, making them actually immutable is a large endeavor.
-(Modifying a `FragmentOptions` immediately after construction before anyone else
-gets a chance to keep a reference to it and before `equals()` or `hashCode()` is
-called on it is okay.)
-
-Bazel learns about option classes in the following ways:
-
-1. Some are hard-wired into Bazel (`CommonCommandOptions`)
-2. From the `@Command` annotation on each Bazel command
-3. From `ConfiguredRuleClassProvider` (these are command line options related
-   to individual programming languages)
-4. Starlark rules can also define their own options (see
-   [here](/extending/config))
-
-Each option (excluding Starlark-defined options) is a member variable of a
-`FragmentOptions` subclass that has the `@Option` annotation, which specifies
-the name and the type of the command line option along with some help text.
-
-The Java type of the value of a command line option is usually something simple
-(a string, an integer, a Boolean, a label, etc.). However, we also support
-options of more complicated types; in this case, the job of converting from the
-command line string to the data type falls to an implementation of
-`com.google.devtools.common.options.Converter`.
-
-## The source tree, as seen by Bazel
-
-Bazel is in the business of building software, which happens by reading and
-interpreting the source code. The totality of the source code Bazel operates on
-is called "the workspace" and it is structured into repositories, packages and
-rules.
-
-### Repositories
-
-A "repository" is a source tree on which a developer works; it usually
-represents a single project. Bazel's ancestor, Blaze, operated on a monorepo,
-that is, a single source tree that contains all source code used to run the build.
-Bazel, in contrast, supports projects whose source code spans multiple
-repositories. The repository from which Bazel is invoked is called the "main
-repository", the others are called "external repositories".
-
-A repository is marked by a repo boundary file (`MODULE.bazel`, `REPO.bazel`, or
-in legacy contexts, `WORKSPACE` or `WORKSPACE.bazel`) in its root directory. The
-main repo is the source tree where you're invoking Bazel from.
-External repos are defined in various ways; see the
-[external dependencies overview](/external/overview) for more information.
-
-The code of external repositories is symlinked or downloaded under
-`$OUTPUT_BASE/external`.
-
-When running the build, the whole source tree needs to be pieced together; this
-is done by `SymlinkForest`, which symlinks every package in the main repository
-to `$EXECROOT` and every external repository to either `$EXECROOT/external` or
-`$EXECROOT/..`.
-
-### Packages
-
-Every repository is composed of packages, a collection of related files and
-a specification of the dependencies. These are specified by a file called
-`BUILD` or `BUILD.bazel`. If both exist, Bazel prefers `BUILD.bazel`; the reason
-why `BUILD` files are still accepted is that Bazel's ancestor, Blaze, used this
-file name. However, it turned out to be a commonly used path segment, especially
-on Windows, where file names are case-insensitive.
-
-Packages are independent of each other: changes to the `BUILD` file of a package
-cannot cause other packages to change. The addition or removal of `BUILD` files
-_can_ change other packages, since recursive globs stop at package boundaries
-and thus the presence of a `BUILD` file stops the recursion.
-
-The evaluation of a `BUILD` file is called "package loading". It's implemented
-in the class `PackageFactory`, works by calling the Starlark interpreter and
-requires knowledge of the set of available rule classes. The result of package
-loading is a `Package` object. It's mostly a map from a string (the name of a
-target) to the target itself.
-
-A large chunk of complexity during package loading is globbing: Bazel does not
-require every source file to be explicitly listed and instead can run globs
-(such as `glob(["**/*.java"])`). Unlike the shell, it supports recursive globs that
-descend into subdirectories (but not into subpackages). This requires access to
-the file system and since that can be slow, we implement all sorts of tricks to
-make it run in parallel and as efficiently as possible.
-
-Globbing is implemented in the following classes:
-
-* `LegacyGlobber`, a fast and blissfully Skyframe-unaware globber
-* `SkyframeHybridGlobber`, a version that uses Skyframe and falls back to
-  the legacy globber in order to avoid "Skyframe restarts" (described below)
-
-The `Package` class itself contains some members that are exclusively used to
-parse the "external" package (related to external dependencies) and which do not
-make sense for real packages. This is
-a design flaw because objects describing regular packages should not contain
-fields that describe something else. These include:
-
-* The repository mappings
-* The registered toolchains
-* The registered execution platforms
-
-Ideally, there would be more separation between parsing the "external" package
-from parsing regular packages so that `Package` does not need to cater for the
-needs of both. This is unfortunately difficult to do because the two are
-intertwined quite deeply.
-
-### Labels, Targets, and Rules
-
-Packages are composed of targets, which have the following types:
-
-1. **Files:** things that are either the input or the output of the build. In
-   Bazel parlance, we call them _artifacts_ (discussed elsewhere). Not all
-   files created during the build are targets; it's common for an output of
-   Bazel not to have an associated label.
-2. **Rules:** these describe steps to derive their outputs from their inputs.
-   They are generally associated with a programming language (such as
-   `cc_library`, `java_library` or `py_library`), but there are some
-   language-agnostic ones (such as `genrule` or `filegroup`).
-3. **Package groups:** discussed in the [Visibility](#visibility) section.
-
-The name of a target is called a _Label_. The syntax of labels is
-`@repo//pac/kage:name`, where `repo` is the name of the repository the Label is
-in, `pac/kage` is the directory its `BUILD` file is in and `name` is the path of
-the file (if the label refers to a source file) relative to the directory of the
-package. When referring to a target on the command line, some parts of the label
-can be omitted:
-
-1. If the repository is omitted, the label is taken to be in the main
-   repository.
-2. If the package part is omitted (such as `name` or `:name`), the label is
-   taken to be in the package of the current working directory (relative paths
-   containing uplevel references (..) are not allowed).
-
-A kind of a rule (such as "C++ library") is called a "rule class". Rule classes may
-be implemented either in Starlark (the `rule()` function) or in Java (so-called
-"native rules", type `RuleClass`). In the long term, every language-specific
-rule will be implemented in Starlark, but some legacy rule families (such as Java
-or C++) are still in Java for the time being.
-
-Starlark rule classes need to be imported at the beginning of `BUILD` files
-using the `load()` statement, whereas Java rule classes are "innately" known by
-Bazel, by virtue of being registered with the `ConfiguredRuleClassProvider`.
-
-Rule classes contain information such as:
-
-1. Its attributes (such as `srcs`, `deps`): their types, default values,
-   constraints, etc.
-2. The configuration transitions and aspects attached to each attribute, if any
-3. The implementation of the rule
-4. The transitive info providers the rule "usually" creates
-
-**Terminology note:** In the codebase, we often use "Rule" to mean the target
-created by a rule class. But in Starlark and in user-facing documentation,
-"Rule" should be used exclusively to refer to the rule class itself; the target
-is just a "target". Also note that despite `RuleClass` having "class" in its
-name, there is no Java inheritance relationship between a rule class and targets
-of that type.
-
-## Skyframe
-
-The evaluation framework underlying Bazel is called Skyframe. Its model is that
-everything that needs to be built during a build is organized into a directed
-acyclic graph with edges pointing from each piece of data to its dependencies,
-that is, the other pieces of data that need to be known to construct it.
-
-The nodes in the graph are called `SkyValue`s and their names are called
-`SkyKey`s. Both are deeply immutable; only immutable objects should be
-reachable from them. This invariant almost always holds, and in case it doesn't
-(such as for the individual options classes `BuildOptions`, which is a member of
-`BuildConfigurationValue` and its `SkyKey`) we try really hard not to change
-them or to change them in only ways that are not observable from the outside.
-From this it follows that everything that is computed within Skyframe (such as
-configured targets) must also be immutable.
-
-The most convenient way to observe the Skyframe graph is to run `bazel dump
---skyframe=deps`, which dumps the graph, one `SkyValue` per line. It's best
-to do it for tiny builds, since it can get pretty large.
-
-Skyframe lives in the `com.google.devtools.build.skyframe` package.
-The similarly-named package `com.google.devtools.build.lib.skyframe` contains
-the implementation of Bazel on top of Skyframe. More information about Skyframe
-is available [here](/reference/skyframe).
-
-To evaluate a given `SkyKey` into a `SkyValue`, Skyframe will invoke the
-`SkyFunction` corresponding to the type of the key. During the function's
-evaluation, it may request other dependencies from Skyframe by calling the
-various overloads of `SkyFunction.Environment.getValue()`. This has the
-side-effect of registering those dependencies into Skyframe's internal graph, so
-that Skyframe will know to re-evaluate the function when any of its dependencies
-change. In other words, Skyframe's caching and incremental computation work at
-the granularity of `SkyFunction`s and `SkyValue`s.
-
-Whenever a `SkyFunction` requests a dependency that is unavailable, `getValue()`
-will return null. The function should then yield control back to Skyframe by
-itself returning null. At some later point, Skyframe will evaluate the
-unavailable dependency, then restart the function from the beginning — only this
-time the `getValue()` call will succeed with a non-null result.
-
-A consequence of this is that any computation performed inside the `SkyFunction`
-prior to the restart must be repeated. But this does not include work done to
-evaluate dependency `SkyValues`, which are cached. Therefore, we commonly work
-around this issue by:
-
-1. Declaring dependencies in batches (by using `getValuesAndExceptions()`) to
-   limit the number of restarts.
-2. Breaking up a `SkyValue` into separate pieces computed by different
-   `SkyFunction`s, so that they can be computed and cached independently. This
-   should be done strategically, since it has the potential to increase memory
-   usage.
-3. Storing state between restarts, either using
-   `SkyFunction.Environment.getState()`, or keeping an ad hoc static cache
-   "behind the back of Skyframe". With complex SkyFunctions, state management
-   between restarts can get tricky, so
-   [`StateMachine`s](/contribute/statemachine-guide) were introduced for a
-   structured approach to logical concurrency, including hooks to suspend and
-   resume hierarchical computations within a `SkyFunction`. Example:
-   [`DependencyResolver#computeDependencies`][statemachine_example]
-   uses a `StateMachine` with `getState()` to compute the potentially huge set
-   of direct dependencies of a configured target, which otherwise can result in
-   expensive restarts.
-
-[statemachine_example]: https://developers.google.com/devsite/reference/markdown/links#reference_links
-
-Fundamentally, Bazel needs these types of workarounds because hundreds of
-thousands of in-flight Skyframe nodes are common, and Java's support of
-lightweight threads [does not outperform][virtual_threads] the
-`StateMachine` implementation as of 2023.
-
-[virtual_threads]: /contribute/statemachine-guide#epilogue_eventually_removing_callbacks
-
-## Starlark
-
-Starlark is the domain-specific language people use to configure and extend
-Bazel. It's conceived as a restricted subset of Python that has far fewer types,
-more restrictions on control flow, and most importantly, strong immutability
-guarantees to enable concurrent reads. It is not Turing-complete, which
-discourages some (but not all) users from trying to accomplish general
-programming tasks within the language.
-
-Starlark is implemented in the `net.starlark.java` package.
-It also has an independent Go implementation
-[here](https://github.com/google/starlark-go).
The Java
-implementation used in Bazel is currently an interpreter.
-
-Starlark is used in several contexts, including:
-
-1. **`BUILD` files.** This is where new build targets are defined. Starlark
-   code running in this context only has access to the contents of the `BUILD`
-   file itself and `.bzl` files loaded by it.
-2. **The `MODULE.bazel` file.** This is where external dependencies are
-   defined. Starlark code running in this context only has very limited access
-   to a few predefined directives.
-3. **`.bzl` files.** This is where new build rules, repo rules, and module
-   extensions are defined. Starlark code here can define new functions and load
-   from other `.bzl` files.
-
-The dialects available for `BUILD` and `.bzl` files are slightly different
-because they express different things. A list of differences is available
-[here](/rules/language#differences-between-build-and-bzl-files).
-
-More information about Starlark is available [here](/rules/language).
-
-## The loading/analysis phase
-
-The loading/analysis phase is where Bazel determines what actions are needed to
-build a particular rule. Its basic unit is a "configured target", which is,
-quite sensibly, a (target, configuration) pair.
-
-It's called the "loading/analysis phase" because it can be split into two
-distinct parts, which used to be serialized but can now overlap in time:
-
-1. Loading packages, that is, turning `BUILD` files into the `Package` objects
-   that represent them
-2. Analyzing configured targets, that is, running the implementation of the
-   rules to produce the action graph
-
-Each configured target in the transitive closure of the configured targets
-requested on the command line must be analyzed bottom-up; that is, leaf nodes
-first, then up to the ones on the command line. The inputs to the analysis of
-a single configured target are:
-
-1. **The configuration.** ("how" to build that rule; for example, the target
-   platform but also things like command line options the user wants to be
-   passed to the C++ compiler)
-2. **The direct dependencies.** Their transitive info providers are available
-   to the rule being analyzed. They are called like that because they provide a
-   "roll-up" of the information in the transitive closure of the configured
-   target, such as all the .jar files on the classpath or all the .o files that
-   need to be linked into a C++ binary.
-3. **The target itself.** This is the result of loading the package the target
-   is in. For rules, this includes its attributes, which is usually what
-   matters.
-4. **The implementation of the configured target.** For rules, this can either
-   be in Starlark or in Java. All non-rule configured targets are implemented
-   in Java.
-
-The output of analyzing a configured target is:
-
-1. The transitive info providers that configured targets that depend on it can
-   access
-2. The artifacts it can create and the actions that produce them.
-
-The API offered to Java rules is `RuleContext`, which is the equivalent of the
-`ctx` argument of Starlark rules. Its API is more powerful, but at the same
-time, it's easier to do Bad Things™, for example, to write code whose time or
-space complexity is quadratic (or worse), to make the Bazel server crash with a
-Java exception or to violate invariants (such as by inadvertently modifying an
-`Options` instance or by making a configured target mutable).
-
-The algorithm that determines the direct dependencies of a configured target
-lives in `DependencyResolver.dependentNodeMap()`.
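-
-To make the Starlark side of this concrete, here is a minimal sketch of a rule
-implementation; the rule name, its attributes and the file it writes are made
-up for illustration. The `ctx` parameter is the Starlark counterpart of the
-`RuleContext` described above:
-
-```
-def _example_library_impl(ctx):
-    # Direct dependencies are visible only through the providers they
-    # advertise; this rule just writes a marker file to show an action.
-    out = ctx.actions.declare_file(ctx.label.name + ".stamp")
-    ctx.actions.write(output = out, content = "analyzed\n")
-    return [DefaultInfo(files = depset([out]))]
-
-example_library = rule(
-    implementation = _example_library_impl,
-    attrs = {
-        "srcs": attr.label_list(allow_files = True),
-        "deps": attr.label_list(providers = [DefaultInfo]),
-    },
-)
-```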
-
-### Configurations
-
-Configurations are the "how" of building a target: for what platform, with what
-command line options, etc.
-
-The same target can be built for multiple configurations in the same build. This
-is useful, for example, when the same code is used for a tool that's run during
-the build and for the target code and we are cross-compiling, or when we are
-building a fat Android app (one that contains native code for multiple CPU
-architectures).
-
-Conceptually, the configuration is a `BuildOptions` instance. However, in
-practice, `BuildOptions` is wrapped by `BuildConfiguration` that provides
-additional sundry pieces of functionality. It propagates from the top of the
-dependency graph to the bottom. If it changes, the build needs to be
-re-analyzed.
-
-This results in anomalies like having to re-analyze the whole build if, for
-example, the number of requested test runs changes, even though that only
-affects test targets (we have plans to "trim" configurations so that this is
-not the case, but it's not ready yet).
-
-When a rule implementation needs part of the configuration, it needs to declare
-it in its definition using
-`RuleClass.Builder.requiresConfigurationFragments()`. This is both to avoid
-mistakes (such as Python rules using the Java fragment) and to facilitate
-configuration trimming so that, for example, if Python options change, C++
-targets don't need to be re-analyzed.
-
-The configuration of a rule is not necessarily the same as that of its "parent"
-rule. The process of changing the configuration in a dependency edge is called a
-"configuration transition". It can happen in two places:
-
-1. On a dependency edge. These transitions are specified in
-   `Attribute.Builder.cfg()` and are functions from a `Rule` (where the
-   transition happens) and a `BuildOptions` (the original configuration) to one
-   or more `BuildOptions` (the output configuration).
-2. On any incoming edge to a configured target. These are specified in
-   `RuleClass.Builder.cfg()`.
-
-The relevant classes are `TransitionFactory` and `ConfigurationTransition`.
-
-Configuration transitions are used, for example:
-
-1. To declare that a particular dependency is used during the build and it
-   should thus be built in the execution architecture
-2. To declare that a particular dependency must be built for multiple
-   architectures (such as for native code in fat Android APKs)
-
-If a configuration transition results in multiple configurations, it's called a
-_split transition._
-
-Configuration transitions can also be implemented in Starlark (documentation
-[here](/extending/config)).
-
-### Transitive info providers
-
-Transitive info providers are a way (and the _only_ way) for configured targets
-to learn things about other configured targets that they depend on, and the only
-way to tell things about themselves to other configured targets that depend on
-them. The reason why "transitive" is in their name is that this is usually some
-sort of roll-up of the transitive closure of a configured target.
-
-There is generally a 1:1 correspondence between Java transitive info providers
-and Starlark ones (the exception is `DefaultInfo` which is an amalgamation of
-`FileProvider`, `FilesToRunProvider` and `RunfilesProvider` because that API was
-deemed to be more Starlark-ish than a direct transliteration of the Java one).
-Their key is one of the following things:
-
-1. A Java `Class` object. This is only available for providers that are not
-   accessible from Starlark.
These providers are a subclass of
-   `TransitiveInfoProvider`.
-2. A string. This is legacy and heavily discouraged since it's susceptible to
-   name clashes. Such transitive info providers are direct subclasses of
-   `build.lib.packages.Info`.
-3. A provider symbol. This can be created from Starlark using the `provider()`
-   function and is the recommended way to create new providers. The symbol is
-   represented by a `Provider.Key` instance in Java.
-
-New providers implemented in Java should be implemented using `BuiltinProvider`.
-`NativeProvider` is deprecated (we haven't had time to remove it yet) and
-`TransitiveInfoProvider` subclasses cannot be accessed from Starlark.
-
-### Configured targets
-
-Configured targets are implemented as `RuleConfiguredTargetFactory`. There is a
-subclass for each rule class implemented in Java. Starlark configured targets
-are created through `StarlarkRuleConfiguredTargetUtil.buildRule()`.
-
-Configured target factories should use `RuleConfiguredTargetBuilder` to
-construct their return value. It consists of the following things:
-
-1. Their `filesToBuild`, the hazy concept of "the set of files this rule
-   represents." These are the files that get built when the configured target
-   is on the command line or in the `srcs` of a `genrule`.
-2. Their runfiles, regular and data.
-3. Their output groups. These are various "other sets of files" the rule can
-   build. They can be accessed using the `output_group` attribute of the
-   `filegroup` rule in `BUILD` files and using the `OutputGroupInfo` provider
-   in Java.
-
-### Runfiles
-
-Some binaries need data files to run. A prominent example is tests that need
-input files. This is represented in Bazel by the concept of "runfiles". A
-"runfiles tree" is a directory tree of the data files for a particular binary.
-It is created in the file system as a symlink tree with individual symlinks
-pointing to the files in the source or output trees.
-
-A set of runfiles is represented as a `Runfiles` instance. It is conceptually a
-map from the path of a file in the runfiles tree to the `Artifact` instance that
-represents it. It's a little more complicated than a single `Map` for two
-reasons:
-
-* Most of the time, the runfiles path of a file is the same as its execpath.
-  We use this to save some RAM.
-* There are various legacy kinds of entries in runfiles trees, which also need
-  to be represented.
-
-Runfiles are collected using `RunfilesProvider`: an instance of this class
-represents the runfiles that a configured target (such as a library) and its
-transitive closure need, and they are gathered like a nested set (in fact, they
-are implemented using nested sets under the covers): each target unions the
-runfiles of its dependencies, adds some of its own, then sends the resulting set
-upwards in the dependency graph. A `RunfilesProvider` instance contains two
-`Runfiles` instances, one for when the rule is depended on through the "data"
-attribute and one for every other kind of incoming dependency. This is because a
-target sometimes presents different runfiles when depended on through a data
-attribute than otherwise. This is undesired legacy behavior that we haven't
-gotten around to removing yet.
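-
-A minimal Starlark sketch of this union-and-send-upwards pattern; the rule
-implementation and attribute names are hypothetical:
-
-```
-def _data_tool_impl(ctx):
-    # Start with this target's own data files...
-    runfiles = ctx.runfiles(files = ctx.files.data)
-    # ...then union in the (nested-set-backed) runfiles of each dependency.
-    for dep in ctx.attr.deps:
-        runfiles = runfiles.merge(dep[DefaultInfo].default_runfiles)
-    return [DefaultInfo(runfiles = runfiles)]
-```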
-
-Runfiles of binaries are represented as an instance of `RunfilesSupport`. This
-is different from `Runfiles` because `RunfilesSupport` has the capability of
-actually being built (unlike `Runfiles`, which is just a mapping). This
-necessitates the following additional components:
-
-* **The input runfiles manifest.** This is a serialized description of the
-  runfiles tree. It is used as a proxy for the contents of the runfiles tree
-  and Bazel assumes that the runfiles tree changes if and only if the contents
-  of the manifest change.
-* **The output runfiles manifest.** This is used by runtime libraries that
-  handle runfiles trees, notably on Windows, which sometimes doesn't support
-  symbolic links.
-* **The runfiles middleman.** In order for a runfiles tree to exist, one needs
-  to build the symlink tree and the artifacts the symlinks point to. In order
-  to decrease the number of dependency edges, the runfiles middleman can be
-  used to represent all these.
-* **Command line arguments** for running the binary whose runfiles the
-  `RunfilesSupport` object represents.
-
-### Aspects
-
-Aspects are a way to "propagate computation down the dependency graph". They are
-described for users of Bazel
-[here](/extending/aspects). A good
-motivating example is protocol buffers: a `proto_library` rule should not know
-about any particular language, but building the implementation of a protocol
-buffer message (the "basic unit" of protocol buffers) in any programming
-language should be coupled to the `proto_library` rule so that if two targets in
-the same language depend on the same protocol buffer, it gets built only once.
-
-Just like configured targets, they are represented in Skyframe as a `SkyValue`
-and the way they are constructed is very similar to how configured targets are
-built: they have a factory class called `ConfiguredAspectFactory` that has
-access to a `RuleContext`, but unlike configured target factories, it also knows
-about the configured target it is attached to and its providers.
-
-The set of aspects propagated down the dependency graph is specified for each
-attribute using the `Attribute.Builder.aspects()` function. There are a few
-confusingly-named classes that participate in the process:
-
-1. `AspectClass` is the implementation of the aspect. It can be either in Java
-   (in which case it's a subclass) or in Starlark (in which case it's an
-   instance of `StarlarkAspectClass`). It's analogous to
-   `RuleConfiguredTargetFactory`.
-2. `AspectDefinition` is the definition of the aspect; it includes the
-   providers it requires, the providers it provides and contains a reference to
-   its implementation, that is, the appropriate `AspectClass` instance. It's
-   analogous to `RuleClass`.
-3. `AspectParameters` is a way to parametrize an aspect that is propagated down
-   the dependency graph. It's currently a string-to-string map. A good example
-   of why it's useful is protocol buffers: if a language has multiple APIs, the
-   information as to which API the protocol buffers should be built for should
-   be propagated down the dependency graph.
-4. `Aspect` represents all the data that's needed to compute an aspect that
-   propagates down the dependency graph. It consists of the aspect class, its
-   definition and its parameters.
-5. `RuleAspect` is the function that determines which aspects a particular rule
-   should propagate. It's a `Rule` -> `Aspect` function.
-
-A somewhat unexpected complication is that aspects can attach to other aspects;
-for example, an aspect collecting the classpath for a Java IDE will probably
-want to know about all the .jar files on the classpath, but some of them are
-protocol buffers.
In that case, the IDE aspect will want to attach to the
-(`proto_library` rule + Java proto aspect) pair.
-
-The complexity of aspects on aspects is captured in the class
-`AspectCollection`.
-
-### Platforms and toolchains
-
-Bazel supports multi-platform builds, that is, builds where there may be
-multiple architectures where build actions run and multiple architectures for
-which code is built. These architectures are referred to as _platforms_ in Bazel
-parlance (full documentation
-[here](/extending/platforms)).
-
-A platform is described by a key-value mapping from _constraint settings_ (such as
-the concept of "CPU architecture") to _constraint values_ (such as a particular CPU
-like x86\_64). We have a "dictionary" of the most commonly used constraint
-settings and values in the `@platforms` repository.
-
-The concept of _toolchain_ comes from the fact that depending on what platforms
-the build is running on and what platforms are targeted, one may need to use
-different compilers; for example, a particular C++ toolchain may run on a
-specific OS and be able to target some other OSes. Bazel must determine the C++
-compiler that is used based on the execution and target platforms that are set
-(documentation for toolchains
-[here](/extending/toolchains)).
-
-In order to do this, toolchains are annotated with the set of execution and
-target platform constraints they support. To do this, the definition of
-a toolchain is split into two parts:
-
-1. A `toolchain()` rule that describes the set of execution and target
-   constraints a toolchain supports and tells what kind of toolchain it is
-   (such as C++ or Java; the latter is represented by the `toolchain_type()`
-   rule)
-2. A language-specific rule that describes the actual toolchain (such as
-   `cc_toolchain()`)
-
-This is done in this way because we need to know the constraints for every
-toolchain in order to do toolchain resolution, and language-specific
-`*_toolchain()` rules contain much more information than that, so they take more
-time to load.
-
-Execution platforms are specified in one of the following ways:
-
-1. In the `MODULE.bazel` file using the `register_execution_platforms()`
-   function
-2. On the command line using the `--extra_execution_platforms` command line
-   option
-
-The set of available execution platforms is computed in
-`RegisteredExecutionPlatformsFunction`.
-
-The target platform for a configured target is determined by
-`PlatformOptions.computeTargetPlatform()`. It's a list of platforms because we
-eventually want to support multiple target platforms, but it's not implemented
-yet.
-
-The set of toolchains to be used for a configured target is determined by
-`ToolchainResolutionFunction`. It is a function of:
-
-* The set of registered toolchains (in the `MODULE.bazel` file and the
-  configuration)
-* The desired execution and target platforms (in the configuration)
-* The set of toolchain types that are required by the configured target (in
-  `UnloadedToolchainContextKey`)
-* The set of execution platform constraints of the configured target (the
-  `exec_compatible_with` attribute) and the configuration
-  (`--experimental_add_exec_constraints_to_targets`), in
-  `UnloadedToolchainContextKey`
-
-Its result is an `UnloadedToolchainContext`, which is essentially a map from
-toolchain type (represented as a `ToolchainTypeInfo` instance) to the label of
-the selected toolchain. It's called "unloaded" because it does not contain the
-toolchains themselves, only their labels.
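-
-A sketch of the two-part definition described above; every name is
-hypothetical, and `my_toolchain` stands in for a language-specific rule like
-`cc_toolchain()`:
-
-```
-# The "what kind of toolchain is this" part:
-toolchain_type(name = "my_toolchain_type")
-
-# The constraint annotations, pointing at the language-specific target:
-toolchain(
-    name = "my_toolchain_linux",
-    exec_compatible_with = ["@platforms//os:linux"],
-    target_compatible_with = ["@platforms//cpu:arm64"],
-    toolchain = ":my_toolchain_impl",  # a my_toolchain() target
-    toolchain_type = ":my_toolchain_type",
-)
-```
-
-Registration would then happen in the `MODULE.bazel` file with
-`register_toolchains("//tools:my_toolchain_linux")`.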
-
-Then the toolchains are actually loaded using `ResolvedToolchainContext.load()`
-and used by the implementation of the configured target that requested them.
-
-We also have a legacy system that relies on there being one single "host"
-configuration and target configurations being represented by various
-configuration flags, such as `--cpu`. We are gradually transitioning to the
-above system. In order to handle cases where people rely on the legacy
-configuration values, we have implemented
-[platform mappings](https://docs.google.com/document/d/1Vg_tPgiZbSrvXcJ403vZVAGlsWhH9BUDrAxMOYnO0Ls)
-to translate between the legacy flags and the new-style platform constraints.
-Their code is in `PlatformMappingFunction` and uses a non-Starlark "little
-language".
-
-### Constraints
-
-Sometimes one wants to designate a target as being compatible with only a few
-platforms. Bazel has (unfortunately) multiple mechanisms to achieve this end:
-
-* Rule-specific constraints
-* `environment_group()` / `environment()`
-* Platform constraints
-
-Rule-specific constraints are mostly used within Google for Java rules; they are
-on their way out and they are not available in Bazel, but the source code may
-contain references to them. The attribute that governs this is called
-`constraints=`.
-
-#### environment_group() and environment()
-
-These rules are a legacy mechanism and are not widely used.
-
-All build rules can declare which "environments" they can be built for, where an
-"environment" is an instance of the `environment()` rule.
-
-There are various ways supported environments can be specified for a rule:
-
-1. Through the `restricted_to=` attribute. This is the most direct form of
-   specification; it declares the exact set of environments the rule supports.
-2. Through the `compatible_with=` attribute. This declares environments a rule
-   supports in addition to "standard" environments that are supported by
-   default.
-3. Through the package-level attributes `default_restricted_to=` and
-   `default_compatible_with=`.
-4. Through default specifications in `environment_group()` rules. Every
-   environment belongs to a group of thematically related peers (such as "CPU
-   architectures", "JDK versions" or "mobile operating systems"). The
-   definition of an environment group includes which of these environments
-   should be supported by "default" if not otherwise specified by the
-   `restricted_to=` / `environment()` attributes. A rule with no such
-   attributes inherits all defaults.
-5. Through a rule class default. This overrides global defaults for all
-   instances of the given rule class. This can be used, for example, to make
-   all `*_test` rules testable without each instance having to explicitly
-   declare this capability.
-
-`environment()` is implemented as a regular rule whereas `environment_group()`
-is both a subclass of `Target` that is not a `Rule` (`EnvironmentGroup`) and a
-function that is available by default from Starlark
-(`StarlarkLibrary.environmentGroup()`), which eventually creates an eponymous
-target. This is to avoid a cyclic dependency that would arise because each
-environment needs to declare the environment group it belongs to and each
-environment group needs to declare its default environments.
-
-A build can be restricted to a certain environment with the
-`--target_environment` command line option.
-
-The implementation of the constraint check is in
-`RuleContextConstraintSemantics` and `TopLevelConstraintSemantics`.
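-
-A sketch of how these legacy rules fit together; the package layout and all
-names are hypothetical:
-
-```
-environment(name = "x86_64")
-environment(name = "arm64")
-
-environment_group(
-    name = "cpus",
-    environments = [":x86_64", ":arm64"],
-    defaults = [":x86_64"],
-)
-
-# Opts out of the group's default and only supports arm64.
-cc_library(
-    name = "neon_kernels",
-    srcs = ["neon_kernels.cc"],
-    restricted_to = [":arm64"],
-)
-```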
-
-#### Platform constraints
-
-The current "official" way to describe what platforms a target is compatible
-with is by using the same constraints used to describe toolchains and platforms.
-It was implemented in pull request
-[#10945](https://github.com/bazelbuild/bazel/pull/10945).
-
-### Visibility
-
-If you work on a large codebase with a lot of developers (like at Google), you
-want to take care to prevent everyone else from arbitrarily depending on your
-code. Otherwise, as per [Hyrum's law](https://www.hyrumslaw.com/),
-people _will_ come to rely on behaviors that you considered to be implementation
-details.
-
-Bazel supports this by a mechanism called _visibility_: you can limit which
-targets can depend on a particular target using the
-[visibility](/reference/be/common-definitions#common-attributes) attribute. This
-attribute is a little special because, although it holds a list of labels, these
-labels may encode a pattern over package names rather than a pointer to any
-particular target. (Yes, this is a design flaw.)
-
-This is implemented in the following places:
-
-* The `RuleVisibility` interface represents a visibility declaration. It can
-  be either a constant (fully public or fully private) or a list of labels.
-* Labels can refer either to package groups (predefined lists of packages), to
-  packages directly (`//pkg:__pkg__`) or to subtrees of packages
-  (`//pkg:__subpackages__`). This is different from the command line syntax,
-  which uses `//pkg:*` or `//pkg/...`.
-* Package groups are implemented as their own target (`PackageGroup`) and
-  configured target (`PackageGroupConfiguredTarget`). We could probably
-  replace these with simple rules if we wanted to. Their logic is implemented
-  with the help of: `PackageSpecification`, which corresponds to a
-  single pattern like `//pkg/...`; `PackageGroupContents`, which corresponds
-  to a single `package_group`'s `packages` attribute; and
-  `PackageSpecificationProvider`, which aggregates over a `package_group` and
-  its transitive `includes`.
-* The conversion from visibility label lists to dependencies is done in
-  `DependencyResolver.visitTargetVisibility` and a few other miscellaneous
-  places.
-* The actual check is done in
-  `CommonPrerequisiteValidator.validateDirectPrerequisiteVisibility()`.
-
-### Nested sets
-
-Oftentimes, a configured target aggregates a set of files from its dependencies,
-adds its own, and wraps the aggregate set into a transitive info provider so
-that configured targets that depend on it can do the same. Examples:
-
-* The C++ header files used for a build
-* The object files that represent the transitive closure of a `cc_library`
-* The set of .jar files that need to be on the classpath for a Java rule to
-  compile or run
-* The set of Python files in the transitive closure of a Python rule
-
-If we did this the naive way by using, for example, `List` or `Set`, we'd end up
-with quadratic memory usage: if there is a chain of N rules and each rule adds a
-file, we'd have 1+2+...+N collection members.
-
-In order to get around this problem, we came up with the concept of a
-`NestedSet`. It's a data structure that is composed of other `NestedSet`
-instances and some members of its own, thereby forming a directed acyclic graph
-of sets. They are immutable and their members can be iterated over. We define
-multiple iteration orders (`NestedSet.Order`): preorder, postorder, topological
-(a node always comes after its ancestors) and "don't care, but it should be the
-same each time".
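-
-A minimal Starlark sketch of this aggregation pattern; the provider and
-attribute names are hypothetical:
-
-```
-TransitiveSourcesInfo = provider(fields = ["srcs"])
-
-def _collect_impl(ctx):
-    srcs = depset(
-        direct = ctx.files.srcs,
-        # Reference the dependencies' sets instead of copying their
-        # elements; this is what keeps memory usage linear.
-        transitive = [dep[TransitiveSourcesInfo].srcs for dep in ctx.attr.deps],
-        order = "postorder",
-    )
-    return [TransitiveSourcesInfo(srcs = srcs)]
-```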
-
-As the sketch above shows, the same data structure is called `depset` in
-Starlark.
-
-### Artifacts and Actions
-
-The actual build consists of a set of commands that need to be run to produce
-the output the user wants. The commands are represented as instances of the
-class `Action` and the files are represented as instances of the class
-`Artifact`. They are arranged in a bipartite, directed, acyclic graph called the
-"action graph".
-
-Artifacts come in two kinds: source artifacts (ones that are available
-before Bazel starts executing) and derived artifacts (ones that need to be
-built). Derived artifacts can themselves be multiple kinds:
-
-1. **Regular artifacts.** These are checked for up-to-dateness by computing
-   their checksum, with mtime as a shortcut; we don't checksum the file if its
-   ctime hasn't changed.
-2. **Unresolved symlink artifacts.** These are checked for up-to-dateness by
-   calling readlink(). Unlike regular artifacts, these can be dangling
-   symlinks. Usually used in cases where one then packs up some files into an
-   archive of some sort.
-3. **Tree artifacts.** These are not single files, but directory trees. They
-   are checked for up-to-dateness by checking the set of files in them and
-   their contents. They are represented as a `TreeArtifact`.
-4. **Constant metadata artifacts.** Changes to these artifacts don't trigger a
-   rebuild. This is used exclusively for build stamp information: we don't want
-   to do a rebuild just because the current time changed.
-
-There is no fundamental reason why source artifacts cannot be tree artifacts or
-unresolved symlink artifacts; it's just that we haven't implemented it yet (we
-should, though -- referencing a source directory in a `BUILD` file is one of the
-few known long-standing incorrectness issues with Bazel; we have an
-implementation that kind of works which is enabled by the
-`BAZEL_TRACK_SOURCE_DIRECTORIES=1` JVM property).
-
-A notable kind of `Artifact` is the middleman. Middlemen are indicated by
-`Artifact` instances that are the outputs of `MiddlemanAction`. They are used
-for one special case:
-
-* Runfiles middlemen are used to ensure the presence of a runfiles tree so
-  that one does not separately need to depend on the output manifest and every
-  single artifact referenced by the runfiles tree.
-
-Actions are best understood as a command that needs to be run, the environment
-it needs and the set of outputs it produces. The following things are the main
-components of the description of an action:
-
-* The command line that needs to be run
-* The input artifacts it needs
-* The environment variables that need to be set
-* Annotations that describe the environment (such as platform) it needs to run in
-
-There are also a few other special cases, like writing a file whose content is
-known to Bazel. They are a subclass of `AbstractAction`. Most of the actions are
-a `SpawnAction` or a `StarlarkAction` (they are the same; arguably they should
-not be separate classes), although Java and C++ have their own action types
-(`JavaCompileAction`, `CppCompileAction` and `CppLinkAction`).
-
-We eventually want to move everything to `SpawnAction`; `JavaCompileAction` is
-pretty close, but C++ is a bit of a special-case due to .d file parsing and
-include scanning.
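-
-As a small Starlark-side illustration of declaring an artifact and the action
-that produces it (here, a tree artifact), consider this sketch; the rule
-implementation is hypothetical:
-
-```
-def _gen_tree_impl(ctx):
-    # A tree artifact: a directory whose files are checked collectively.
-    tree = ctx.actions.declare_directory(ctx.label.name + "_out")
-    ctx.actions.run_shell(
-        outputs = [tree],
-        command = "mkdir -p {dir} && echo hello > {dir}/greeting.txt".format(
-            dir = tree.path,
-        ),
-    )
-    return [DefaultInfo(files = depset([tree]))]
-```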
-
-The action graph is mostly "embedded" into the Skyframe graph: conceptually, the
-execution of an action is represented as an invocation of
-`ActionExecutionFunction`. The mapping from an action graph dependency edge to a
-Skyframe dependency edge is described in
-`ActionExecutionFunction.getInputDeps()` and `Artifact.key()` and has a few
-optimizations in order to keep the number of Skyframe edges low:
-
-* Derived artifacts do not have their own `SkyValue`s. Instead,
-  `Artifact.getGeneratingActionKey()` is used to find out the key for the
-  action that generates them.
-* Nested sets have their own Skyframe key.
-
-### Shared actions
-
-Some actions are generated by multiple configured targets; Starlark rules are
-more limited since they are only allowed to put their derived artifacts into a
-directory determined by their configuration and their package (but even so,
-rules in the same package can conflict), but rules implemented in Java can put
-derived artifacts anywhere.
-
-This is considered to be a misfeature, but getting rid of it is really hard
-because it produces significant savings in execution time when, for example, a
-source file needs to be processed somehow and that file is referenced by
-multiple rules (handwave-handwave). This comes at the cost of some RAM: each
-instance of a shared action needs to be stored in memory separately.
-
-If two actions generate the same output file, they must be exactly the same:
-have the same inputs, the same outputs and run the same command line. This
-equivalence relation is implemented in `Actions.canBeShared()` and it is
-verified between the analysis and execution phases by looking at every Action.
-This is implemented in `SkyframeActionExecutor.findAndStoreArtifactConflicts()`
-and is one of the few places in Bazel that requires a "global" view of the
-build.
-
-## The execution phase
-
-This is when Bazel actually starts running build actions, such as commands that
-produce outputs.
-
-The first thing Bazel does after the analysis phase is to determine what
-Artifacts need to be built. The logic for this is encoded in
-`TopLevelArtifactHelper`; roughly speaking, it's the `filesToBuild` of the
-configured targets on the command line and the contents of a special output
-group for the explicit purpose of expressing "if this target is on the command
-line, build these artifacts".
-
-The next step is creating the execution root. Since Bazel has the option to read
-source packages from different locations in the file system (`--package_path`),
-it needs to provide locally executed actions with a full source tree. This is
-handled by the class `SymlinkForest` and works by taking note of every target
-used in the analysis phase and building up a single directory tree that symlinks
-every package with a used target from its actual location. An alternative would
-be to pass the correct paths to commands (taking `--package_path` into account).
-This is undesirable because:
-
-* It changes action command lines when a package is moved from a package path
-  entry to another (used to be a common occurrence)
-* It results in different command lines if an action is run remotely than if
-  it's run locally
-* It requires a command line transformation specific to the tool in use
-  (consider the difference between, say, Java classpaths and C++ include paths)
-* Changing the command line of an action invalidates its action cache entry
-* `--package_path` is slowly and steadily being deprecated
-
-Then, Bazel starts traversing the action graph (the bipartite, directed graph
-composed of actions and their input and output artifacts) and running actions.
-
-The execution of each action is represented by an instance of the `SkyValue`
-class `ActionExecutionValue`.
-
-Since running an action is expensive, we have a few layers of caching that can
-be hit behind Skyframe:
-
-* `ActionExecutionFunction.stateMap` contains data to make Skyframe restarts
-  of `ActionExecutionFunction` cheap
-* The local action cache contains data about the state of the file system
-* Remote execution systems usually also contain their own cache
-
-### The local action cache
-
-This cache is another layer that sits behind Skyframe; even if an action is
-re-executed in Skyframe, it can still be a hit in the local action cache. It
-represents the state of the local file system and it's serialized to disk which
-means that when one starts up a new Bazel server, one can get local action cache
-hits even though the Skyframe graph is empty.
-
-This cache is checked for hits using the method
-`ActionCacheChecker.getTokenIfNeedToExecute()`.
-
-Contrary to its name, it's a map from the path of a derived artifact to the
-action that emitted it. The action is described as:
-
-1. The set of its input and output files and their checksum
-2. Its "action key", which is usually the command line that was executed, but
-   in general, represents everything that's not captured by the checksum of the
-   input files (such as for `FileWriteAction`, it's the checksum of the data
-   that's written)
-
-There is also a highly experimental "top-down action cache" that is still under
-development, which uses transitive hashes to avoid going to the cache as many
-times.
-
-### Input discovery and input pruning
-
-Some actions are more complicated than just having a set of inputs. Changes to
-the set of inputs of an action come in two forms:
-
-* An action may discover new inputs before its execution or decide that some
-  of its inputs are not actually necessary. The canonical example is C++,
-  where it's better to make an educated guess about what header files a C++
-  file uses from its transitive closure so that we don't need to send every
-  file to remote executors; therefore, we have an option not to register every
-  header file as an "input", but scan the source file for transitively
-  included headers and only mark those header files as inputs that are
-  mentioned in `#include` statements (we overestimate so that we don't need to
-  implement a full C preprocessor). This option is currently hard-wired to
-  "false" in Bazel and is only used at Google.
-* An action may realize that some files were not used during its execution. In
-  C++, these are called ".d files": the compiler reports which header files
-  were used after the fact, and in order to avoid the embarrassment of having
-  worse incrementality than Make, Bazel makes use of this fact. This offers a
-  better estimate than the include scanner because it relies on the compiler.
-
-These are implemented using methods on `Action`:
-
-1. `Action.discoverInputs()` is called. It should return a nested set of
-   Artifacts that are determined to be required. These must be source artifacts
-   so that there are no dependency edges in the action graph that don't have an
-   equivalent in the configured target graph.
-2. The action is executed by calling `Action.execute()`.
-3. At the end of `Action.execute()`, the action can call
-   `Action.updateInputs()` to tell Bazel that not all of its inputs were
-   needed. This can result in incorrect incremental builds if a used input is
-   reported as unused.
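-
-On the Starlark side, this pruning facility surfaces as the
-`unused_inputs_list` parameter of `ctx.actions.run()` (mentioned again below).
-A sketch, assuming a hypothetical tool that writes the paths of the inputs it
-did not read into the designated file:
-
-```
-out = ctx.actions.declare_file(ctx.label.name + ".out")
-unused = ctx.actions.declare_file(ctx.label.name + ".unused")
-ctx.actions.run(
-    executable = ctx.executable._compiler,  # hypothetical tool attribute
-    inputs = ctx.files.srcs,
-    outputs = [out, unused],
-    arguments = [out.path, "--unused-report", unused.path] +
-                [f.path for f in ctx.files.srcs],
-    # Bazel prunes the files listed in this output from the action's inputs.
-    unused_inputs_list = unused,
-)
-```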
-
-When an action cache returns a hit on a fresh Action instance (such as created
-after a server restart), Bazel calls `updateInputs()` itself so that the set of
-inputs reflects the result of input discovery and pruning done before.
-
-Starlark actions can make use of this facility by declaring some inputs as
-unused with the `unused_inputs_list=` argument of `ctx.actions.run()`, as
-sketched above.
-
-### Various ways to run actions: Strategies/ActionContexts
-
-Some actions can be run in different ways. For example, a command line can be
-executed locally, locally but in various kinds of sandboxes, or remotely. The
-concept that embodies this is called an `ActionContext` (or `Strategy`, since we
-successfully went only halfway with a rename...)
-
-The life cycle of an action context is as follows:
-
-1. When the execution phase is started, `BlazeModule` instances are asked what
-   action contexts they have. This happens in the constructor of
-   `ExecutionTool`. Action context types are identified by a Java `Class`
-   instance that refers to a sub-interface of `ActionContext`; this is the
-   interface the action context must implement.
-2. The appropriate action context is selected from the available ones and is
-   forwarded to `ActionExecutionContext` and `BlazeExecutor`.
-3. Actions request contexts using `ActionExecutionContext.getContext()` and
-   `BlazeExecutor.getStrategy()` (there should really be only one way to do
-   it…)
-
-Strategies are free to call other strategies to do their jobs; this is used, for
-example, in the dynamic strategy that starts actions both locally and remotely,
-then uses whichever finishes first.
-
-One notable strategy is the one that implements persistent worker processes
-(`WorkerSpawnStrategy`). The idea is that some tools have a long startup time
-and should therefore be reused between actions instead of starting one anew for
-every action. (This does represent a potential correctness issue, since Bazel
-relies on the promise of the worker process that it doesn't carry observable
-state between individual requests.)
-
-If the tool changes, the worker process needs to be restarted. Whether a worker
-can be reused is determined by computing a checksum for the tool in use with
-`WorkerFilesHash`. It relies on knowing which inputs of the action represent
-part of the tool and which represent ordinary inputs; this is determined by the
-creator of the Action: `Spawn.getToolFiles()` and the runfiles of the `Spawn`
-are counted as parts of the tool.
-
-More information about strategies (or action contexts!):
-
-* Information about various strategies for running actions is available
-  [here](https://jmmv.dev/2019/12/bazel-strategies.html).
-* Information about the dynamic strategy, where we run an action both locally
-  and remotely and use whichever finishes first, is available
-  [here](https://jmmv.dev/series.html#Bazel%20dynamic%20execution).
-* Information about the intricacies of executing actions locally is available
-  [here](https://jmmv.dev/2019/11/bazel-process-wrapper.html).
-
-### The local resource manager
-
-Bazel _can_ run many actions in parallel. The number of local actions that
-_should_ be run in parallel differs from action to action: the more resources an
-action requires, the fewer instances should be running at the same time to avoid
-overloading the local machine.
-
-This is implemented in the class `ResourceManager`: each action has to be
-annotated with an estimate of the local resources it requires in the form of a
-`ResourceSet` instance (CPU and RAM).
Then, when action contexts do something
-that requires local resources, they call `ResourceManager.acquireResources()`
-and are blocked until the required resources are available.
-
-A more detailed description of local resource management is available
-[here](https://jmmv.dev/2019/12/bazel-local-resources.html).
-
-### The structure of the output directory
-
-Each action requires a separate place in the output directory where it places
-its outputs. The location of derived artifacts is usually as follows:
-
-```
-$EXECROOT/bazel-out/<configuration>/bin/<package>/<artifact name>
-```
-
-How is the name of the directory that is associated with a particular
-configuration determined? There are two conflicting desirable properties:
-
-1. If two configurations can occur in the same build, they should have
-   different directories so that both can have their own version of the same
-   action; otherwise, if the two configurations disagree about, say, the
-   command line of an action producing the same output file, Bazel doesn't know
-   which action to choose (an "action conflict")
-2. If two configurations represent "roughly" the same thing, they should have
-   the same name so that actions executed in one can be reused for the other if
-   the command lines match: for example, changes to the command line options to
-   the Java compiler should not result in C++ compile actions being re-run.
-
-So far, we have not come up with a principled way of solving this problem, which
-has similarities to the problem of configuration trimming. A longer discussion
-of options is available
-[here](https://docs.google.com/document/d/1fZI7wHoaS-vJvZy9SBxaHPitIzXE_nL9v4sS4mErrG4/edit).
-The main problematic areas are Starlark rules (whose authors usually aren't
-intimately familiar with Bazel) and aspects, which add another dimension to the
-space of things that can produce the "same" output file.
-
-The current approach is that the path segment for the configuration is
-`<CPU>-<compilation mode>` with various suffixes added so that configuration
-transitions implemented in Java don't result in action conflicts. In addition, a
-checksum of the set of Starlark configuration transitions is added so that users
-can't cause action conflicts. It is far from perfect. This is implemented in
-`OutputDirectories.buildMnemonic()` and relies on each configuration fragment
-adding its own part to the name of the output directory.
-
-## Tests
-
-Bazel has rich support for running tests. It supports:
-
-* Running tests remotely (if a remote execution backend is available)
-* Running tests multiple times in parallel (for deflaking or gathering timing
-  data)
-* Sharding tests (splitting test cases in the same test over multiple processes
-  for speed)
-* Re-running flaky tests
-* Grouping tests into test suites
-
-Tests are regular configured targets that have a `TestProvider`, which describes
-how the test should be run:
-
-* The artifacts whose building results in the test being run. This is a "cache
-  status" file that contains a serialized `TestResultData` message
-* The number of times the test should be run
-* The number of shards the test should be split into
-* Some parameters about how the test should be run (such as the test timeout)
-
-### Determining which tests to run
-
-Determining which tests are run is an elaborate process.
-
-First, during target pattern parsing, test suites are recursively expanded. The
-expansion is implemented in `TestsForTargetPatternFunction`. A somewhat
-surprising wrinkle is that if a test suite declares no tests, it refers to
-_every_ test in its package. This is implemented in `Package.beforeBuild()` by
-adding an implicit attribute called `$implicit_tests` to test suite rules.
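-
-For example, this sketch declares a suite that, by leaving the `tests`
-attribute unset, implicitly expands to every test in its package:
-
-```
-test_suite(
-    name = "all_tests",
-    # No tests= attribute: the suite refers to every test in this package
-    # through the generated $implicit_tests attribute.
-)
-```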
-
-Then, tests are filtered for size, tags, timeout and language according to the
-command line options. This is implemented in `TestFilter` and is called from
-`TargetPatternPhaseFunction.determineTests()` during target parsing and the
-result is put into `TargetPatternPhaseValue.getTestsToRunLabels()`. The reason
-why rule attributes which can be filtered for are not configurable is that this
-happens before the analysis phase; therefore, the configuration is not
-available.
-
-This is then processed further in `BuildView.createResult()`: targets whose
-analysis failed are filtered out and tests are split into exclusive and
-non-exclusive tests. It's then put into `AnalysisResult`, which is how
-`ExecutionTool` knows which tests to run.
-
-In order to lend some transparency to this elaborate process, the `tests()`
-query operator (implemented in `TestsFunction`) is available to tell which tests
-are run when a particular target is specified on the command line. It's
-unfortunately a reimplementation, so it probably deviates from the above in
-multiple subtle ways.
-
-### Running tests
-
-The way the tests are run is by requesting cache status artifacts. This then
-results in the execution of a `TestRunnerAction`, which eventually calls the
-`TestActionContext` chosen by the `--test_strategy` command line option that
-runs the test in the requested way.
-
-Tests are run according to an elaborate protocol that uses environment variables
-to tell tests what's expected from them. A detailed description of what Bazel
-expects from tests and what tests can expect from Bazel is available
-[here](/reference/test-encyclopedia). At the
-simplest, an exit code of 0 means success; anything else means failure.
-
-In addition to the cache status file, each test process emits a number of other
-files. They are put in the "test log directory" which is the subdirectory called
-`testlogs` of the output directory of the target configuration:
-
-* `test.xml`, a JUnit-style XML file detailing the individual test cases in
-  the test shard
-* `test.log`, the console output of the test. stdout and stderr are not
-  separated.
-* `test.outputs`, the "undeclared outputs directory"; this is used by tests
-  that want to output files in addition to what they print to the terminal.
-
-There are two things that can happen during test execution that cannot happen
-when building regular targets: exclusive test execution and output streaming.
-
-Some tests need to be executed in exclusive mode, for example not in parallel
-with other tests. This can be elicited either by adding `tags=["exclusive"]` to
-the test rule or running the test with `--test_strategy=exclusive`. Each
-exclusive test is run by a separate Skyframe invocation requesting the execution
-of the test after the "main" build. This is implemented in
-`SkyframeExecutor.runExclusiveTest()`.
-
-Unlike regular actions, whose terminal output is dumped when the action
-finishes, the user can request the output of tests to be streamed so that they
-get informed about the progress of a long-running test. This is specified by the
-`--test_output=streamed` command line option and implies exclusive test
-execution so that outputs of different tests are not interspersed.
-
-This is implemented in the aptly-named `StreamedTestOutput` class and works by
-polling changes to the `test.log` file of the test in question and dumping new
-bytes to the terminal where Bazel runs.
-
-Results of the executed tests are available on the event bus by observing
-various events (such as `TestAttempt`, `TestResult` or `TestingCompleteEvent`).
-They are dumped to the Build Event Protocol and they are emitted to the console
-by `AggregatingTestListener`.
-
-### Coverage collection
-
-Coverage is reported by the tests in LCOV format in the files
-`bazel-testlogs/$PACKAGE/$TARGET/coverage.dat`.
-
-To collect coverage, each test execution is wrapped in a script called
-`collect_coverage.sh`.
-
-This script sets up the environment of the test to enable coverage collection
-and determines where the coverage files are written by the coverage runtime(s).
-It then runs the test. A test may itself run multiple subprocesses and consist
-of parts written in multiple different programming languages (with separate
-coverage collection runtimes). The wrapper script is responsible for converting
-the resulting files to LCOV format if necessary, and for merging them into a
-single file.
-
-The interposition of `collect_coverage.sh` is done by the test strategies and
-requires `collect_coverage.sh` to be on the inputs of the test. This is
-accomplished by the implicit attribute `:coverage_support` which is resolved to
-the value of the configuration flag `--coverage_support` (see
-`TestConfiguration.TestOptions.coverageSupport`).
-
-Some languages do offline instrumentation, meaning that the coverage
-instrumentation is added at compile time (such as C++), and others do online
-instrumentation, meaning that coverage instrumentation is added at execution
-time.
-
-Another core concept is _baseline coverage_. This is the coverage of a library,
-binary, or test if no code in it was run. The problem it solves is that if you
-want to compute the test coverage for a binary, it is not enough to merge the
-coverage of all of the tests because there may be code in the binary that is not
-linked into any test. Therefore, we emit a coverage file for every binary that
-contains only the files we collect coverage for, with no covered lines. The
-baseline coverage file for a target is at
-`bazel-testlogs/$PACKAGE/$TARGET/baseline_coverage.dat`. It is also generated
-for binaries and libraries in addition to tests if you pass the
-`--nobuild_tests_only` flag to Bazel.
-
-Baseline coverage is currently broken.
-
-We track two groups of files for coverage collection for each rule: the set of
-instrumented files and the set of instrumentation metadata files.
-
-The set of instrumented files is just that, a set of files to instrument. For
-online coverage runtimes, this can be used at runtime to decide which files to
-instrument. It is also used to implement baseline coverage.
-
-The set of instrumentation metadata files is the set of extra files a test needs
-to generate the LCOV files Bazel requires from it. In practice, this consists of
-runtime-specific files; for example, gcc emits .gcno files during compilation.
-These are added to the set of inputs of test actions if coverage mode is
-enabled.
-
-Whether or not coverage is being collected is stored in the
-`BuildConfiguration`.
This is handy because it is an easy way to change the test
-action and the action graph depending on this bit, but it also means that if
-this bit is flipped, all targets need to be re-analyzed (some languages, such as
-C++, require different compiler options to emit code that can collect coverage,
-which mitigates this issue somewhat, since then a re-analysis is needed anyway).
-
-The coverage support files are depended on through labels in an implicit
-dependency so that they can be overridden by the invocation policy, which allows
-them to differ between the different versions of Bazel. Ideally, these
-differences would be removed, and we would standardize on one of them.
-
-We also generate a "coverage report" which merges the coverage collected for
-every test in a Bazel invocation. This is handled by
-`CoverageReportActionFactory` and is called from `BuildView.createResult()`. It
-gets access to the tools it needs by looking at the `:coverage_report_generator`
-attribute of the first test that is executed.
-
-## The query engine
-
-Bazel has a
-[little language](/query/guide)
-used to ask it various things about various graphs. The following query kinds
-are provided:
-
-* `bazel query` is used to investigate the target graph
-* `bazel cquery` is used to investigate the configured target graph
-* `bazel aquery` is used to investigate the action graph
-
-Each of these is implemented by subclassing `AbstractBlazeQueryEnvironment`.
-Additional query functions can be implemented by subclassing `QueryFunction`.
-In order to allow streaming query results, instead of collecting them to some
-data structure, a `query2.engine.Callback` is passed to `QueryFunction`, which
-calls it for results it wants to return.
-
-The result of a query can be emitted in various ways: labels, labels and rule
-classes, XML, protobuf and so on. These are implemented as subclasses of
-`OutputFormatter`.
-
-A subtle requirement of some query output formats (proto, definitely) is that
-Bazel needs to emit _all_ the information that package loading provides so that
-one can diff the output and determine whether a particular target has changed.
-As a consequence, attribute values need to be serializable, which is why there
-are so few attribute types and why no attribute holds complex Starlark
-values. The usual workaround is to use a label, and attach the complex
-information to the rule with that label. It's not a very satisfying workaround
-and it would be very nice to lift this requirement.
-
-## The module system
-
-Bazel can be extended by adding modules to it. Each module must subclass
-`BlazeModule` (the name is a relic of the history of Bazel when it used to be
-called Blaze) and gets information about various events during the execution of
-a command.
-
-They are mostly used to implement various pieces of "non-core" functionality
-that only some versions of Bazel (such as the one we use at Google) need:
-
-* Interfaces to remote execution systems
-* New commands
-
-The set of extension points `BlazeModule` offers is somewhat haphazard. Don't
-use it as an example of good design principles.
-
-## The event bus
-
-The main way `BlazeModule`s communicate with the rest of Bazel is by an event bus
-(`EventBus`): a new instance is created for every build, various parts of Bazel
-can post events to it and modules can register listeners for the events they are
-interested in.
For example, the following things are represented as events:
-
-* The list of build targets to be built has been determined
-  (`TargetParsingCompleteEvent`)
-* The top-level configurations have been determined
-  (`BuildConfigurationEvent`)
-* A target was built, successfully or not (`TargetCompleteEvent`)
-* A test was run (`TestAttempt`, `TestSummary`)
-
-Some of these events are represented outside of Bazel in the
-[Build Event Protocol](/remote/bep)
-(they are `BuildEvent`s). This allows not only `BlazeModule`s, but also things
-outside the Bazel process to observe the build. They are accessible either as a
-file that contains protocol messages or Bazel can connect to a server (called
-the Build Event Service) to stream events.
-
-This is implemented in the `build.lib.buildeventservice` and
-`build.lib.buildeventstream` Java packages.
-
-## External repositories
-
-Note: The information in this section is out of date, as code in this area has
-undergone extensive change in the past couple of years. Please refer to
-[external dependencies overview](/external/overview) for more up-to-date
-information.
-
-Whereas Bazel was originally designed to be used in a monorepo (a single source
-tree containing everything one needs to build), Bazel lives in a world where
-this is not necessarily true. "External repositories" are an abstraction used to
-bridge these two worlds: they represent code that is necessary for the build but
-is not in the main source tree.
-
-### The WORKSPACE file
-
-The set of external repositories is determined by parsing the WORKSPACE file.
-For example, a declaration like this:
-
-```
- local_repository(name = "foo", path = "/foo/bar")
-```
-
-results in the repository called `@foo` being available. Where this gets
-complicated is that one can define new repository rules in Starlark files, which
-can then be used to load new Starlark code, which can be used to define new
-repository rules and so on…
-
-To handle this case, the parsing of the WORKSPACE file (in
-`WorkspaceFileFunction`) is split up into chunks delineated by `load()`
-statements. The chunk index is indicated by `WorkspaceFileKey.getIndex()` and
-computing `WorkspaceFileFunction` until index X means evaluating it until the
-Xth `load()` statement.
-
-### Fetching repositories
-
-Before the code of the repository is available to Bazel, it needs to be
-_fetched_. This results in Bazel creating a directory under
-`$OUTPUT_BASE/external/`.
-
-Fetching the repository happens in the following steps:
-
-1. `PackageLookupFunction` realizes that it needs a repository and creates a
-   `RepositoryName` as a `SkyKey`, which invokes `RepositoryLoaderFunction`
-2. `RepositoryLoaderFunction` forwards the request to
-   `RepositoryDelegatorFunction` for unclear reasons (the code says it's to
-   avoid re-downloading things in case of Skyframe restarts, but the reasoning
-   is not very solid)
-3. `RepositoryDelegatorFunction` finds out the repository rule it's asked to
-   fetch by iterating over the chunks of the WORKSPACE file until the requested
-   repository is found
-4. The appropriate `RepositoryFunction` that implements the repository fetching
-   is found; it's either the Starlark implementation of the repository or a
-   hard-coded map for repositories that are implemented in Java.
-
-There are various layers of caching since fetching a repository can be very
-expensive:
-
-1. There is a cache for downloaded files that is keyed by their checksum
-   (`RepositoryCache`).
This requires the checksum to be available in the
-   WORKSPACE file, but that's good for hermeticity anyway. This is shared by
-   every Bazel server instance on the same workstation, regardless of which
-   workspace or output base they are running in.
-2. A "marker file" is written for each repository under `$OUTPUT_BASE/external`
-   that contains a checksum of the rule that was used to fetch it. If the Bazel
-   server restarts but the checksum does not change, it's not re-fetched. This
-   is implemented in `RepositoryDelegatorFunction.DigestWriter`.
-3. The `--distdir` command line option designates another cache that is used to
-   look up artifacts to be downloaded. This is useful in enterprise settings
-   where Bazel should not fetch random things from the Internet. This is
-   implemented by `DownloadManager`.
-
-Once a repository is downloaded, the artifacts in it are treated as source
-artifacts. This poses a problem because Bazel usually checks for up-to-dateness
-of source artifacts by calling `stat()` on them, and these artifacts are also
-invalidated when the definition of the repository they are in changes. Thus,
-the `FileStateValue` for an artifact in an external repository needs to depend
-on its external repository. This is handled by `ExternalFilesHelper`.
-
-### Repository mappings
-
-It can happen that multiple repositories want to depend on the same repository,
-but in different versions (this is an instance of the "diamond dependency
-problem"). For example, if two binaries in separate repositories in the build
-want to depend on Guava, they will presumably both refer to Guava with labels
-starting `@guava//` and expect that to mean different versions of it.
-
-Therefore, Bazel allows one to re-map external repository labels so that the
-string `@guava//` can refer to one Guava repository (such as `@guava1//`) in the
-repository of one binary and another Guava repository (such as `@guava2//`) in
-the repository of the other.
-
-Alternatively, this can also be used to **join** diamonds. If a repository
-depends on `@guava1//`, and another depends on `@guava2//`, repository mapping
-allows one to re-map both repositories to use a canonical `@guava//` repository.
-
-The mapping is specified in the WORKSPACE file as the `repo_mapping` attribute
-of individual repository definitions. It then appears in Skyframe as a member of
-`WorkspaceFileValue`, where it is plumbed to:
-
-* `Package.Builder.repositoryMapping` which is used to transform label-valued
-  attributes of rules in the package by
-  `RuleClass.populateRuleAttributeValues()`
-* `Package.repositoryMapping` which is used in the analysis phase (for
-  resolving things like `$(location)` which are not parsed in the loading
-  phase)
-* `BzlLoadFunction` for resolving labels in `load()` statements
-
-## JNI bits
-
-The server of Bazel is _mostly_ written in Java. The exceptions are the parts
-that Java cannot do by itself, or couldn't do by itself when we implemented
-them. This is mostly limited to interaction with the file system, process
-control and various other low-level things.
-
-The C++ code lives under src/main/native and the Java classes with native
-methods are:
-
-* `NativePosixFiles` and `NativePosixFileSystem`
-* `ProcessUtils`
-* `WindowsFileOperations` and `WindowsFileProcesses`
-* `com.google.devtools.build.lib.platform`
-
-## Console output
-
-Emitting console output seems like a simple thing, but the confluence of running
-multiple processes (sometimes remotely), fine-grained caching, the desire to
-have nice and colorful terminal output, and having a long-running server makes
-it non-trivial.
-
-Right after the RPC call comes in from the client, two `RpcOutputStream`
-instances are created (for stdout and stderr) that forward the data printed into
-them to the client. These are then wrapped in an `OutErr` (a (stdout, stderr)
-pair). Anything that needs to be printed on the console goes through these
-streams. Then these streams are handed over to
-`BlazeCommandDispatcher.execExclusively()`.
-
-Output is by default printed with ANSI escape sequences. When these are not
-desired (`--color=no`), they are stripped by an `AnsiStrippingOutputStream`. In
-addition, `System.out` and `System.err` are redirected to these output streams.
-This is so that debugging information can be printed using
-`System.err.println()` and still end up in the terminal output of the client
-(which is different from that of the server). Care is taken that if a process
-produces binary output (such as `bazel query --output=proto`), no munging of
-stdout takes place.
-
-Short messages (errors, warnings and the like) are expressed through the
-`EventHandler` interface. Notably, these are different from what one posts to
-the `EventBus` (this is confusing). Each `Event` has an `EventKind` (error,
-warning, info, and a few others) and they may have a `Location` (the place in
-the source code that caused the event to happen).
-
-Some `EventHandler` implementations store the events they received. This is used
-to replay information to the UI caused by various kinds of cached processing,
-for example, the warnings emitted by a cached configured target.
-
-Some `EventHandler`s also allow posting events that eventually find their way to
-the event bus (regular `Event`s do _not_ appear there). These are
-implementations of `ExtendedEventHandler` and their main use is to replay cached
-`EventBus` events. These `EventBus` events all implement `Postable`, but not
-everything that is posted to `EventBus` necessarily implements this interface;
-only those that are cached by an `ExtendedEventHandler` do (it would be nice if
-everything did, and most things do, but it's not enforced).
-
-Terminal output is _mostly_ emitted through `UiEventHandler`, which is
-responsible for all the fancy output formatting and progress reporting Bazel
-does. It has two inputs:
-
-* The event bus
-* The event stream piped into it through `Reporter`
-
-The only direct connection the command execution machinery (that is, the rest of
-Bazel) has to the RPC stream to the client is through `Reporter.getOutErr()`,
-which allows direct access to these streams. It's only used when a command needs
-to dump large amounts of possibly binary data (such as `bazel query`).
-
-## Profiling Bazel
-
-Bazel is fast. Bazel is also slow, because builds tend to grow until just the
-edge of what's bearable. For this reason, Bazel includes a profiler which can be
-used to profile builds and Bazel itself. It's implemented in a class that's
-aptly named `Profiler`.
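-
-For example, a call site that wants a block of work to show up in the profile
-follows the try-with-resources pattern described below (the task description
-and the profiled method here are made up for illustration):
-
-```
-// Sketch of the usual pattern: profile() starts a task and returns a
-// Closeable whose close(), via try-with-resources, marks the task's end.
-try (var ignored = Profiler.instance().profile("compute something")) {
-  computeSomething();  // hypothetical: the work being measured
-}
-```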
The profiler is turned on by default, although it records only
-abridged data so that its overhead is tolerable; the
-`--record_full_profiler_data` command line option makes it record everything it
-can.
-
-It emits a profile in the Chrome profiler format; it's best viewed in Chrome.
-Its data model is that of task stacks: one can start tasks and end tasks and
-they are supposed to be neatly nested within each other. Each Java thread gets
-its own task stack. **TODO:** How does this work with actions and
-continuation-passing style?
-
-The profiler is started and stopped in `BlazeRuntime.initProfiler()` and
-`BlazeRuntime.afterCommand()` respectively and attempts to be live for as long
-as possible so that we can profile everything. To add something to the profile,
-call `Profiler.instance().profile()`. It returns a `Closeable`, whose closing
-represents the end of the task. It's best used with try-with-resources
-statements.
-
-We also do rudimentary memory profiling in `MemoryProfiler`. It's also always on
-and it mostly records maximum heap sizes and GC behavior.
-
-## Testing Bazel
-
-Bazel has two main kinds of tests: ones that observe Bazel as a "black box" and
-ones that only run the analysis phase. We call the former "integration tests"
-and the latter "unit tests", although they are more like integration tests that
-are, well, less integrated. We also have some actual unit tests where they are
-necessary.
-
-Of integration tests, we have two kinds:
-
-1. Ones implemented using a very elaborate bash test framework under
-   `src/test/shell`
-2. Ones implemented in Java. These are implemented as subclasses of
-   `BuildIntegrationTestCase`
-
-`BuildIntegrationTestCase` is the preferred integration testing framework as it
-is well-equipped for most testing scenarios. As it is a Java framework, it
-provides debuggability and seamless integration with many common development
-tools. There are many examples of `BuildIntegrationTestCase` classes in the
-Bazel repository.
-
-Analysis tests are implemented as subclasses of `BuildViewTestCase`. There is a
-scratch file system you can use to write `BUILD` files, then various helper
-methods can request configured targets, change the configuration and assert
-various things about the result of the analysis.
diff --git a/8.2.1/contribute/design-documents.mdx b/8.2.1/contribute/design-documents.mdx
deleted file mode 100644
index 1fe70b9..0000000
--- a/8.2.1/contribute/design-documents.mdx
+++ /dev/null
@@ -1,254 +0,0 @@
----
-title: 'Design Documents'
----
-
-
-
-If you're planning to add, change, or remove a user-facing feature, or make a
-*significant architectural change* to Bazel, you **must** write a design
-document and have it reviewed before you can submit the change.
-
-Here are some examples of significant changes:
-
-* Addition or deletion of native build rules
-* Breaking changes to native rules
-* Changes to a native build rule's semantics that affect the behavior of more
-  than a single rule
-* Changes to Bazel's rule definition API
-* Changes to the APIs that Bazel uses to connect to other systems
-* Changes to the Starlark language, semantics, or APIs
-* Changes that could have a pervasive effect on Bazel performance or memory
-  usage (for better or for worse)
-* Changes to widely used internal APIs
-* Changes to flags and the command-line interface.
-
-## Reasons for design reviews
-
-When you write a design document, you can coordinate with other Bazel developers
-and seek guidance from Bazel's core team.
For example, when a proposal adds,
-removes, or modifies any function or object available in BUILD, MODULE.bazel, or
-bzl files, add the [Starlark team](maintainers-guide.md) as reviewers.
-Design documents are reviewed before submission because:
-
-* Bazel is a very complex system; seemingly innocuous local changes can have
-  significant global consequences.
-* The team gets many feature requests from users; such requests need to be
-  evaluated not only for technical feasibility but also for importance relative
-  to other feature requests.
-* Bazel features are frequently implemented by people outside the core team;
-  such contributors have widely varying levels of Bazel expertise.
-* The Bazel team itself has varying levels of expertise; no single team member
-  has a complete understanding of every corner of Bazel.
-* Changes to Bazel must account for backward compatibility and avoid breaking
-  changes.
-
-Bazel's design review policy helps to maximize the likelihood that:
-
-* all feature requests get a baseline level of scrutiny.
-* the right people will weigh in on designs before we've invested in an
-  implementation that may not work.
-
-To help you get started, take a look at the design documents in the
-[Bazel Proposals Repository](https://github.com/bazelbuild/proposals).
-Designs are works in progress, so implementation details can change over time
-and with feedback. The published design documents capture the initial design,
-and *not* the ongoing changes as designs are implemented. Always go to the
-documentation for descriptions of current Bazel functionality.
-
-## Contributor Workflow
-
-As a contributor, you can write a design document, send pull requests, and
-request reviewers for your proposal.
-
-### Write the design document
-
-All design documents must have a header that includes:
-
-* author
-* date of last major change
-* list of reviewers, including one (and only one)
-  [lead reviewer](#lead-reviewer)
-* current status (_draft_, _in review_, _approved_, _rejected_,
-  _being implemented_, _implemented_)
-* link to discussion thread (_to be added after the announcement_)
-
-The document can be written either [as a world-readable Google Doc](#gdocs)
-or [using Markdown](#markdown). Read below for a
-[Markdown / Google Docs comparison](#markdown-versus-gdocs).
-
-Proposals that have a user-visible impact must have a section documenting the
-impact on backward compatibility (and a rollout plan if needed).
-
-### Create a Pull Request
-
-Share your design doc by creating a pull request (PR) to add the document to
-[the design index](https://github.com/bazelbuild/proposals). Add
-your markdown file or a document link to your PR.
-
-When possible, [choose a lead reviewer](#lead-reviewer)
-and cc other reviewers. If you don't choose a lead reviewer, a Bazel
-maintainer will assign one to your PR.
-
-After you create your PR, reviewers can make preliminary comments during the
-code review. For example, the lead reviewer can suggest extra reviewers, or
-point out missing information. The lead reviewer approves the PR when they
-believe the review process can start. This doesn't mean the proposal is perfect
-or will be approved; it means that the proposal contains enough information to
-start the discussion.
-
-### Announce the new proposal
-
-Send an announcement to
-[bazel-dev](https://groups.google.com/forum/#!forum/bazel-dev) when
-the PR is submitted.
-
-You may copy other groups (for example,
-[bazel-discuss](https://groups.google.com/forum/#!forum/bazel-discuss),
-to get feedback from Bazel end-users).
-
-### Iterate with reviewers
-
-Anyone interested can comment on your proposal. Try to answer questions,
-clarify the proposal, and address concerns.
-
-Discussion should happen on the announcement thread. If the proposal is in a
-Google Doc, comments may be used instead (note that anonymous comments are
-allowed).
-
-### Update the status
-
-When iteration is complete, create a new PR to update the status of the
-proposal. Send the PR to the same lead reviewer and cc the other reviewers.
-
-To officially accept the proposal, the lead reviewer approves the PR after
-ensuring that the other reviewers agree with the decision.
-
-There must be at least 1 week between the first announcement and the approval of
-a proposal. This ensures that users have enough time to read the document and
-share their concerns.
-
-Implementation can begin before the proposal is accepted, for example as a
-proof-of-concept or an experiment. However, you cannot submit the change
-before the review is complete.
-
-### Choosing a lead reviewer
-
-A lead reviewer should be a domain expert who is:
-
-* Knowledgeable about the relevant subsystems
-* Objective and capable of providing constructive feedback
-* Available for the entire review period to lead the process
-
-Consider checking the contacts for various [team
-labels](/contribute/maintainers-guide#team-labels).
-
-## Markdown vs Google Docs
-
-Decide what works best for you, since both are accepted.
-
-Benefits of using Google Docs:
-
-* Effective for brainstorming, since it is easy to get started with.
-* Collaborative editing.
-* Quick iteration.
-* Easy way to suggest edits.
-
-Benefits of using Markdown files:
-
-* Clean URLs for linking.
-* Explicit record of revisions.
-* No forgetting to set up access rights before publicizing a link.
-* Easily searchable with search engines.
-* Future-proof: Plain text is not at the mercy of any specific tool
-  and doesn't require an Internet connection.
-* It is possible to update them even if the author is not around anymore.
-* They can be processed automatically (update/detect dead links, fetch
-  list of authors, etc.).
-
-You can choose to first iterate on a Google Doc, and then convert it to
-Markdown for posterity.
-
-### Using Google Docs
-
-For consistency, use the [Bazel design doc template](
-https://docs.google.com/document/d/1cE5zrjrR40RXNg64XtRFewSv6FrLV6slGkkqxBumS1w/edit).
-It includes the necessary header and creates visual
-consistency with other Bazel-related documents. To do that, click on **File** >
-**Make a copy** or click this link to [make a copy of the design doc
-template](https://docs.google.com/document/d/1cE5zrjrR40RXNg64XtRFewSv6FrLV6slGkkqxBumS1w/copy).
-
-To make your document readable to the world, click on
-**Share** > **Advanced** > **Change…**, and
-choose "On - Anyone with the link". If you allow comments on the document,
-anyone can comment anonymously, even without a Google account.
-
-### Using Markdown
-
-Documents are stored on GitHub and use the
-[GitHub flavor of Markdown](https://guides.github.com/features/mastering-markdown/)
-([Specification](https://github.github.com/gfm/)).
-
-Create a PR to update an existing document. Significant changes should be
-reviewed by the document reviewers. Trivial changes (such as typos, formatting)
-can be approved by anyone.
-
-## Reviewer workflow
-
-A reviewer comments on, reviews, and approves design documents.
-
-### General reviewer responsibilities
-
-You're responsible for reviewing design documents, asking for additional
-information if needed, and approving a design that passes the review process.
-
-#### When you receive a new proposal
-
-1. Take a quick look at the document.
-1. Comment if critical information is missing, or if the design doesn't fit
-   with the goals of the project.
-1. Suggest additional reviewers.
-1. Approve the PR when it is ready for review.
-
-#### During the review process
-
-1. Engage in a dialogue with the design author about issues that are problematic
-   or require clarification.
-1. If appropriate, invite comments from non-reviewers who should be aware of
-   the design.
-1. Decide which comments must be addressed by the author as a prerequisite to
-   approval.
-1. Write "LGTM" (_Looks Good To Me_) in the discussion thread when you are
-   happy with the current state of the proposal.
-
-Follow this process for all design review requests. Do not approve designs
-affecting Bazel if they are not in the
-[design index](https://github.com/bazelbuild/proposals).
-
-### Lead reviewer responsibilities
-
-You're responsible for making the go / no-go decision on implementation
-of a pending design. If you're not able to do this, you should identify a
-suitable delegate (reassign the PR to the delegate), or reassign the bug to a
-Bazel manager for further disposition.
-
-#### During the review process
-
-1. Ensure that the comment and design iteration process moves forward
-   constructively.
-1. Prior to approval, ensure that concerns from other reviewers have been
-   resolved.
-
-#### After approval by all reviewers
-
-1. Make sure there has been at least 1 week since the announcement on the
-   mailing list.
-1. Make sure the PR updates the status.
-1. Approve the PR sent by the proposal author.
-
-#### Rejecting designs
-
-1. Make sure the proposal author sends a PR, or send them one.
-1. Make sure the PR updates the status of the document.
-1. Add a comment to the document explaining why the design can't be approved in
-   its current state, and outlining next steps, if any (such as "revisit invalid
-   assumptions and resubmit").
diff --git a/8.2.1/contribute/docs-style-guide.mdx b/8.2.1/contribute/docs-style-guide.mdx
deleted file mode 100644
index f50c9eb..0000000
--- a/8.2.1/contribute/docs-style-guide.mdx
+++ /dev/null
@@ -1,217 +0,0 @@
----
-title: 'Bazel docs style guide'
----
-
-
-
-Thank you for contributing to Bazel's documentation. This serves as a quick
-documentation style guide to get you started. For any style questions not
-answered by this guide, follow the
-[Google developer documentation style guide](https://developers.google.com/style).
-
-## Defining principles
-
-Bazel docs should uphold these principles:
-
-- **Concise.** Use as few words as possible.
-- **Clear.** Use plain language. Write without jargon for a fifth-grade
-  reading level.
-- **Consistent.** Use the same words or phrases for repeated concepts
-  throughout the docs.
-- **Correct.** Write so that the content stays correct for as long as
-  possible by avoiding time-based information and promises for the future.
-
-## Writing
-
-This section contains basic writing tips.
-
-### Headings
-
-- Page-level headings start at H2. (H1 headings are used as page titles.)
-- Make headings as short as is sensible. This way, they fit in the TOC
-  without wrapping.
-
-  - Yes: Permissions
-  - No: A brief note on permissions
-
-- Use sentence case for headings
-
-  - Yes: Set up your workspace
-  - No: Set Up Your Workspace
-
-- Try to make headings task-based or actionable. If a heading is conceptual,
-  it may be based around understanding, but write to what the user does.
-
-  - Yes: Preserving graph order
-  - No: On the preservation of graph order
-
-### Names
-
-- Capitalize proper nouns, such as Bazel and Starlark.
-
-  - Yes: At the end of the build, Bazel prints the requested targets.
-  - No: At the end of the build, bazel prints the requested targets.
-
-- Keep it consistent. Don't introduce new names for existing concepts. Where
-  applicable, use the term defined in the
-  [Glossary](/reference/glossary).
-
-  - For example, if you're writing about issuing commands on a
-    terminal, don't use both terminal and command line on the page.
-
-### Page scope
-
-- Each page should have one purpose and that should be defined at the
-  beginning. This helps readers find what they need more quickly.
-
-  - Yes: This page covers how to install Bazel on Windows.
-  - No: (No introductory sentence.)
-
-- At the end of the page, tell the reader what to do next. For pages where
-  there is no clear action, you can include links to similar concepts,
-  examples, or other avenues for exploration.
-
-### Subject
-
-In Bazel documentation, the audience should primarily be users—the people using
-Bazel to build their software.
-
-- Address your reader as "you". (If for some reason you can't use "you",
-  use gender-neutral language, such as they.)
-  - Yes: To build Java code using Bazel,
-    you must install a JDK.
-  - Maybe: For users to build Java code with Bazel, they must install a JDK.
-  - No: For a user to build Java code with
-    Bazel, he or she must install a JDK.
-
-- If your audience is NOT general Bazel users, define the audience at the
-  beginning of the page or in the section. Other audiences can include
-  maintainers, contributors, migrators, or other roles.
-- Avoid "we". In user docs, there is no author; just tell people what's
-  possible.
-  - Yes: As Bazel evolves, you should update your code base to maintain
-    compatibility.
-  - No: Bazel is evolving, and we will make changes to Bazel that at
-    times will be incompatible and require some changes from Bazel users.
-
-### Temporal
-
-Where possible, avoid terms that orient things in time, such as referencing
-specific dates (Q2 2022) or saying "now", "currently", or "soon." These go
-stale quickly and could be incorrect if it's a future projection. Instead,
-specify a version level, such as "Bazel X.x and higher supports
-\<feature\>", or link to a GitHub issue.
-
-- Yes: Bazel 0.10.0 or later supports
-  remote caching.
-- No: Bazel will soon support remote
-  caching, likely in October 2017.
-
-### Tense
-
-- Use present tense. Avoid past or future tense unless absolutely necessary
-  for clarity.
-  - Yes: Bazel issues an error when it
-    finds dependencies that don't conform to this rule.
-  - No: If Bazel finds a dependency that
-    does not conform to this rule, Bazel will issue an error.
-
-- Where possible, use active voice (where a subject acts upon an object) not
-  passive voice (where an object is acted upon by a subject). Generally,
-  active voice makes sentences clearer because it shows who is responsible. If
-  using active voice detracts from clarity, use passive voice.
-  - Yes: Bazel initiates X and uses the
-    output to build Y.
-  - No: X is initiated by Bazel and then
-    afterward Y will be built with the output.
-
-### Tone
-
-Write with a business-friendly tone.
-
-- Avoid colloquial language. It's harder to translate phrases that are
-  specific to English.
-  - Yes: Good rulesets
-  - No: So what is a good ruleset?
-
-- Avoid overly formal language. Write as though you're explaining the
-  concept to someone who is curious about tech, but doesn't know the details.
-
-## Formatting
-
-### File type
-
-For readability, wrap lines at 80 characters. Long links or code snippets
-may be longer, but should start on a new line.
-
-Note: Where possible, use Markdown instead of HTML in your files. Follow the
-[GitHub Markdown Syntax Guide](https://guides.github.com/features/mastering-markdown/#syntax)
-for recommended Markdown style.
-
-### Links
-
-- Use descriptive link text instead of "here" or "below". This practice
-  makes it easier to scan a doc and is better for screen readers.
-  - Yes: For more details, see [Installing Bazel].
-  - No: For more details, see [here].
-
-- End the sentence with the link, if possible.
-  - Yes: For more details, see [link].
-  - No: See [link] for more information.
-
-### Lists
-
-- Use an ordered list to describe how to accomplish a task with steps.
-- Use an unordered list to list things that aren't task based. (There should
-  still be an order of sorts, such as alphabetical, importance, etc.)
-- Write with parallel structure. For example:
-  1. Make all the list items sentences.
-  1. Start with verbs that are the same tense.
-  1. Use an ordered list if there are steps to follow.
-
-### Placeholders
-
-- Use angle brackets to denote a variable that users should change.
-  In Markdown, escape the angle brackets with a backslash: `\<command\>`.
-  - Yes: `bazel help <command>`: Prints
-    help and options for `<command>`
-  - No: bazel help _command_: Prints help
-    and options for "command"
-
-- Especially for complicated code samples, use placeholders that make sense
-  in context.
-
-### Table of contents
-
-Use the auto-generated TOC supported by the site. Don't add a manual TOC.
-
-## Code
-
-Code samples are developers' best friends. You probably know how to write these
-already, but here are a few tips.
-
-If you're referencing a small snippet of code, you can embed it in a sentence.
-If you want the reader to use the code, such as copying a command, use a code
-block.
-
-### Code blocks
-
-- Keep it short. Eliminate all redundant or unnecessary text from a code
-  sample.
-- In Markdown, specify the type of code block by adding the sample's language,
-  for example:
-
-  ```shell
-  ...
-  ```
-
-- Separate commands and output into different code blocks.
-
-### Inline code formatting
-
-- Use code style for filenames, directories, paths, and small bits of code.
-- Use inline code styling instead of _italics_, "quotes," or **bolding**.
-  - Yes: `bazel help <command>`: Prints
-    help and options for `<command>`
-  - No: bazel help _command_: Prints help
-    and options for "command"
diff --git a/8.2.1/contribute/docs.mdx b/8.2.1/contribute/docs.mdx
deleted file mode 100644
index cc240cc..0000000
--- a/8.2.1/contribute/docs.mdx
+++ /dev/null
@@ -1,43 +0,0 @@
----
-title: 'Contribute to Bazel documentation'
----
-
-
-
-Thank you for contributing to Bazel's documentation! There are a few ways to
-help create better docs for our community.
-
-## Documentation types
-
-This site includes a few types of content.
-
-- - *Narrative documentation*, which is written by technical writers and
-   engineers. Most of this site is narrative documentation that covers
-   conceptual and task-based guides.
- - *Reference documentation*, which is generated documentation from code comments. - You can't make changes to the reference doc pages directly, but instead need - to change their source. - -## Documentation infrastructure - -Bazel documentation is served from Google and the source files are mirrored in -Bazel's GitHub repository. You can make changes to the source files in GitHub. -If approved, you can merge the changes and a Bazel maintainer will update the -website source to publish your updates. - - -## Small changes - -You can approach small changes, such as fixing errors or typos, in a couple of -ways. - - - **Pull request**. You can create a pull request in GitHub with the - [web-based editor](https://docs.github.com/repositories/working-with-files/managing-files/editing-files) or on a branch. - - **Bug**. You can file a bug with details and suggested changes and the Bazel - documentation owners will make the update. - -## Large changes - -If you want to make substantial changes to existing documentation or propose -new documentation, you can either create a pull request or start with a Google -doc and contact the Bazel Owners to collaborate. diff --git a/8.2.1/contribute/index.mdx b/8.2.1/contribute/index.mdx deleted file mode 100644 index ee66772..0000000 --- a/8.2.1/contribute/index.mdx +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: 'Contributing to Bazel' ---- - - - -There are many ways to help the Bazel project and ecosystem. - -## Provide feedback - -As you use Bazel, you may find things that can be improved. -You can help by [reporting issues](http://github.com/bazelbuild/bazel/issues) -when: - - - Bazel crashes or you encounter a bug that can [only be resolved using `bazel - clean`](/run/build#correct-incremental-rebuilds). - - The documentation is incomplete or unclear. You can also report issues - from the page you are viewing by using the "Create issue" - link at the top right corner of the page. - - An error message could be improved. - -## Participate in the community - -You can engage with the Bazel community by: - - - Answering questions [on Stack Overflow]( - https://stackoverflow.com/questions/tagged/bazel). - - Helping other users [on Slack](https://slack.bazel.build). - - Improving documentation or [contributing examples]( - https://github.com/bazelbuild/examples). - - Sharing your experience or your tips, for example, on a blog or social media. - -## Contribute code - -Bazel is a large project and making a change to the Bazel source code -can be difficult. - -You can contribute to the Bazel ecosystem by: - - - Helping rules maintainers by contributing pull requests. - - Creating new rules and open-sourcing them. - - Contributing to Bazel-related tools, for example, migration tools. - - Improving Bazel integration with other IDEs and tools. - -Before making a change, [create a GitHub -issue](http://github.com/bazelbuild/bazel/issues) -or email [bazel-discuss@](mailto:bazel-discuss@googlegroups.com). - -The most helpful contributions fix bugs or add features (as opposed -to stylistic, refactoring, or "cleanup" changes). Your change should -include tests and documentation, keeping in mind backward-compatibility, -portability, and the impact on memory usage and performance. - -To learn about how to submit a change, see the -[patch acceptance process](/contribute/patch-acceptance). - -## Bazel's code description - -Bazel has a large codebase with code in multiple locations. See the [codebase guide](/contribute/codebase) for more details. 
-
-Bazel is organized as follows:
-
-* Client code is in `src/main/cpp` and provides the command-line interface.
-* Protocol buffers are in `src/main/protobuf`.
-* Server code is in `src/main/java` and `src/test/java`.
-  * Core code, which is mostly composed of [Skyframe](/reference/skyframe)
-    and some utilities.
-  * Built-in rules are in `com.google.devtools.build.lib.rules` and in
-    `com.google.devtools.build.lib.bazel.rules`. You might want to read about
-    the [Challenges of Writing Rules](/rules/challenges) first.
-* Java native interfaces are in `src/main/native`.
-* Various tooling for language support is described in the list in the
-  [compiling Bazel](/install/compile-source) section.
-
-
-### Searching Bazel's source code
-
-To quickly search through Bazel's source code, use
-[Bazel Code Search](https://source.bazel.build/). You can navigate Bazel's
-repositories, branches, and files. You can also view history, diffs, and blame
-information. To learn more, see the
-[Bazel Code Search User Guide](/contribute/search).
diff --git a/8.2.1/contribute/maintainers-guide.mdx b/8.2.1/contribute/maintainers-guide.mdx
deleted file mode 100644
index d5edf45..0000000
--- a/8.2.1/contribute/maintainers-guide.mdx
+++ /dev/null
@@ -1,213 +0,0 @@
----
-title: 'Guide for Bazel Maintainers'
----
-
-
-
-This is a guide for the maintainers of the Bazel open source project.
-
-If you are looking to contribute to Bazel, please read [Contributing to
-Bazel](/contribute) instead.
-
-The objectives of this page are to:
-
-1. Serve as the maintainers' source of truth for the project's contribution
-   process.
-1. Set expectations between the community contributors and the project
-   maintainers.
-
-Bazel's [core group of contributors](/contribute/policy) has dedicated
-subteams to manage aspects of the open source project. These are:
-
-* **Release Process**: Manage Bazel's release process.
-* **Green Team**: Grow a healthy ecosystem of rules and tools.
-* **Developer Experience Gardeners**: Encourage external contributions, review
-  issues and pull requests, and make our development workflow more open.
-
-## Releases
-
-* [Release Playbook](https://github.com/bazelbuild/continuous-integration/blob/master/docs/release-playbook.md)
-* [Testing local changes with downstream projects](https://github.com/bazelbuild/continuous-integration/blob/master/docs/downstream-testing.md)
-
-## Continuous Integration
-
-Read the Green team's guide to Bazel's CI infrastructure on the
-[bazelbuild/continuous-integration](https://github.com/bazelbuild/continuous-integration/blob/master/buildkite/README.md)
-repository.
-
-## Lifecycle of an Issue
-
-1. A user creates an issue by choosing one of the
-   [issue templates](https://github.com/bazelbuild/bazel/issues/new/choose)
-   and it enters the pool of [unreviewed open
-   issues](https://github.com/bazelbuild/bazel/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+-label%3Auntriaged+-label%3Ap2+-label%3Ap1+-label%3Ap3+-label%3Ap4+-label%3Ateam-Starlark+-label%3Ateam-Rules-CPP+-label%3Ateam-Rules-Java+-label%3Ateam-XProduct+-label%3Ateam-Android+-label%3Ateam-Apple+-label%3Ateam-Configurability++-label%3Ateam-Performance+-label%3Ateam-Rules-Server+-label%3Ateam-Core+-label%3Ateam-Rules-Python+-label%3Ateam-Remote-Exec+-label%3Ateam-Local-Exec+-label%3Ateam-Bazel).
-1. A member on the Developer Experience (DevEx) subteam rotation reviews the
-   issue.
-   1.
If the issue is **not a bug** or a **feature request**, the DevEx member
-      will usually close the issue and redirect the user to
-      [StackOverflow](https://stackoverflow.com/questions/tagged/bazel) and
-      [bazel-discuss](https://groups.google.com/forum/#!forum/bazel-discuss) for
-      higher visibility on the question.
-   1. If the issue belongs in one of the rules repositories owned by the
-      community, like [rules_apple](https://github.com/bazelbuild/rules_apple),
-      the DevEx member will [transfer this issue](https://docs.github.com/en/free-pro-team@latest/github/managing-your-work-on-github/transferring-an-issue-to-another-repository)
-      to the correct repository.
-   1. If the issue is vague or has missing information, the DevEx member will
-      assign the issue back to the user to request more information before
-      continuing. This usually occurs when the user does not choose the right
-      [issue template](https://github.com/bazelbuild/bazel/issues/new/choose)
-      or provides incomplete information.
-1. After reviewing the issue, the DevEx member decides if the issue requires
-   immediate attention. If it does, they will assign the **P0**
-   [priority](#priority) label and an owner from the list of team leads.
-1. The DevEx member assigns the `untriaged` label and exactly one [team
-   label](#team-labels) for routing.
-1. The DevEx member also assigns exactly one `type:` label, such as `type: bug`
-   or `type: feature request`, according to the type of the issue.
-1. For platform-specific issues, the DevEx member assigns one `platform:` label,
-   such as `platform:apple` for Mac-specific issues.
-1. If the issue is low priority and can be worked on by a new community
-   contributor, the DevEx member assigns the `good first issue` label.
-
-At this stage, the issue enters the pool of [untriaged open
-issues](https://github.com/bazelbuild/bazel/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged).
-
-Each Bazel subteam will triage all issues under labels they own, preferably on a
-weekly basis. The subteam will review and evaluate the issue and provide a
-resolution, if possible. If you are an owner of a team label, see [this section
-](#label-own) for more information.
-
-When an issue is resolved, it can be closed.
-
-## Lifecycle of a Pull Request
-
-1. A user creates a pull request.
-1. If you are a member of a Bazel team and are sending a PR against your own
-   area, you are responsible for assigning your team label and finding the best
-   reviewer.
-1. Otherwise, during daily triage, a DevEx member assigns one
-   [team label](#team-labels) and the team's technical lead (TL) for routing.
-   1. The TL may optionally assign someone else to review the PR.
-1. The assigned reviewer reviews the PR and works with the author until it is
-   approved or dropped.
-1. If approved, the reviewer **imports** the PR's commit(s) into Google's
-   internal version control system for further tests. As Bazel is the same build
-   system used internally at Google, we need to test all PR commits against the
-   internal test suite. This is the reason why we do not merge PRs directly.
-1. If the imported commit passes all internal tests, the commit will be squashed
-   and exported back out to GitHub.
-1. When the commit merges into master, GitHub automatically closes the PR.
-
-
-## My team owns a label. What should I do?
-
-Subteams need to triage all issues in the [labels they own](#team-labels),
-preferably on a weekly basis.
-
-### Issues
-
-1. Filter the list of issues by your team label **and** the `untriaged` label.
-1. Review the issue.
-1.
Identify a [priority level](#priority) and assign the label.
-   1. The issue may have already been prioritized by the DevEx subteam if it's a
-      P0. Re-prioritize if needed.
-   1. Each issue needs to have exactly one [priority label](#priority). If an
-      issue is either P0 or P1, we assume that it is actively being worked on.
-1. Remove the `untriaged` label.
-
-Note that you need to be in the [bazelbuild
-organization](https://github.com/bazelbuild) to be able to add or remove labels.
-
-### Pull Requests
-
-1. Filter the list of pull requests by your team label.
-1. Review open pull requests.
-   1. **Optional**: If you are assigned for the review but are not the right
-      fit for it, re-assign the appropriate reviewer to perform a code review.
-1. Work with the pull request creator to complete a code review.
-1. Approve the PR.
-1. Ensure that all tests pass.
-1. Import the patch to the internal version control system and run the internal
-   presubmits.
-1. Submit the internal patch. If the patch submits and exports successfully, the
-   PR will be closed automatically by GitHub.
-
-## Priority
-
-The following definitions for priority will be used by the maintainers to triage
-issues.
-
-* [**P0**](https://github.com/bazelbuild/bazel/labels/P0) - Major broken
-  functionality that causes a Bazel release (minus release candidates) to be
-  unusable, or a downed service that severely impacts development of the Bazel
-  project. This includes regressions introduced in a new release that block a
-  significant number of users, or an incompatible breaking change that was not
-  compliant with the [Breaking
-  Change](https://docs.google.com/document/d/1q5GGRxKrF_mnwtaPKI487P8OdDRh2nN7jX6U-FXnHL0/edit?pli=1#heading=h.ceof6vpkb3ik)
-  policy. No practical workaround exists.
-* [**P1**](https://github.com/bazelbuild/bazel/labels/P1) - Critical defect or
-  feature which should be addressed in the next release, or a serious issue that
-  impacts many users (including the development of the Bazel project), but a
-  practical workaround exists. Typically does not require immediate action. In
-  high demand and planned in the current quarter's roadmap.
-* [**P2**](https://github.com/bazelbuild/bazel/labels/P2) - Defect or feature
-  that should be addressed but that we are not currently working on. A moderate
-  live issue in a released Bazel version that is inconvenient for a user and
-  that needs to be addressed in a future release, and/or for which an easy
-  workaround exists.
-* [**P3**](https://github.com/bazelbuild/bazel/labels/P3) - Desirable minor bug
-  fix or enhancement with small impact. Not prioritized into Bazel roadmaps or
-  any imminent release; however, community contributions are encouraged.
-* [**P4**](https://github.com/bazelbuild/bazel/labels/P4) - Low priority defect
-  or feature request that is unlikely to get closed. Can also be kept open for a
-  potential re-prioritization if more users are impacted.
-* [**ice-box**](https://github.com/bazelbuild/bazel/issues?q=label%3Aice-box+is%3Aclosed)
-  - Issues that we currently have time neither to deal with nor to accept
-  contributions for. We will close these issues to indicate that
-  nobody is working on them, but will continue to monitor their validity over
-  time and revive them if enough people are impacted and if we happen to have
-  resources to deal with them. As always, feel free to comment or add reactions
-  to these issues even when closed.
- -## Team labels - -* [`team-Android`](https://github.com/bazelbuild/bazel/labels/team-Android): Issues for Android team - * Contact: [ahumesky](https://github.com/ahumesky) -* [`team-Bazel`](https://github.com/bazelbuild/bazel/labels/team-Bazel): General Bazel product/strategy issues - * Contact: [meisterT](https://github.com/meisterT) -* [`team-CLI`](https://github.com/bazelbuild/bazel/labels/team-CLI): Console UI - * Contact: [meisterT](https://github.com/meisterT) -* [`team-Configurability`](https://github.com/bazelbuild/bazel/labels/team-Configurability): Issues for Configurability team. Includes: Core build configuration and transition system. Does *not* include: Changes to new or existing flags - * Contact: [gregestren](https://github.com/gregestren) -* [`team-Core`](https://github.com/bazelbuild/bazel/labels/team-Core): Skyframe, bazel query, BEP, options parsing, bazelrc - * Contact: [haxorz](https://github.com/haxorz) -* [`team-Documentation`](https://github.com/bazelbuild/bazel/labels/team-Documentation): Issues for Documentation team -* [`team-ExternalDeps`](https://github.com/bazelbuild/bazel/labels/team-ExternalDeps): External dependency handling, Bzlmod, remote repositories, WORKSPACE file - * Contact: [meteorcloudy](https://github.com/meteorcloudy) -* [`team-Loading-API`](https://github.com/bazelbuild/bazel/labels/team-Loading-API): BUILD file and macro processing: labels, package(), visibility, glob - * Contact: [brandjon](https://github.com/brandjon) -* [`team-Local-Exec`](https://github.com/bazelbuild/bazel/labels/team-Local-Exec): Issues for Execution (Local) team - * Contact: [meisterT](https://github.com/meisterT) -* [`team-OSS`](https://github.com/bazelbuild/bazel/labels/team-OSS): Issues for Bazel OSS team: installation, release process, Bazel packaging, website, docs infrastructure - * Contact: [meteorcloudy](https://github.com/meteorcloudy) -* [`team-Performance`](https://github.com/bazelbuild/bazel/labels/team-Performance): Issues for Bazel Performance team - * Contact: [meisterT](https://github.com/meisterT) -* [`team-Remote-Exec`](https://github.com/bazelbuild/bazel/labels/team-Remote-Exec): Issues for Execution (Remote) team - * Contact: [coeuvre](https://github.com/coeuvre) -* [`team-Rules-API`](https://github.com/bazelbuild/bazel/labels/team-Rules-API): API for writing rules/aspects: providers, runfiles, actions, artifacts - * Contact: [comius](https://github.com/comius) -* [`team-Rules-CPP`](https://github.com/bazelbuild/bazel/labels/team-Rules-CPP) / [`team-Rules-ObjC`](https://github.com/bazelbuild/bazel/labels/team-Rules-ObjC): Issues for C++/Objective-C rules, including native Apple rule logic - * Contact: [buildbreaker2021](https://github.com/buildbreaker2021) -* [`team-Rules-Java`](https://github.com/bazelbuild/bazel/labels/team-Rules-Java): Issues for Java rules - * Contact: [hvadehra](https://github.com/hvadehra) -* [`team-Rules-Python`](https://github.com/bazelbuild/bazel/labels/team-Rules-Python): Issues for the native Python rules - * Contact: [rickeylev](https://github.com/rickeylev) -* [`team-Rules-Server`](https://github.com/bazelbuild/bazel/labels/team-Rules-Server): Issues for server-side rules included with Bazel - * Contact: [comius](https://github.com/comius) -* [`team-Starlark-Integration`](https://github.com/bazelbuild/bazel/labels/team-Starlark-Integration): Non-API Bazel + Starlark integration. Includes: how Bazel triggers the Starlark interpreter, Stardoc, builtins injection, character encoding. 
Does *not* include: BUILD or .bzl language issues.
-  * Contact: [brandjon](https://github.com/brandjon)
-* [`team-Starlark-Interpreter`](https://github.com/bazelbuild/bazel/labels/team-Starlark-Interpreter): Issues for the Starlark interpreter (anything in [java.net.starlark](https://github.com/bazelbuild/bazel/tree/master/src/main/java/net/starlark/java)). BUILD and .bzl API issues (which represent Bazel's *integration* with Starlark) go in `team-Build-Language`.
-  * Contact: [brandjon](https://github.com/brandjon)
-
-For new issues, we deprecated the `category: *` labels in favor of the team
-labels.
-
-See the full list of labels [here](https://github.com/bazelbuild/bazel/labels).
diff --git a/8.2.1/contribute/naming.mdx b/8.2.1/contribute/naming.mdx
deleted file mode 100644
index 144b08a..0000000
--- a/8.2.1/contribute/naming.mdx
+++ /dev/null
@@ -1,73 +0,0 @@
----
-title: 'Naming a Bazel related project'
----
-
-
-
-First, thank you for contributing to the Bazel ecosystem! Please reach out to
-the Bazel community on the
-[bazel-discuss mailing list](https://groups.google.com/forum/#!forum/bazel-discuss)
-to share your project and its suggested name.
-
-If you are building a Bazel-related tool or sharing your Starlark rules,
-we recommend following these guidelines for the name of your project:
-
-## Naming Starlark rules
-
-See [Deploying new Starlark rules](/rules/deploying)
-in the docs.
-
-## Naming other Bazel related tools
-
-This section applies if you are building a tool to enrich the Bazel ecosystem.
-For example, a new IDE plugin or a new build system migrator.
-
-Picking a good name for your tool can be hard. If we're not careful and use too
-many codenames, the Bazel ecosystem could become very difficult to understand
-for newcomers.
-
-Follow these guidelines for naming Bazel tools:
-
-1. Prefer **not introducing a new brand name**: "*Bazel*" is already a new brand
-for our users; we should avoid confusing them with too many new names.
-
-2. Prefer **using a name that includes "Bazel"**: This helps to express that it
-is a Bazel-related tool; it also helps people find it with a search engine.
-
-3. Prefer **using names that are descriptive about what the tool is doing**:
-Ideally, the name should not need a subtitle for users to have a first good
-guess at what the tool does. Using English words separated by spaces is a good
-way to achieve this.
-
-4. **It is not a requirement to use a floral or food theme**: Bazel evokes
-[basil](https://en.wikipedia.org/wiki/Basil), the plant. You do not need to
-look for a name that is a plant or food, or that relates to "basil."
-
-5. **If your tool relates to another third-party brand, use it only as a
-descriptor**: For example, use "Bazel migrator for CMake" instead of
-"CMake Bazel migrator".
-
-These guidelines also apply to the GitHub repository URL. Reading the repository
-URL should help people understand what the tool does. Of course, the repository
-name can be shorter, and it must use lowercase letters and dashes instead of
-spaces.
-
-
-Examples of good names:
-
-* *Bazel for Eclipse*: Users will understand that if they want to use Bazel
-  with Eclipse, this is where they should be looking. It uses a third-party
-  brand as a descriptor.
-* *Bazel buildfarm*: A "buildfarm" is a
-  [compile farm](https://en.wikipedia.org/wiki/Compile_farm). Users
-  will understand that this project relates to building on servers.
-
-Examples of names to avoid:
-
-* *Ocimum*: The [scientific name of basil](https://en.wikipedia.org/wiki/Ocimum)
-  does not relate enough to the Bazel project.
-* *Bazelizer*: The tool behind this name could do a lot of things; this name is
-  not descriptive enough.
-
-Note that these recommendations are aligned with the
-[guidelines](https://opensource.google.com/docs/releasing/preparing/#name)
-Google uses when open sourcing a project.
diff --git a/8.2.1/contribute/patch-acceptance.mdx b/8.2.1/contribute/patch-acceptance.mdx
deleted file mode 100644
index 87376af..0000000
--- a/8.2.1/contribute/patch-acceptance.mdx
+++ /dev/null
@@ -1,52 +0,0 @@
----
-title: 'Patch Acceptance Process'
----
-
-
-
-This page outlines how contributors can propose and make changes to the Bazel
-code base.
-
-1. Read the [Bazel Contribution policy](/contribute/policy).
-1. Create a [GitHub issue](https://github.com/bazelbuild/bazel/) to
-   discuss your plan and design. Pull requests that change or add behavior
-   need a corresponding issue for tracking.
-1. If you're proposing significant changes, write a
-   [design document](/contribute/design-documents).
-1. Ensure you've signed a [Contributor License
-   Agreement](https://cla.developers.google.com).
-1. Prepare a git commit that implements the feature. Don't forget to add tests
-   and update the documentation. If your change has user-visible effects, please
-   [add release notes](/contribute/release-notes). If it is an incompatible change,
-   read the [guide for rolling out breaking changes](/contribute/breaking-changes).
-1. Create a pull request on
-   [GitHub](https://github.com/bazelbuild/bazel/pulls). If you're new to GitHub,
-   read [about pull
-   requests](https://help.github.com/articles/about-pull-requests/). Note that
-   we restrict permissions to create branches on the main Bazel repository, so
-   you will need to push your commit to [your own fork of the
-   repository](https://help.github.com/articles/working-with-forks/).
-1. A Bazel maintainer should assign you a reviewer within two business days
-   (excluding holidays in the USA and Germany). If you aren't assigned a
-   reviewer in that time, you can request one by emailing
-   [bazel-discuss@googlegroups.com](mailto:bazel-discuss@googlegroups.com).
-1. Work with the reviewer to complete a code review. For each change, create a
-   new commit and push it to make changes to your pull request. If the review
-   takes too long (for instance, if the reviewer is unresponsive), send an email to
-   [bazel-discuss@googlegroups.com](mailto:bazel-discuss@googlegroups.com).
-1. After your review is complete, a Bazel maintainer applies your patch to
-   Google's internal version control system.
-
-   This triggers internal presubmit checks
-   that may suggest more changes. If you haven't expressed a preference, the
-   maintainer submitting your change adds "trivial" changes (such as
-   [linting](https://en.wikipedia.org/wiki/Lint_(software))) that don't affect
-   design. If deeper changes are required or you'd prefer to apply
-   changes directly, you and the reviewer should communicate preferences
-   clearly in review comments.
-
-   After internal submission, the patch is exported as a Git commit,
-   at which point the GitHub pull request is closed. All final changes
-   are attributed to you.
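-
-As a sketch of the fork-based flow in steps 5 and 6 above (the fork URL and
-branch name are placeholders, not real values):
-
-```shell
-# Work on a feature branch in your own fork, since creating branches on
-# bazelbuild/bazel itself is restricted.
-git clone https://github.com/<your-username>/bazel.git
-cd bazel
-git checkout -b my-feature
-# ...edit, add tests, update docs...
-git commit -a
-git push origin my-feature   # then open a pull request on GitHub
-```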
diff --git a/8.2.1/contribute/policy.mdx b/8.2.1/contribute/policy.mdx
deleted file mode 100644
index 1bf0029..0000000
--- a/8.2.1/contribute/policy.mdx
+++ /dev/null
@@ -1,78 +0,0 @@
----
-translation: human
-page_type: lcat
-title: 'Contribution policy'
----
-
-
-
-This page covers Bazel's governance model and contribution policy.
-
-## Governance model
-
-The [Bazel project](https://github.com/bazelbuild) is led and managed by Google
-and has a large community of contributors outside of Google. Some Bazel
-components (such as specific rules repositories under the
-[bazelbuild](https://github.com/bazelbuild) organization) are led,
-maintained, and managed by members of the community. The Google Bazel team
-reviews suggestions to add community-owned repositories (such as rules) to the
-[bazelbuild](https://github.com/bazelbuild) GitHub organization.
-
-### Contributor roles
-
-Here is an outline of the roles in the Bazel project, including their
-responsibilities:
-
-* **Owners**: The Google Bazel team. Owners are responsible for:
-  * Strategy, maintenance, and leadership of the Bazel project.
-  * Building and maintaining Bazel's core functionality.
-  * Appointing Maintainers and approving new repositories.
-* **Maintainers**: The Google Bazel team and designated GitHub users.
-  Maintainers are responsible for:
-  * Building and maintaining the primary functionality of their repository.
-  * Reviewing and approving contributions to areas of the Bazel code base.
-  * Supporting users and contributors with timely and transparent issue
-    management, PR review, and documentation.
-  * Releasing, testing, and collaborating with Bazel Owners.
-* **Contributors**: All users who contribute code or documentation to the
-  Bazel project.
-  * Creating well-written PRs to contribute to Bazel's codebase and
-    documentation.
-  * Using standard channels, such as GitHub Issues, to propose changes and
-    report issues.
-
-### Becoming a Maintainer
-
-Bazel Owners may appoint Maintainers to lead well-defined areas of code, such as
-rule sets. Contributors with a record of consistent, responsible past
-contributions who are planning major contributions in the future may be
-considered for appointment as Maintainers.
-
-## Contribution policy
-
-The Bazel project accepts contributions from external contributors. Here are the
-contribution policies for Google-managed and Community-managed areas of code.
-
-* **Licensing**. All Maintainers and Contributors must sign
-  [Google's Contributor License Agreement](https://cla.developers.google.com/clas).
-* **Contributions**. Owners and Maintainers should make every effort to accept
-  worthwhile contributions. All contributions must be:
-  * Well written and well tested.
-  * Discussed and approved by the Maintainers of the relevant area of code.
-    Discussions and approvals happen on GitHub Issues and in GitHub PRs.
-    Larger contributions require a
-    [design review](/contribute/design-documents).
-  * Added to Bazel's Continuous Integration system if not already present.
-  * Supportable and aligned with Bazel product direction.
-* **Code review**. All changes in all `bazelbuild` repositories require
-  review:
-  * All PRs must be approved by an Owner or Maintainer.
-  * Only Owners and Maintainers can merge PRs.
-* **Compatibility**. Owners may need to reject or request modifications to PRs
-  in the unlikely event that the change requires substantial modifications to
-  internal Google systems.
-* **Documentation**.
Where relevant, feature contributions should include
-  documentation updates.
-
-For more details on contributing to Bazel, see our
-[contribution guidelines](/contribute/).
diff --git a/8.2.1/contribute/release-notes.mdx b/8.2.1/contribute/release-notes.mdx
deleted file mode 100644
index 83e1d75..0000000
--- a/8.2.1/contribute/release-notes.mdx
+++ /dev/null
@@ -1,78 +0,0 @@
----
-title: 'Writing release notes'
----
-
-
-
-This document is targeted at Bazel contributors.
-
-Commit descriptions in Bazel include a `RELNOTES:` tag followed by a release
-note. This is used by the Bazel team to track changes in each release and write
-the release announcement.
-
-## Overview
-
-* Is your change a bugfix? In that case, you don't need a release note. Please
-  include a reference to the GitHub issue.
-
-* If the change adds / removes / changes Bazel in a user-visible way, then it
-  may be advantageous to mention it.
-
-If the change is significant, follow the [design document
-policy](/contribute/design-documents) first.
-
-## Guidelines
-
-Release notes will be read by our users, so they should be short (ideally one
-sentence), avoid jargon (Bazel-internal terminology), and focus on what the
-change is about.
-
-* Include a link to the relevant documentation. Almost any release note should
-  contain a link. If the description mentions a flag, a feature, or a command
-  name, users will probably want to know more about it.
-
-* Use backquotes around code, symbols, flags, or any word containing an
-  underscore.
-
-* Do not just copy and paste bug descriptions. They are often cryptic and only
-  make sense to us and leave the user scratching their head. Release notes are
-  meant to explain what has changed and why in user-understandable language.
-
-* Always use present tense and the format "Bazel now supports Y" or "X now does
-  Z." We don't want our release notes to sound like bug entries. All release
-  note entries should be informative and use a consistent style and language.
-
-* If something has been deprecated or removed, use "X has been deprecated" or "X
-  has been removed." Not "is removed" or "was removed."
-
-* If Bazel now does something differently, use "X now $newBehavior instead of
-  $oldBehavior" in present tense. This lets the user know in detail what to
-  expect when they use the new release.
-
-* If Bazel now supports or no longer supports something, use "Bazel now supports
-  / no longer supports X".
-
-* Explain why something has been removed / deprecated / changed. One sentence is
-  enough, but we want the user to be able to evaluate the impact on their builds.
-
-* Do NOT make any promises about future functionality. Avoid "this flag will be
-  removed" or "this will be changed." It introduces uncertainty. The first thing
-  the user will wonder is "when?" and we don't want them to start worrying about
-  their current builds breaking at some unknown time.
-
-## Process
-
-As part of the [release
-process](https://github.com/bazelbuild/continuous-integration/blob/master/docs/release-playbook.md),
-we collect the `RELNOTES` tags of every commit. We copy everything into a [Google
-Doc](https://docs.google.com/document/d/1wDvulLlj4NAlPZamdlEVFORks3YXJonCjyuQMUQEmB0/edit)
-where we review, edit, and organize the notes.
-
-The release manager sends an email to the
-[bazel-dev](https://groups.google.com/forum/#!forum/bazel-dev) mailing list.
-Bazel contributors are invited to contribute to the document and make sure
-their changes are correctly reflected in the announcement.
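-
-For concreteness, a commit description following the guidelines above might end
-with a note like this (the flag and documentation link are invented for
-illustration):
-
-```
-RELNOTES: `bazel build` now supports `--some_new_flag`, which skips
-uninteresting outputs. See /docs/some-new-flag for details.
-```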
-
-Later, the announcement will be submitted to the [Bazel
-blog](https://blog.bazel.build/), using the [bazel-blog
-repository](https://github.com/bazelbuild/bazel-blog/tree/master/_posts).
diff --git a/8.2.1/contribute/statemachine-guide.mdx b/8.2.1/contribute/statemachine-guide.mdx
deleted file mode 100644
index e98a96e..0000000
--- a/8.2.1/contribute/statemachine-guide.mdx
+++ /dev/null
@@ -1,1236 +0,0 @@
----
-title: 'A Guide to Skyframe `StateMachine`s'
----
-
-
-
-## Overview
-
-A Skyframe `StateMachine` is a *deconstructed* function-object that resides on
-the heap. It supports flexible evaluation without redundancy[^1] when
-required values are not immediately available but computed asynchronously. The
-`StateMachine` cannot tie up a thread resource while waiting, but instead has to
-be suspended and resumed. The deconstruction thus exposes explicit re-entry
-points so that prior computations can be skipped.
-
-`StateMachine`s can be used to express sequences, branching, and structured
-logical concurrency, and are tailored specifically for Skyframe interaction.
-`StateMachine`s can be composed into larger `StateMachine`s and share
-sub-`StateMachine`s. Concurrency is always hierarchical by construction and
-purely logical. Every concurrent subtask runs in the single shared parent
-SkyFunction thread.
-
-## Introduction
-
-This section briefly motivates and introduces `StateMachine`s, found in the
-[`java.com.google.devtools.build.skyframe.state`](https://github.com/bazelbuild/bazel/tree/master/src/main/java/com/google/devtools/build/skyframe/state)
-package.
-
-### A brief introduction to Skyframe restarts
-
-Skyframe is a framework that performs parallel evaluation of dependency graphs.
-Each node in the graph corresponds to the evaluation of a SkyFunction with a
-SkyKey specifying its parameters and a SkyValue specifying its result. The
-computational model is such that a SkyFunction may look up SkyValues by SkyKey,
-triggering recursive, parallel evaluation of additional SkyFunctions. When a
-requested SkyValue is not yet ready because some subgraph of computation is
-incomplete, the requesting SkyFunction observes a `null` `getValue` response
-and, instead of blocking, which would tie up a thread, returns `null` rather
-than a SkyValue, signaling that it is incomplete due to missing inputs.
-Skyframe *restarts* the SkyFunctions when all previously requested SkyValues
-become available.
-
-Before the introduction of `SkyKeyComputeState`, the traditional way of handling
-a restart was to fully rerun the computation. Although this has quadratic
-complexity, functions written this way eventually complete because with each
-rerun, fewer lookups return `null`. With `SkyKeyComputeState` it is possible to
-associate hand-specified checkpoint data with a SkyFunction, saving significant
-recomputation.
-
-`StateMachine`s are objects that live inside `SkyKeyComputeState` and eliminate
-virtually all recomputation when a SkyFunction restarts (assuming that
-`SkyKeyComputeState` does not fall out of cache) by exposing suspend and resume
-execution hooks.
-
-### Stateful computations inside `SkyKeyComputeState`
-
-From an object-oriented design standpoint, it makes sense to consider storing
-computational objects inside `SkyKeyComputeState` instead of pure data values.
-In *Java*, the bare minimum description of a behavior-carrying object is a
-*functional interface*, and it turns out to be sufficient. A `StateMachine` has
-the following, curiously recursive, definition[^2].
- -``` -@FunctionalInterface -public interface StateMachine { - StateMachine step(Tasks tasks) throws InterruptedException; -} -``` - -The `Tasks` interface is analogous to `SkyFunction.Environment` but it is -designed for asynchrony and adds support for logically concurrent subtasks[^3]. - -The return value of `step` is another `StateMachine`, allowing the specification -of a sequence of steps, inductively. `step` returns `DONE` when the -`StateMachine` is done. For example: - -``` -class HelloWorld implements StateMachine { - @Override - public StateMachine step(Tasks tasks) { - System.out.println("hello"); - return this::step2; // The next step is HelloWorld.step2. - } - - private StateMachine step2(Tasks tasks) { - System.out.println("world"); - // DONE is special value defined in the `StateMachine` interface signaling - // that the computation is done. - return DONE; - } -} -``` - -describes a `StateMachine` with the following output. - -``` -hello -world -``` - -Note that the method reference `this::step2` is also a `StateMachine` due to -`step2` satisfying `StateMachine`'s functional interface definition. Method -references are the most common way to specify the next state in a -`StateMachine`. - -![Suspending and resuming](/contribute/images/suspend-resume.svg) - -Intuitively, breaking a computation down into `StateMachine` steps, instead of a -monolithic function, provides the hooks needed to *suspend* and *resume* a -computation. When `StateMachine.step` returns, there is an explicit *suspension* -point. The continuation specified by the returned `StateMachine` value is an -explicit *resume* point. Recomputation can thus be avoided because the -computation can be picked up exactly where it left off. - -### Callbacks, continuations and asynchronous computation - -In technical terms, a `StateMachine` serves as a *continuation*, determining the -subsequent computation to be executed. Instead of blocking, a `StateMachine` can -voluntarily *suspend* by returning from the `step` function, which transfers -control back to a [`Driver`](#drivers-and-bridging) instance. The `Driver` can -then switch to a ready `StateMachine` or relinquish control back to Skyframe. - -Traditionally, *callbacks* and *continuations* are conflated into one concept. -However, `StateMachine`s maintain a distinction between the two. - -* *Callback* - describes where to store the result of an asynchronous - computation. -* *Continuation* - specifies the next execution state. - -Callbacks are required when invoking an asynchronous operation, which means that -the actual operation doesn't occur immediately upon calling the method, as in -the case of a SkyValue lookup. Callbacks should be kept as simple as possible. - -Caution: A common pitfall of callbacks is that the asynchronous computation must -ensure the callback is called by the end of every reachable path. It's possible -to overlook some branches and the compiler doesn't give warnings about this. - -*Continuations* are the `StateMachine` return values of `StateMachine`s and -encapsulate the complex execution that follows once all asynchronous -computations resolve. This structured approach helps to keep the complexity of -callbacks manageable. - -## Tasks - -The `Tasks` interface provides `StateMachine`s with an API to lookup SkyValues -by SkyKey and to schedule concurrent subtasks. 
- -``` -interface Tasks { - void enqueue(StateMachine subtask); - - void lookUp(SkyKey key, Consumer<SkyValue> sink); - - <E extends Exception> - void lookUp(SkyKey key, Class<E> exceptionClass, ValueOrExceptionSink<E> sink); - - // lookUp overloads for 2 and 3 exception types exist, but are elided here. -} -``` - -Tip: When any state uses the `Tasks` interface to perform lookups or create -subtasks, those lookups and subtasks will complete before the next state begins. - -Tip: (Corollary) If subtasks are complex `StateMachine`s or recursively create -subtasks, they all *transitively* complete before the next state begins. - -### SkyValue lookups - -`StateMachine`s use `Tasks.lookUp` overloads to look up SkyValues. They are -analogous to `SkyFunction.Environment.getValue` and -`SkyFunction.Environment.getValueOrThrow` and have similar exception handling -semantics. The implementation does not immediately perform the lookup, but -instead, batches[^4] as many lookups as possible before doing so. The value -might not be immediately available, for example, requiring a Skyframe restart, -so the caller specifies what to do with the resulting value using a callback. - -The `StateMachine` processor ([`Driver`s and bridging to -SkyFunctions](#drivers-and-bridging)) guarantees that the value is available before -the next state begins. An example follows. - -``` -class DoesLookup implements StateMachine, Consumer<SkyValue> { - private Value value; - - @Override - public StateMachine step(Tasks tasks) { - tasks.lookUp(new Key(), (Consumer<SkyValue>) this); - return this::processValue; - } - - // The `lookUp` call in `step` causes this to be called before `processValue`. - @Override // Implementation of Consumer<SkyValue>. - public void accept(SkyValue value) { - this.value = (Value)value; - } - - private StateMachine processValue(Tasks tasks) { - System.out.println(value); // Prints the string representation of `value`. - return DONE; - } -} -``` - -In the above example, the first step does a lookup for `new Key()`, passing -`this` as the consumer. That is possible because `DoesLookup` implements -`Consumer<SkyValue>`. - -Tip: When passing `this` as a value sink, it's helpful to readers to upcast it -to the receiver type to narrow down the purpose of passing `this`. The example -passes `(Consumer<SkyValue>) this`. - -By contract, before the next state `DoesLookup.processValue` begins, all the -lookups of `DoesLookup.step` are complete. Therefore `value` is available when -it is accessed in `processValue`. - -### Subtasks - -`Tasks.enqueue` requests the execution of logically concurrent subtasks. -Subtasks are also `StateMachine`s and can do anything regular `StateMachine`s -can do, including recursively creating more subtasks or looking up SkyValues. -Much like `lookUp`, the state machine driver ensures that all subtasks are -complete before proceeding to the next step. An example follows. - -``` -class Subtasks implements StateMachine { - private int i = 0; - - @Override - public StateMachine step(Tasks tasks) { - tasks.enqueue(new Subtask1()); - tasks.enqueue(new Subtask2()); - // The next step is Subtasks.processResults. It won't be called until both - // Subtask1 and Subtask2 are complete. - return this::processResults; - } - - private StateMachine processResults(Tasks tasks) { - System.out.println(i); // Prints "3". - return DONE; // Subtasks is done. - } - - private class Subtask1 implements StateMachine { - @Override - public StateMachine step(Tasks tasks) { - i += 1; - return DONE; // Subtask1 is done.
- } - } - - private class Subtask2 implements StateMachine { - @Override - public StateMachine step(Tasks tasks) { - i += 2; - return DONE; // Subtask2 is done. - } - } -} -``` - -Though `Subtask1` and `Subtask2` are logically concurrent, everything runs in a -single thread so the "concurrent" update of `i` does not need any -synchronization. - -### Structured concurrency - -Since every `lookUp` and `enqueue` must resolve before advancing to the next -state, it means that concurrency is naturally limited to tree-structures. It's -possible to create hierarchical[^5] concurrency as shown in the following -example. - -![Structured Concurrency](/contribute/images/structured-concurrency.svg) - -It's hard to tell from the *UML* that the concurrency structure forms a tree. -There's an [alternate view](#concurrency-tree-diagram) that better shows the -tree structure. - -![Unstructured Concurrency](/contribute/images/unstructured-concurrency.svg) - -Structured concurrency is much easier to reason about. - -## Composition and control flow patterns - -This section presents examples for how multiple `StateMachine`s can be composed -and solutions to certain control flow problems. - -### Sequential states - -This is the most common and straightforward control flow pattern. An example of -this is shown in [Stateful computations inside -`SkyKeyComputeState`](#stateful-computations). - -### Branching - -Branching states in `StateMachine`s can be achieved by returning different -values using regular *Java* control flow, as shown in the following example. - -``` -class Branch implements StateMachine { - @Override - public StateMachine step(Tasks tasks) { - // Returns different state machines, depending on condition. - if (shouldUseA()) { - return this::performA; - } - return this::performB; - } - … -} -``` - -It’s very common for certain branches to return `DONE`, for early completion. - -### Advanced sequential composition - -Since the `StateMachine` control structure is memoryless, sharing `StateMachine` -definitions as subtasks can sometimes be awkward. Let *M1* and -*M2* be `StateMachine` instances that share a `StateMachine`, *S*, -with *M1* and *M2* being the sequences *<A, S, B>* and -*<X, S, Y>* respectively. The problem is that *S* doesn’t know whether to -continue to *B* or *Y* after it completes and `StateMachine`s don't quite keep a -call stack. This section reviews some techniques for achieving this. - -#### `StateMachine` as terminal sequence element - -This doesn’t solve the initial problem posed. It only demonstrates sequential -composition when the shared `StateMachine` is terminal in the sequence. - -``` -// S is the shared state machine. -class S implements StateMachine { … } - -class M1 implements StateMachine { - @Override - public StateMachine step(Tasks tasks) { - performA(); - return new S(); - } -} - -class M2 implements StateMachine { - @Override - public StateMachine step(Tasks tasks) { - performX(); - return new S(); - } -} -``` - -This works even if *S* is itself a complex state machine. - -#### Subtask for sequential composition - -Since enqueued subtasks are guaranteed to complete before the next state, it’s -sometimes possible to slightly abuse[^6] the subtask mechanism. - -``` -class M1 implements StateMachine { - @Override - public StateMachine step(Tasks tasks) { - performA(); - // S starts after `step` returns and by contract must complete before `doB` - // begins. It is effectively sequential, inducing the sequence < A, S, B >. 
- tasks.enqueue(new S()); - return this::doB; - } - - private StateMachine doB(Tasks tasks) { - performB(); - return DONE; - } -} - -class M2 implements StateMachine { - @Override - public StateMachine step(Tasks tasks) { - performX(); - // Similarly, this induces the sequence < X, S, Y>. - tasks.enqueue(new S()); - return this::doY; - } - - private StateMachine doY(Tasks tasks) { - performY(); - return DONE; - } -} -``` - -#### `runAfter` injection - -Sometimes, abusing `Tasks.enqueue` is impossible because there are other -parallel subtasks or `Tasks.lookUp` calls that must be completed before *S* -executes. In this case, injecting a `runAfter` parameter into *S* can be used to -inform *S* of what to do next. - -``` -class S implements StateMachine { - // Specifies what to run after S completes. - private final StateMachine runAfter; - - @Override - public StateMachine step(Tasks tasks) { - … // Performs some computations. - return this::processResults; - } - - @Nullable - private StateMachine processResults(Tasks tasks) { - … // Does some additional processing. - - // Executes the state machine defined by `runAfter` after S completes. - return runAfter; - } -} - -class M1 implements StateMachine { - @Override - public StateMachine step(Tasks tasks) { - performA(); - // Passes `this::doB` as the `runAfter` parameter of S, resulting in the - // sequence < A, S, B >. - return new S(/* runAfter= */ this::doB); - } - - private StateMachine doB(Tasks tasks) { - performB(); - return DONE; - } -} - -class M2 implements StateMachine { - @Override - public StateMachine step(Tasks tasks) { - performX(); - // Passes `this::doY` as the `runAfter` parameter of S, resulting in the - // sequence < X, S, Y >. - return new S(/* runAfter= */ this::doY); - } - - private StateMachine doY(Tasks tasks) { - performY(); - return DONE; - } -} -``` - -This approach is cleaner than abusing subtasks. However, applying this too -liberally, for example, by nesting multiple `StateMachine`s with `runAfter`, is -the road to [Callback Hell](#callback-hell). It’s better to break up sequential -`runAfter`s with ordinary sequential states instead. - -``` - return new S(/* runAfter= */ new T(/* runAfter= */ this::nextStep)) -``` - -can be replaced with the following. - -``` - private StateMachine step1(Tasks tasks) { - doStep1(); - return new S(/* runAfter= */ this::intermediateStep); - } - - private StateMachine intermediateStep(Tasks tasks) { - return new T(/* runAfter= */ this::nextStep); - } -``` - -Note: It's possible to pass `DONE` as the `runAfter` parameter when there's -nothing to run afterwards. - -Tip: When using `runAfter`, always annotate the parameter with `/* runAfter= */` -to let the reader know the meaning at the callsite. - -#### *Forbidden* alternative: `runAfterUnlessError` - -In an earlier draft, we had considered a `runAfterUnlessError` that would abort -early on errors. This was motivated by the fact that errors often end up getting -checked twice, once by the `StateMachine` that has a `runAfter` reference and -once by the `runAfter` machine itself. - -After some deliberation, we decided that uniformity of the code is more -important than deduplicating the error checking. It would be confusing if the -`runAfter` mechanism did not work in a consistent manner with the -`tasks.enqueue` mechanism, which always requires error checking. - -Warning: When using `runAfter`, the machine that has the injected `runAfter` -should invoke it unconditionally at completion, even on error, for consistency. 
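- -For example, a minimal sketch of this convention (the `sink` and `getError` members are assumed to exist, as in the other examples) reports the error through the error callback but still returns `runAfter` unconditionally: - -``` -class S implements StateMachine { - private final StateMachine runAfter; - … - private StateMachine complete(Tasks tasks) { - if (getError() != null) { - // Eagerly reports the error to the parent's callback. - sink.acceptError(getError()); - } - // Unconditionally continues with `runAfter`, even on error, consistent - // with how `tasks.enqueue` behaves. - return runAfter; - } -} -```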
- -### Direct delegation - -Each time there is a formal state transition, the main `Driver` loop advances. -As per contract, advancing states means that all previously enqueued SkyValue -lookups and subtasks resolve before the next state executes. Sometimes the logic -of a delegate `StateMachine` makes a phase advance unnecessary or -counterproductive. For example, if the first `step` of the delegate performs -SkyKey lookups that could be parallelized with lookups of the delegating state -then a phase advance would make them sequential. It could make more sense to -perform direct delegation, as shown in the example below. - -``` -class Parent implements StateMachine { - @Override - public StateMachine step(Tasks tasks) { - tasks.lookUp(new Key1(), this); - // Directly delegates to `Delegate`. - // - // The (valid) alternative: - // return new Delegate(this::afterDelegation); - // would cause `Delegate.step` to execute after `step` completes which would - // cause lookups of `Key1` and `Key2` to be sequential instead of parallel. - return new Delegate(this::afterDelegation).step(tasks); - } - - private StateMachine afterDelegation(Tasks tasks) { - … - } -} - -class Delegate implements StateMachine { - private final StateMachine runAfter; - - Delegate(StateMachine runAfter) { - this.runAfter = runAfter; - } - - @Override - public StateMachine step(Tasks tasks) { - tasks.lookUp(new Key2(), this); - return …; - } - - // Rest of implementation. - … - - private StateMachine complete(Tasks tasks) { - … - return runAfter; - } -} -``` - -## Data flow - -The focus of the previous discussion has been on managing control flow. This -section describes the propagation of data values. - -### Implementing `Tasks.lookUp` callbacks - -There’s an example of implementing a `Tasks.lookUp` callback in [SkyValue -lookups](#skyvalue-lookups). This section provides rationale and suggests -approaches for handling multiple SkyValues. - -#### `Tasks.lookUp` callbacks - -The `Tasks.lookUp` method takes a callback, `sink`, as a parameter. - -``` - void lookUp(SkyKey key, Consumer<SkyValue> sink); -``` - -The idiomatic approach would be to use a *Java* lambda to implement this: - -``` - tasks.lookUp(key, value -> myValue = (MyValueClass)value); -``` - -with `myValue` being a member variable of the `StateMachine` instance doing the -lookup. However, the lambda requires an extra memory allocation compared to -implementing the `Consumer<SkyValue>` interface in the `StateMachine` -implementation. The lambda is still useful when there are multiple lookups that -would otherwise be ambiguous. - -Note: Bikeshed warning. There is a noticeable difference of approximately 1% -end-to-end CPU usage when implementing callbacks systematically in -`StateMachine` implementations compared to using lambdas, which makes this -recommendation debatable. To avoid unnecessary debates, it is advised to leave -the decision up to the individual implementing the solution. - -There are also error handling overloads of `Tasks.lookUp` that are analogous to -`SkyFunction.Environment.getValueOrThrow`. - -``` - <E extends Exception> - void lookUp( - SkyKey key, Class<E> exceptionClass, ValueOrExceptionSink<E> sink); - - interface ValueOrExceptionSink<E extends Exception> { - void acceptValueOrException(@Nullable SkyValue value, @Nullable E exception); - } -``` - -An example implementation is shown below.
- -``` -class PerformLookupWithError implements StateMachine, ValueOrExceptionSink<MyException> { - private MyValue value; - private MyException error; - - @Override - public StateMachine step(Tasks tasks) { - tasks.lookUp(new MyKey(), MyException.class, (ValueOrExceptionSink<MyException>) this); - return this::processResult; - } - - @Override - public void acceptValueOrException(@Nullable SkyValue value, @Nullable MyException exception) { - if (value != null) { - this.value = (MyValue)value; - return; - } - if (exception != null) { - this.error = exception; - return; - } - throw new IllegalArgumentException("Both parameters were unexpectedly null."); - } - - private StateMachine processResult(Tasks tasks) { - if (error != null) { - // Handles the error. - … - return DONE; - } - // Processes `value`, which is non-null. - … - } -} -``` - -As with lookups without error handling, having the `StateMachine` class directly -implement the callback saves a memory allocation for the lambda. - -[Error handling](#error-handling) provides a bit more detail, but essentially, -there's not much difference between the propagation of errors and normal values. - -#### Consuming multiple SkyValues - -Multiple SkyValue lookups are often required. An approach that works much of the -time is to switch on the type of SkyValue. The following is an example that has -been simplified from prototype production code. - -``` - @Nullable - private StateMachine fetchConfigurationAndPackage(Tasks tasks) { - var configurationKey = configuredTarget.getConfigurationKey(); - if (configurationKey != null) { - tasks.lookUp(configurationKey, (Consumer<SkyValue>) this); - } - - var packageId = configuredTarget.getLabel().getPackageIdentifier(); - tasks.lookUp(PackageValue.key(packageId), (Consumer<SkyValue>) this); - - return this::constructResult; - } - - @Override // Implementation of `Consumer<SkyValue>`. - public void accept(SkyValue value) { - if (value instanceof BuildConfigurationValue) { - this.configurationValue = (BuildConfigurationValue) value; - return; - } - if (value instanceof PackageValue) { - this.pkg = ((PackageValue) value).getPackage(); - return; - } - throw new IllegalArgumentException("unexpected value: " + value); - } -``` - -The `Consumer<SkyValue>` callback implementation can be shared unambiguously -because the value types are different. When that’s not the case, falling back to -lambda-based implementations or full inner-class instances that implement the -appropriate callbacks is viable. - -### Propagating values between `StateMachine`s - -So far, this document has only explained how to arrange work in a subtask, but -subtasks also need to report values back to the caller. Since subtasks are -logically asynchronous, their results are communicated back to the caller using -a *callback*. To make this work, the subtask defines a sink interface that is -injected via its constructor. - -``` -class BarProducer implements StateMachine { - // Callers of BarProducer implement the following interface to accept its - // results. Exactly one of the two methods will be called by the time - // BarProducer completes. - interface ResultSink { - void acceptBarValue(Bar value); - void acceptBarError(BarException exception); - } - - private final ResultSink sink; - - BarProducer(ResultSink sink) { - this.sink = sink; - } - - … // StateMachine steps that end with this::complete.
- - private StateMachine complete(Tasks tasks) { - if (hasError()) { - sink.acceptBarError(getError()); - return DONE; - } - sink.acceptBarValue(getValue()); - return DONE; - } -} -``` - -Tip: It would be tempting to use the more concise signature `void accept(Bar -value)` rather than the stuttery `void acceptBarValue(Bar value)` above. -However, `Consumer<Bar>` is a common overload of `void accept(Bar value)`, -so doing this often leads to violations of the [Overloads: never -split](https://google.github.io/styleguide/javaguide.html#s3.4.2-ordering-class-contents) -style-guide rule. - -Tip: Using a custom `ResultSink` type instead of a generic one from -`java.util.function` makes it easy to find implementations in the code base, -improving readability. - -A caller `StateMachine` would then look like the following. - -``` -class Caller implements StateMachine, BarProducer.ResultSink { - interface ResultSink { - void acceptCallerValue(Bar value); - void acceptCallerError(BarException error); - } - - private final ResultSink sink; - - private Bar value; - - Caller(ResultSink sink) { - this.sink = sink; - } - - @Override - @Nullable - public StateMachine step(Tasks tasks) { - tasks.enqueue(new BarProducer((BarProducer.ResultSink) this)); - return this::processResult; - } - - @Override - public void acceptBarValue(Bar value) { - this.value = value; - } - - @Override - public void acceptBarError(BarException error) { - sink.acceptCallerError(error); - } - - private StateMachine processResult(Tasks tasks) { - // Since all enqueued subtasks resolve before `processResult` starts, one of - // the `BarProducer.ResultSink` callbacks must have been called by this point. - if (value == null) { - return DONE; // There was a previously reported error. - } - var finalResult = computeResult(value); - sink.acceptCallerValue(finalResult); - return DONE; - } -} -``` - -The preceding example demonstrates a few things. `Caller` has to propagate its -results back and defines its own `Caller.ResultSink`. `Caller` implements the -`BarProducer.ResultSink` callbacks. Upon resumption, `processResult` checks if -`value` is null to determine if an error occurred. This is a common behavior -pattern after accepting output from either a subtask or SkyValue lookup. - -Note that the implementation of `acceptBarError` eagerly forwards the result to -the `Caller.ResultSink`, as required by [Error bubbling](#error-bubbling). - -Alternatives for top-level `StateMachine`s are described in [`Driver`s and -bridging to SkyFunctions](#drivers-and-bridging). - -### Error handling - -There are a couple of examples of error handling already in [`Tasks.lookUp` -callbacks](#tasks-lookup-callbacks) and [Propagating values between -`StateMachines`](#propagating-values). Exceptions, other than -`InterruptedException`, are not thrown, but instead passed around through -callbacks as values. Such callbacks often have exclusive-or semantics, with -exactly one of a value or error being passed. - -The next section describes a subtle but important interaction with Skyframe -error handling. - -#### Error bubbling (--nokeep\_going) - -Warning: Errors need to be eagerly propagated all the way back to the -SkyFunction for error bubbling to function correctly. - -During error bubbling, a SkyFunction may be restarted even if not all requested -SkyValues are available. In such cases, the subsequent state will never be -reached due to the `Tasks` API contract. However, the `StateMachine` should -still propagate the exception.
- -Since propagation must occur regardless of whether the next state is reached, -the error handling callback must perform this task. For an inner `StateMachine`, -this is achieved by invoking the parent callback. - -At the top-level `StateMachine`, which interfaces with the SkyFunction, this can -be done by calling the `setException` method of `ValueOrExceptionProducer`. -`ValueOrExceptionProducer.tryProduceValue` will then throw the exception, even -if there are missing SkyValues. - -If a `Driver` is being utilized directly, it is essential to check for -propagated errors from the SkyFunction, even if the machine has not finished -processing. - -### Event Handling - -For SkyFunctions that need to emit events, a `StoredEventHandler` is injected -into SkyKeyComputeState and further injected into `StateMachine`s that require -them. Historically, the `StoredEventHandler` was needed due to Skyframe dropping -certain events unless they are replayed but this was subsequently fixed. -`StoredEventHandler` injection is preserved because it simplifies the -implementation of events emitted from error handling callbacks. - -## `Driver`s and bridging to SkyFunctions - -A `Driver` is responsible for managing the execution of `StateMachine`s, -beginning with a specified root `StateMachine`. As `StateMachine`s can -recursively enqueue subtask `StateMachine`s, a single `Driver` can manage -numerous subtasks. These subtasks create a tree structure, a result of -[Structured concurrency](#structured-concurrency). The `Driver` batches SkyValue -lookups across subtasks for improved efficiency. - -There are a number of classes built around the `Driver`, with the following API. - -``` -public final class Driver { - public Driver(StateMachine root); - public boolean drive(SkyFunction.Environment env) throws InterruptedException; -} -``` - -`Driver` takes a single root `StateMachine` as a parameter. Calling -`Driver.drive` executes the `StateMachine` as far as it can go without a -Skyframe restart. It returns true when the `StateMachine` completes and false -otherwise, indicating that not all values were available. - -`Driver` maintains the concurrent state of the `StateMachine` and it is well -suited for embedding in `SkyKeyComputeState`. - -### Directly instantiating `Driver` - -`StateMachine` implementations conventionally communicate their results via -callbacks. It's possible to directly instantiate a `Driver` as shown in the -following example. - -The `Driver` is embedded in the `SkyKeyComputeState` implementation along with -an implementation of the corresponding `ResultSink` to be defined a bit further -down. At the top level, the `State` object is an appropriate receiver for the -result of the computation as it is guaranteed to outlive `Driver`. - -``` -class State implements SkyKeyComputeState, ResultProducer.ResultSink { - // The `Driver` instance, containing the full tree of all `StateMachine` - // states. Responsible for calling `StateMachine.step` implementations when - // asynchronous values are available and performing batched SkyFrame lookups. - // - // Non-null while `result` is being computed. - private Driver resultProducer; - - // Variable for storing the result of the `StateMachine` - // - // Will be non-null after the computation completes. - // - private ResultType result; - - // Implements `ResultProducer.ResultSink`. - // - // `ResultProducer` propagates its final value through a callback that is - // implemented here. 
- @Override - public void acceptResult(ResultType result) { - this.result = result; - } -} -``` - -The code below sketches the `ResultProducer`. - -``` -class ResultProducer implements StateMachine { - interface ResultSink { - void acceptResult(ResultType value); - } - - private final Parameters parameters; - private final ResultSink sink; - - … // Other internal state. - - ResultProducer(Parameters parameters, ResultSink sink) { - this.parameters = parameters; - this.sink = sink; - } - - @Override - public StateMachine step(Tasks tasks) { - … // Implementation. - return this::complete; - } - - private StateMachine complete(Tasks tasks) { - sink.acceptResult(getResult()); - return DONE; - } -} -``` - -Then the code for lazily computing the result could look like the following. - -``` -@Nullable -private Result computeResult(State state, SkyFunction.Environment env) - throws InterruptedException { - if (state.result != null) { - return state.result; - } - if (state.resultProducer == null) { - state.resultProducer = new Driver(new ResultProducer( - new Parameters(), (ResultProducer.ResultSink)state)); - } - if (state.resultProducer.drive(env)) { - // Clears the `Driver` instance as it is no longer needed. - state.resultProducer = null; - } - return state.result; -} -``` - -### Embedding `Driver` - -If the `StateMachine` produces a value and raises no exceptions, embedding -`Driver` is another possible implementation, as shown in the following example. - -``` -class ResultProducer implements StateMachine { - private final Parameters parameters; - private final Driver driver; - - private ResultType result; - - ResultProducer(Parameters parameters) { - this.parameters = parameters; - this.driver = new Driver(this); - } - - @Nullable // Null when a Skyframe restart is needed. - public ResultType tryProduceValue(SkyFunction.Environment env) - throws InterruptedException { - if (!driver.drive(env)) { - return null; - } - return result; - } - - @Override - public StateMachine step(Tasks tasks) { - … // Implementation. - } -} -``` - -The SkyFunction may have code that looks like the following (where `State` is -the function-specific type of `SkyKeyComputeState`). - -``` -@Nullable // Null when a Skyframe restart is needed. -Result computeResult(SkyFunction.Environment env, State state) - throws InterruptedException { - if (state.result != null) { - return state.result; - } - if (state.resultProducer == null) { - state.resultProducer = new ResultProducer(new Parameters()); - } - var result = state.resultProducer.tryProduceValue(env); - if (result == null) { - return null; - } - state.resultProducer = null; - return state.result = result; -} -``` - -Embedding `Driver` in the `StateMachine` implementation is a better fit for -Skyframe's synchronous coding style. - -### StateMachines that may produce exceptions - -Otherwise, there are `SkyKeyComputeState`-embeddable `ValueOrExceptionProducer` -and `ValueOrException2Producer` classes that have synchronous APIs to match -synchronous SkyFunction code. - -The `ValueOrExceptionProducer` abstract class includes the following methods. - -``` -public abstract class ValueOrExceptionProducer<V, E extends Exception> - implements StateMachine { - @Nullable - public final V tryProduceValue(Environment env) - throws InterruptedException, E { - … // Implementation. - } - - protected final void setValue(V value) { … // Implementation. } - protected final void setException(E exception) { … // Implementation.
} -} -``` - -It includes an embedded `Driver` instance and closely resembles the -`ResultProducer` class in [Embedding driver](#embedding-driver) and interfaces -with the SkyFunction in a similar manner. Instead of defining a `ResultSink`, -implementations call `setValue` or `setException` when either of those occur. -When both occur, the exception takes priority. The `tryProduceValue` method -bridges the asynchronous callback code to synchronous code and throws an -exception when one is set. - -As previously noted, during error bubbling, it's possible for an error to occur -even if the machine is not yet done because not all inputs are available. To -accommodate this, `tryProduceValue` throws any set exceptions, even before the -machine is done. - -## Epilogue: Eventually removing callbacks - -`StateMachine`s are a highly efficient, but boilerplate intensive way to perform -asynchronous computation. Continuations (particularly in the form of `Runnable`s -passed to `ListenableFuture`) are widespread in certain parts of *Bazel* code, -but aren't prevalent in analysis SkyFunctions. Analysis is mostly CPU bound and -there are no efficient asynchronous APIs for disk I/O. Eventually, it would be -good to optimize away callbacks as they have a learning curve and impede -readability. - -One of the most promising alternatives is *Java* virtual threads. Instead of -having to write callbacks, everything is replaced with synchronous, blocking -calls. This is possible because tying up a virtual thread resource, unlike a -platform thread, is supposed to be cheap. However, even with virtual threads, -replacing simple synchronous operations with thread creation and synchronization -primitives is too expensive. We performed a migration from `StateMachine`s to -*Java* virtual threads and they were orders of magnitude slower, leading to -almost a 3x increase in end-to-end analysis latency. Since virtual threads are -still a preview feature, it's possible that this migration can be performed at a -later date when performance improves. - -Another approach to consider is waiting for *Loom* coroutines, if they ever -become available. The advantage here is that it might be possible to reduce -synchronization overhead by using cooperative multitasking. - -If all else fails, low-level bytecode rewriting could also be a viable -alternative. With enough optimization, it might be possible to achieve -performance that approaches hand-written callback code. - -## Appendix - -### Callback Hell - -Callback hell is an infamous problem in asynchronous code that uses callbacks. -It stems from the fact that the continuation for a subsequent step is nested -within the previous step. If there are many steps, this nesting can be extremely -deep. If coupled with control flow the code becomes unmanageable. - -``` -class CallbackHell implements StateMachine { - @Override - public StateMachine step(Tasks task) { - doA(); - return (t, l) -> { - doB(); - return (t1, l2) -> { - doC(); - return DONE; - }; - }; - } -} -``` - -One of the advantages of nested implementations is that the stack frame of the -outer step can be preserved. In *Java*, captured lambda variables must be -effectively final so using such variables can be cumbersome. Deep nesting is -avoided by returning method references as continuations instead of lambdas as -shown as follows. 
- -``` -class CallbackHellAvoided implements StateMachine { - @Override - public StateMachine step(Tasks task) { - doA(); - return this::step2; - } - - private StateMachine step2(Tasks tasks) { - doB(); - return this::step3; - } - - private StateMachine step3(Tasks tasks) { - doC(); - return DONE; - } -} -``` - -Callback hell may also occur if the [`runAfter` injection](#runafter-injection) -pattern is used too densely, but this can be avoided by interspersing injections -with sequential steps. - -#### Example: Chained SkyValue lookups - -It is often the case that the application logic requires dependent chains of -SkyValue lookups, for example, if a second SkyKey depends on the first SkyValue. -Thinking about this naively, this would result in a complex, deeply nested -callback structure. - -``` -private ValueType1 value1; -private ValueType2 value2; - -private StateMachine step1(...) { - tasks.lookUp(key1, (Consumer<SkyValue>) this); // key1 has type KeyType1. - return this::step2; -} - -@Override -public void accept(SkyValue value) { - this.value1 = (ValueType1) value; -} - -private StateMachine step2(...) { - KeyType2 key2 = computeKey(value1); - tasks.lookUp(key2, this::acceptValueType2); - return this::step3; -} - -private void acceptValueType2(SkyValue value) { - this.value2 = (ValueType2) value; -} -``` - -However, since continuations are specified as method references, the code looks -procedural across state transitions: `step2` follows `step1`. Note that here, a -method reference (`this::acceptValueType2`) is used to assign `value2`. This -makes the ordering of the code match the ordering of the computation from -top-to-bottom. - -### Miscellaneous Tips - -#### Readability: Execution Ordering - -To improve readability, strive to keep the `StateMachine.step` implementations -in execution order and callback implementations immediately following where they -are passed in the code. This isn't always possible where the control flow -branches. Additional comments might be helpful in such cases. - -In [Example: Chained SkyValue lookups](#chained-skyvalue-lookups), an -intermediate method reference is created to achieve this. This trades a small -amount of performance for readability, which is likely worthwhile here. - -#### Generational Hypothesis - -Medium-lived *Java* objects break the generational hypothesis of the *Java* -garbage collector, which is designed to handle objects that live for a very -short time or objects that live forever. By definition, objects in -`SkyKeyComputeState` violate this hypothesis. Such objects, containing the -constructed tree of all still-running `StateMachine`s, rooted at `Driver`, have -an intermediate lifespan as they suspend, waiting for asynchronous computations -to complete. - -It seems less bad in JDK19, but when using `StateMachine`s, it's sometimes -possible to observe an increase in GC time, even with dramatic decreases in -actual garbage generated. Since `StateMachine`s have an intermediate lifespan -they could be promoted to old gen, causing it to fill up more quickly, thus -necessitating more expensive major or full GCs to clean up. - -The initial precaution is to minimize the use of `StateMachine` variables, but -it is not always feasible, for example, if a value is needed across multiple -states. Where it is possible, local stack `step` variables are young generation -variables and efficiently GC'd.
- -For `StateMachine` variables, breaking things down into subtasks and following -the recommended pattern for [Propagating values between -`StateMachine`s](#propagating-values) is also helpful. Observe that when -following the pattern, only child `StateMachine`s have references to parent -`StateMachine`s and not vice versa. This means that as children complete and -update the parents using result callbacks, the children naturally fall out of -scope and become eligible for GC. - -Finally, in some cases, a `StateMachine` variable is needed in earlier states -but not in later states. It can be beneficial to null out references of large -objects once it is known that they are no longer needed. - -#### Naming states - -When naming a method, it's usually possible to name a method for the behavior -that happens within that method. It's less clear how to do this in -`StateMachine`s because there is no stack. For example, suppose method `foo` -calls a sub-method `bar`. In a `StateMachine`, this could be translated into the -state sequence `foo`, followed by `bar`. `foo` no longer includes the behavior -`bar`. As a result, method names for states tend to be narrower in scope, -potentially reflecting local behavior. - -### Concurrency tree diagram - -The following is an alternative view of the diagram in [Structured -concurrency](#structured-concurrency) that better depicts the tree structure. -The blocks form a small tree. - -![Structured Concurrency 3D](/contribute/images/structured-concurrency-3d.svg) - -[^1]: In contrast to Skyframe's convention of restarting from the beginning when - values are not available. -[^2]: Note that `step` is permitted to throw `InterruptedException`, but the - examples omit this. There are a few low methods in *Bazel* code that throw - this exception and it propagates up to the `Driver`, to be described later, - that runs the `StateMachine`. It's fine to not declare it to be thrown when - unneeded. -[^3]: Concurrent subtasks were motivated by the `ConfiguredTargetFunction` which - performs *independent* work for each dependency. Instead of manipulating - complex data structures that process all the dependencies at once, - introducing inefficiencies, each dependency has its own independent - `StateMachine`. -[^4]: Multiple `tasks.lookUp` calls within a single step are batched together. - Additional batching can be created by lookups occurring within concurrent - subtasks. -[^5]: This is conceptually similar to Java’s structured concurrency - [jeps/428](https://openjdk.org/jeps/428). -[^6]: Doing this is similar to spawning a thread and joining it to achieve - sequential composition. diff --git a/8.2.1/contribute/windows-chocolatey-maintenance.mdx b/8.2.1/contribute/windows-chocolatey-maintenance.mdx deleted file mode 100644 index c6aee8f..0000000 --- a/8.2.1/contribute/windows-chocolatey-maintenance.mdx +++ /dev/null @@ -1,72 +0,0 @@ ---- -title: 'Maintaining Bazel Chocolatey package on Windows' ---- - - - -Note: The Chocolatey package is experimental; please provide feedback -(`@petemounce` in issue tracker). - -## Prerequisites - -You need: - -* [chocolatey package manager](https://chocolatey.org) installed -* (to publish) a chocolatey API key granting you permission to publish the - `bazel` package - * [@petemounce](https://github.com/petemounce) currently - maintains this unofficial package. 
-* (to publish) to have set up that API key for the chocolatey source locally - via `choco apikey -k <your key> -s https://chocolatey.org/` - -## Build - -Compile bazel with the msys2 shell and `compile.sh`. - -```powershell -pushd scripts/packages/chocolatey - ./build.ps1 -version 0.3.2 -mode local -popd -``` - -Should result in `scripts/packages/chocolatey/bazel.<version>.nupkg` being -created. - -The `build.ps1` script supports `mode` values `local`, `rc` and `release`. - -## Test - -0. Build the package (with `-mode local`) - - * run a webserver (`python -m SimpleHTTPServer` in - `scripts/packages/chocolatey` is convenient and starts one on - `http://localhost:8000`) - -0. Test the install - - The `test.ps1` script should install the package cleanly (and error if it did not - install cleanly), then tell you what to do next. - -0. Test the uninstall - - ```sh - choco uninstall bazel - # should remove bazel from the system - ``` - -Chocolatey's moderation process automates checks here as well. - -## Release - -Modify `tools/parameters.json` for the new release's URI and checksum once the -release has been published to GitHub releases. - -```powershell -./build.ps1 -version <version> -isRelease -./test.ps1 -version <version> -# if the test.ps1 passes -choco push bazel.x.y.z.nupkg --source https://chocolatey.org/ -``` - -Chocolatey.org will then run automated checks and respond to the push via email -to the maintainers. diff --git a/8.2.1/contribute/windows-scoop-maintenance.mdx b/8.2.1/contribute/windows-scoop-maintenance.mdx deleted file mode 100644 index 58e2a6c..0000000 --- a/8.2.1/contribute/windows-scoop-maintenance.mdx +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: 'Maintaining Bazel Scoop package on Windows' ---- - - - -Note: The Scoop package is experimental. To provide feedback, reach out to -`@excitoon` in the issue tracker. - -## Prerequisites - -You need: - -* [Scoop package manager](https://scoop.sh/) installed -* GitHub account in order to publish and create pull requests to - [scoopinstaller/scoop-main](https://github.com/scoopinstaller/scoop-main) - * [@excitoon](https://github.com/excitoon) currently maintains this - unofficial package. Feel free to ask questions by - [e-mail](mailto:vladimir.chebotarev@gmail.com) or - [Telegram](http://telegram.me/excitoon). - -## Release process - -Scoop packages are very easy to maintain. Once you have the URL of the released -Bazel, you need to make appropriate changes in -[this file](https://github.com/scoopinstaller/scoop-main/blob/master/bucket/bazel.json): - -- update version -- update dependencies if needed -- update URL -- update hash (`sha256` by default) - -In your filesystem, `bazel.json` is located in the directory -`%UserProfile%/scoop/buckets/main/bucket` by default. This directory belongs to -your clone of the Git repository -[scoopinstaller/scoop-main](https://github.com/scoopinstaller/scoop-main). - -Test the result: - -``` -scoop uninstall bazel -scoop install bazel -bazel version -bazel something_else -``` - -The first time, make a fork of -[scoopinstaller/scoop-main](https://github.com/scoopinstaller/scoop-main) and -specify it as your own remote for `%UserProfile%/scoop/buckets/main`: - -``` -git remote add mine FORK_URL -``` - -Push your changes to your fork and create a pull request.
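- -For example, with a hypothetical version `x.y.z` (`mine` is the fork remote added above): - -``` -git checkout -b bazel-x.y.z -git add bucket/bazel.json -git commit -m "bazel: update to x.y.z" -git push mine bazel-x.y.z -```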
diff --git a/8.2.1/docs/android-build-performance.mdx b/8.2.1/docs/android-build-performance.mdx deleted file mode 100644 index 0d5edc7..0000000 --- a/8.2.1/docs/android-build-performance.mdx +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: 'Android Build Performance' ---- - - - -This page contains information on optimizing build performance for Android -apps specifically. For general build performance optimization with Bazel, see -[Optimizing Performance](/rules/performance). - -## Recommended flags - -The flags are in the -[`bazelrc` configuration syntax](/run/bazelrc#bazelrc-syntax-semantics), so -they can be pasted directly into a `bazelrc` file and invoked with -`--config=` on the command line. - -**Profiling performance** - -Bazel writes a JSON trace profile by default to a file called -`command.profile.gz` in Bazel's output base. -See the [JSON Profile documentation](/rules/performance#performance-profiling) for -how to read and interact with the profile. - -**Persistent workers for Android build actions**. - -A subset of Android build actions has support for -[persistent workers](https://blog.bazel.build/2015/12/10/java-workers.html). - -These actions' mnemonics are: - -* DexBuilder -* Javac -* Desugar -* AaptPackage -* AndroidResourceParser -* AndroidResourceValidator -* AndroidResourceCompiler -* RClassGenerator -* AndroidResourceLink -* AndroidAapt2 -* AndroidAssetMerger -* AndroidResourceMerger -* AndroidCompiledResourceMerger - -Enabling workers can result in better build performance by saving on JVM -startup costs from invoking each of these tools, but at the cost of increased -memory usage on the system by persisting them. - -To enable workers for these actions, apply these flags with -`--config=android_workers` on the command line: - -``` -build:android_workers --strategy=DexBuilder=worker -build:android_workers --strategy=Javac=worker -build:android_workers --strategy=Desugar=worker - -# A wrapper flag for these resource processing actions: -# - AndroidResourceParser -# - AndroidResourceValidator -# - AndroidResourceCompiler -# - RClassGenerator -# - AndroidResourceLink -# - AndroidAapt2 -# - AndroidAssetMerger -# - AndroidResourceMerger -# - AndroidCompiledResourceMerger -build:android_workers --persistent_android_resource_processor -``` - -The default number of persistent workers created per action is `4`. We have -[measured improved build performance](https://github.com/bazelbuild/bazel/issues/8586#issuecomment-500070549) -by capping the number of instances for each action to `1` or `2`, although this -may vary depending on the system Bazel is running on, and the project being -built. - -To cap the number of instances for an action, apply these flags: - -``` -build:android_workers --worker_max_instances=DexBuilder=2 -build:android_workers --worker_max_instances=Javac=2 -build:android_workers --worker_max_instances=Desugar=2 -build:android_workers --worker_max_instances=AaptPackage=2 -# .. and so on for each action you're interested in. -``` - -**Using AAPT2** - -[`aapt2`](https://developer.android.com/studio/command-line/aapt2) has improved -performance over `aapt` and also creates smaller APKs. To use `aapt2`, use the -`--android_aapt=aapt2` flag or set `aapt2` on the `aapt_version` on -`android_binary` and `android_local_test`. - -**SSD optimizations** - -The `--experimental_multi_threaded_digest` flag is useful for optimizing digest -computation on SSDs. 
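- -For example, both opt-in flags mentioned above could be combined in a `bazelrc` (a sketch; whether to gate them behind a `--config` is up to the project): - -``` -# Use aapt2 for resource processing. -build --android_aapt=aapt2 -# Optimize digest computation on SSDs. -build --experimental_multi_threaded_digest -```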
diff --git a/8.2.1/docs/android-instrumentation-test.mdx b/8.2.1/docs/android-instrumentation-test.mdx deleted file mode 100644 index bf0ff76..0000000 --- a/8.2.1/docs/android-instrumentation-test.mdx +++ /dev/null @@ -1,579 +0,0 @@ ---- -title: 'Android Instrumentation Tests' ---- - - - -_If you're new to Bazel, start with the [Building Android with -Bazel](/start/android-app ) tutorial._ - -![Running Android instrumentation tests in parallel](/docs/images/android_test.gif "Android instrumentation test") - -**Figure 1.** Running parallel Android instrumentation tests. - -[`android_instrumentation_test`](/reference/be/android#android_instrumentation_test) -allows developers to test their apps on Android emulators and devices. -It utilizes real Android framework APIs and the Android Test Library. - -For hermeticity and reproducibility, Bazel creates and launches Android -emulators in a sandbox, ensuring that tests always run from a clean state. Each -test gets an isolated emulator instance, allowing tests to run in parallel -without passing states between them. - -For more information on Android instrumentation tests, check out the [Android -developer -documentation](https://developer.android.com/training/testing/unit-testing/instrumented-unit-tests.html). - -Please file issues in the [GitHub issue tracker](https://github.com/bazelbuild/bazel/issues). - -## How it works - -When you run `bazel test` on an `android_instrumentation_test` target for the -first time, Bazel performs the following steps: - -1. Builds the test APK, APK under test, and their transitive dependencies -2. Creates, boots, and caches clean emulator states -3. Starts the emulator -4. Installs the APKs -5. Runs tests utilizing the [Android Test Orchestrator](https://developer.android.com/training/testing/junit-runner.html#using-android-test-orchestrator) -6. Shuts down the emulator -7. Reports the results - -In subsequent test runs, Bazel boots the emulator from the clean, cached state -created in step 2, so there are no leftover states from previous runs. Caching -emulator state also speeds up test runs. - -## Prerequisites - -Ensure your environment satisfies the following prerequisites: - -- **Linux**. Tested on Ubuntu 16.04, and 18.04. - -- **Bazel 0.12.0** or later. Verify the version by running `bazel info release`. - -```posix-terminal -bazel info release -``` -This results in output similar to the following: - -```none {:.devsite-disable-click-to-copy} -release 4.1.0 -``` - -- **KVM**. Bazel requires emulators to have [hardware - acceleration](https://developer.android.com/studio/run/emulator-acceleration.html#accel-check) - with KVM on Linux. You can follow these - [installation instructions](https://help.ubuntu.com/community/KVM/Installation) - for Ubuntu. - -To verify that KVM has the correct configuration, run: - -```posix-terminal -apt-get install cpu-checker && kvm-ok -``` - -If it prints the following message, you have the correct configuration: - -```none {:.devsite-disable-click-to-copy} -INFO: /dev/kvm exists -KVM acceleration can be used -``` - -- **Xvfb**. To run headless tests (for example, on CI servers), Bazel requires - the [X virtual framebuffer](https://www.x.org/archive/X11R7.6/doc/man/man1/Xvfb.1.xhtml). 
- -To install it, run: - -```posix-terminal -apt-get install xvfb -``` -Verify that `Xvfb` is installed correctly and is installed at `/usr/bin/Xvfb` -by running: - -```posix-terminal -which Xvfb -``` -The output is the following: - -```{:.devsite-disable-click-to-copy} -/usr/bin/Xvfb -``` - -- **32-bit Libraries**. Some of the binaries used by the test infrastructure are - 32-bit, so on 64-bit machines, ensure that 32-bit binaries can be run. For - Ubuntu, install these 32-bit libraries: - -```posix-terminal -sudo apt-get install libc6:i386 libncurses5:i386 libstdc++6:i386 lib32z1 libbz2-1.0:i386 -``` - -## Getting started - -Here is a typical target dependency graph of an `android_instrumentation_test`: - -![The target dependency graph of an Android instrumentation test](/docs/images/android_instrumentation_test.png "Target dependency graph") - -**Figure 2.** Target dependency graph of an `android_instrumentation_test`. - - -### BUILD file - -The graph translates into a `BUILD` file like this: - -```python -android_instrumentation_test( - name = "my_test", - test_app = ":my_test_app", - target_device = "@android_test_support//tools/android/emulated_devices/generic_phone:android_23_x86", -) - -# Test app and library -android_binary( - name = "my_test_app", - instruments = ":my_app", - manifest = "AndroidTestManifest.xml", - deps = [":my_test_lib"], - # ... -) - -android_library( - name = "my_test_lib", - srcs = glob(["javatest/**/*.java"]), - deps = [ - ":my_app_lib", - "@maven//:androidx_test_core", - "@maven//:androidx_test_runner", - "@maven//:androidx_test_espresso_espresso_core", - ], - # ... -) - -# Target app and library under test -android_binary( - name = "my_app", - manifest = "AndroidManifest.xml", - deps = [":my_app_lib"], - # ... -) - -android_library( - name = "my_app_lib", - srcs = glob(["java/**/*.java"]), - deps = [ - "@maven//:androidx_appcompat_appcompat", - "@maven//:androidx_annotation_annotation", - ] - # ... -) -``` - -The main attributes of the rule `android_instrumentation_test` are: - -- `test_app`: An `android_binary` target. This target contains test code and - dependencies like Espresso and UIAutomator. The selected `android_binary` - target is required to specify an `instruments` attribute pointing to another - `android_binary`, which is the app under test. - -- `target_device`: An `android_device` target. This target describes the - specifications of the Android emulator which Bazel uses to create, launch and - run the tests. See the [section on choosing an Android - device](#android-device-target) for more information. - -The test app's `AndroidManifest.xml` must include [an `<instrumentation>` -tag](https://developer.android.com/studio/test/#configure_instrumentation_manifest_settings). -This tag must specify the attributes for the **package of the target app** and -the **fully qualified class name of the instrumentation test runner**, -`androidx.test.runner.AndroidJUnitRunner`. - -Here is an example `AndroidTestManifest.xml` for the test app: - -```xml -<?xml version="1.0" encoding="UTF-8"?> -<manifest xmlns:android="http://schemas.android.com/apk/res/android" - package="com.example.android.app.test" - android:versionCode="1" - android:versionName="1.0"> - - <instrumentation - android:name="androidx.test.runner.AndroidJUnitRunner" - android:targetPackage="com.example.android.app" /> - - <uses-sdk - android:minSdkVersion="16" - android:targetSdkVersion="27" /> - - <application> - <uses-library android:name="android.test.runner" /> - </application> - -</manifest> -``` - -### WORKSPACE dependencies - -In order to use this rule, your project needs to depend on these external -repositories: - -- `@androidsdk`: The Android SDK. Download this through Android Studio. - -- `@android_test_support`: Hosts the test runner, emulator launcher, and - `android_device` targets. You can find the [latest release - here](https://github.com/android/android-test/releases).
- -Enable these dependencies by adding the following lines to your `WORKSPACE` -file: - -```python -# Android SDK -android_sdk_repository( - name = "androidsdk", - path = "/path/to/sdk", # or set ANDROID_HOME -) - -# Android Test Support -ATS_COMMIT = "$COMMIT_HASH" -http_archive( - name = "android_test_support", - strip_prefix = "android-test-%s" % ATS_COMMIT, - urls = ["https://github.com/android/android-test/archive/%s.tar.gz" % ATS_COMMIT], -) -load("@android_test_support//:repo.bzl", "android_test_repositories") -android_test_repositories() -``` - -## Maven dependencies - -For managing dependencies on Maven artifacts from repositories, such as [Google -Maven](https://maven.google.com) or [Maven Central](https://central.maven.org), -you should use a Maven resolver, such as -[`rules_jvm_external`](https://github.com/bazelbuild/rules_jvm_external). - -The rest of this page shows how to use `rules_jvm_external` to -resolve and fetch dependencies from Maven repositories. - -## Choosing an android_device target - -`android_instrumentation_test.target_device` specifies which Android device to -run the tests on. These `android_device` targets are defined in -[`@android_test_support`](https://github.com/google/android-testing-support-library/tree/master/tools/android/emulated_devices). - -For example, you can query for the sources for a particular target by running: - -```posix-terminal -bazel query --output=build @android_test_support//tools/android/emulated_devices/generic_phone:android_23_x86 -``` -Which results in output that looks similar to: - -```python -# .../external/android_test_support/tools/android/emulated_devices/generic_phone/BUILD:43:1 -android_device( - name = "android_23_x86", - visibility = ["//visibility:public"], - tags = ["requires-kvm"], - generator_name = "generic_phone", - generator_function = "make_device", - generator_location = "tools/android/emulated_devices/generic_phone/BUILD:43", - vertical_resolution = 800, - horizontal_resolution = 480, - ram = 2048, - screen_density = 240, - cache = 32, - vm_heap = 256, - system_image = "@android_test_support//tools/android/emulated_devices/generic_phone:android_23_x86_images", - default_properties = "@android_test_support//tools/android/emulated_devices/generic_phone:_android_23_x86_props", -) -``` - -The device target names use this template: - -``` -@android_test_support//tools/android/emulated_devices/{{ "" }}device_type{{ "" }}:{{ "" }}system{{ "" }}_{{ "" }}api_level{{ "" }}_x86_qemu2 -``` - -In order to launch an `android_device`, the `system_image` for the selected API -level is required. To download the system image, use Android SDK's -`tools/bin/sdkmanager`. For example, to download the system image for -`generic_phone:android_23_x86`, run `$sdk/tools/bin/sdkmanager -"system-images;android-23;default;x86"`. - -To see the full list of supported `android_device` targets in -`@android_test_support`, run the following command: - -```posix-terminal -bazel query 'filter("x86_qemu2$", kind(android_device, @android_test_support//tools/android/emulated_devices/...:*))' -``` - -Bazel currently supports x86-based emulators only. For better performance, use -`QEMU2` `android_device` targets instead of `QEMU` ones. - -## Running tests - -To run tests, add these lines to your project's -`{{ '' }}project root{{ '' }}:{{ '' }}/.bazelrc` file. 
-
-```
-# Configurations for testing with Bazel
-# Select a configuration by running
-# `bazel test //my:target --config={headless, gui, local_device}`
-
-# Headless instrumentation tests (No GUI)
-test:headless --test_arg=--enable_display=false
-
-# Graphical instrumentation tests. Ensure that $DISPLAY is set.
-test:gui --test_env=DISPLAY
-test:gui --test_arg=--enable_display=true
-
-# Testing with a local emulator or device. Ensure that `adb devices` lists the
-# device.
-# Run tests serially.
-test:local_device --test_strategy=exclusive
-# Use the local device broker type, as opposed to WRAPPED_EMULATOR.
-test:local_device --test_arg=--device_broker_type=LOCAL_ADB_SERVER
-# Uncomment and set $device_id if there is more than one connected device.
-# test:local_device --test_arg=--device_serial_number=$device_id
-```
-
-Then, use one of the configurations to run tests:
-
-- `bazel test //my/test:target --config=gui`
-- `bazel test //my/test:target --config=headless`
-- `bazel test //my/test:target --config=local_device`
-
-Use __only one configuration__ or tests will fail.
-
-### Headless testing
-
-With `Xvfb`, it is possible to test with emulators without the graphical
-interface, also known as headless testing. To disable the graphical interface
-when running tests, pass the test argument `--enable_display=false` to Bazel:
-
-```posix-terminal
-bazel test //my/test:target --test_arg=--enable_display=false
-```
-
-### GUI testing
-
-If the `$DISPLAY` environment variable is set, it's possible to enable the
-graphical interface of the emulator while the test is running. To do this, pass
-these test arguments to Bazel:
-
-```posix-terminal
-bazel test //my/test:target --test_arg=--enable_display=true --test_env=DISPLAY
-```
-
-### Testing with a local emulator or device
-
-Bazel also supports testing directly on a locally launched emulator or connected
-device. Pass the flags
-`--test_strategy=exclusive` and
-`--test_arg=--device_broker_type=LOCAL_ADB_SERVER` to enable local testing mode.
-If there is more than one connected device, pass the flag
-`--test_arg=--device_serial_number=$device_id` where `$device_id` is the id of
-the device/emulator listed in `adb devices` (a combined invocation is sketched
-below).
-
-## Sample projects
-
-If you are looking for canonical project samples, see the [Android testing
-samples](https://github.com/googlesamples/android-testing#experimental-bazel-support)
-for projects using Espresso and UIAutomator.
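-As a concrete illustration of the local-device flags described above, a full
-invocation might look like the following sketch. The serial number
-`emulator-5554` is a placeholder for whatever `adb devices` reports on your
-machine, and the target label is illustrative:
-
-```posix-terminal
-bazel test //my/test:target \
-    --test_strategy=exclusive \
-    --test_arg=--device_broker_type=LOCAL_ADB_SERVER \
-    --test_arg=--device_serial_number=emulator-5554
-```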
-
-## Espresso setup
-
-If you write UI tests with [Espresso](https://developer.android.com/training/testing/espresso/)
-(`androidx.test.espresso`), you can use the following snippets to set up your
-Bazel workspace with the list of commonly used Espresso artifacts and their
-dependencies:
-
-```
-androidx.test.espresso:espresso-core
-androidx.test:rules
-androidx.test:runner
-javax.inject:javax.inject
-org.hamcrest:java-hamcrest
-junit:junit
-```
-
-One way to organize these dependencies is to create a `//:test_deps` shared
-library in your `<project root>/BUILD.bazel` file:
-
-```python
-java_library(
-    name = "test_deps",
-    visibility = ["//visibility:public"],
-    exports = [
-        "@maven//:androidx_test_espresso_espresso_core",
-        "@maven//:androidx_test_rules",
-        "@maven//:androidx_test_runner",
-        "@maven//:javax_inject_javax_inject",
-        "@maven//:org_hamcrest_java_hamcrest",
-        "@maven//:junit_junit",
-    ],
-)
-```
-
-Then, add the required dependencies in `<project root>/WORKSPACE`:
-
-```python
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-
-RULES_JVM_EXTERNAL_TAG = "2.8"
-RULES_JVM_EXTERNAL_SHA = "79c9850690d7614ecdb72d68394f994fef7534b292c4867ce5e7dec0aa7bdfad"
-
-http_archive(
-    name = "rules_jvm_external",
-    strip_prefix = "rules_jvm_external-%s" % RULES_JVM_EXTERNAL_TAG,
-    sha256 = RULES_JVM_EXTERNAL_SHA,
-    url = "https://github.com/bazelbuild/rules_jvm_external/archive/%s.zip" % RULES_JVM_EXTERNAL_TAG,
-)
-
-load("@rules_jvm_external//:defs.bzl", "maven_install")
-
-maven_install(
-    artifacts = [
-        "junit:junit:4.12",
-        "javax.inject:javax.inject:1",
-        "org.hamcrest:java-hamcrest:2.0.0.0",
-        "androidx.test.espresso:espresso-core:3.1.1",
-        "androidx.test:rules:aar:1.1.1",
-        "androidx.test:runner:aar:1.1.1",
-    ],
-    repositories = [
-        "https://maven.google.com",
-        "https://repo1.maven.org/maven2",
-    ],
-)
-```
-
-Finally, in your test `android_binary` target, add the `//:test_deps`
-dependency:
-
-```python
-android_binary(
-    name = "my_test_app",
-    instruments = "//path/to:app",
-    deps = [
-        "//:test_deps",
-        # ...
-    ],
-    # ...
-)
-```
-
-## Tips
-
-### Reading test logs
-
-Use `--test_output=errors` to print logs for failing tests, or
-`--test_output=all` to print all test output. If you're looking for an
-individual test log, go to
-`$PROJECT_ROOT/bazel-testlogs/path/to/InstrumentationTestTargetName`.
-
-For example, the test logs for the `BasicSample` canonical project are in
-`bazel-testlogs/ui/espresso/BasicSample/BasicSampleInstrumentationTest`. To
-inspect them, run:
-
-```posix-terminal
-tree bazel-testlogs/ui/espresso/BasicSample/BasicSampleInstrumentationTest
-```
-
-This results in the following output:
-
-```none
-.
-├── adb.409923.log
-├── broker_logs
-│   ├── aapt_binary.10.ok.txt
-│   ├── aapt_binary.11.ok.txt
-│   ├── adb.12.ok.txt
-│   ├── adb.13.ok.txt
-│   ├── adb.14.ok.txt
-│   ├── adb.15.fail.txt
-│   ├── adb.16.ok.txt
-│   ├── adb.17.fail.txt
-│   ├── adb.18.ok.txt
-│   ├── adb.19.fail.txt
-│   ├── adb.20.ok.txt
-│   ├── adb.21.ok.txt
-│   ├── adb.22.ok.txt
-│   ├── adb.23.ok.txt
-│   ├── adb.24.fail.txt
-│   ├── adb.25.ok.txt
-│   ├── adb.26.fail.txt
-│   ├── adb.27.ok.txt
-│   ├── adb.28.fail.txt
-│   ├── adb.29.ok.txt
-│   ├── adb.2.ok.txt
-│   ├── adb.30.ok.txt
-│   ├── adb.3.ok.txt
-│   ├── adb.4.ok.txt
-│   ├── adb.5.ok.txt
-│   ├── adb.6.ok.txt
-│   ├── adb.7.ok.txt
-│   ├── adb.8.ok.txt
-│   ├── adb.9.ok.txt
-│   ├── android_23_x86.1.ok.txt
-│   └── exec-1
-│       ├── adb-2.txt
-│       ├── emulator-2.txt
-│       └── mksdcard-1.txt
-├── device_logcat
-│   └── logcat1635880625641751077.txt
-├── emulator_itCqtc.log
-├── outputs.zip
-├── pipe.log.txt
-├── telnet_pipe.log.txt
-└── tmpuRh4cy
-    ├── watchdog.err
-    └── watchdog.out
-
-4 directories, 41 files
-```
-
-### Reading emulator logs
-
-The emulator logs for `android_device` targets are stored in the `/tmp/`
-directory with the name `emulator_xxxxx.log`, where `xxxxx` is a
-randomly-generated sequence of characters.
-
-Use this command to find the latest emulator log:
-
-```posix-terminal
-ls -1t /tmp/emulator_*.log | head -n 1
-```
-
-### Testing against multiple API levels
-
-If you would like to test against multiple API levels, you can use a list
-comprehension to create test targets for each API level. For example:
-
-```python
-API_LEVELS = [
-    "19",
-    "20",
-    "21",
-    "22",
-]
-
-[android_instrumentation_test(
-    name = "my_test_%s" % API_LEVEL,
-    test_app = ":my_test_app",
-    target_device = "@android_test_support//tools/android/emulated_devices/generic_phone:android_%s_x86_qemu2" % API_LEVEL,
-) for API_LEVEL in API_LEVELS]
-```
-
-## Known issues
-
-- [Forked adb server processes are not terminated after
-  tests](https://github.com/bazelbuild/bazel/issues/4853)
-- While APK building works on all platforms (Linux, macOS, Windows), testing
-  only works on Linux.
-- Even with `--config=local_device`, users still need to specify
-  `android_instrumentation_test.target_device`.
-- If using a local device or emulator, Bazel does not uninstall the APKs after
-  the test. Clean the packages by running this command:
-
-```posix-terminal
-adb shell pm list packages com.example.android.testing | cut -d ':' -f 2 | tr -d '\r' | xargs -L1 -t adb uninstall
-```
diff --git a/8.2.1/docs/android-ndk.mdx b/8.2.1/docs/android-ndk.mdx
deleted file mode 100644
index b10a566..0000000
--- a/8.2.1/docs/android-ndk.mdx
+++ /dev/null
@@ -1,292 +0,0 @@
----
-title: 'Using the Android Native Development Kit with Bazel'
----
-
-
-_If you're new to Bazel, please start with the [Building Android with
-Bazel](/start/android-app) tutorial._
-
-## Overview
-
-Bazel can run in many different build configurations, including several that use
-the Android Native Development Kit (NDK) toolchain. This means that normal
-`cc_library` and `cc_binary` rules can be compiled for Android directly within
-Bazel. Bazel accomplishes this by using the `android_ndk_repository` repository
-rule.
-
-## Prerequisites
-
-Please ensure that you have installed the Android SDK and NDK.
-
-To set up the SDK and NDK, add the following snippet to your `WORKSPACE`:
-
-```python
-android_sdk_repository(
-    name = "androidsdk",  # Required. Name *must* be "androidsdk".
-    path = "/path/to/sdk",  # Optional. Can be omitted if `ANDROID_HOME` environment variable is set.
-)
-
-android_ndk_repository(
-    name = "androidndk",  # Required. Name *must* be "androidndk".
-    path = "/path/to/ndk",  # Optional. Can be omitted if `ANDROID_NDK_HOME` environment variable is set.
-)
-```
-
-For more information about the `android_ndk_repository` rule, see the [Build
-Encyclopedia entry](/reference/be/android#android_ndk_repository).
-
-If you're using a recent version of the Android NDK (r22 and beyond), use the
-Starlark implementation of `android_ndk_repository`.
-Follow the instructions in
-[its README](https://github.com/bazelbuild/rules_android_ndk).
-
-## Quick start
-
-To build C++ for Android, simply add `cc_library` dependencies to your
-`android_binary` or `android_library` rules.
-
-For example, given the following `BUILD` file for an Android app:
-
-```python
-# In /app/src/main/BUILD.bazel
-
-cc_library(
-    name = "jni_lib",
-    srcs = ["cpp/native-lib.cpp"],
-)
-
-android_library(
-    name = "lib",
-    srcs = ["java/com/example/android/bazel/MainActivity.java"],
-    resource_files = glob(["res/**/*"]),
-    custom_package = "com.example.android.bazel",
-    manifest = "LibraryManifest.xml",
-    deps = [":jni_lib"],
-)
-
-android_binary(
-    name = "app",
-    deps = [":lib"],
-    manifest = "AndroidManifest.xml",
-)
-```
-
-This `BUILD` file results in the following target graph:
-
-![Example results](/docs/images/android_ndk.png "Build graph results")
-
-**Figure 1.** Build graph of Android project with cc_library dependencies.
-
-To build the app, simply run:
-
-```posix-terminal
-bazel build //app/src/main:app
-```
-
-The `bazel build` command compiles the Java files, Android resource files, and
-`cc_library` rules, and packages everything into an APK:
-
-```posix-terminal
-$ zipinfo -1 bazel-bin/app/src/main/app.apk
-nativedeps
-lib/armeabi-v7a/libapp.so
-classes.dex
-AndroidManifest.xml
-...
-res/...
-...
-META-INF/CERT.SF
-META-INF/CERT.RSA
-META-INF/MANIFEST.MF
-```
-
-Bazel compiles all of the cc_libraries into a single shared object (`.so`) file,
-targeted for the `armeabi-v7a` ABI by default. To change this or build for
-multiple ABIs at the same time, see the section on [configuring the target
-ABI](#configuring-target-abi).
-
-## Example setup
-
-This example is available in the [Bazel examples
-repository](https://github.com/bazelbuild/examples/tree/master/android/ndk).
-
-In the `BUILD.bazel` file, three targets are defined with the `android_binary`,
-`android_library`, and `cc_library` rules.
-
-The `android_binary` top-level target builds the APK.
-
-The `cc_library` target contains a single C++ source file with a JNI function
-implementation:
-
-```c++
-#include <jni.h>
-#include <string>
-
-extern "C"
-JNIEXPORT jstring
-
-JNICALL
-Java_com_example_android_bazel_MainActivity_stringFromJNI(
-        JNIEnv *env,
-        jobject /* this */) {
-    std::string hello = "Hello from C++";
-    return env->NewStringUTF(hello.c_str());
-}
-```
-
-The `android_library` target specifies the Java sources, resource files, and the
-dependency on a `cc_library` target. For this example, `MainActivity.java` loads
-the shared object file `libapp.so`, and defines the method signature for the JNI
-function:
-
-```java
-public class MainActivity extends AppCompatActivity {
-
-    static {
-        System.loadLibrary("app");
-    }
-
-    @Override
-    protected void onCreate(Bundle savedInstanceState) {
-        // ...
-    }
-
-    public native String stringFromJNI();
-
-}
-```
-
-Note: The name of the native library is derived from the name of the top
-level `android_binary` target. In this example, it is `app`.
-
-## Configuring the target ABI
-
-To configure the target ABI, use the `--android_platforms` flag as follows:
-
-```posix-terminal
-bazel build //:app --android_platforms=<comma-separated list of platforms>
-```
-
-Just like the `--platforms` flag, the values passed to `--android_platforms` are
-the labels of [`platform`](https://bazel.build/reference/be/platforms-and-toolchains#platform)
-targets, using standard constraint values to describe your device.
-
-For example, for an Android device with a 64-bit ARM processor, you'd define
-your platform like this:
-
-```py
-platform(
-    name = "android_arm64",
-    constraint_values = [
-        "@platforms//os:android",
-        "@platforms//cpu:arm64",
-    ],
-)
-```
-
-Every Android `platform` should use the [`@platforms//os:android`](https://github.com/bazelbuild/platforms/blob/33a3b209f94856193266871b1545054afb90bb28/os/BUILD#L36)
-OS constraint. To migrate the CPU constraint, check this chart:
-
-CPU Value     | Platform
-------------- | ------------------------------------------
-`armeabi-v7a` | `@platforms//cpu:armv7`
-`arm64-v8a`   | `@platforms//cpu:arm64`
-`x86`         | `@platforms//cpu:x86_32`
-`x86_64`      | `@platforms//cpu:x86_64`
-
-And, of course, for a multi-architecture APK, you pass multiple labels, for
-example: `--android_platforms=//:arm64,//:x86_64` (assuming you defined those in
-your top-level `BUILD.bazel` file).
-
-Bazel is unable to select a default Android platform, so one must be defined and
-specified with `--android_platforms`.
-
-Depending on the NDK revision and Android API level, the following ABIs are
-available:
-
-| NDK revision | ABIs                                                        |
-|--------------|-------------------------------------------------------------|
-| 16 and lower | armeabi, armeabi-v7a, arm64-v8a, mips, mips64, x86, x86\_64 |
-| 17 and above | armeabi-v7a, arm64-v8a, x86, x86\_64                        |
-
-See [the NDK docs](https://developer.android.com/ndk/guides/abis.html)
-for more information on these ABIs.
-
-Multi-ABI Fat APKs are not recommended for release builds since they increase
-the size of the APK, but can be useful for development and QA builds.
-
-## Selecting a C++ standard
-
-Use the following flags to build according to a C++ standard:
-
-| C++ Standard | Flag                    |
-|--------------|-------------------------|
-| C++98        | Default, no flag needed |
-| C++11        | `--cxxopt=-std=c++11`   |
-| C++14        | `--cxxopt=-std=c++14`   |
-| C++17        | `--cxxopt=-std=c++17`   |
-
-For example:
-
-```posix-terminal
-bazel build //:app --cxxopt=-std=c++11
-```
-
-Read more about passing compiler and linker flags with `--cxxopt`, `--copt`, and
-`--linkopt` in the [User Manual](/docs/user-manual#cxxopt).
-
-Compiler and linker flags can also be specified as attributes in `cc_library`
-using `copts` and `linkopts`. For example:
-
-```python
-cc_library(
-    name = "jni_lib",
-    srcs = ["cpp/native-lib.cpp"],
-    copts = ["-std=c++11"],
-    linkopts = ["-ldl"],  # link against libdl
-)
-```
-
-## Building a `cc_library` for Android without using `android_binary`
-
-To build a standalone `cc_binary` or `cc_library` for Android without using an
-`android_binary`, use the `--platforms` flag.
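-Such a `my/platforms/BUILD` file could define one `platform` target per ABI,
-using the constraint values from the chart above. This is a minimal sketch;
-the file path and target names are illustrative:
-
-```python
-# my/platforms/BUILD (hypothetical)
-
-platform(
-    name = "x86_64",
-    constraint_values = [
-        "@platforms//os:android",
-        "@platforms//cpu:x86_64",
-    ],
-)
-
-platform(
-    name = "arm64",
-    constraint_values = [
-        "@platforms//os:android",
-        "@platforms//cpu:arm64",
-    ],
-)
-```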
-
-For example, assuming you have defined Android platforms in
-`my/platforms/BUILD` (such as those sketched above):
-
-```posix-terminal
-bazel build //my/cc/jni:target \
-    --platforms=//my/platforms:x86_64
-```
-
-With this approach, the entire build tree is affected.
-
-Note: All of the targets on the command line must be compatible with
-building for Android when specifying these flags, which may make it difficult to
-use [Bazel wild-cards](/run/build#specifying-build-targets) like
-`/...` and `:all`.
-
-These flags can be put into a `bazelrc` config (one for each ABI), in
-`<project>/.bazelrc`:
-
-```
-common:android_x86 --platforms=//my/platforms:x86
-
-common:android_armeabi-v7a --platforms=//my/platforms:armeabi-v7a
-
-# In general
-common:android_<abi> --platforms=//my/platforms:<abi>
-```
-
-Then, to build a `cc_library` for `x86` for example, run:
-
-```posix-terminal
-bazel build //my/cc/jni:target --config=android_x86
-```
-
-In general, use this method for low-level targets (like `cc_library`) or when
-you know exactly what you're building; rely on the automatic configuration
-transitions from `android_binary` for high-level targets where you're expecting
-to build a lot of targets you don't control.
diff --git a/8.2.1/docs/bazel-and-android.mdx b/8.2.1/docs/bazel-and-android.mdx
deleted file mode 100644
index bf3625c..0000000
--- a/8.2.1/docs/bazel-and-android.mdx
+++ /dev/null
@@ -1,45 +0,0 @@
----
-title: 'Android and Bazel'
----
-
-
-This page contains resources that help you use Bazel with Android projects. It
-links to a tutorial, build rules, and other information specific to building
-Android projects with Bazel.
-
-## Getting started
-
-The following resources will help you work with Bazel on Android projects:
-
-* [Tutorial: Building an Android app](/start/android-app). This
-  tutorial is a good place to start learning about Bazel commands and concepts,
-  and how to build Android apps with Bazel.
-* [Codelab: Building Android Apps with Bazel](https://developer.android.com/codelabs/bazel-android-intro#0).
-  This codelab explains how to build Android apps with Bazel.
-
-## Features
-
-Bazel has Android rules for building and testing Android apps, integrating with
-the SDK/NDK, and creating emulator images. There are also Bazel plugins for
-Android Studio and IntelliJ.
-
-* [Android rules](/reference/be/android). The Build Encyclopedia describes the rules
-  for building and testing Android apps with Bazel.
-* [Integration with Android Studio](/install/ide). Bazel is compatible with
-  Android Studio using the [Android Studio with Bazel](https://ij.bazel.build/)
-  plugin.
-* [`mobile-install` for Android](/docs/mobile-install). Bazel's `mobile-install`
-  feature provides automated build-and-deploy functionality for building and
-  testing Android apps directly on Android devices and emulators.
-* [Android instrumentation testing](/docs/android-instrumentation-test) on
-  emulators and devices.
-* [Android NDK integration](/docs/android-ndk). Bazel supports compiling to
-  native code through direct NDK integration and the C++ rules.
-* [Android build performance](/docs/android-build-performance). This page
-  provides information on optimizing build performance for Android apps.
-
-## Further reading
-
-* Integrating with dependencies from Google Maven and Maven Central with [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external).
-* Learn [How Android Builds Work in Bazel](https://blog.bazel.build/2018/02/14/how-android-builds-work-in-bazel.html).
diff --git a/8.2.1/docs/bazel-and-apple.mdx b/8.2.1/docs/bazel-and-apple.mdx
deleted file mode 100644
index 6e4a06f..0000000
--- a/8.2.1/docs/bazel-and-apple.mdx
+++ /dev/null
@@ -1,86 +0,0 @@
----
-title: 'Apple Apps and Bazel'
----
-
-
-This page contains resources that help you use Bazel to build macOS and iOS
-projects. It links to a tutorial, build rules, and other information specific to
-using Bazel to build and test for those platforms.
-
-## Working with Bazel
-
-The following resources will help you work with Bazel on macOS and iOS projects:
-
-* [Tutorial: Building an iOS app](/start/ios-app)
-* [Objective-C build rules](/reference/be/objective-c)
-* [General Apple rules](https://github.com/bazelbuild/rules_apple)
-* [Integration with Xcode](/install/ide)
-
-## Migrating to Bazel
-
-If you currently build your macOS and iOS projects with Xcode, follow the steps
-in the migration guide to start building them with Bazel:
-
-* [Migrating from Xcode to Bazel](/migrate/xcode)
-
-## Apple apps and new rules
-
-**Note**: Creating new rules is for advanced build and test scenarios.
-You do not need it when getting started with Bazel.
-
-The following modules, configuration fragments, and providers will help you
-[extend Bazel's capabilities](/extending/concepts)
-when building your macOS and iOS projects:
-
-* Modules:
-
-  * [`apple_bitcode_mode`](/rules/lib/builtins/apple_bitcode_mode)
-  * [`apple_common`](/rules/lib/toplevel/apple_common)
-  * [`apple_platform`](/rules/lib/builtins/apple_platform)
-  * [`apple_platform_type`](/rules/lib/builtins/apple_platform_type)
-  * [`apple_toolchain`](/rules/lib/builtins/apple_toolchain)
-
-* Configuration fragments:
-
-  * [`apple`](/rules/lib/fragments/apple)
-
-* Providers:
-
-  * [`ObjcProvider`](/rules/lib/providers/ObjcProvider)
-  * [`XcodeVersionConfig`](/rules/lib/providers/XcodeVersionConfig)
-
-## Xcode selection
-
-If your build requires Xcode, Bazel selects an appropriate version based on the
-`--xcode_config` and `--xcode_version` flags. The `--xcode_config` flag consumes
-the set of available Xcode versions and sets a default version if
-`--xcode_version` is not passed. This default is overridden by the
-`--xcode_version` flag, as long as it is set to an Xcode version that is
-represented in the `--xcode_config` target.
-
-If you do not pass `--xcode_config`, Bazel uses the autogenerated
-[`XcodeVersionConfig`](/rules/lib/providers/XcodeVersionConfig) that represents the
-Xcode versions available on your host machine. The default version is
-the newest available Xcode version. This is appropriate for local execution.
-
-If you are performing remote builds, you should set `--xcode_config` to an
-[`xcode_config`](/reference/be/objective-c#xcode_config)
-target whose `versions` attribute is a list of remotely available
-[`xcode_version`](/reference/be/objective-c#xcode_version)
-targets, and whose `default` attribute is one of these
-[`xcode_version`](/reference/be/objective-c#xcode_version) targets.
-
-If you are using dynamic execution, you should set `--xcode_config` to an
-[`xcode_config`](/reference/be/objective-c#xcode_config)
-target whose `remote_versions` attribute is an
-[`available_xcodes`](/reference/be/workspace#available_xcodes)
-target containing the remotely available Xcode versions, and whose
-`local_versions` attribute is an
-[`available_xcodes`](/reference/be/workspace#available_xcodes)
-target containing the locally available Xcode versions.
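-A minimal sketch of such a dynamic-execution setup follows. The target names
-and version numbers here are illustrative assumptions, not taken from the
-original page:
-
-```python
-# BUILD (hypothetical)
-
-xcode_version(
-    name = "xcode_14_2",
-    version = "14.2",
-)
-
-xcode_version(
-    name = "xcode_14_3",
-    version = "14.3",
-)
-
-# Versions available on the remote executors.
-available_xcodes(
-    name = "remote_xcodes",
-    versions = [":xcode_14_2", ":xcode_14_3"],
-    default = ":xcode_14_3",
-)
-
-# Versions available on the local machine.
-available_xcodes(
-    name = "local_xcodes",
-    versions = [":xcode_14_2"],
-    default = ":xcode_14_2",
-)
-
-xcode_config(
-    name = "my_xcodes",
-    remote_versions = ":remote_xcodes",
-    local_versions = ":local_xcodes",
-)
-```
-
-This target would then be selected with `--xcode_config=//:my_xcodes`.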
-For `local_versions`,
-you probably want to use the autogenerated
-`@local_config_xcode//:host_available_xcodes`. The default Xcode version is the
-newest mutually available version, if there is one, otherwise the default of the
-`local_versions` target. If you prefer to use the `local_versions` default
-as the default, you can pass `--experimental_prefer_mutual_default=false`.
diff --git a/8.2.1/docs/bazel-and-cpp.mdx b/8.2.1/docs/bazel-and-cpp.mdx
deleted file mode 100644
index 9ade384..0000000
--- a/8.2.1/docs/bazel-and-cpp.mdx
+++ /dev/null
@@ -1,102 +0,0 @@
----
-title: 'C++ and Bazel'
----
-
-
-This page contains resources that help you use Bazel with C++ projects. It links
-to a tutorial, build rules, and other information specific to building C++
-projects with Bazel.
-
-## Working with Bazel
-
-The following resources will help you work with Bazel on C++ projects:
-
-* [Tutorial: Building a C++ project](/start/cpp)
-* [C++ common use cases](/tutorials/cpp-use-cases)
-* [C/C++ rules](/reference/be/c-cpp)
-* Essential Libraries
-  - [Abseil](https://abseil.io/docs/cpp/quickstart)
-  - [Boost](https://github.com/nelhage/rules_boost)
-  - [HTTPS Requests: CPR and libcurl](https://github.com/hedronvision/bazel-make-cc-https-easy)
-* [C++ toolchain configuration](/docs/cc-toolchain-config-reference)
-* [Tutorial: Configuring C++ toolchains](/tutorials/ccp-toolchain-config)
-* [Integrating with C++ rules](/configure/integrate-cpp)
-
-## Best practices
-
-In addition to [general Bazel best practices](/configure/best-practices), below are
-best practices specific to C++ projects.
-
-### BUILD files
-
-Follow the guidelines below when creating your BUILD files:
-
-* Each `BUILD` file should contain one [`cc_library`](/reference/be/c-cpp#cc_library)
-  rule target per compilation unit in the directory.
-
-* You should granularize your C++ libraries as much as
-  possible to maximize incrementality and parallelize the build.
-
-* If there is a single source file in `srcs`, name the library the same as
-  that C++ file's name. This library should contain C++ file(s), any matching
-  header file(s), and the library's direct dependencies. For example:
-
-  ```python
-  cc_library(
-      name = "mylib",
-      srcs = ["mylib.cc"],
-      hdrs = ["mylib.h"],
-      deps = [":lower-level-lib"]
-  )
-  ```
-
-* Use one `cc_test` rule target per `cc_library` target in the file. Name the
-  target `[library-name]_test` and the source file `[library-name]_test.cc`.
-  For example, a test target for the `mylib` library target shown above would
-  look like this:
-
-  ```python
-  cc_test(
-      name = "mylib_test",
-      srcs = ["mylib_test.cc"],
-      deps = [":mylib"]
-  )
-  ```
-
-### Include paths
-
-Follow these guidelines for include paths:
-
-* Make all include paths relative to the workspace directory.
-
-* Use quoted includes (`#include "foo/bar/baz.h"`) for non-system headers, not
-  angle-brackets (`#include <foo/bar/baz.h>`).
-
-* Avoid using UNIX directory shortcuts, such as `.` (current directory) or `..`
-  (parent directory).
-
-* For legacy or `third_party` code that requires includes pointing outside the
-  project repository, such as external repository includes requiring a prefix,
-  use the [`include_prefix`](/reference/be/c-cpp#cc_library.include_prefix) and
-  [`strip_include_prefix`](/reference/be/c-cpp#cc_library.strip_include_prefix)
-  arguments on the `cc_library` rule target.
-
-### Toolchain features
-
-The following optional [features](/docs/cc-toolchain-config-reference#features)
-can improve the hygiene of a C++ project.
-They can be enabled using the
-`--features` command-line flag or the `features` attribute of
-[`repo`](/external/overview#repo.bazel),
-[`package`](/reference/be/functions#package) or `cc_*` rules:
-
-* The `parse_headers` feature makes it so that the C++ compiler is used to parse
-  (but not compile) all header files in the built targets and their dependencies
-  when using the
-  [`--process_headers_in_dependencies`](/reference/command-line-reference#flag--process_headers_in_dependencies)
-  flag. This can help catch issues in header-only libraries and ensure that
-  headers are self-contained and independent of the order in which they are
-  included.
-* The `layering_check` feature enforces that targets only include headers
-  provided by their direct dependencies. The default toolchain supports this
-  feature on Linux with `clang` as the compiler.
diff --git a/8.2.1/docs/bazel-and-java.mdx b/8.2.1/docs/bazel-and-java.mdx
deleted file mode 100644
index e9476aa..0000000
--- a/8.2.1/docs/bazel-and-java.mdx
+++ /dev/null
@@ -1,343 +0,0 @@
----
-title: 'Java and Bazel'
----
-
-
-This page contains resources that help you use Bazel with Java projects. It
-links to a tutorial, build rules, and other information specific to building
-Java projects with Bazel.
-
-## Working with Bazel
-
-The following resources will help you work with Bazel on Java projects:
-
-* [Tutorial: Building a Java Project](/start/java)
-* [Java rules](/reference/be/java)
-
-## Migrating to Bazel
-
-If you currently build your Java projects with Maven, follow the steps in the
-migration guide to start building your Maven projects with Bazel:
-
-* [Migrating from Maven to Bazel](/migrate/maven)
-
-## Java versions
-
-There are two relevant versions of Java that are set with configuration flags:
-
-* the version of the source files in the repository
-* the version of the Java runtime that is used to execute the code and to test
-  it
-
-### Configuring the version of the source code in your repository
-
-Without additional configuration, Bazel assumes all Java source files in the
-repository are written in a single Java version. To specify the version of the
-sources in the repository, add `build --java_language_version={ver}` to your
-`.bazelrc` file, where `{ver}` is for example `11`. Bazel repository owners
-should set this flag so that Bazel and its users can reference the source code's
-Java version number. For more details, see
-[Java language version flag](/docs/user-manual#java-language-version).
-
-### Configuring the JVM used to execute and test the code
-
-Bazel uses one JDK for compilation and another JVM to execute and test the code.
-
-By default, Bazel compiles the code using a JDK it downloads, and it executes and
-tests the code with the JVM installed on the local machine. Bazel searches for
-the JVM using `JAVA_HOME` or the system path.
-
-The resulting binaries use the locally installed JVM's system libraries, which
-means the resulting binaries depend on what is installed on the machine.
-
-To configure the JVM used for execution and testing, use the
-`--java_runtime_version` flag. The default value is `local_jdk`.
-
-### Hermetic testing and compilation
-
-To create a hermetic compile, use the command-line flag
-`--java_runtime_version=remotejdk_11`. The code is compiled for, executed, and
-tested on the JVM downloaded from a remote repository. For more details, see
-[Java runtime version flag](/docs/user-manual#java_runtime_version).
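-For instance, an invocation along these lines (the target label is
-illustrative) compiles, executes, and tests entirely against the downloaded
-JDK:
-
-```posix-terminal
-bazel test //java/com/example:my_test --java_runtime_version=remotejdk_11
-```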
-
-### Configuring compilation and execution of build tools in Java
-
-There is a second pair of JDK and JVM used to build and execute tools, which are
-used in the build process, but are not in the build results. That JDK and JVM
-are controlled using `--tool_java_language_version` and
-`--tool_java_runtime_version`. Default values are `11` and `remotejdk_11`,
-respectively.
-
-#### Compiling using locally installed JDK
-
-By default, Bazel compiles using the remote JDK, because it overrides the JDK's
-internals. Compilation toolchains using the locally installed JDK are
-configured, but not used.
-
-To compile using the locally installed JDK, that is, to use the compilation
-toolchains for the local JDK, pass the additional flag
-`--extra_toolchains=@local_jdk//:all`. Note, however, that this may not work
-with JDKs from arbitrary vendors.
-
-For more details, see
-[configuring Java toolchains](#config-java-toolchains).
-
-## Best practices
-
-In addition to [general Bazel best practices](/configure/best-practices), below are
-best practices specific to Java projects.
-
-### Directory structure
-
-Prefer Maven's standard directory layout (sources under `src/main/java`, tests
-under `src/test/java`).
-
-### BUILD files
-
-Follow these guidelines when creating your `BUILD` files:
-
-* Use one `BUILD` file per directory containing Java sources, because this
-  improves build performance.
-
-* Every `BUILD` file should contain one `java_library` rule that looks like
-  this:
-
-  ```python
-  java_library(
-      name = "directory-name",
-      srcs = glob(["*.java"]),
-      deps = [...],
-  )
-  ```
-
-* The name of the library should be the name of the directory containing the
-  `BUILD` file. This makes the label of the library shorter, that is, use
-  `"//package"` instead of `"//package:package"`.
-
-* The sources should be a non-recursive [`glob`](/reference/be/functions#glob) of
-  all Java files in the directory.
-
-* Tests should be in a matching directory under `src/test` and depend on this
-  library.
-
-## Creating new rules for advanced Java builds
-
-**Note**: Creating new rules is for advanced build and test scenarios. You do
-not need it when getting started with Bazel.
-
-The following modules, configuration fragments, and providers will help you
-[extend Bazel's capabilities](/extending/concepts) when building your Java
-projects:
-
-* Main Java module: [`java_common`](/rules/lib/toplevel/java_common)
-* Main Java provider: [`JavaInfo`](/rules/lib/providers/JavaInfo)
-* Configuration fragment: [`java`](/rules/lib/fragments/java)
-* Other modules:
-
-  * [`java_annotation_processing`](/rules/lib/builtins/java_annotation_processing)
-  * [`java_compilation_info`](/rules/lib/providers/java_compilation_info)
-  * [`java_output_jars`](/rules/lib/providers/java_output_jars)
-  * [`JavaRuntimeInfo`](/rules/lib/providers/JavaRuntimeInfo)
-  * [`JavaToolchainInfo`](/rules/lib/providers/JavaToolchainInfo)
-
-## Configuring the Java toolchains
-
-Bazel uses two types of Java toolchains:
-- execution, used to execute and test Java binaries, controlled with the
-  `--java_runtime_version` flag
-- compilation, used to compile Java sources, controlled with the
-  `--java_language_version` flag
-
-### Configuring additional execution toolchains
-
-An execution toolchain is the JVM, either local or from a repository, with some
-additional information about its version, operating system, and CPU
-architecture.
-
-Java execution toolchains may be added using the `local_java_repository` or
-`remote_java_repository` repo rules in a module extension.
-Adding the rule makes
-the JVM available using a flag. When multiple definitions for the same operating
-system and CPU architecture are given, the first one is used.
-
-Example configuration of a local JVM:
-
-```python
-load("@rules_java//toolchains:local_java_repository.bzl", "local_java_repository")
-
-local_java_repository(
-    name = "additionaljdk",  # Can be used with --java_runtime_version=additionaljdk, --java_runtime_version=11 or --java_runtime_version=additionaljdk_11
-    version = 11,  # Optional, if not set it is autodetected
-    java_home = "/usr/lib/jdk-15/",  # Path to directory containing bin/java
-)
-```
-
-Example configuration of a remote JVM:
-
-```python
-load("@rules_java//toolchains:remote_java_repository.bzl", "remote_java_repository")
-
-remote_java_repository(
-    name = "openjdk_canary_linux_arm",
-    prefix = "openjdk_canary",  # Can be used with --java_runtime_version=openjdk_canary_11
-    version = "11",  # or --java_runtime_version=11
-    target_compatible_with = [  # Specifies constraints this JVM is compatible with
-        "@platforms//cpu:arm",
-        "@platforms//os:linux",
-    ],
-    urls = ...,  # Other parameters are from the http_archive rule.
-    sha256 = ...,
-    strip_prefix = ...,
-)
-```
-
-### Configuring additional compilation toolchains
-
-A compilation toolchain is composed of a JDK and multiple tools that Bazel uses
-during compilation and that provide additional features, such as Error
-Prone, strict Java dependencies, header compilation, Android desugaring,
-coverage instrumentation, and genclass handling for IDEs.
-
-JavaBuilder is a Bazel-bundled tool that executes compilation and provides the
-aforementioned features. Actual compilation is executed using the JDK's internal
-compiler. The JDK used for compilation is specified by the `java_runtime`
-attribute of the toolchain.
-
-Bazel overrides some JDK internals. For JDK versions 9 and above, the
-`java.compiler` and `jdk.compiler` modules are patched using the JDK's
-`--patch_module` flag. For JDK version 8, the Java compiler is patched using
-the `-Xbootclasspath` flag.
-
-VanillaJavaBuilder is a second implementation of JavaBuilder,
-which does not modify the JDK's internal compiler and does not have any of the
-additional features. VanillaJavaBuilder is not used by any of the built-in
-toolchains.
-
-In addition to JavaBuilder, Bazel uses several other tools during compilation.
-
-The `ijar` tool processes `jar` files to remove everything except call
-signatures. The resulting jars are called header jars. They improve
-compilation incrementality by only recompiling downstream dependents when the
-body of a function changes.
-
-The `singlejar` tool packs together multiple `jar` files into a single one.
-
-The `genclass` tool post-processes the output of a Java compilation, and produces
-a `jar` containing only the class files for sources that were generated by
-annotation processors.
-
-The `JacocoRunner` tool runs Jacoco over instrumented files and outputs results in
-LCOV format.
-
-The `TestRunner` tool executes JUnit 4 tests in a controlled environment.
-
-You can reconfigure the compilation by adding the `default_java_toolchain` macro
-to a `BUILD` file and registering it either by adding a `register_toolchains`
-call to the `MODULE.bazel` file or by using the
-[`--extra_toolchains`](/docs/user-manual#extra-toolchains) flag.
-
-The toolchain is only used when the `source_version` attribute matches the
-value specified by the `--java_language_version` flag.
-
-Example toolchain configuration:
-
-```python
-load(
-    "@rules_java//toolchains:default_java_toolchain.bzl",
-    "default_java_toolchain", "DEFAULT_TOOLCHAIN_CONFIGURATION", "BASE_JDK9_JVM_OPTS", "DEFAULT_JAVACOPTS"
-)
-
-default_java_toolchain(
-    name = "repository_default_toolchain",
-    configuration = DEFAULT_TOOLCHAIN_CONFIGURATION,  # One of predefined configurations
-    # Other parameters are from java_toolchain rule:
-    java_runtime = "@rules_java//toolchains:remote_jdk11",  # JDK to use for compilation and toolchain's tools execution
-    jvm_opts = BASE_JDK9_JVM_OPTS + ["--enable_preview"],  # Additional JDK options
-    javacopts = DEFAULT_JAVACOPTS + ["--enable_preview"],  # Additional javac options
-    source_version = "9",
-)
-```
-
-It can be used via `--extra_toolchains=//:repository_default_toolchain_definition`
-or by adding `register_toolchains("//:repository_default_toolchain_definition")`
-to the workspace.
-
-Predefined configurations:
-
-- `DEFAULT_TOOLCHAIN_CONFIGURATION`: all features, supports JDK versions >= 9
-- `VANILLA_TOOLCHAIN_CONFIGURATION`: no additional features, supports JDKs of
-  arbitrary vendors.
-- `PREBUILT_TOOLCHAIN_CONFIGURATION`: same as default, but only use prebuilt
-  tools (`ijar`, `singlejar`)
-- `NONPREBUILT_TOOLCHAIN_CONFIGURATION`: same as default, but all tools are
-  built from sources (this may be useful on operating systems with a different
-  libc)
-
-#### Configuring JVM and Java compiler flags
-
-You may configure JVM and javac flags either with command-line flags or with
-`default_java_toolchain` attributes.
-
-The relevant flags are `--jvmopt`, `--host_jvmopt`, `--javacopt`, and
-`--host_javacopt`.
-
-The relevant `default_java_toolchain` attributes are `javacopts`, `jvm_opts`,
-`javabuilder_jvm_opts`, and `turbine_jvm_opts`.
-
-#### Package specific Java compiler flags configuration
-
-You can configure different Java compiler flags for specific source
-files using the `package_configuration` attribute of `default_java_toolchain`.
-Please refer to the example below.
-
-```python
-load("@rules_java//toolchains:default_java_toolchain.bzl", "default_java_toolchain")
-
-# This is a convenience macro that inherits values from Bazel's default java_toolchain
-default_java_toolchain(
-    name = "toolchain",
-    package_configuration = [
-        ":error_prone",
-    ],
-    visibility = ["//visibility:public"],
-)
-
-# This associates a set of javac flags with a set of packages
-java_package_configuration(
-    name = "error_prone",
-    javacopts = [
-        "-Xep:MissingOverride:ERROR",
-    ],
-    packages = ["error_prone_packages"],
-)
-
-# This is a regular package_group, which is used to specify a set of packages to apply flags to
-package_group(
-    name = "error_prone_packages",
-    packages = [
-        "//foo/...",
-        "-//foo/bar/...",  # this is an exclusion
-    ],
-)
-```
-
-#### Multiple versions of Java source code in a single repository
-
-Bazel only supports compiling a single version of Java sources in a build.
-This means that when building a Java test or an application, all
-dependencies are built against the same Java version.
-
-However, separate builds may be executed using different flags.
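-For instance, two separate invocations along these lines (the target label is
-illustrative) build against Java 8 and Java 11 respectively:
-
-```posix-terminal
-bazel build //:app --java_language_version=8 --java_runtime_version=local_jdk_8
-bazel build //:app --java_language_version=11 --java_runtime_version=remotejdk_11
-```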
-
-To make the task of using different flags easier, sets of flags for a specific
-version may be grouped with `.bazelrc` configs:
-
-```python
-build:java8 --java_language_version=8
-build:java8 --java_runtime_version=local_jdk_8
-build:java11 --java_language_version=11
-build:java11 --java_runtime_version=remotejdk_11
-```
-
-These configs can be used with the `--config` flag, for example
-`bazel test --config=java11 //:java11_test`.
diff --git a/8.2.1/docs/bazel-and-javascript.mdx b/8.2.1/docs/bazel-and-javascript.mdx
deleted file mode 100644
index 63d8018..0000000
--- a/8.2.1/docs/bazel-and-javascript.mdx
+++ /dev/null
@@ -1,24 +0,0 @@
----
-title: 'JavaScript and Bazel'
----
-
-
-This page contains resources that help you use Bazel with JavaScript projects.
-It links to build rules and other information specific to building JavaScript
-with Bazel.
-
-The following resources will help you work with Bazel on JavaScript projects:
-
-* [NodeJS toolchain](https://github.com/bazelbuild/rules_nodejs)
-* [rules_js](https://github.com/aspect-build/rules_js) - Bazel rules for building JavaScript programs
-* [rules_esbuild](https://github.com/aspect-build/rules_esbuild) - Bazel rules for the [esbuild](https://esbuild.github.io) JS bundler
-* [rules_terser](https://github.com/aspect-build/rules_terser) - Bazel rules for [Terser](https://terser.org) - a JavaScript minifier
-* [rules_swc](https://github.com/aspect-build/rules_swc) - Bazel rules for [swc](https://swc.rs)
-* [rules_ts](https://github.com/aspect-build/rules_ts) - Bazel rules for [TypeScript](http://typescriptlang.org)
-* [rules_webpack](https://github.com/aspect-build/rules_webpack) - Bazel rules for [Webpack](https://webpack.js.org)
-* [rules_rollup](https://github.com/aspect-build/rules_rollup) - Bazel rules for [Rollup](https://rollupjs.org) - a JavaScript bundler
-* [rules_jest](https://github.com/aspect-build/rules_jest) - Bazel rules to run tests using [Jest](https://jestjs.io)
-* [rules_jasmine](https://github.com/aspect-build/rules_jasmine) - Bazel rules to run tests using [Jasmine](https://jasmine.github.io/)
-* [rules_cypress](https://github.com/aspect-build/rules_cypress) - Bazel rules to run tests using [Cypress](https://cypress.io)
-* [rules_deno](https://github.com/aspect-build/rules_deno) - Bazel rules for [Deno](http://deno.land)
diff --git a/8.2.1/docs/configurable-attributes.mdx b/8.2.1/docs/configurable-attributes.mdx
deleted file mode 100644
index 3515852..0000000
--- a/8.2.1/docs/configurable-attributes.mdx
+++ /dev/null
@@ -1,1099 +0,0 @@
----
-title: 'Configurable Build Attributes'
----
-
-
-**_Configurable attributes_**, commonly known as [`select()`](
-/reference/be/functions#select), is a Bazel feature that lets users toggle the values
-of build rule attributes at the command line.
-
-This can be used, for example, for a multiplatform library that automatically
-chooses the appropriate implementation for the architecture, or for a
-feature-configurable binary that can be customized at build time.
-
-## Example
-
-```python
-# myapp/BUILD
-
-cc_binary(
-    name = "mybinary",
-    srcs = ["main.cc"],
-    deps = select({
-        ":arm_build": [":arm_lib"],
-        ":x86_debug_build": [":x86_dev_lib"],
-        "//conditions:default": [":generic_lib"],
-    }),
-)
-
-config_setting(
-    name = "arm_build",
-    values = {"cpu": "arm"},
-)
-
-config_setting(
-    name = "x86_debug_build",
-    values = {
-        "cpu": "x86",
-        "compilation_mode": "dbg",
-    },
-)
-```
-
-This declares a `cc_binary` that "chooses" its deps based on the flags at the
-command line. Specifically, `deps` becomes:
-
-Command                                         | deps =
------------------------------------------------ | ----------------------
-`bazel build //myapp:mybinary --cpu=arm`        | `[":arm_lib"]`
-`bazel build //myapp:mybinary -c dbg --cpu=x86` | `[":x86_dev_lib"]`
-`bazel build //myapp:mybinary --cpu=ppc`        | `[":generic_lib"]`
-`bazel build //myapp:mybinary -c dbg --cpu=ppc` | `[":generic_lib"]`
-
-`select()` serves as a placeholder for a value that will be chosen based on
-*configuration conditions*, which are labels referencing [`config_setting`](/reference/be/general#config_setting)
-targets. By using `select()` in a configurable attribute, the attribute
-effectively adopts different values when different conditions hold.
-
-Matches must be unambiguous: if multiple conditions match then either
-* They all resolve to the same value. For example, when running on Linux x86, this is unambiguous
-  `{"@platforms//os:linux": "Hello", "@platforms//cpu:x86_64": "Hello"}` because both branches resolve to "Hello".
-* One's `values` is a strict superset of all others'. For example, `values = {"cpu": "x86", "compilation_mode": "dbg"}`
-  is an unambiguous specialization of `values = {"cpu": "x86"}`.
-
-The built-in condition [`//conditions:default`](#default-condition) automatically matches when
-nothing else does.
-
-While this example uses `deps`, `select()` works just as well on `srcs`,
-`resources`, `cmd`, and most other attributes. Only a small number of attributes
-are *non-configurable*, and these are clearly annotated. For example,
-`config_setting`'s own
-[`values`](/reference/be/general#config_setting.values) attribute is non-configurable.
-
-## `select()` and dependencies
-
-Certain attributes change the build parameters for all transitive dependencies
-under a target. For example, `genrule`'s `tools` changes `--cpu` to the CPU of
-the machine running Bazel (which, thanks to cross-compilation, may be different
-from the CPU the target is built for). This is known as a
-[configuration transition](/reference/glossary#transition).
-
-Given
-
-```python
-# myapp/BUILD
-
-config_setting(
-    name = "arm_cpu",
-    values = {"cpu": "arm"},
-)
-
-config_setting(
-    name = "x86_cpu",
-    values = {"cpu": "x86"},
-)
-
-genrule(
-    name = "my_genrule",
-    srcs = select({
-        ":arm_cpu": ["g_arm.src"],
-        ":x86_cpu": ["g_x86.src"],
-    }),
-    tools = select({
-        ":arm_cpu": [":tool1"],
-        ":x86_cpu": [":tool2"],
-    }),
-)
-
-cc_binary(
-    name = "tool1",
-    srcs = select({
-        ":arm_cpu": ["armtool.cc"],
-        ":x86_cpu": ["x86tool.cc"],
-    }),
-)
-```
-
-running
-
-```sh
-$ bazel build //myapp:my_genrule --cpu=arm
-```
-
-on an `x86` developer machine binds the build to `g_arm.src`, `tool1`, and
-`x86tool.cc`. Both of the `select`s attached to `my_genrule` use `my_genrule`'s
-build parameters, which include `--cpu=arm`. The `tools` attribute changes
-`--cpu` to `x86` for `tool1` and its transitive dependencies. The `select` on
-`tool1` uses `tool1`'s build parameters, which include `--cpu=x86`.
-
-## Configuration conditions
-
-Each key in a configurable attribute is a label reference to a
-[`config_setting`](/reference/be/general#config_setting) or
-[`constraint_value`](/reference/be/platforms-and-toolchains#constraint_value).
-
-`config_setting` is just a collection of
-expected command line flag settings. By encapsulating these in a target, it's
-easy to maintain "standard" conditions users can reference from multiple places.
-
-`constraint_value` provides support for [multi-platform behavior](#platforms).
-
-### Built-in flags
-
-Flags like `--cpu` are built into Bazel: the build tool natively understands
-them for all builds in all projects.
-These are specified with
-[`config_setting`](/reference/be/general#config_setting)'s
-[`values`](/reference/be/general#config_setting.values) attribute:
-
-```python
-config_setting(
-    name = "meaningful_condition_name",
-    values = {
-        "flag1": "value1",
-        "flag2": "value2",
-        ...
-    },
-)
-```
-
-`flagN` is a flag name (without `--`, so `"cpu"` instead of `"--cpu"`). `valueN`
-is the expected value for that flag. `:meaningful_condition_name` matches if
-*every* entry in `values` matches. Order is irrelevant.
-
-`valueN` is parsed as if it was set on the command line. This means:
-
-* `values = { "compilation_mode": "opt" }` matches `bazel build -c opt`
-* `values = { "force_pic": "true" }` matches `bazel build --force_pic=1`
-* `values = { "force_pic": "0" }` matches `bazel build --noforce_pic`
-
-`config_setting` only supports flags that affect target behavior. For example,
-[`--show_progress`](/docs/user-manual#show-progress) isn't allowed because
-it only affects how Bazel reports progress to the user. Targets can't use that
-flag to construct their results. The exact set of supported flags isn't
-documented. In practice, most flags that "make sense" work.
-
-### Custom flags
-
-You can model your own project-specific flags with
-[Starlark build settings][BuildSettings]. Unlike built-in flags, these are
-defined as build targets, so Bazel references them with target labels.
-
-These are triggered with [`config_setting`](/reference/be/general#config_setting)'s
-[`flag_values`](/reference/be/general#config_setting.flag_values)
-attribute:
-
-```python
-config_setting(
-    name = "meaningful_condition_name",
-    flag_values = {
-        "//myflags:flag1": "value1",
-        "//myflags:flag2": "value2",
-        ...
-    },
-)
-```
-
-Behavior is the same as for [built-in flags](#built-in-flags). See [here](https://github.com/bazelbuild/examples/tree/HEAD/configurations/select_on_build_setting)
-for a working example.
-
-[`--define`](/reference/command-line-reference#flag--define)
-is an alternative legacy syntax for custom flags (for example
-`--define foo=bar`). This can be expressed either in the
-[values](/reference/be/general#config_setting.values) attribute
-(`values = {"define": "foo=bar"}`) or the
-[define_values](/reference/be/general#config_setting.define_values) attribute
-(`define_values = {"foo": "bar"}`). `--define` is only supported for backwards
-compatibility. Prefer Starlark build settings whenever possible.
-
-`values`, `flag_values`, and `define_values` evaluate independently. The
-`config_setting` matches if all values across all of them match.
-
-## The default condition
-
-The built-in condition `//conditions:default` matches when no other condition
-matches.
-
-Because of the "exactly one match" rule, a configurable attribute with no match
-and no default condition emits a `"no matching conditions"` error. This can
-protect against silent failures from unexpected settings:
-
-```python
-# myapp/BUILD
-
-config_setting(
-    name = "x86_cpu",
-    values = {"cpu": "x86"},
-)
-
-cc_library(
-    name = "x86_only_lib",
-    srcs = select({
-        ":x86_cpu": ["lib.cc"],
-    }),
-)
-```
-
-```sh
-$ bazel build //myapp:x86_only_lib --cpu=arm
-ERROR: Configurable attribute "srcs" doesn't match this configuration (would
-a default condition help?).
-Conditions checked:
-  //myapp:x86_cpu
-```
-
-For even clearer errors, you can set custom messages with `select()`'s
-[`no_match_error`](#custom-error-messages) attribute.
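-One way to make `x86_only_lib` above build on every CPU is to add a default
-branch. This is a sketch, and an empty default is just one possible policy:
-
-```python
-cc_library(
-    name = "x86_only_lib",
-    srcs = select({
-        ":x86_cpu": ["lib.cc"],
-        # Fall back to no sources on other CPUs instead of failing the build.
-        "//conditions:default": [],
-    }),
-)
-```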
-
-## Platforms
-
-While the ability to specify multiple flags on the command line provides
-flexibility, it can also be burdensome to individually set each one every time
-you want to build a target.
-[Platforms](/extending/platforms)
-let you consolidate these into simple bundles.
-
-```python
-# myapp/BUILD
-
-sh_binary(
-    name = "my_rocks",
-    srcs = select({
-        ":basalt": ["pyroxene.sh"],
-        ":marble": ["calcite.sh"],
-        "//conditions:default": ["feldspar.sh"],
-    }),
-)
-
-config_setting(
-    name = "basalt",
-    constraint_values = [
-        ":black",
-        ":igneous",
-    ],
-)
-
-config_setting(
-    name = "marble",
-    constraint_values = [
-        ":white",
-        ":metamorphic",
-    ],
-)
-
-# constraint_setting acts as an enum type, and constraint_value as an enum value.
-constraint_setting(name = "color")
-constraint_value(name = "black", constraint_setting = "color")
-constraint_value(name = "white", constraint_setting = "color")
-constraint_setting(name = "texture")
-constraint_value(name = "smooth", constraint_setting = "texture")
-constraint_setting(name = "type")
-constraint_value(name = "igneous", constraint_setting = "type")
-constraint_value(name = "metamorphic", constraint_setting = "type")
-
-platform(
-    name = "basalt_platform",
-    constraint_values = [
-        ":black",
-        ":igneous",
-    ],
-)
-
-platform(
-    name = "marble_platform",
-    constraint_values = [
-        ":white",
-        ":smooth",
-        ":metamorphic",
-    ],
-)
-```
-
-The platform can be specified on the command line. It activates the
-`config_setting`s that contain a subset of the platform's `constraint_values`,
-allowing those `config_setting`s to match in `select()` expressions.
-
-For example, in order to set the `srcs` attribute of `my_rocks` to `calcite.sh`,
-you can simply run
-
-```sh
-bazel build //myapp:my_rocks --platforms=//myapp:marble_platform
-```
-
-Without platforms, this might look something like
-
-```sh
-bazel build //myapp:my_rocks --define color=white --define texture=smooth --define type=metamorphic
-```
-
-`select()` can also directly read `constraint_value`s:
-
-```python
-constraint_setting(name = "type")
-constraint_value(name = "igneous", constraint_setting = "type")
-constraint_value(name = "metamorphic", constraint_setting = "type")
-sh_binary(
-    name = "my_rocks",
-    srcs = select({
-        ":igneous": ["igneous.sh"],
-        ":metamorphic": ["metamorphic.sh"],
-    }),
-)
-```
-
-This saves the need for boilerplate `config_setting`s when you only need to
-check against single values.
-
-Platforms are still under development. See the
-[documentation](/concepts/platforms) for details.
-
-## Combining `select()`s
-
-`select` can appear multiple times in the same attribute:
-
-```python
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"] +
-           select({
-               ":armeabi_mode": ["armeabi_src.sh"],
-               ":x86_mode": ["x86_src.sh"],
-           }) +
-           select({
-               ":opt_mode": ["opt_extras.sh"],
-               ":dbg_mode": ["dbg_extras.sh"],
-           }),
-)
-```
-
-Note: Some restrictions apply on what can be combined in the `select`s values:
-
- - Duplicate labels can appear in different paths of the same `select`.
- - Duplicate labels can *not* appear within the same path of a `select`.
- - Duplicate labels can *not* appear across multiple combined `select`s (no matter what path).
-
-`select` cannot appear inside another `select`. If you need to nest `selects`
-and your attribute takes other targets as values, use an intermediate target:
-
-```python
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"],
-    deps = select({
-        ":armeabi_mode": [":armeabi_lib"],
-        ...
-    }),
-)
-
-sh_library(
-    name = "armeabi_lib",
-    srcs = select({
-        ":opt_mode": ["armeabi_with_opt.sh"],
-        ...
-    }),
-)
-```
-
-If you need a `select` to match when multiple conditions match, consider [AND
-chaining](#and-chaining).
-
-## OR chaining
-
-Consider the following:
-
-```python
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"],
-    deps = select({
-        ":config1": [":standard_lib"],
-        ":config2": [":standard_lib"],
-        ":config3": [":standard_lib"],
-        ":config4": [":special_lib"],
-    }),
-)
-```
-
-Most conditions evaluate to the same dep. But this syntax is hard to read and
-maintain. It would be nice to not have to repeat `[":standard_lib"]` multiple
-times.
-
-One option is to predefine the value as a BUILD variable:
-
-```python
-STANDARD_DEP = [":standard_lib"]
-
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"],
-    deps = select({
-        ":config1": STANDARD_DEP,
-        ":config2": STANDARD_DEP,
-        ":config3": STANDARD_DEP,
-        ":config4": [":special_lib"],
-    }),
-)
-```
-
-This makes it easier to manage the dependency. But it still causes unnecessary
-duplication.
-
-For more direct support, use one of the following:
-
-### `selects.with_or`
-
-The
-[with_or](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectswith_or)
-macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s
-[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md)
-module supports `OR`ing conditions directly inside a `select`:
-
-```python
-load("@bazel_skylib//lib:selects.bzl", "selects")
-```
-
-```python
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"],
-    deps = selects.with_or({
-        (":config1", ":config2", ":config3"): [":standard_lib"],
-        ":config4": [":special_lib"],
-    }),
-)
-```
-
-### `selects.config_setting_group`
-
-The
-[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group)
-macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s
-[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md)
-module supports `OR`ing multiple `config_setting`s:
-
-```python
-load("@bazel_skylib//lib:selects.bzl", "selects")
-```
-
-```python
-config_setting(
-    name = "config1",
-    values = {"cpu": "arm"},
-)
-config_setting(
-    name = "config2",
-    values = {"compilation_mode": "dbg"},
-)
-selects.config_setting_group(
-    name = "config1_or_2",
-    match_any = [":config1", ":config2"],
-)
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"],
-    deps = select({
-        ":config1_or_2": [":standard_lib"],
-        "//conditions:default": [":other_lib"],
-    }),
-)
-```
-
-Unlike `selects.with_or`, different targets can share `:config1_or_2` across
-different attributes.
-
-It's an error for multiple conditions to match unless one is an unambiguous
-"specialization" of the others or they all resolve to the same value. See [here](#configurable-build-example) for details.
-
-## AND chaining
-
-If you need a `select` branch to match when multiple conditions match, use the
-[Skylib](https://github.com/bazelbuild/bazel-skylib) macro
-[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group):

-```python
-config_setting(
-    name = "config1",
-    values = {"cpu": "arm"},
-)
-config_setting(
-    name = "config2",
-    values = {"compilation_mode": "dbg"},
-)
-selects.config_setting_group(
-    name = "config1_and_2",
-    match_all = [":config1", ":config2"],
-)
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"],
-    deps = select({
-        ":config1_and_2": [":standard_lib"],
-        "//conditions:default": [":other_lib"],
-    }),
-)
-```
-
-Unlike OR chaining, existing `config_setting`s can't be directly `AND`ed
-inside a `select`. You have to explicitly wrap them in a `config_setting_group`.
-
-## Custom error messages
-
-By default, when no condition matches, the target the `select()` is attached to
-fails with the error:
-
-```sh
-ERROR: Configurable attribute "deps" doesn't match this configuration (would
-a default condition help?).
-Conditions checked:
-  //tools/cc_target_os:darwin
-  //tools/cc_target_os:android
-```
-
-This can be customized with the [`no_match_error`](/reference/be/functions#select)
-attribute:
-
-```python
-cc_library(
-    name = "my_lib",
-    deps = select(
-        {
-            "//tools/cc_target_os:android": [":android_deps"],
-            "//tools/cc_target_os:windows": [":windows_deps"],
-        },
-        no_match_error = "Please build with an Android or Windows toolchain",
-    ),
-)
-```
-
-```sh
-$ bazel build //myapp:my_lib
-ERROR: Configurable attribute "deps" doesn't match this configuration: Please
-build with an Android or Windows toolchain
-```
-
-## Rules compatibility
-
-Rule implementations receive the *resolved values* of configurable
-attributes. For example, given:
-
-```python
-# myapp/BUILD
-
-some_rule(
-    name = "my_target",
-    some_attr = select({
-        ":foo_mode": [":foo"],
-        ":bar_mode": [":bar"],
-    }),
-)
-```
-
-```sh
-$ bazel build //myapp/my_target --define mode=foo
-```
-
-Rule implementation code sees `ctx.attr.some_attr` as `[":foo"]`.
-
-Macros can accept `select()` clauses and pass them through to native
-rules. But *they cannot directly manipulate them*. For example, there's no way
-for a macro to convert
-
-```python
-select({"foo": "val"}, ...)
-```
-
-to
-
-```python
-select({"foo": "val_with_suffix"}, ...)
-```
-
-This is for two reasons.
-
-First, macros that need to know which path a `select` will choose *cannot work*
-because macros are evaluated in Bazel's [loading phase](/run/build#loading),
-which occurs before flag values are known.
-This is a core Bazel design restriction that's unlikely to change any time soon.
-
-Second, macros that just need to iterate over *all* `select` paths, while
-technically feasible, lack a coherent UI. Further design is necessary to change
-this.
-
-## Bazel query and cquery
-
-Bazel [`query`](/query/guide) operates over Bazel's
-[loading phase](/reference/glossary#loading-phase).
-This means it doesn't know what command line flags a target uses since those
-flags aren't evaluated until later in the build (in the
-[analysis phase](/reference/glossary#analysis-phase)).
-So it can't determine which `select()` branches are chosen.
-
-Bazel [`cquery`](/query/cquery) operates after Bazel's analysis phase, so it has
-all this information and can accurately resolve `select()`s.
- 
-Consider:
-
-```python
-load("@bazel_skylib//rules:common_settings.bzl", "string_flag")
-```
-```python
-# myapp/BUILD
-
-string_flag(
-    name = "dog_type",
-    build_setting_default = "cat"
-)
-
-cc_library(
-    name = "my_lib",
-    deps = select({
-        ":long": [":foo_dep"],
-        ":short": [":bar_dep"],
-    }),
-)
-
-config_setting(
-    name = "long",
-    flag_values = {":dog_type": "dachshund"},
-)
-
-config_setting(
-    name = "short",
-    flag_values = {":dog_type": "pug"},
-)
-```
-
-`query` overapproximates `:my_lib`'s dependencies:
-
-```sh
-$ bazel query 'deps(//myapp:my_lib)'
-//myapp:my_lib
-//myapp:foo_dep
-//myapp:bar_dep
-```
-
-while `cquery` shows its exact dependencies:
-
-```sh
-$ bazel cquery 'deps(//myapp:my_lib)' --//myapp:dog_type=pug
-//myapp:my_lib
-//myapp:bar_dep
-```
-
-## FAQ
-
-### Why doesn't select() work in macros?
-
-select() *does* work in rules! See [Rules compatibility](#rules-compatibility) for
-details.
-
-What this question usually means is that select() doesn't work in
-*macros*, which are different from *rules*. See the
-documentation on [rules](/extending/rules) and [macros](/extending/macros)
-to understand the difference.
-Here's an end-to-end example:
-
-Define a rule and macro:
-
-```python
-# myapp/defs.bzl
-
-# Rule implementation: when an attribute is read, all select()s have already
-# been resolved. So it looks like a plain old attribute just like any other.
-def _impl(ctx):
-    name = ctx.attr.name
-    allcaps = ctx.attr.my_config_string.upper()  # This works fine on all values.
-    print("My name is " + name + " with custom message: " + allcaps)
-
-# Rule declaration:
-my_custom_bazel_rule = rule(
-    implementation = _impl,
-    attrs = {"my_config_string": attr.string()},
-)
-
-# Macro declaration:
-def my_custom_bazel_macro(name, my_config_string):
-    allcaps = my_config_string.upper()  # This line won't work with select(s).
-    print("My name is " + name + " with custom message: " + allcaps)
-```
-
-Instantiate the rule and macro:
-
-```python
-# myapp/BUILD
-
-load("//myapp:defs.bzl", "my_custom_bazel_rule")
-load("//myapp:defs.bzl", "my_custom_bazel_macro")
-
-my_custom_bazel_rule(
-    name = "happy_rule",
-    my_config_string = select({
-        "//third_party/bazel_platforms/cpu:x86_32": "first string",
-        "//third_party/bazel_platforms/cpu:ppc": "second string",
-    }),
-)
-
-my_custom_bazel_macro(
-    name = "happy_macro",
-    my_config_string = "fixed string",
-)
-
-my_custom_bazel_macro(
-    name = "sad_macro",
-    my_config_string = select({
-        "//third_party/bazel_platforms/cpu:x86_32": "first string",
-        "//third_party/bazel_platforms/cpu:ppc": "other string",
-    }),
-)
-```
-
-Building fails because `sad_macro` can't process the `select()`:
-
-```sh
-$ bazel build //myapp:all
-ERROR: /myworkspace/myapp/BUILD:17:1: Traceback
-  (most recent call last):
-File "/myworkspace/myapp/BUILD", line 17
-my_custom_bazel_macro(name = "sad_macro", my_config_stri..."}))
-File "/myworkspace/myapp/defs.bzl", line 4, in
-  my_custom_bazel_macro
-my_config_string.upper()
-type 'select' has no method upper().
-ERROR: error loading package 'myapp': Package 'myapp' contains errors.
-```
-
-Building succeeds when you comment out `sad_macro`:
-
-```sh
-# Comment out sad_macro so it doesn't mess up the build.
-$ bazel build //myapp:all
-DEBUG: /myworkspace/myapp/defs.bzl:5:3: My name is happy_macro with custom message: FIXED STRING.
-DEBUG: /myworkspace/myapp/hi.bzl:15:3: My name is happy_rule with custom message: FIRST STRING.
-```
-
-This is impossible to change because *by definition* macros are evaluated before
-Bazel reads the build's command line flags. That means there isn't enough
-information to evaluate select()s.
-
-Macros can, however, pass `select()`s as opaque blobs to rules:
-
-```python
-# myapp/defs.bzl
-
-def my_custom_bazel_macro(name, my_config_string):
-    print("Invoking macro " + name)
-    my_custom_bazel_rule(
-        name = name + "_as_target",
-        my_config_string = my_config_string,
-    )
-```
-
-```sh
-$ bazel build //myapp:sad_macro_less_sad
-DEBUG: /myworkspace/myapp/defs.bzl:23:3: Invoking macro sad_macro_less_sad.
-DEBUG: /myworkspace/myapp/defs.bzl:15:3: My name is sad_macro_less_sad with custom message: FIRST STRING.
-```
-
-### Why does select() always return true?
-
-Because *macros* (but not rules) by definition
-[can't evaluate `select()`s](#faq-select-macro), any attempt to do so
-usually produces an error:
-
-```sh
-ERROR: /myworkspace/myapp/BUILD:17:1: Traceback
-  (most recent call last):
-File "/myworkspace/myapp/BUILD", line 17
-my_custom_bazel_macro(name = "sad_macro", my_config_stri..."}))
-File "/myworkspace/myapp/defs.bzl", line 4, in
-  my_custom_bazel_macro
-my_config_string.upper()
-type 'select' has no method upper().
-```
-
-Booleans are a special case because they fail silently, so you should be
-particularly vigilant with them:
-
-```sh
-$ cat myapp/defs.bzl
-def my_boolean_macro(boolval):
-    print("TRUE" if boolval else "FALSE")
-
-$ cat myapp/BUILD
-load("//myapp:defs.bzl", "my_boolean_macro")
-my_boolean_macro(
-    boolval = select({
-        "//third_party/bazel_platforms/cpu:x86_32": True,
-        "//third_party/bazel_platforms/cpu:ppc": False,
-    }),
-)
-
-$ bazel build //myapp:all --cpu=x86
-DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE.
-$ bazel build //myapp:all --cpu=ppc
-DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE.
-```
-
-This happens because macros don't understand the contents of `select()`.
-So what they're really evaluating is the `select()` object itself. According to
-[Pythonic](https://docs.python.org/release/2.5.2/lib/truth.html) design
-standards, all objects aside from a very small number of exceptions
-automatically evaluate to true.
-
-### Can I read select() like a dict?
-
-Macros [can't](#faq-select-macro) evaluate select()s because macros are evaluated
-before Bazel knows what the build's command line parameters are. Can they at
-least read the `select()`'s dictionary to, for example, add a suffix to each value?
-
-Conceptually this is possible, but [it isn't yet a Bazel feature](https://github.com/bazelbuild/bazel/issues/8419).
-What you *can* do today is prepare a straight dictionary, then feed it into a
-`select()`:
-
-```sh
-$ cat myapp/defs.bzl
-def selecty_genrule(name, select_cmd):
-    for key in select_cmd.keys():
-        select_cmd[key] += " WITH SUFFIX"
-    select_cmd["//conditions:default"] = "default"
-    native.genrule(
-        name = name,
-        outs = [name + ".out"],
-        srcs = [],
-        cmd = "echo " + select(select_cmd) + " > $@",
-    )
-
-$ cat myapp/BUILD
-selecty_genrule(
-    name = "selecty",
-    select_cmd = {
-        "//third_party/bazel_platforms/cpu:x86_32": "x86 mode",
-    },
-)
-
-$ bazel build //myapp:selecty --cpu=x86 && cat bazel-genfiles/myapp/selecty.out
-x86 mode WITH SUFFIX
-```
-
-If you'd like to support both `select()` and native types, you can do this:
-
-```sh
-$ cat myapp/defs.bzl
-def selecty_genrule(name, select_cmd):
-    cmd_suffix = ""
-    if type(select_cmd) == "string":
-        cmd_suffix = select_cmd + " WITH SUFFIX"
-    elif type(select_cmd) == "dict":
-        for key in select_cmd.keys():
-            select_cmd[key] += " WITH SUFFIX"
-        select_cmd["//conditions:default"] = "default"
-        cmd_suffix = select(select_cmd)
-
-    native.genrule(
-        name = name,
-        outs = [name + ".out"],
-        srcs = [],
-        cmd = "echo " + cmd_suffix + " > $@",
-    )
-```
-
-### Why doesn't select() work with bind()?
-
-First of all, do not use `bind()`. It is deprecated in favor of `alias()`.
-
-The technical answer is that [`bind()`](/reference/be/workspace#bind) is a repo
-rule, not a BUILD rule.
-
-Repo rules do not have a specific configuration, and aren't evaluated in
-the same way as BUILD rules. Therefore, a `select()` in a `bind()` can't
-actually evaluate to any specific branch.
-
-Instead, you should use [`alias()`](/reference/be/general#alias), with a `select()` in
-the `actual` attribute, to perform this type of run-time determination. This
-works correctly, since `alias()` is a BUILD rule, and is evaluated with a
-specific configuration.
-
-You can even have a `bind()` target point to an `alias()`, if needed.
-
-```sh
-$ cat WORKSPACE
-workspace(name = "myapp")
-bind(name = "openssl", actual = "//:ssl")
-http_archive(name = "alternative", ...)
-http_archive(name = "boringssl", ...)
-
-$ cat BUILD
-config_setting(
-    name = "alt_ssl",
-    define_values = {
-        "ssl_library": "alternative",
-    },
-)
-
-alias(
-    name = "ssl",
-    actual = select({
-        "//:alt_ssl": "@alternative//:ssl",
-        "//conditions:default": "@boringssl//:ssl",
-    }),
-)
-```
-
-With this setup, you can pass `--define ssl_library=alternative`, and any target
-that depends on either `//:ssl` or `//external:ssl` will see the alternative
-located at `@alternative//:ssl`.
-
-But really, stop using `bind()`.
-
-### Why doesn't my select() choose what I expect?
-
-If `//myapp:foo` has a `select()` that doesn't choose the condition you expect,
-use [cquery](/query/cquery) and `bazel config` to debug:
-
-If `//myapp:foo` is the top-level target you're building, run:
-
-```sh
-$ bazel cquery //myapp:foo
-//myapp:foo (12e23b9a2b534a)
-```
-
-If you're building some other target `//bar` that depends on
-`//myapp:foo` somewhere in its subgraph, run:
-
-```sh
-$ bazel cquery 'somepath(//bar, //myapp:foo)'
-//bar:bar (3ag3193fee94a2)
-//bar:intermediate_dep (12e23b9a2b534a)
-//myapp:foo (12e23b9a2b534a)
-```
-
-The `(12e23b9a2b534a)` next to `//myapp:foo` is a *hash* of the
-configuration that resolves `//myapp:foo`'s `select()`.
-You can inspect its values with `bazel config`:
-
-```sh
-$ bazel config 12e23b9a2b534a
-BuildConfigurationValue 12e23b9a2b534a
-Fragment com.google.devtools.build.lib.analysis.config.CoreOptions {
-  cpu: darwin
-  compilation_mode: fastbuild
-  ...
-}
-Fragment com.google.devtools.build.lib.rules.cpp.CppOptions {
-  linkopt: [-Dfoo=bar]
-  ...
-}
-...
-```
-
-Then compare this output against the settings expected by each `config_setting`.
-
-`//myapp:foo` may exist in different configurations in the same build. See the
-[cquery docs](/query/cquery) for guidance on using `somepath` to get the right
-one.
-
-Caution: To prevent restarting the Bazel server, invoke `bazel config` with the
-same command line flags as the `bazel cquery`. The `config` command relies on
-the configuration nodes from the still-running server of the previous command.
-
-### Why doesn't `select()` work with platforms?
-
-Bazel doesn't support configurable attributes checking whether a given platform
-is the target platform because the semantics are unclear.
-
-For example:
-
-```py
-platform(
-    name = "x86_linux_platform",
-    constraint_values = [
-        "@platforms//cpu:x86",
-        "@platforms//os:linux",
-    ],
-)
-
-cc_library(
-    name = "lib",
-    srcs = [...],
-    linkopts = select({
-        ":x86_linux_platform": ["--enable_x86_optimizations"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-In this `BUILD` file, which `select()` branch should be used if the target
-platform has both the `@platforms//cpu:x86` and `@platforms//os:linux`
-constraints, but is **not** the `:x86_linux_platform` defined here? The author
-of the `BUILD` file and the user who defined the separate platform may have
-different ideas.
-
-#### What should I do instead?
-
-Instead, define a `config_setting` that matches **any** platform with
-these constraints:
-
-```py
-config_setting(
-    name = "is_x86_linux",
-    constraint_values = [
-        "@platforms//cpu:x86",
-        "@platforms//os:linux",
-    ],
-)
-
-cc_library(
-    name = "lib",
-    srcs = [...],
-    linkopts = select({
-        ":is_x86_linux": ["--enable_x86_optimizations"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-This process defines specific semantics, making it clearer to users what
-platforms meet the desired conditions.
-
-#### What if I really, really want to `select` on the platform?
-
-If your build requirements specifically require checking the platform, you
-can match the value of the `--platforms` flag in a `config_setting`:
-
-```py
-config_setting(
-    name = "is_specific_x86_linux_platform",
-    values = {
-        "platforms": "//package:x86_linux_platform",
-    },
-)
-
-cc_library(
-    name = "lib",
-    srcs = [...],
-    linkopts = select({
-        ":is_specific_x86_linux_platform": ["--enable_x86_optimizations"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-The Bazel team doesn't endorse doing this; it overly constrains your build and
-confuses users when the expected condition does not match.
-
-[BuildSettings]: /extending/config#user-defined-build-settings
diff --git a/8.2.1/docs/sandboxing.mdx b/8.2.1/docs/sandboxing.mdx
deleted file mode 100644
index 6869795..0000000
--- a/8.2.1/docs/sandboxing.mdx
+++ /dev/null
@@ -1,174 +0,0 @@
----
-title: 'Sandboxing'
----
-
-
-
-This article covers sandboxing in Bazel and debugging your sandboxing
-environment.
-
-*Sandboxing* is a permission-restricting strategy that isolates processes from
-each other or from resources in a system. For Bazel, this means restricting
-file system access.
- -Bazel's file system sandbox runs processes in a working directory that only -contains known inputs, such that compilers and other tools don't see source -files they should not access, unless they know the absolute paths to them. - -Sandboxing doesn't hide the host environment in any way. Processes can freely -access all files on the file system. However, on platforms that support user -namespaces, processes can't modify any files outside their working directory. -This ensures that the build graph doesn't have hidden dependencies that could -affect the reproducibility of the build. - -More specifically, Bazel constructs an `execroot/` directory for each action, -which acts as the action's work directory at execution time. `execroot/` -contains all input files to the action and serves as the container for any -generated outputs. Bazel then uses an operating-system-provided technique, -containers on Linux and `sandbox-exec` on macOS, to constrain the action within -`execroot/`. - -## Reasons for sandboxing - -- Without action sandboxing, Bazel doesn't know if a tool uses undeclared - input files (files that are not explicitly listed in the dependencies of an - action). When one of the undeclared input files changes, Bazel still - believes that the build is up-to-date and won’t rebuild the action. This can - result in an incorrect incremental build. - -- Incorrect reuse of cache entries creates problems during remote caching. A - bad cache entry in a shared cache affects every developer on the project, - and wiping the entire remote cache is not a feasible solution. - -- Sandboxing mimics the behavior of remote execution — if a build works well - with sandboxing, it will likely also work with remote execution. By making - remote execution upload all necessary files (including local tools), you can - significantly reduce maintenance costs for compile clusters compared to - having to install the tools on every machine in the cluster every time you - want to try out a new compiler or make a change to an existing tool. - -## What sandbox strategy to use - -You can choose which kind of sandboxing to use, if any, with the -[strategy flags](user-manual.html#strategy-options). Using the `sandboxed` -strategy makes Bazel pick one of the sandbox implementations listed below, -preferring an OS-specific sandbox to the less hermetic generic one. -[Persistent workers](/remote/persistent) run in a generic sandbox if you pass -the `--worker_sandboxing` flag. - -The `local` (a.k.a. `standalone`) strategy does not do any kind of sandboxing. -It simply executes the action's command line with the working directory set to -the execroot of your workspace. - -`processwrapper-sandbox` is a sandboxing strategy that does not require any -"advanced" features - it should work on any POSIX system out of the box. It -builds a sandbox directory consisting of symlinks that point to the original -source files, executes the action's command line with the working directory set -to this directory instead of the execroot, then moves the known output artifacts -out of the sandbox into the execroot and deletes the sandbox. This prevents the -action from accidentally using any input files that are not declared and from -littering the execroot with unknown output files. - -`linux-sandbox` goes one step further and builds on top of the -`processwrapper-sandbox`. Similar to what Docker does under the hood, it uses -Linux Namespaces (User, Mount, PID, Network and IPC namespaces) to isolate the -action from the host. 
That is, it makes the entire filesystem read-only except
-for the sandbox directory, so the action cannot accidentally modify anything on
-the host filesystem. This prevents situations like a buggy test accidentally
-rm -rf'ing your $HOME directory. Optionally, you can also prevent the action
-from accessing the network. `linux-sandbox` uses PID namespaces to prevent the
-action from seeing any other processes and to reliably kill all processes (even
-daemons spawned by the action) at the end.
-
-`darwin-sandbox` is similar, but for macOS. It uses Apple's `sandbox-exec` tool
-to achieve roughly the same as the Linux sandbox.
-
-Neither `linux-sandbox` nor `darwin-sandbox` works in a "nested" scenario, due
-to restrictions in the mechanisms provided by the operating systems. Because
-Docker also uses Linux namespaces for its container magic, you cannot easily
-run `linux-sandbox` inside a Docker container, unless you use
-`docker run --privileged`. On macOS, you cannot run `sandbox-exec` inside a
-process that's already being sandboxed. Thus, in these cases, Bazel
-automatically falls back to using `processwrapper-sandbox`.
-
-If you would rather get a build error, for example to avoid accidentally
-building with a less strict execution strategy, explicitly modify the list of
-execution strategies that Bazel tries to use (for example, `bazel build
---spawn_strategy=worker,linux-sandbox`).
-
-Dynamic execution usually requires sandboxing for local execution. To opt out,
-pass the `--experimental_local_lockfree_output` flag. Dynamic execution
-silently sandboxes [persistent workers](/remote/persistent).
-
-## Downsides to sandboxing
-
-- Sandboxing incurs extra setup and teardown cost. How big this cost is
-  depends on many factors, including the shape of the build and the
-  performance of the host OS. For Linux, sandboxed builds are rarely more than
-  a few percent slower. Setting `--reuse_sandbox_directories` can
-  mitigate the setup and teardown cost.
-
-- Sandboxing effectively disables any cache the tool may have. You can
-  mitigate this by using [persistent workers](/remote/persistent), at
-  the cost of weaker sandbox guarantees.
-
-- [Multiplex workers](/remote/multiplex) require explicit worker support
-  to be sandboxed. Workers that do not support multiplex sandboxing run as
-  singleplex workers under dynamic execution, which can cost extra memory.
-
-## Debugging
-
-Follow the strategies below to debug issues with sandboxing.
-
-### Deactivated namespaces
-
-On some platforms, such as
-[Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/)
-cluster nodes or Debian, user namespaces are deactivated by default due to
-security concerns. If the `/proc/sys/kernel/unprivileged_userns_clone` file
-exists and contains a 0, you can activate user namespaces by running:
-
-```posix-terminal
-sudo sysctl kernel.unprivileged_userns_clone=1
-```
-
-### Rule execution failures
-
-The sandbox may fail to execute rules because of the system setup. If you see a
-message like `namespace-sandbox.c:633: execvp(argv[0], argv): No such file or
-directory`, try to deactivate the sandbox with `--strategy=Genrule=local` for
-genrules, and `--spawn_strategy=local` for other rules.
-
-### Detailed debugging for build failures
-
-If your build failed, use `--verbose_failures` and `--sandbox_debug` to make
-Bazel show the exact command it ran when your build failed, including the part
-that sets up the sandbox.
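-
-For example (with a hypothetical target label):
-
-```posix-terminal
-bazel build //mypackage:mytarget --verbose_failures --sandbox_debug
-```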
-
-Example error message:
-
-```
-ERROR: path/to/your/project/BUILD:1:1: compilation of rule
-'//path/to/your/project:all' failed:
-
-Sandboxed execution failed, which may be legitimate (such as a compiler error),
-or due to missing dependencies. To enter the sandbox environment for easier
-debugging, run the following command in parentheses. On command failure, a bash
-shell running inside the sandbox will then automatically be spawned
-
-namespace-sandbox failed: error executing command
-  (cd /some/path && \
-  exec env - \
-  LANG=en_US \
-  PATH=/some/path/bin:/bin:/usr/bin \
-  PYTHONPATH=/usr/local/some/path \
-  /some/path/namespace-sandbox @/sandbox/root/path/this-sandbox-name.params --
-  /some/path/to/your/some-compiler --some-params some-target)
-```
-
-You can now inspect the generated sandbox directory, see which files Bazel
-created, and run the command again to observe how it behaves.
-
-Note that Bazel does not delete the sandbox directory when you use
-`--sandbox_debug`. Unless you are actively debugging, you should disable
-`--sandbox_debug` because it fills up your disk over time.
diff --git a/8.2.1/extending/aspects.mdx b/8.2.1/extending/aspects.mdx
deleted file mode 100644
index 4e25125..0000000
--- a/8.2.1/extending/aspects.mdx
+++ /dev/null
@@ -1,412 +0,0 @@
----
-title: 'Aspects'
----
-
-
-
-This page explains the basics and benefits of using
-[aspects](/rules/lib/globals/bzl#aspect) and provides simple and advanced
-examples.
-
-Aspects allow augmenting build dependency graphs with additional information
-and actions. Some typical scenarios when aspects can be useful:
-
-* IDEs that integrate Bazel can use aspects to collect information about the
-  project.
-* Code generation tools can leverage aspects to execute on their inputs in a
-  *target-agnostic* manner. As an example, `BUILD` files can specify a hierarchy
-  of [protobuf](https://developers.google.com/protocol-buffers/) library
-  definitions, and language-specific rules can use aspects to attach
-  actions generating protobuf support code for a particular language.
-
-## Aspect basics
-
-`BUILD` files provide a description of a project’s source code: what source
-files are part of the project, what artifacts (_targets_) should be built from
-those files, what the dependencies between those files are, etc. Bazel uses
-this information to perform a build, that is, it figures out the set of actions
-needed to produce the artifacts (such as running a compiler or linker) and
-executes those actions. Bazel accomplishes this by constructing a _dependency
-graph_ between targets and visiting this graph to collect those actions.
-
-Consider the following `BUILD` file:
-
-```python
-java_library(name = 'W', ...)
-java_library(name = 'Y', deps = [':W'], ...)
-java_library(name = 'Z', deps = [':W'], ...)
-java_library(name = 'Q', ...)
-java_library(name = 'T', deps = [':Q'], ...)
-java_library(name = 'X', deps = [':Y', ':Z'], runtime_deps = [':T'], ...)
-```
-
-This `BUILD` file defines a dependency graph shown in the following figure:
-
-![Build graph](/rules/build-graph.png "Build graph")
-
-**Figure 1.** `BUILD` file dependency graph.
-
-Bazel analyzes this dependency graph by calling an implementation function of
-the corresponding [rule](/extending/rules) (in this case "java_library") for
-every target in the above example.
Rule implementation functions generate actions that -build artifacts, such as `.jar` files, and pass information, such as locations -and names of those artifacts, to the reverse dependencies of those targets in -[providers](/extending/rules#providers). - -Aspects are similar to rules in that they have an implementation function that -generates actions and returns providers. However, their power comes from -the way the dependency graph is built for them. An aspect has an implementation -and a list of all attributes it propagates along. Consider an aspect A that -propagates along attributes named "deps". This aspect can be applied to -a target X, yielding an aspect application node A(X). During its application, -aspect A is applied recursively to all targets that X refers to in its "deps" -attribute (all attributes in A's propagation list). - -Thus a single act of applying aspect A to a target X yields a "shadow graph" of -the original dependency graph of targets shown in the following figure: - -![Build Graph with Aspect](/rules/build-graph-aspects.png "Build graph with aspects") - -**Figure 2.** Build graph with aspects. - -The only edges that are shadowed are the edges along the attributes in -the propagation set, thus the `runtime_deps` edge is not shadowed in this -example. An aspect implementation function is then invoked on all nodes in -the shadow graph similar to how rule implementations are invoked on the nodes -of the original graph. - -## Simple example - -This example demonstrates how to recursively print the source files for a -rule and all of its dependencies that have a `deps` attribute. It shows -an aspect implementation, an aspect definition, and how to invoke the aspect -from the Bazel command line. - -```python -def _print_aspect_impl(target, ctx): - # Make sure the rule has a srcs attribute. - if hasattr(ctx.rule.attr, 'srcs'): - # Iterate through the files that make up the sources and - # print their paths. - for src in ctx.rule.attr.srcs: - for f in src.files.to_list(): - print(f.path) - return [] - -print_aspect = aspect( - implementation = _print_aspect_impl, - attr_aspects = ['deps'], -) -``` - -Let's break the example up into its parts and examine each one individually. - -### Aspect definition - -```python -print_aspect = aspect( - implementation = _print_aspect_impl, - attr_aspects = ['deps'], -) -``` -Aspect definitions are similar to rule definitions, and defined using -the [`aspect`](/rules/lib/globals/bzl#aspect) function. - -Just like a rule, an aspect has an implementation function which in this case is -``_print_aspect_impl``. - -``attr_aspects`` is a list of rule attributes along which the aspect propagates. -In this case, the aspect will propagate along the ``deps`` attribute of the -rules that it is applied to. - -Another common argument for `attr_aspects` is `['*']` which would propagate the -aspect to all attributes of a rule. - -### Aspect implementation - -```python -def _print_aspect_impl(target, ctx): - # Make sure the rule has a srcs attribute. - if hasattr(ctx.rule.attr, 'srcs'): - # Iterate through the files that make up the sources and - # print their paths. - for src in ctx.rule.attr.srcs: - for f in src.files.to_list(): - print(f.path) - return [] -``` - -Aspect implementation functions are similar to the rule implementation -functions. 
They return [providers](/extending/rules#providers), can generate
-[actions](/extending/rules#actions), and take two arguments:
-
-* `target`: the [target](/rules/lib/builtins/Target) the aspect is being applied to.
-* `ctx`: a [`ctx`](/rules/lib/builtins/ctx) object that can be used to access attributes
-  and generate outputs and actions.
-
-The implementation function can access the attributes of the target rule via
-[`ctx.rule.attr`](/rules/lib/builtins/ctx#rule). It can examine providers that are
-provided by the target to which it is applied (via the `target` argument).
-
-Aspects are required to return a list of providers. In this example, the aspect
-does not provide anything, so it returns an empty list.
-
-### Invoking the aspect using the command line
-
-The simplest way to apply an aspect is from the command line using the
-[`--aspects`](/reference/command-line-reference#flag--aspects)
-argument. Assuming the aspect above is defined in a file named `print.bzl`,
-this command:
-
-```bash
-bazel build //MyExample:example --aspects print.bzl%print_aspect
-```
-
-would apply the `print_aspect` to the target `example` and all of the
-target rules that are accessible recursively via the `deps` attribute.
-
-The `--aspects` flag takes one argument, which is a specification of the aspect
-in the format `<extension file label>%<aspect top-level name>`.
-
-## Advanced example
-
-The following example demonstrates using an aspect from a target rule
-that counts files in targets, potentially filtering them by extension.
-It shows how to use a provider to return values, how to use parameters to pass
-an argument into an aspect implementation, and how to invoke an aspect from a rule.
-
-Note: Aspects added in rules' attributes are called *rule-propagated aspects* as
-opposed to *command-line aspects* that are specified using the ``--aspects``
-flag.
-
-`file_count.bzl` file:
-
-```python
-FileCountInfo = provider(
-    fields = {
-        'count' : 'number of files'
-    }
-)
-
-def _file_count_aspect_impl(target, ctx):
-    count = 0
-    # Make sure the rule has a srcs attribute.
-    if hasattr(ctx.rule.attr, 'srcs'):
-        # Iterate through the sources counting files.
-        for src in ctx.rule.attr.srcs:
-            for f in src.files.to_list():
-                if ctx.attr.extension == '*' or ctx.attr.extension == f.extension:
-                    count = count + 1
-    # Get the counts from our dependencies.
-    for dep in ctx.rule.attr.deps:
-        count = count + dep[FileCountInfo].count
-    return [FileCountInfo(count = count)]
-
-file_count_aspect = aspect(
-    implementation = _file_count_aspect_impl,
-    attr_aspects = ['deps'],
-    attrs = {
-        'extension' : attr.string(values = ['*', 'h', 'cc']),
-    }
-)
-
-def _file_count_rule_impl(ctx):
-    for dep in ctx.attr.deps:
-        print(dep[FileCountInfo].count)
-
-file_count_rule = rule(
-    implementation = _file_count_rule_impl,
-    attrs = {
-        'deps' : attr.label_list(aspects = [file_count_aspect]),
-        'extension' : attr.string(default = '*'),
-    },
-)
-```
-
-`BUILD.bazel` file:
-
-```python
-load('//:file_count.bzl', 'file_count_rule')
-
-cc_library(
-    name = 'lib',
-    srcs = [
-        'lib.h',
-        'lib.cc',
-    ],
-)
-
-cc_binary(
-    name = 'app',
-    srcs = [
-        'app.h',
-        'app.cc',
-        'main.cc',
-    ],
-    deps = ['lib'],
-)
-
-file_count_rule(
-    name = 'file_count',
-    deps = ['app'],
-    extension = 'h',
-)
-```
-
-### Aspect definition
-
-```python
-file_count_aspect = aspect(
-    implementation = _file_count_aspect_impl,
-    attr_aspects = ['deps'],
-    attrs = {
-        'extension' : attr.string(values = ['*', 'h', 'cc']),
-    }
-)
-```
-
-This example shows how the aspect propagates through the ``deps`` attribute.
-
-``attrs`` defines a set of attributes for an aspect. Public aspect attributes
-define parameters and can only be of types ``bool``, ``int`` or ``string``.
-For rule-propagated aspects, ``int`` and ``string`` parameters must have
-``values`` specified on them. This example has a parameter called ``extension``
-that is allowed to have '``*``', '``h``', or '``cc``' as a value.
-
-For rule-propagated aspects, parameter values are taken from the rule requesting
-the aspect, using the attribute of the rule that has the same name and type
-(see the definition of ``file_count_rule``).
-
-For command-line aspects, parameter values can be passed using the
-[``--aspects_parameters``](/reference/command-line-reference#flag--aspects_parameters)
-flag. The ``values`` restriction of ``int`` and ``string`` parameters may be
-omitted.
-
-Aspects are also allowed to have private attributes of types ``label`` or
-``label_list``. Private label attributes can be used to specify dependencies on
-tools or libraries that are needed for actions generated by aspects. There is no
-private attribute defined in this example, but the following code snippet
-demonstrates how you could pass in a tool to an aspect:
-
-```python
-...
-    attrs = {
-        '_protoc' : attr.label(
-            default = Label('//tools:protoc'),
-            executable = True,
-            cfg = "exec"
-        )
-    }
-...
-```
-
-### Aspect implementation
-
-```python
-FileCountInfo = provider(
-    fields = {
-        'count' : 'number of files'
-    }
-)
-
-def _file_count_aspect_impl(target, ctx):
-    count = 0
-    # Make sure the rule has a srcs attribute.
-    if hasattr(ctx.rule.attr, 'srcs'):
-        # Iterate through the sources counting files.
-        for src in ctx.rule.attr.srcs:
-            for f in src.files.to_list():
-                if ctx.attr.extension == '*' or ctx.attr.extension == f.extension:
-                    count = count + 1
-    # Get the counts from our dependencies.
-    for dep in ctx.rule.attr.deps:
-        count = count + dep[FileCountInfo].count
-    return [FileCountInfo(count = count)]
-```
-
-Just like a rule implementation function, an aspect implementation function
-returns a struct of providers that are accessible to its dependencies.
-
-In this example, the ``FileCountInfo`` is defined as a provider that has one
-field ``count``. It is best practice to explicitly define the fields of a
-provider using the ``fields`` attribute.
-
-The set of providers for an aspect application A(X) is the union of providers
-that come from the implementation of a rule for target X and from the
-implementation of aspect A. The providers that a rule implementation propagates
-are created and frozen before aspects are applied and cannot be modified from an
-aspect. It is an error if a target and an aspect that is applied to it each
-provide a provider with the same type, with the exceptions of
-[`OutputGroupInfo`](/rules/lib/providers/OutputGroupInfo)
-(which is merged, so long as the
-rule and aspect specify different output groups) and
-[`InstrumentedFilesInfo`](/rules/lib/providers/InstrumentedFilesInfo)
-(which is taken from the aspect). This means that aspect implementations may
-never return [`DefaultInfo`](/rules/lib/providers/DefaultInfo).
-
-The parameters and private attributes are passed in the attributes of the
-``ctx``. This example references the ``extension`` parameter and determines
-what files to count.
-
-For returning providers, the values of attributes along which
-the aspect is propagated (from the `attr_aspects` list) are replaced with
-the results of an application of the aspect to them.
For example, if target
-X has Y and Z in its deps, `ctx.rule.attr.deps` for A(X) will be [A(Y), A(Z)].
-In this example, ``ctx.rule.attr.deps`` are Target objects that are the
-results of applying the aspect to the 'deps' of the original target to which
-the aspect has been applied.
-
-In the example, the aspect accesses the ``FileCountInfo`` provider from the
-target's dependencies to accumulate the total transitive number of files.
-
-### Invoking the aspect from a rule
-
-```python
-def _file_count_rule_impl(ctx):
-    for dep in ctx.attr.deps:
-        print(dep[FileCountInfo].count)
-
-file_count_rule = rule(
-    implementation = _file_count_rule_impl,
-    attrs = {
-        'deps' : attr.label_list(aspects = [file_count_aspect]),
-        'extension' : attr.string(default = '*'),
-    },
-)
-```
-
-The rule implementation demonstrates how to access the ``FileCountInfo``
-via ``ctx.attr.deps``.
-
-The rule definition demonstrates how to define a parameter (``extension``)
-and give it a default value (``*``). Note that having a default value that
-was not one of '``cc``', '``h``', or '``*``' would be an error due to the
-restrictions placed on the parameter in the aspect definition.
-
-### Invoking an aspect through a target rule
-
-```python
-load('//:file_count.bzl', 'file_count_rule')
-
-cc_binary(
-    name = 'app',
-...
-)
-
-file_count_rule(
-    name = 'file_count',
-    deps = ['app'],
-    extension = 'h',
-)
-```
-
-This demonstrates how to pass the ``extension`` parameter into the aspect
-via the rule. Since the ``extension`` parameter has a default value in the
-rule implementation, ``extension`` would be considered an optional parameter.
-
-When the ``file_count`` target is built, our aspect will be evaluated for
-itself, and all of the targets accessible recursively via ``deps``.
-
-## References
-
-* [`aspect` API reference](/rules/lib/globals/bzl#aspect)
diff --git a/8.2.1/extending/auto-exec-groups.mdx b/8.2.1/extending/auto-exec-groups.mdx
deleted file mode 100644
index abba3d5..0000000
--- a/8.2.1/extending/auto-exec-groups.mdx
+++ /dev/null
@@ -1,181 +0,0 @@
----
-title: 'Automatic Execution Groups (AEGs)'
----
-
-
-Automatic execution groups select an [execution platform][exec_platform]
-for each toolchain type. In other words, one target can have multiple
-execution platforms without defining execution groups.
-
-## Quick summary
-
-Automatic execution groups are closely connected to toolchains. If you are using
-toolchains, you need to set them on the affected actions (actions which use an
-executable or a tool from a toolchain) by adding the `toolchain` parameter. For
-example:
-
-```python
-ctx.actions.run(
-    ...,
-    executable = ctx.toolchain['@bazel_tools//tools/jdk:toolchain_type'].tool,
-    ...,
-    toolchain = '@bazel_tools//tools/jdk:toolchain_type',
-)
-```
-
-If the action does not use a tool or executable from a toolchain, and Bazel
-doesn't detect that ([the error](#first-error-message) is raised), you can set
-`toolchain = None`.
-
-If you need to use multiple toolchains on a single execution platform (an action
-uses executables or tools from two or more toolchains), you need to manually
-define [exec_groups][exec_groups] (check the
-[When should I use a custom exec_group?][multiple_toolchains_exec_groups]
-section).
-
-## History
-
-Before AEGs, the execution platform was selected at the rule level. For example:
-
-```python
-my_rule = rule(
-    _impl,
-    toolchains = ['//tools:toolchain_type_1', '//tools:toolchain_type_2'],
-)
-```
-
-Rule `my_rule` registers two toolchain types.
This means that [Toolchain
-Resolution](https://bazel.build/extending/toolchains#toolchain-resolution) was
-used to find an execution platform which supports both toolchain types. The
-selected execution platform was used for each registered action inside the
-rule, unless specified differently with [exec_groups][exec_groups].
-In other words, all actions inside the rule used to have a single execution
-platform even if they used tools from different toolchains (execution platform
-is selected for each target). This resulted in failures when there was no
-execution platform supporting all toolchains.
-
-## Current state
-
-With AEGs, the execution platform is selected for each toolchain type. The
-implementation function of the earlier example, `my_rule`, would look like:
-
-```python
-def _impl(ctx):
-    ctx.actions.run(
-        mnemonic = "First action",
-        executable = ctx.toolchain['//tools:toolchain_type_1'].tool,
-        toolchain = '//tools:toolchain_type_1',
-    )
-
-    ctx.actions.run(
-        mnemonic = "Second action",
-        executable = ctx.toolchain['//tools:toolchain_type_2'].tool,
-        toolchain = '//tools:toolchain_type_2',
-    )
-```
-
-This rule creates two actions: the `First action`, which uses an executable from
-`//tools:toolchain_type_1`, and the `Second action`, which uses an executable
-from `//tools:toolchain_type_2`. Before AEGs, both of these actions would be
-executed on a single execution platform which supports both toolchain types.
-With AEGs, by adding the `toolchain` parameter inside the actions, each action
-executes on the execution platform that provides the toolchain. The actions may
-be executed on different execution platforms.
-
-The same applies to [ctx.actions.run_shell][run_shell]: add the `toolchain`
-parameter when `tools` are from a toolchain.
-
-## Difference between custom exec groups and automatic exec groups
-
-As the name suggests, AEGs are exec groups created automatically for each
-toolchain type registered on a rule. There is no need to manually specify them,
-unlike the "classic" exec groups.
-
-### When should I use a custom exec_group?
-
-Custom exec_groups are needed only in cases where multiple toolchains need to
-execute on a single execution platform. In all other cases there's no need to
-define custom exec_groups. For example:
-
-```python
-def _impl(ctx):
-    ctx.actions.run(
-        ...,
-        executable = ctx.toolchain['//tools:toolchain_type_1'].tool,
-        tools = [ctx.toolchain['//tools:toolchain_type_2'].tool],
-        exec_group = 'two_toolchains',
-    )
-```
-
-```python
-my_rule = rule(
-    _impl,
-    exec_groups = {
-        "two_toolchains": exec_group(
-            toolchains = ['//tools:toolchain_type_1', '//tools:toolchain_type_2'],
-        ),
-    }
-)
-```
-
-## Migration of AEGs
-
-Internally in google3, Blaze is already using AEGs.
-Externally for Bazel, migration is in progress. Some rules are already using
-this feature (e.g. Java and C++ rules).
-
-### Which Bazel versions support this migration?
-
-AEGs are fully supported from Bazel 7.
-
-### How to enable AEGs?
-
-Set `--incompatible_auto_exec_groups` to true. More information about the flag
-is available in [the GitHub issue][github_flag].
-
-### How to enable AEGs inside a particular rule?
-
-Set the `_use_auto_exec_groups` attribute on a rule.
-
-```python
-my_rule = rule(
-    _impl,
-    attrs = {
-        "_use_auto_exec_groups": attr.bool(default = True),
-    }
-)
-```
-
-This enables AEGs only in `my_rule`, and its actions start using the new logic
-when selecting the execution platform. The incompatible flag is overridden with
-this attribute.
- -### How to disable AEGs in case of an error? - -Set `--incompatible_auto_exec_groups` to false to completely disable AEGs in -your project ([flag's GitHub issue][github_flag]), or disable a particular rule -by setting `_use_auto_exec_groups` attribute to `False` -([more details about the attribute](#how-enable-particular-rule)). - -### Error messages while migrating to AEGs - -#### Couldn't identify if tools are from implicit dependencies or a toolchain. Please set the toolchain parameter. If you're not using a toolchain, set it to 'None'. - * In this case you get a stack of calls before the error happened and you can - clearly see which exact action needs the toolchain parameter. Check which - toolchain is used for the action and set it with the toolchain param. If no - toolchain is used inside the action for tools or executable, set it to - `None`. - -#### Action declared for non-existent toolchain '[toolchain_type]'. - * This means that you've set the toolchain parameter on the action but didn't -register it on the rule. Register the toolchain or set `None` inside the action. - -## Additional material - -For more information, check design document: -[Automatic exec groups for toolchains][aegs_design_doc]. - -[exec_platform]: https://bazel.build/extending/platforms#:~:text=Execution%20%2D%20a%20platform%20on%20which%20build%20tools%20execute%20build%20actions%20to%20produce%20intermediate%20and%20final%20outputs. -[exec_groups]: https://bazel.build/extending/exec-groups -[github_flag]: https://github.com/bazelbuild/bazel/issues/17134 -[aegs_design_doc]: https://docs.google.com/document/d/1-rbP_hmKs9D639YWw5F_JyxPxL2bi6dSmmvj_WXak9M/edit#heading=h.5mcn15i0e1ch -[run_shell]: https://bazel.build/rules/lib/builtins/actions#run_shell -[multiple_toolchains_exec_groups]: /extending/auto-exec-groups#when-should-use-exec-groups diff --git a/8.2.1/extending/concepts.mdx b/8.2.1/extending/concepts.mdx deleted file mode 100644 index eb1d6b8..0000000 --- a/8.2.1/extending/concepts.mdx +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: 'Extension Overview' ---- - - - - -This page describes how to extend the BUILD language using macros -and rules. - -Bazel extensions are files ending in `.bzl`. Use a -[load statement](/concepts/build-files#load) to import a symbol from an extension. - -Before learning the more advanced concepts, first: - -* Read about the [Starlark language](/rules/language), used in both the - `BUILD` and `.bzl` files. - -* Learn how you can [share variables](/build/share-variables) - between two `BUILD` files. - -## Macros and rules - -A macro is a function that instantiates rules. Macros come in two flavors: -[symbolic macros](/extending/macros) (new in Bazel 8) and [legacy -macros](/extending/legacy-macros). The two flavors of macros are defined -differently, but behave almost the same from the point of view of a user. A -macro is useful when a `BUILD` file is getting too repetitive or too complex, as -it lets you reuse some code. The function is evaluated as soon as the `BUILD` -file is read. After the evaluation of the `BUILD` file, Bazel has little -information about macros. If your macro generates a `genrule`, Bazel will -behave *almost* as if you declared that `genrule` in the `BUILD` file. (The one -exception is that targets declared in a symbolic macro have [special visibility -semantics](/extending/macros#visibility): a symbolic macro can hide its internal -targets from the rest of the package.) - -A [rule](/extending/rules) is more powerful than a macro. 
It can access Bazel
-internals and have full control over what is going on. It may, for example,
-pass information to other rules.
-
-If you want to reuse simple logic, start with a macro; we recommend a symbolic
-macro, unless you need to support older Bazel versions. If a macro becomes
-complex, it is often a good idea to make it a rule. Support for a new language
-is typically done with a rule. Rules are for advanced users, and most users will
-never have to write one; they will only load and call existing rules.
-
-## Evaluation model
-
-A build consists of three phases.
-
-* **Loading phase**. First, load and evaluate all extensions and all `BUILD`
-  files that are needed for the build. The execution of the `BUILD` files simply
-  instantiates rules (each time a rule is called, it gets added to a graph).
-  This is where macros are evaluated.
-
-* **Analysis phase**. The code of the rules is executed (their `implementation`
-  function), and actions are instantiated. An action describes how to generate
-  a set of outputs from a set of inputs, such as "run gcc on hello.c and get
-  hello.o". You must list explicitly which files will be generated before
-  executing the actual commands. In other words, the analysis phase takes
-  the graph generated by the loading phase and generates an action graph.
-
-* **Execution phase**. Actions are executed when at least one of their outputs is
-  required. If a file is missing or if a command fails to generate one output,
-  the build fails. Tests are also run during this phase.
-
-Bazel uses parallelism to read, parse and evaluate the `.bzl` files and `BUILD`
-files. A file is read at most once per build and the result of the evaluation is
-cached and reused. A file is evaluated only once all its dependencies (`load()`
-statements) have been resolved. By design, loading a `.bzl` file has no visible
-side effects; it only defines values and functions.
-
-Bazel tries to be clever: it uses dependency analysis to know which files must
-be loaded, which rules must be analyzed, and which actions must be executed. For
-example, if a rule generates actions that you don't need for the current build,
-they will not be executed.
-
-## Creating extensions
-
-* [Create your first macro](/rules/macro-tutorial) in order to reuse some code.
-  Then [learn more about macros](/extending/macros) and [using them to create
-  "custom verbs"](/rules/verbs-tutorial).
-
-* [Follow the rules tutorial](/rules/rules-tutorial) to get started with rules.
-  Next, you can read more about the [rules concepts](/extending/rules).
-
-The two links below will be very useful when writing your own extensions. Keep
-them within reach:
-
-* The [API reference](/rules/lib)
-
-* [Examples](https://github.com/bazelbuild/examples/tree/master/rules)
-
-## Going further
-
-In addition to [macros](/extending/macros) and [rules](/extending/rules), you
-may want to write [aspects](/extending/aspects) and [repository
-rules](/extending/repo).
-
-* Use [Buildifier](https://github.com/bazelbuild/buildtools)
-  consistently to format and lint your code.
-
-* Follow the [`.bzl` style guide](/rules/bzl-style).
-
-* [Test](/rules/testing) your code.
-
-* [Generate documentation](https://skydoc.bazel.build/) to help your users.
-
-* [Optimize the performance](/rules/performance) of your code.
-
-* [Deploy](/rules/deploying) your extensions to other people.
diff --git a/8.2.1/extending/depsets.mdx b/8.2.1/extending/depsets.mdx
deleted file mode 100644
index 2aa8a1f..0000000
--- a/8.2.1/extending/depsets.mdx
+++ /dev/null
@@ -1,346 +0,0 @@
----
-title: 'Depsets'
----
-
-
-
-[Depsets](/rules/lib/builtins/depset) are a specialized data structure for efficiently
-collecting data across a target’s transitive dependencies. They are an essential
-element of rule processing.
-
-The defining feature of a depset is its time- and space-efficient union operation.
-The depset constructor accepts a list of elements ("direct") and a list of other
-depsets ("transitive"), and returns a depset representing a set containing all the
-direct elements and the union of all the transitive sets. Conceptually, the
-constructor creates a new graph node that has the direct and transitive nodes
-as its successors. Depsets have well-defined ordering semantics, based on
-traversal of this graph.
-
-Example uses of depsets include:
-
-* Storing the paths of all object files for a program’s libraries, which can
-  then be passed to a linker action through a provider.
-
-* For an interpreted language, storing the transitive source files that are
-  included in an executable's runfiles.
-
-## Description and operations
-
-Conceptually, a depset is a directed acyclic graph (DAG) that typically looks
-similar to the target graph. It is constructed from the leaves up to the root.
-Each target in a dependency chain can add its own contents on top of the
-previous without having to read or copy them.
-
-Each node in the DAG holds a list of direct elements and a list of child nodes.
-The contents of the depset are the transitive elements, that is, the direct
-elements of all the nodes. A new depset can be created using the
-[depset](/rules/lib/globals/bzl#depset) constructor: it accepts a list of direct
-elements and another list of child nodes.
-
-```python
-s = depset(["a", "b", "c"])
-t = depset(["d", "e"], transitive = [s])
-
-print(s)  # depset(["a", "b", "c"])
-print(t)  # depset(["d", "e", "a", "b", "c"])
-```
-
-To retrieve the contents of a depset, use the
-[to_list()](/rules/lib/builtins/depset#to_list) method. It returns a list of all transitive
-elements, not including duplicates. There is no way to directly inspect the
-precise structure of the DAG, although this structure does affect the order in
-which the elements are returned.
-
-```python
-s = depset(["a", "b", "c"])
-
-print("c" in s.to_list())              # True
-print(s.to_list() == ["a", "b", "c"])  # True
-```
-
-The allowed items in a depset are restricted, just as the allowed keys in
-dictionaries are restricted. In particular, depset contents may not be mutable.
-
-Depsets use reference equality: a depset is equal to itself, but unequal to any
-other depset, even if they have the same contents and same internal structure.
-
-```python
-s = depset(["a", "b", "c"])
-t = s
-print(s == t)  # True
-
-t = depset(["a", "b", "c"])
-print(s == t)  # False
-
-d = {}
-d[s] = None
-d[t] = None
-print(len(d))  # 2
-```
-
-To compare depsets by their contents, convert them to sorted lists.
-
-```python
-s = depset(["a", "b", "c"])
-t = depset(["c", "b", "a"])
-print(sorted(s.to_list()) == sorted(t.to_list()))  # True
-```
-
-There is no ability to remove elements from a depset. If this is needed, you
-must read out the entire contents of the depset, filter out the elements you
-want to remove, and reconstruct a new depset. This is not particularly efficient.
- -```python -s = depset(["a", "b", "c"]) -t = depset(["b", "c"]) - -# Compute set difference s - t. Precompute t.to_list() so it's not done -# in a loop, and convert it to a dictionary for fast membership tests. -t_items = {e: None for e in t.to_list()} -diff_items = [x for x in s.to_list() if x not in t_items] -# Convert back to depset if it's still going to be used for union operations. -s = depset(diff_items) -print(s) # depset(["a"]) -``` - -### Order - -The `to_list` operation performs a traversal over the DAG. The kind of traversal -depends on the *order* that was specified at the time the depset was -constructed. It is useful for Bazel to support multiple orders because sometimes -tools care about the order of their inputs. For example, a linker action may -need to ensure that if `B` depends on `A`, then `A.o` comes before `B.o` on the -linker’s command line. Other tools might have the opposite requirement. - -Three traversal orders are supported: `postorder`, `preorder`, and -`topological`. The first two work exactly like [tree -traversals](https://en.wikipedia.org/wiki/Tree_traversal#Depth-first_search) -except that they operate on DAGs and skip already visited nodes. The third order -works as a topological sort from root to leaves, essentially the same as -preorder except that shared children are listed only after all of their parents. -Preorder and postorder operate as left-to-right traversals, but note that within -each node direct elements have no order relative to children. For topological -order, there is no left-to-right guarantee, and even the -all-parents-before-child guarantee does not apply in the case that there are -duplicate elements in different nodes of the DAG. - -```python -# This demonstrates different traversal orders. - -def create(order): - cd = depset(["c", "d"], order = order) - gh = depset(["g", "h"], order = order) - return depset(["a", "b", "e", "f"], transitive = [cd, gh], order = order) - -print(create("postorder").to_list()) # ["c", "d", "g", "h", "a", "b", "e", "f"] -print(create("preorder").to_list()) # ["a", "b", "e", "f", "c", "d", "g", "h"] -``` - -```python -# This demonstrates different orders on a diamond graph. - -def create(order): - a = depset(["a"], order=order) - b = depset(["b"], transitive = [a], order = order) - c = depset(["c"], transitive = [a], order = order) - d = depset(["d"], transitive = [b, c], order = order) - return d - -print(create("postorder").to_list()) # ["a", "b", "c", "d"] -print(create("preorder").to_list()) # ["d", "b", "a", "c"] -print(create("topological").to_list()) # ["d", "b", "c", "a"] -``` - -Due to how traversals are implemented, the order must be specified at the time -the depset is created with the constructor’s `order` keyword argument. If this -argument is omitted, the depset has the special `default` order, in which case -there are no guarantees about the order of any of its elements (except that it -is deterministic). - -## Full example - -This example is available at -[https://github.com/bazelbuild/examples/tree/main/rules/depsets](https://github.com/bazelbuild/examples/tree/main/rules/depsets). - -Suppose there is a hypothetical interpreted language Foo. In order to build -each `foo_binary` you need to know all the `*.foo` files that it directly or -indirectly depends on. - -```python -# //depsets:BUILD - -load(":foo.bzl", "foo_library", "foo_binary") - -# Our hypothetical Foo compiler. 
-py_binary( - name = "foocc", - srcs = ["foocc.py"], -) - -foo_library( - name = "a", - srcs = ["a.foo", "a_impl.foo"], -) - -foo_library( - name = "b", - srcs = ["b.foo", "b_impl.foo"], - deps = [":a"], -) - -foo_library( - name = "c", - srcs = ["c.foo", "c_impl.foo"], - deps = [":a"], -) - -foo_binary( - name = "d", - srcs = ["d.foo"], - deps = [":b", ":c"], -) -``` - -```python -# //depsets:foocc.py - -# "Foo compiler" that just concatenates its inputs to form its output. -import sys - -if __name__ == "__main__": - assert len(sys.argv) >= 1 - output = open(sys.argv[1], "wt") - for path in sys.argv[2:]: - input = open(path, "rt") - output.write(input.read()) -``` - -Here, the transitive sources of the binary `d` are all of the `*.foo` files in -the `srcs` fields of `a`, `b`, `c`, and `d`. In order for the `foo_binary` -target to know about any file besides `d.foo`, the `foo_library` targets need to -pass them along in a provider. Each library receives the providers from its own -dependencies, adds its own immediate sources, and passes on a new provider with -the augmented contents. The `foo_binary` rule does the same, except that instead -of returning a provider, it uses the complete list of sources to construct a -command line for an action. - -Here’s a complete implementation of the `foo_library` and `foo_binary` rules. - -```python -# //depsets/foo.bzl - -# A provider with one field, transitive_sources. -FooFiles = provider(fields = ["transitive_sources"]) - -def get_transitive_srcs(srcs, deps): - """Obtain the source files for a target and its transitive dependencies. - - Args: - srcs: a list of source files - deps: a list of targets that are direct dependencies - Returns: - a collection of the transitive sources - """ - return depset( - srcs, - transitive = [dep[FooFiles].transitive_sources for dep in deps]) - -def _foo_library_impl(ctx): - trans_srcs = get_transitive_srcs(ctx.files.srcs, ctx.attr.deps) - return [FooFiles(transitive_sources=trans_srcs)] - -foo_library = rule( - implementation = _foo_library_impl, - attrs = { - "srcs": attr.label_list(allow_files=True), - "deps": attr.label_list(), - }, -) - -def _foo_binary_impl(ctx): - foocc = ctx.executable._foocc - out = ctx.outputs.out - trans_srcs = get_transitive_srcs(ctx.files.srcs, ctx.attr.deps) - srcs_list = trans_srcs.to_list() - ctx.actions.run(executable = foocc, - arguments = [out.path] + [src.path for src in srcs_list], - inputs = srcs_list + [foocc], - outputs = [out]) - -foo_binary = rule( - implementation = _foo_binary_impl, - attrs = { - "srcs": attr.label_list(allow_files=True), - "deps": attr.label_list(), - "_foocc": attr.label(default=Label("//depsets:foocc"), - allow_files=True, executable=True, cfg="host") - }, - outputs = {"out": "%{name}.out"}, -) -``` - -You can test this by copying these files into a fresh package, renaming the -labels appropriately, creating the source `*.foo` files with dummy content, and -building the `d` target. - - -## Performance - -To see the motivation for using depsets, consider what would happen if -`get_transitive_srcs()` collected its sources in a list. - -```python -def get_transitive_srcs(srcs, deps): - trans_srcs = [] - for dep in deps: - trans_srcs += dep[FooFiles].transitive_sources - trans_srcs += srcs - return trans_srcs -``` - -This does not take into account duplicates, so the source files for `a` -will appear twice on the command line and twice in the contents of the output -file. 
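-
-To make the duplication concrete, here is a plain-Python sketch (not code from
-the rules above) of what the list-based collection produces for this example's
-dependency graph, where `d` depends on `b` and `c`, which both depend on `a`:
-
-```python
-a_srcs = ["a.foo", "a_impl.foo"]
-b_srcs = a_srcs + ["b.foo", "b_impl.foo"]  # a's sources copied once
-c_srcs = a_srcs + ["c.foo", "c_impl.foo"]  # a's sources copied again
-d_srcs = b_srcs + c_srcs + ["d.foo"]
-print(d_srcs)  # a.foo and a_impl.foo each appear twice
-```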
-
-An alternative is using a general set, which can be simulated by a
-dictionary where the keys are the elements and all the keys map to `True`.
-
-```python
-def get_transitive_srcs(srcs, deps):
-  trans_srcs = {}
-  for dep in deps:
-    for file in dep[FooFiles].transitive_sources:
-      trans_srcs[file] = True
-  for file in srcs:
-    trans_srcs[file] = True
-  return trans_srcs
-```
-
-This gets rid of the duplicates, but it makes the order of the command line
-arguments (and therefore the contents of the files) unspecified, although
-still deterministic.
-
-Moreover, both approaches are asymptotically worse than the depset-based
-approach. Consider the case where there is a long chain of dependencies on
-Foo libraries. Processing every rule requires copying all of the transitive
-sources that came before it into a new data structure. This means that the
-time and space cost for analyzing an individual library or binary target
-is proportional to its own height in the chain. For a chain of length n,
-foolib_1 ← foolib_2 ← … ← foolib_n, the overall cost is effectively O(n^2).
-
-Generally speaking, depsets should be used whenever you are accumulating
-information through your transitive dependencies. This helps ensure that
-your build scales well as your target graph grows deeper.
-
-Finally, it’s important to not retrieve the contents of the depset
-unnecessarily in rule implementations. One call to `to_list()`
-at the end in a binary rule is fine, since the overall cost is just O(n).
-It’s when many non-terminal targets try to call `to_list()` that quadratic
-behavior occurs.
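-
-As a concrete (hypothetical) sketch of that anti-pattern, imagine the
-`foo_library` implementation above flattening its depset on every target.
-The names come from the earlier example; the flattening serves no purpose
-other than to illustrate the cost:
-
-```python
-def _foo_library_impl(ctx):
-  trans_srcs = get_transitive_srcs(ctx.files.srcs, ctx.attr.deps)
-  # BAD: every library flattens the whole transitive set, so a chain of n
-  # libraries does O(n) work n times -- O(n^2) overall.
-  flattened = trans_srcs.to_list()
-  return [FooFiles(transitive_sources = trans_srcs)]
-```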
-
-For more information about using depsets efficiently, see the
-[performance](/rules/performance) page.
-
-## API Reference
-
-Please see [here](/rules/lib/builtins/depset) for more details.
-
diff --git a/8.2.1/extending/exec-groups.mdx b/8.2.1/extending/exec-groups.mdx
deleted file mode 100644
index ba145e5..0000000
--- a/8.2.1/extending/exec-groups.mdx
+++ /dev/null
@@ -1,167 +0,0 @@
----
-title: 'Execution Groups'
----
-
-
-
-Execution groups allow for multiple execution platforms within a single
-target. Each execution group has its own [toolchain](/extending/toolchains)
-dependencies and performs its own [toolchain
-resolution](/extending/toolchains#toolchain-resolution).
-
-## Background
-
-Execution groups allow the rule author to define sets of actions, each with
-a potentially different execution platform. Multiple execution platforms can
-allow actions to execute differently, for example compiling an iOS app on a
-remote (Linux) worker and then linking/code signing on a local Mac worker.
-
-Being able to define groups of actions also helps reduce reliance on action
-mnemonics as a proxy for specifying actions. Mnemonics are not guaranteed to
-be unique and can only reference a single action. This is especially helpful
-in allocating extra resources to specific memory- and processing-intensive
-actions like linking in C++ builds without over-allocating to less demanding
-tasks.
-
-## Defining execution groups
-
-During rule definition, rule authors can
-[declare](/rules/lib/globals/bzl#exec_group)
-a set of execution groups. On each execution group, the rule author can
-specify everything needed to select an execution platform for that execution
-group, namely any constraints via `exec_compatible_with` and toolchain types
-via `toolchains`.
-
-```python
-# foo.bzl
-my_rule = rule(
-    _impl,
-    exec_groups = {
-        "link": exec_group(
-            exec_compatible_with = ["@platforms//os:linux"],
-            toolchains = ["//foo:toolchain_type"],
-        ),
-        "test": exec_group(
-            toolchains = ["//foo_tools:toolchain_type"],
-        ),
-    },
-    attrs = {
-        "_compiler": attr.label(cfg = config.exec("link")),
-    },
-)
-```
-
-In the code snippet above, you can see that tool dependencies can also
-specify a transition for an exec group using the
-[`cfg`](/rules/lib/toplevel/attr#label)
-attribute param and the
-[`config`](/rules/lib/toplevel/config)
-module. The module exposes an `exec` function which takes a single string
-parameter: the name of the exec group for which the dependency should be
-built.
-
-As on native rules, the `test` execution group is present by default on
-Starlark test rules.
-
-## Accessing execution groups
-
-In the rule implementation, you can declare that actions should be run on
-the execution platform of an execution group. You can do this by using the
-`exec_group` param of action generating methods, specifically
-[`ctx.actions.run`](/rules/lib/builtins/actions#run) and
-[`ctx.actions.run_shell`](/rules/lib/builtins/actions#run_shell).
-
-```python
-# foo.bzl
-def _impl(ctx):
-  ctx.actions.run(
-      inputs = [ctx.file._some_tool, ctx.files.srcs[0]],
-      exec_group = "compile",
-      # ...
-  )
-```
-
-Rule authors will also be able to access the [resolved
-toolchains](/extending/toolchains#toolchain-resolution) of execution groups,
-similarly to how you can access the resolved toolchain of a target:
-
-```python
-# foo.bzl
-def _impl(ctx):
-  foo_info = ctx.exec_groups["link"].toolchains["//foo:toolchain_type"].fooinfo
-  ctx.actions.run(
-      inputs = [foo_info, ctx.files.srcs[0]],
-      exec_group = "link",
-      # ...
-  )
-```
-
-Note: If an action uses a toolchain from an execution group, but doesn't
-specify that execution group in the action declaration, that may potentially
-cause issues. A mismatch like this may not immediately cause failures, but is
-a latent problem.
-
-## Using execution groups to set execution properties
-
-Execution groups are integrated with the
-[`exec_properties`](/reference/be/common-definitions#common-attributes)
-attribute that exists on every rule and allows the target writer to specify
-a string dict of properties that is then passed to the execution machinery.
-For example, if you wanted to set some property, say memory, for the target
-and give certain actions a higher memory allocation, you would write an
-`exec_properties` entry with an execution-group-augmented key, such as:
-
-```python
-# BUILD
-my_rule(
-    name = 'my_target',
-    exec_properties = {
-        'mem': '12g',
-        'link.mem': '16g'
-    }
-    …
-)
-```
-
-All actions with `exec_group = "link"` would see the exec properties
-dictionary as `{"mem": "16g"}`. As you see here, execution-group-level
-settings override target-level settings.
-
-### Execution groups for native rules
-
-The following execution groups are available for actions defined by native
-rules:
-
-* `test`: Test runner actions.
-* `cpp_link`: C++ linking actions.
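-
-For instance, combining this list with the `exec_properties` mechanism above,
-a hypothetical sketch (the target name and memory value are made up) that
-gives only a binary's C++ link action more memory might look like:
-
-```python
-# BUILD
-cc_binary(
-    name = "my_heavy_binary",       # hypothetical target
-    srcs = ["main.cc"],
-    exec_properties = {
-        "cpp_link.mem": "16g",      # applies only to the cpp_link group
-    },
-)
-```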
-
-### Execution groups and platform execution properties
-
-It is possible to define `exec_properties` for arbitrary execution groups on
-platform targets (unlike `exec_properties` set directly on a target, where
-properties for unknown execution groups are rejected). Targets then inherit
-the execution platform's `exec_properties` that affect the default execution
-group and any other relevant execution groups.
-
-For example, suppose running a C++ test requires some resource to be
-available, but it isn't required for compiling and linking; this can be
-modelled as follows:
-
-```python
-constraint_setting(name = "resource")
-constraint_value(name = "has_resource", constraint_setting = ":resource")
-
-platform(
-    name = "platform_with_resource",
-    constraint_values = [":has_resource"],
-    exec_properties = {
-        "test.resource": "...",
-    },
-)
-
-cc_test(
-    name = "my_test",
-    srcs = ["my_test.cc"],
-    exec_compatible_with = [":has_resource"],
-)
-```
-
-`exec_properties` defined directly on targets take precedence over those
-that are inherited from the execution platform.
diff --git a/8.2.1/extending/platforms.mdx b/8.2.1/extending/platforms.mdx
deleted file mode 100644
index 94e6290..0000000
--- a/8.2.1/extending/platforms.mdx
+++ /dev/null
@@ -1,273 +0,0 @@
----
-title: 'Platforms'
----
-
-
-
-Bazel can build and test code on a variety of hardware, operating systems,
-and system configurations, using many different versions of build tools such
-as linkers and compilers. To help manage this complexity, Bazel has a concept
-of *constraints* and *platforms*. A constraint is a dimension in which build
-or production environments may differ, such as CPU architecture, the
-presence or absence of a GPU, or the version of a system-installed compiler.
-A platform is a named collection of choices for these constraints,
-representing the particular resources that are available in some
-environment.
-
-Modeling the environment as a platform helps Bazel to automatically select
-the appropriate
-[toolchains](/extending/toolchains)
-for build actions. Platforms can also be used in combination with the
-[config_setting](/reference/be/general#config_setting)
-rule to write [configurable attributes](/docs/configurable-attributes).
-
-Bazel recognizes three roles that a platform may serve:
-
-* **Host** - the platform on which Bazel itself runs.
-* **Execution** - a platform on which build tools execute build actions to
-  produce intermediate and final outputs.
-* **Target** - a platform on which a final output resides and executes.
-
-Bazel supports the following build scenarios regarding platforms:
-
-* **Single-platform builds** (default) - host, execution, and target
-  platforms are the same. For example, building a Linux executable on Ubuntu
-  running on an Intel x64 CPU.
-
-* **Cross-compilation builds** - host and execution platforms are the same,
-  but the target platform is different. For example, building an iOS app on
-  macOS running on a MacBook Pro.
-
-* **Multi-platform builds** - host, execution, and target platforms are all
-  different.
-
-Tip: for detailed instructions on migrating your project to platforms, see
-[Migrating to Platforms](/concepts/platforms).
-
-## Defining constraints and platforms
-
-The space of possible choices for platforms is defined by using the
-[`constraint_setting`][constraint_setting] and
-[`constraint_value`][constraint_value] rules within `BUILD` files.
-`constraint_setting` creates a new dimension, while
-`constraint_value` creates a new value for a given dimension; together they
-effectively define an enum and its possible values. For example, the
-following snippet of a `BUILD` file introduces a constraint for the system's
-glibc version with two possible values.
-
-[constraint_setting]: /reference/be/platforms-and-toolchains#constraint_setting
-[constraint_value]: /reference/be/platforms-and-toolchains#constraint_value
-
-```python
-constraint_setting(name = "glibc_version")
-
-constraint_value(
-    name = "glibc_2_25",
-    constraint_setting = ":glibc_version",
-)
-
-constraint_value(
-    name = "glibc_2_26",
-    constraint_setting = ":glibc_version",
-)
-```
-
-Constraints and their values may be defined across different packages in the
-workspace. They are referenced by label and subject to the usual visibility
-controls. If visibility allows, you can extend an existing constraint
-setting by defining your own value for it.
-
-The [`platform`](/reference/be/platforms-and-toolchains#platform) rule
-introduces a new platform with certain choices of constraint values. The
-following creates a platform named `linux_x86`, and says that it describes
-any environment that runs a Linux operating system on an x86_64 architecture
-with a glibc version of 2.25. (See below for more on Bazel's built-in
-constraints.)
-
-```python
-platform(
-    name = "linux_x86",
-    constraint_values = [
-        "@platforms//os:linux",
-        "@platforms//cpu:x86_64",
-        ":glibc_2_25",
-    ],
-)
-```
-
-Note: It is an error for a platform to specify more than one value of the
-same constraint setting, such as `@platforms//cpu:x86_64` and
-`@platforms//cpu:arm` for `@platforms//cpu:cpu`.
-
-## Generally useful constraints and platforms
-
-To keep the ecosystem consistent, the Bazel team maintains a repository with
-constraint definitions for the most popular CPU architectures and operating
-systems. These are all located in
-[https://github.com/bazelbuild/platforms](https://github.com/bazelbuild/platforms).
-
-Bazel ships with the following special platform definition:
-`@platforms//host` (aliased as `@bazel_tools//tools:host_platform`). This is
-the autodetected host platform, representing the platform of the system
-Bazel is running on.
-
-## Specifying a platform for a build
-
-You can specify the host and target platforms for a build using the
-following command-line flags:
-
-* `--host_platform` - defaults to `@bazel_tools//tools:host_platform`
-  * This target is aliased to `@platforms//host`, which is backed by a repo
-    rule that detects the host OS and CPU and writes the platform target.
-  * There's also `@platforms//host:constraints.bzl`, which exposes
-    an array called `HOST_CONSTRAINTS`, which can be used in other `BUILD`
-    and Starlark files (see the sketch after this list).
-* `--platforms` - defaults to the host platform
-  * This means that when no other flags are set,
-    `@platforms//host` is the target platform.
-  * If `--host_platform` is set and not `--platforms`, the value of
-    `--host_platform` is both the host and target platform.
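-
-As a minimal sketch of that last point, `HOST_CONSTRAINTS` is a list of
-constraint-value labels, so it can be loaded and used anywhere a
-`constraint_values` list is expected (the platform name below is
-hypothetical; `:glibc_2_25` comes from the earlier example):
-
-```python
-# BUILD
-load("@platforms//host:constraints.bzl", "HOST_CONSTRAINTS")
-
-platform(
-    name = "host_plus_glibc",  # hypothetical platform
-    constraint_values = HOST_CONSTRAINTS + [":glibc_2_25"],
-)
-```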
-
-## Skipping incompatible targets
-
-When building for a specific target platform it is often desirable to skip
-targets that will never work on that platform. For example, your Windows
-device driver is likely going to generate lots of compiler errors when
-building on a Linux machine with `//...`. Use the
-[`target_compatible_with`](/reference/be/common-definitions#common.target_compatible_with)
-attribute to tell Bazel what target platform constraints your code has.
-
-The simplest use of this attribute restricts a target to a single platform.
-The target will not be built for any platform that doesn't satisfy all of
-the constraints. The following example restricts `win_driver_lib.cc` to
-64-bit Windows.
-
-```python
-cc_library(
-    name = "win_driver_lib",
-    srcs = ["win_driver_lib.cc"],
-    target_compatible_with = [
-        "@platforms//cpu:x86_64",
-        "@platforms//os:windows",
-    ],
-)
-```
-
-`:win_driver_lib` is *only* compatible for building with 64-bit Windows and
-incompatible with all else. Incompatibility is transitive. Any targets
-that transitively depend on an incompatible target are themselves considered
-incompatible.
-
-### When are targets skipped?
-
-Targets are skipped when they are considered incompatible and included in
-the build as part of a target pattern expansion. For example, the following
-two invocations skip any incompatible targets found in a target pattern
-expansion.
-
-```console
-$ bazel build --platforms=//:myplatform //...
-```
-
-```console
-$ bazel build --platforms=//:myplatform //:all
-```
-
-Incompatible tests in a [`test_suite`](/reference/be/general#test_suite) are
-similarly skipped if the `test_suite` is specified on the command line with
-[`--expand_test_suites`](/reference/command-line-reference#flag--expand_test_suites).
-In other words, `test_suite` targets on the command line behave like `:all`
-and `...`. Using `--noexpand_test_suites` prevents expansion and causes
-`test_suite` targets with incompatible tests to also be incompatible.
-
-Explicitly specifying an incompatible target on the command line results in
-an error message and a failed build.
-
-```console
-$ bazel build --platforms=//:myplatform //:target_incompatible_with_myplatform
-...
-ERROR: Target //:target_incompatible_with_myplatform is incompatible and cannot be built, but was explicitly requested.
-...
-FAILED: Build did NOT complete successfully
-```
-
-Incompatible explicit targets are silently skipped if
-`--skip_incompatible_explicit_targets` is enabled.
-
-### More expressive constraints
-
-For more flexibility in expressing constraints, use the
-`@platforms//:incompatible`
-[`constraint_value`](/reference/be/platforms-and-toolchains#constraint_value)
-that no platform satisfies.
-
-Use [`select()`](/reference/be/functions#select) in combination with
-`@platforms//:incompatible` to express more complicated restrictions. For
-example, use it to implement basic OR logic. The following marks a library
-compatible with macOS and Linux, but no other platforms.
-
-Note: An empty constraints list is equivalent to "compatible with
-everything".
-
-```python
-cc_library(
-    name = "unixish_lib",
-    srcs = ["unixish_lib.cc"],
-    target_compatible_with = select({
-        "@platforms//os:osx": [],
-        "@platforms//os:linux": [],
-        "//conditions:default": ["@platforms//:incompatible"],
-    }),
-)
-```
-
-The above can be interpreted as follows:
-
-1. When targeting macOS, the target has no constraints.
-2. When targeting Linux, the target has no constraints.
-3. Otherwise, the target has the `@platforms//:incompatible` constraint.
-   Because `@platforms//:incompatible` is not part of any platform, the
-   target is deemed incompatible.
-
-To make your constraints more readable, use
-[skylib](https://github.com/bazelbuild/bazel-skylib)'s
-[`selects.with_or()`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectswith_or).
-
-You can express inverse compatibility in a similar way. The following
-example describes a library that is compatible with everything _except_ for
-ARM.
-
-```python
-cc_library(
-    name = "non_arm_lib",
-    srcs = ["non_arm_lib.cc"],
-    target_compatible_with = select({
-        "@platforms//cpu:arm": ["@platforms//:incompatible"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-### Detecting incompatible targets using `bazel cquery`
-
-You can use the
-[`IncompatiblePlatformProvider`](/rules/lib/providers/IncompatiblePlatformProvider)
-in `bazel cquery`'s [Starlark output
-format](/query/cquery#output-format-definition) to distinguish
-incompatible targets from compatible ones.
-
-This can be used to filter out incompatible targets. The example below will
-only print the labels for targets that are compatible. Incompatible targets
-are not printed.
-
-```console
-$ cat example.cquery
-
-def format(target):
-  if "IncompatiblePlatformProvider" not in providers(target):
-    return target.label
-  return ""
-
-
-$ bazel cquery //... --output=starlark --starlark:file=example.cquery
-```
-
-### Known Issues
-
-Incompatible targets [ignore visibility
-restrictions](https://github.com/bazelbuild/bazel/issues/16044).
diff --git a/8.2.1/extending/repo.mdx b/8.2.1/extending/repo.mdx
deleted file mode 100644
index b878f03..0000000
--- a/8.2.1/extending/repo.mdx
+++ /dev/null
@@ -1,161 +0,0 @@
----
-title: 'Repository Rules'
----
-
-
-
-This page covers how to define repository rules and provides examples for
-more details.
-
-An [external repository](/external/overview#repository) is a directory tree,
-containing source files usable in a Bazel build, which is generated on
-demand by running its corresponding **repo rule**. Repos can be defined in a
-multitude of ways, but ultimately, each repo is defined by invoking a repo
-rule, just as build targets are defined by invoking build rules. They can be
-used to depend on third-party libraries (such as Maven packaged libraries)
-but also to generate `BUILD` files specific to the host Bazel is running on.
-
-## Repository rule definition
-
-In a `.bzl` file, use the
-[repository_rule](/rules/lib/globals/bzl#repository_rule) function to define
-a new repo rule and store it in a global variable. After a repo rule is
-defined, it can be invoked as a function to define repos. This invocation is
-usually performed from inside a [module extension](/external/extension)
-implementation function.
-
-The two major components of a repo rule definition are its attribute schema
-and implementation function. The attribute schema determines the names and
-types of attributes passed to a repo rule invocation, and the implementation
-function is run when the repo needs to be fetched.
-
-## Attributes
-
-Attributes are arguments passed to the repo rule invocation. The schema of
-attributes accepted by a repo rule is specified using the `attrs` argument
-when the repo rule is defined with a call to `repository_rule`. An example
-defining `url` and `sha256` attributes as strings:
-
-```python
-http_archive = repository_rule(
-    implementation = _impl,
-    attrs = {
-        "url": attr.string(mandatory = True),
-        "sha256": attr.string(mandatory = True),
-    },
-)
-```
-
-To access an attribute within the implementation function, use
-`repository_ctx.attr.<name>`:
-
-```python
-def _impl(repository_ctx):
-  url = repository_ctx.attr.url
-  checksum = repository_ctx.attr.sha256
-```
-
-All `repository_rule`s have the implicitly defined attribute `name`.
-This is a string attribute that behaves somewhat magically: when specified
-as an input to a repo rule invocation, it takes an apparent repo name; but
-when read from the repo rule's implementation function using
-`repository_ctx.attr.name`, it returns the canonical repo name.
-
-## Implementation function
-
-Every repo rule requires an `implementation` function. It contains the
-actual logic of the rule and is executed strictly in the loading phase.
-
-The function has exactly one input parameter, `repository_ctx`. The function
-returns either `None` to signify that the rule is reproducible given the
-specified parameters, or a dict with a set of parameters for that rule that
-would turn that rule into a reproducible one generating the same repo. For
-example, for a rule tracking a git repository that would mean returning a
-specific commit identifier instead of a floating branch that was originally
-specified (see the sketch at the end of this section).
-
-The input parameter `repository_ctx` can be used to access attribute values,
-and non-hermetic functions (finding a binary, executing a binary, creating a
-file in the repository or downloading a file from the Internet). See [the
-API docs](/rules/lib/builtins/repository_ctx) for more context. Example:
-
-```python
-def _impl(repository_ctx):
-  repository_ctx.symlink(repository_ctx.attr.path, "")
-
-local_repository = repository_rule(
-    implementation = _impl,
-    ...)
-```
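-
-The sketch below illustrates the resolved-dict idea from above. It is
-hypothetical: the `remote`/`ref` attributes, the helper structure, and the
-`git` invocations are made up for illustration; only `repository_ctx.execute`
-and the return-value convention are real API surface:
-
-```python
-def _git_repo_impl(repository_ctx):
-  # Fetch the repository at whatever `ref` currently points to.
-  repository_ctx.execute(["git", "clone", repository_ctx.attr.remote, "."])
-  repository_ctx.execute(["git", "checkout", repository_ctx.attr.ref])
-  # Resolve the checked-out ref to a concrete commit.
-  result = repository_ctx.execute(["git", "rev-parse", "HEAD"])
-  commit = result.stdout.strip()
-  if repository_ctx.attr.ref == commit:
-    return None  # Already reproducible as specified.
-  # Returning a dict of pinned attributes makes the invocation reproducible.
-  return {
-      "name": repository_ctx.attr.name,
-      "remote": repository_ctx.attr.remote,
-      "ref": commit,
-  }
-```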
-
-## When is the implementation function executed?
-
-The implementation function of a repo rule is executed when Bazel needs a
-target from that repository, for example when another target (in another
-repo) depends on it or if it is mentioned on the command line. The
-implementation function is then expected to create the repo in the file
-system. This is called "fetching" the repo.
-
-In contrast to regular targets, repos are not necessarily re-fetched when
-something changes that would cause the repo to be different. This is because
-there are things that Bazel either cannot detect changes to or it would
-cause too much overhead on every build (for example, things that are fetched
-from the network). Therefore, repos are re-fetched only if one of the
-following things changes:
-
-* The attributes passed to the repo rule invocation.
-* The Starlark code comprising the implementation of the repo rule.
-* The value of any environment variable passed to `repository_ctx`'s
-  `getenv()` method or declared with the `environ` attribute of the
-  [`repository_rule`](/rules/lib/globals/bzl#repository_rule). The values of
-  these environment variables can be hard-wired on the command line with
-  the [`--repo_env`](/reference/command-line-reference#flag--repo_env) flag.
-* The existence, contents, and type of any paths being
-  [`watch`ed](/rules/lib/builtins/repository_ctx#watch) in the
-  implementation function of the repo rule.
-  * Certain other methods of `repository_ctx` with a `watch` parameter,
-    such as `read()`, `execute()`, and `extract()`, can also cause paths to
-    be watched.
-  * Similarly,
-    [`repository_ctx.watch_tree`](/rules/lib/builtins/repository_ctx#watch_tree)
-    and [`path.readdir`](/rules/lib/builtins/path#readdir) can cause paths
-    to be watched in other ways.
-* When `bazel fetch --force` is executed.
-
-There are two parameters of `repository_rule` that control when the
-repositories are re-fetched:
-
-* If the `configure` flag is set, the repository is re-fetched on `bazel
-  fetch --force --configure` (non-`configure` repositories are not
-  re-fetched).
-* If the `local` flag is set, in addition to the above cases, the repo is
-  also re-fetched when the Bazel server restarts.
-
-## Forcing refetch of external repos
-
-Sometimes, an external repo can become outdated without any change to its
-definition or dependencies. For example, a repo fetching sources might
-follow a particular branch of a third-party repository, and new commits are
-available on that branch. In this case, you can ask Bazel to refetch all
-external repos unconditionally by calling `bazel fetch --force --all`.
-
-Moreover, some repo rules inspect the local machine and might become
-outdated if the local machine was upgraded. To refetch only those external
-repos whose [`repository_rule`](/rules/lib/globals#repository_rule)
-definition has the `configure` attribute set, use `bazel fetch --force
---configure`.
-
-## Examples
-
-- [C++ auto-configured
-  toolchain](https://cs.opensource.google/bazel/bazel/+/master:tools/cpp/cc_configure.bzl;drc=644b7d41748e09eff9e47cbab2be2263bb71f29a;l=176):
-  it uses a repo rule to automatically create the C++ configuration files
-  for Bazel by looking for the local C++ compiler, the environment and the
-  flags the C++ compiler supports.
-
-- [Go repositories](https://github.com/bazelbuild/rules_go/blob/67bc217b6210a0922d76d252472b87e9a6118fdf/go/private/go_repositories.bzl#L195)
-  uses several `repository_rule`s to define the list of dependencies needed
-  to use the Go rules.
-
-- [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external)
-  creates an external repository called `@maven` by default that generates
-  build targets for every Maven artifact in the transitive dependency tree.
diff --git a/8.2.1/extending/rules.mdx b/8.2.1/extending/rules.mdx
deleted file mode 100644
index c91939e..0000000
--- a/8.2.1/extending/rules.mdx
+++ /dev/null
@@ -1,1244 +0,0 @@
----
-title: 'Rules'
----
-
-
-
-A **rule** defines a series of [**actions**](#actions) that Bazel performs
-on inputs to produce a set of outputs, which are referenced in
-[**providers**](#providers) returned by the rule's
-[**implementation function**](#implementation_function). For example, a C++
-binary rule might:
-
-1. Take a set of `.cpp` source files (inputs).
-2. Run `g++` on the source files (action).
-3. Return the `DefaultInfo` provider with the executable output and other
-   files to make available at runtime.
-4. Return the `CcInfo` provider with C++-specific information gathered from
-   the target and its dependencies.
-
-From Bazel's perspective, `g++` and the standard C++ libraries are also
-inputs to this rule. As a rule writer, you must consider not only the
-user-provided inputs to a rule, but also all of the tools and libraries
-required to execute the actions.
-
-Before creating or modifying any rule, ensure you are familiar with Bazel's
-[build phases](/extending/concepts). It is important to understand the three
-phases of a build (loading, analysis, and execution). It is also useful to
-learn about [macros](/extending/macros) to understand the difference between
-rules and macros. To get started, first review the [Rules
-Tutorial](/rules/rules-tutorial). Then, use this page as a reference.
-
-A few rules are built into Bazel itself. These *native rules*, such as
-`genrule` and `filegroup`, provide some core support.
-By defining your own rules, you can add support for languages and tools
-that Bazel doesn't support natively.
-
-Bazel provides an extensibility model for writing rules using the
-[Starlark](/rules/language) language. These rules are written in `.bzl`
-files, which can be loaded directly from `BUILD` files.
-
-When defining your own rule, you get to decide what attributes it supports
-and how it generates its outputs.
-
-The rule's `implementation` function defines its exact behavior during the
-[analysis phase](/extending/concepts#evaluation-model). This function
-doesn't run any external commands. Rather, it registers [actions](#actions)
-that will be used later during the execution phase to build the rule's
-outputs, if they are needed.
-
-## Rule creation
-
-In a `.bzl` file, use the [rule](/rules/lib/globals/bzl#rule) function to
-define a new rule, and store the result in a global variable. The call to
-`rule` specifies [attributes](#attributes) and an
-[implementation function](#implementation_function):
-
-```python
-example_library = rule(
-    implementation = _example_library_impl,
-    attrs = {
-        "deps": attr.label_list(),
-        ...
-    },
-)
-```
-
-This defines a [rule kind](/query/language#kind) named `example_library`.
-
-The call to `rule` also must specify if the rule creates an
-[executable](#executable-rules) output (with `executable = True`), or
-specifically a test executable (with `test = True`). If the latter, the rule
-is a *test rule*, and the name of the rule must end in `_test`.
-
-## Target instantiation
-
-Rules can be [loaded](/concepts/build-files#load) and called in `BUILD`
-files:
-
-```python
-load('//some/pkg:rules.bzl', 'example_library')
-
-example_library(
-    name = "example_target",
-    deps = [":another_target"],
-    ...
-)
-```
-
-Each call to a build rule returns no value, but has the side effect of
-defining a target. This is called *instantiating* the rule. This specifies a
-name for the new target and values for the target's
-[attributes](#attributes).
-
-Rules can also be called from Starlark functions and loaded in `.bzl` files.
-Starlark functions that call rules are called [Starlark
-macros](/extending/macros). Starlark macros must ultimately be called from
-`BUILD` files, and can only be called during the [loading
-phase](/extending/concepts#evaluation-model), when `BUILD` files are
-evaluated to instantiate targets.
-
-## Attributes
-
-An *attribute* is a rule argument. Attributes can provide specific values to
-a target's [implementation](#implementation_function), or they can refer to
-other targets, creating a graph of dependencies.
-
-Rule-specific attributes, such as `srcs` or `deps`, are defined by passing a
-map from attribute names to schemas (created using the
-[`attr`](/rules/lib/toplevel/attr) module) to the `attrs` parameter of
-`rule`.
-[Common attributes](/reference/be/common-definitions#common-attributes),
-such as `name` and `visibility`, are implicitly added to all rules.
-Additional attributes are implicitly added to
-[executable and test rules](#executable-rules) specifically. Attributes
-which are implicitly added to a rule can't be included in the dictionary
-passed to `attrs`.
-
-### Dependency attributes
-
-Rules that process source code usually define the following attributes to
-handle various [types of
-dependencies](/concepts/dependencies#types_of_dependencies):
-
-* `srcs` specifies source files processed by a target's actions. Often, the
-  attribute schema specifies which file extensions are expected for the
-  sort of source file the rule processes.
-  Rules for languages with header files generally specify a separate `hdrs`
-  attribute for headers processed by a target and its consumers.
-* `deps` specifies code dependencies for a target. The attribute schema
-  should specify which [providers](#providers) those dependencies must
-  provide. (For example, `cc_library` provides `CcInfo`.)
-* `data` specifies files to be made available at runtime to any executable
-  which depends on a target. The attribute schema should allow arbitrary
-  files to be specified.
-
-```python
-example_library = rule(
-    implementation = _example_library_impl,
-    attrs = {
-        "srcs": attr.label_list(allow_files = [".example"]),
-        "hdrs": attr.label_list(allow_files = [".header"]),
-        "deps": attr.label_list(providers = [ExampleInfo]),
-        "data": attr.label_list(allow_files = True),
-        ...
-    },
-)
-```
-
-These are examples of *dependency attributes*. Any attribute that specifies
-an input label (those defined with
-[`attr.label_list`](/rules/lib/toplevel/attr#label_list),
-[`attr.label`](/rules/lib/toplevel/attr#label), or
-[`attr.label_keyed_string_dict`](/rules/lib/toplevel/attr#label_keyed_string_dict))
-specifies dependencies of a certain type
-between a target and the targets whose labels (or the corresponding
-[`Label`](/rules/lib/builtins/Label) objects) are listed in that attribute
-when the target is defined. The repository, and possibly the path, for
-these labels is resolved relative to the defined target.
-
-```python
-example_library(
-    name = "my_target",
-    deps = [":other_target"],
-)
-
-example_library(
-    name = "other_target",
-    ...
-)
-```
-
-In this example, `other_target` is a dependency of `my_target`, and
-therefore `other_target` is analyzed first. It is an error if there is a
-cycle in the dependency graph of targets.
-
-
-
-### Private attributes and implicit dependencies
-
-A dependency attribute with a default value creates an *implicit
-dependency*. It is implicit because it's a part of the target graph that the
-user doesn't specify in a `BUILD` file. Implicit dependencies are useful for
-hard-coding a relationship between a rule and a *tool* (a build-time
-dependency, such as a compiler), since most of the time a user is not
-interested in specifying what tool the rule uses. Inside the rule's
-implementation function, this is treated the same as other dependencies.
-
-If you want to provide an implicit dependency without allowing the user to
-override that value, you can make the attribute *private* by giving it a
-name that begins with an underscore (`_`). Private attributes must have
-default values. It generally only makes sense to use private attributes for
-implicit dependencies.
-
-```python
-example_library = rule(
-    implementation = _example_library_impl,
-    attrs = {
-        ...
-        "_compiler": attr.label(
-            default = Label("//tools:example_compiler"),
-            allow_single_file = True,
-            executable = True,
-            cfg = "exec",
-        ),
-    },
-)
-```
-
-In this example, every target of type `example_library` has an implicit
-dependency on the compiler `//tools:example_compiler`. This allows
-`example_library`'s implementation function to generate actions that invoke
-the compiler, even though the user did not pass its label as an input. Since
-`_compiler` is a private attribute, it follows that `ctx.attr._compiler`
-will always point to `//tools:example_compiler` in all targets of this rule
-type. Alternatively, you can name the attribute `compiler` without the
-underscore and keep the default value.
-This allows users to substitute a different compiler if necessary, while
-requiring no awareness of the compiler's label otherwise.
-
-Implicit dependencies are generally used for tools that reside in the same
-repository as the rule implementation. If the tool comes from the
-[execution platform](/extending/platforms) or a different repository
-instead, the rule should obtain that tool from a
-[toolchain](/extending/toolchains).
-
-### Output attributes
-
-*Output attributes*, such as
-[`attr.output`](/rules/lib/toplevel/attr#output) and
-[`attr.output_list`](/rules/lib/toplevel/attr#output_list), declare an
-output file that the target generates. These differ from dependency
-attributes in two ways:
-
-* They define output file targets instead of referring to targets defined
-  elsewhere.
-* The output file targets depend on the instantiated rule target, instead
-  of the other way around.
-
-Typically, output attributes are only used when a rule needs to create
-outputs with user-defined names which can't be based on the target name. If
-a rule has one output attribute, it is typically named `out` or `outs`.
-
-Output attributes are the preferred way of creating *predeclared outputs*,
-which can be specifically depended upon or
-[requested at the command line](#requesting_output_files).
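-
-For illustration, here is a minimal sketch of a rule with a single `out`
-output attribute. The rule and its implementation function are hypothetical;
-only `attr.output` and `ctx.outputs` are real API surface:
-
-```python
-def _example_concat_impl(ctx):
-    # The label the user chose for `out` shows up as a File in ctx.outputs.
-    ctx.actions.run_shell(
-        inputs = ctx.files.srcs,
-        outputs = [ctx.outputs.out],
-        command = "cat %s > %s" % (
-            " ".join([f.path for f in ctx.files.srcs]),
-            ctx.outputs.out.path,
-        ),
-    )
-
-example_concat = rule(
-    implementation = _example_concat_impl,
-    attrs = {
-        "srcs": attr.label_list(allow_files = True),
-        "out": attr.output(mandatory = True),
-    },
-)
-```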
-
-## Implementation function
-
-Every rule requires an `implementation` function. These functions are
-executed strictly in the [analysis
-phase](/extending/concepts#evaluation-model) and transform the graph of
-targets generated in the loading phase into a graph of [actions](#actions)
-to be performed during the execution phase. As such, implementation
-functions can't actually read or write files.
-
-Rule implementation functions are usually private (named with a leading
-underscore). Conventionally, they are named the same as their rule, but
-suffixed with `_impl`.
-
-Implementation functions take exactly one parameter: a
-[rule context](/rules/lib/builtins/ctx), conventionally named `ctx`. They
-return a list of [providers](#providers).
-
-### Targets
-
-Dependencies are represented at analysis time as
-[`Target`](/rules/lib/builtins/Target) objects. These objects contain the
-[providers](#providers) generated when the target's implementation function
-was executed.
-
-[`ctx.attr`](/rules/lib/builtins/ctx#attr) has fields corresponding to the
-names of each dependency attribute, containing `Target` objects
-representing each direct dependency using that attribute. For `label_list`
-attributes, this is a list of `Targets`. For `label` attributes, this is a
-single `Target` or `None`.
-
-A list of provider objects are returned by a target's implementation
-function:
-
-```python
-return [ExampleInfo(headers = depset(...))]
-```
-
-Those can be accessed using index notation (`[]`), with the type of
-provider as a key. These can be [custom providers](#custom_providers)
-defined in Starlark or [providers for native rules](/rules/lib/providers)
-available as Starlark global variables.
-
-For example, if a rule takes header files using a `hdrs` attribute and
-provides them to the compilation actions of the target and its consumers,
-it could collect them like so:
-
-```python
-def _example_library_impl(ctx):
-    ...
-    transitive_headers = [hdr[ExampleInfo].headers for hdr in ctx.attr.hdrs]
-```
-
-There's a legacy struct style, which is strongly discouraged and rules
-should be [migrated away from it](#migrating_from_legacy_providers).
-
-### Files
-
-Files are represented by [`File`](/rules/lib/builtins/File) objects. Since
-Bazel doesn't perform file I/O during the analysis phase, these objects
-can't be used to directly read or write file content. Rather, they are
-passed to action-emitting functions (see
-[`ctx.actions`](/rules/lib/builtins/actions)) to construct pieces of the
-action graph.
-
-A `File` can either be a source file or a generated file. Each generated
-file must be an output of exactly one action. Source files can't be the
-output of any action.
-
-For each dependency attribute, the corresponding field of
-[`ctx.files`](/rules/lib/builtins/ctx#files) contains a list of the default
-outputs of all dependencies using that attribute:
-
-```python
-def _example_library_impl(ctx):
-    ...
-    headers = depset(ctx.files.hdrs, transitive = transitive_headers)
-    srcs = ctx.files.srcs
-    ...
-```
-
-[`ctx.file`](/rules/lib/builtins/ctx#file) contains a single `File` or
-`None` for dependency attributes whose specs set `allow_single_file =
-True`. [`ctx.executable`](/rules/lib/builtins/ctx#executable) behaves the
-same as `ctx.file`, but only contains fields for dependency attributes
-whose specs set `executable = True`.
-
-### Declaring outputs
-
-During the analysis phase, a rule's implementation function can create
-outputs. Since all labels have to be known during the loading phase, these
-additional outputs have no labels. `File` objects for outputs can be
-created using
-[`ctx.actions.declare_file`](/rules/lib/builtins/actions#declare_file) and
-[`ctx.actions.declare_directory`](/rules/lib/builtins/actions#declare_directory).
-Often, the names of outputs are based on the target's name,
-[`ctx.label.name`](/rules/lib/builtins/ctx#label):
-
-```python
-def _example_library_impl(ctx):
-    ...
-    output_file = ctx.actions.declare_file(ctx.label.name + ".output")
-    ...
-```
-
-For *predeclared outputs*, like those created for
-[output attributes](#output_attributes), `File` objects instead can be
-retrieved from the corresponding fields of
-[`ctx.outputs`](/rules/lib/builtins/ctx#outputs).
-
-### Actions
-
-An action describes how to generate a set of outputs from a set of inputs,
-for example "run gcc on hello.c and get hello.o". When an action is
-created, Bazel doesn't run the command immediately. It registers it in a
-graph of dependencies, because an action can depend on the output of
-another action. For example, in C, the linker must be called after the
-compiler.
-
-General-purpose functions that create actions are defined in
-[`ctx.actions`](/rules/lib/builtins/actions):
-
-* [`ctx.actions.run`](/rules/lib/builtins/actions#run), to run an
-  executable.
-* [`ctx.actions.run_shell`](/rules/lib/builtins/actions#run_shell), to run
-  a shell command.
-* [`ctx.actions.write`](/rules/lib/builtins/actions#write), to write a
-  string to a file.
-* [`ctx.actions.expand_template`](/rules/lib/builtins/actions#expand_template),
-  to generate a file from a template.
-
-[`ctx.actions.args`](/rules/lib/builtins/actions#args) can be used to
-efficiently accumulate the arguments for actions. It avoids flattening
-depsets until execution time:
-
-```python
-def _example_library_impl(ctx):
-    ...
-
-    transitive_headers = [dep[ExampleInfo].headers for dep in ctx.attr.deps]
-    headers = depset(ctx.files.hdrs, transitive = transitive_headers)
-    srcs = ctx.files.srcs
-    inputs = depset(srcs, transitive = [headers])
-    output_file = ctx.actions.declare_file(ctx.label.name + ".output")
-
-    args = ctx.actions.args()
-    args.add_joined("-h", headers, join_with = ",")
-    args.add_joined("-s", srcs, join_with = ",")
-    args.add("-o", output_file)
-
-    ctx.actions.run(
-        mnemonic = "ExampleCompile",
-        executable = ctx.executable._compiler,
-        arguments = [args],
-        inputs = inputs,
-        outputs = [output_file],
-    )
-    ...
-```
-
-Actions take a list or depset of input files and generate a (non-empty)
-list of output files. The set of input and output files must be known
-during the [analysis phase](/extending/concepts#evaluation-model). It might
-depend on the value of attributes, including providers from dependencies,
-but it can't depend on the result of the execution. For example, if your
-action runs the unzip command, you must specify which files you expect to
-be inflated (before running unzip). Actions which create a variable number
-of files internally can wrap those in a single file (such as a zip, tar, or
-other archive format).
-
-Actions must list all of their inputs. Listing inputs that are not used is
-permitted, but inefficient.
-
-Actions must create all of their outputs. They may write other files, but
-anything not in outputs won't be available to consumers. All declared
-outputs must be written by some action.
-
-Actions are comparable to pure functions: They should depend only on the
-provided inputs, and avoid accessing computer information, username, clock,
-network, or I/O devices (except for reading inputs and writing outputs).
-This is important because the output will be cached and reused.
-
-Dependencies are resolved by Bazel, which decides which actions to
-execute. It is an error if there is a cycle in the dependency graph.
-Creating an action doesn't guarantee that it will be executed; that depends
-on whether its outputs are needed for the build.
-
-### Providers
-
-Providers are pieces of information that a rule exposes to other rules that
-depend on it. This data can include output files, libraries, parameters to
-pass on a tool's command line, or anything else a target's consumers should
-know about.
-
-Since a rule's implementation function can only read providers from the
-instantiated target's immediate dependencies, rules need to forward any
-information from a target's dependencies that needs to be known by a
-target's consumers, generally by accumulating that into a
-[`depset`](/rules/lib/builtins/depset).
-
-A target's providers are specified by a list of provider objects returned
-by the implementation function.
-
-Old implementation functions can also be written in a legacy style where
-the implementation function returns a
-[`struct`](/rules/lib/builtins/struct) instead of a list of provider
-objects. This style is strongly discouraged and rules should be
-[migrated away from it](#migrating_from_legacy_providers).
-
-#### Default outputs
-
-A target's *default outputs* are the outputs that are requested by default
-when the target is requested for build at the command line. For example, a
-`java_library` target `//pkg:foo` has `foo.jar` as a default output, so
-that will be built by the command `bazel build //pkg:foo`.
-
-Default outputs are specified by the `files` parameter of
-[`DefaultInfo`](/rules/lib/providers/DefaultInfo):
-
-```python
-def _example_library_impl(ctx):
-    ...
-    return [
-        DefaultInfo(files = depset([output_file]), ...),
-        ...
-    ]
-```
-
-If `DefaultInfo` is not returned by a rule implementation or the `files`
-parameter is not specified, `DefaultInfo.files` defaults to all
-*predeclared outputs* (generally, those created by [output
-attributes](#output_attributes)).
-
-Rules that perform actions should provide default outputs, even if those
-outputs are not expected to be directly used. Actions that are not in the
-graph of the requested outputs are pruned. If an output is only used by a
-target's consumers, those actions won't be performed when the target is
-built in isolation. This makes debugging more difficult because rebuilding
-just the failing target won't reproduce the failure.
-
-#### Runfiles
-
-Runfiles are a set of files used by a target at runtime (as opposed to
-build time). During the [execution
-phase](/extending/concepts#evaluation-model), Bazel creates a directory
-tree containing symlinks pointing to the runfiles. This stages the
-environment for the binary so it can access the runfiles during runtime.
-
-Runfiles can be added manually during rule creation.
-[`runfiles`](/rules/lib/builtins/runfiles) objects can be created by the
-`runfiles` method on the rule context,
-[`ctx.runfiles`](/rules/lib/builtins/ctx#runfiles), and passed to the
-`runfiles` parameter on `DefaultInfo`. The executable output of
-[executable rules](#executable-rules) is implicitly added to the runfiles.
-
-Some rules specify attributes, generally named
-[`data`](/reference/be/common-definitions#common.data), whose outputs are
-added to a target's runfiles. Runfiles should also be merged in from
-`data`, as well as from any attributes which might provide code for
-eventual execution, generally `srcs` (which might contain `filegroup`
-targets with associated `data`) and `deps`.
-
-```python
-def _example_library_impl(ctx):
-    ...
-    runfiles = ctx.runfiles(files = ctx.files.data)
-    transitive_runfiles = []
-    for runfiles_attr in (
-        ctx.attr.srcs,
-        ctx.attr.hdrs,
-        ctx.attr.deps,
-        ctx.attr.data,
-    ):
-        for target in runfiles_attr:
-            transitive_runfiles.append(target[DefaultInfo].default_runfiles)
-    runfiles = runfiles.merge_all(transitive_runfiles)
-    return [
-        DefaultInfo(..., runfiles = runfiles),
-        ...
-    ]
-```
-
-#### Custom providers
-
-Providers can be defined using the
-[`provider`](/rules/lib/globals/bzl#provider) function to convey
-rule-specific information:
-
-```python
-ExampleInfo = provider(
-    "Info needed to compile/link Example code.",
-    fields = {
-        "headers": "depset of header Files from transitive dependencies.",
-        "files_to_link": "depset of Files from compilation.",
-    },
-)
-```
-
-Rule implementation functions can then construct and return provider
-instances:
-
-```python
-def _example_library_impl(ctx):
-    ...
-    return [
-        ...
-        ExampleInfo(
-            headers = headers,
-            files_to_link = depset(
-                [output_file],
-                transitive = [
-                    dep[ExampleInfo].files_to_link for dep in ctx.attr.deps
-                ],
-            ),
-        ),
-    ]
-```
-
-##### Custom initialization of providers
-
-It's possible to guard the instantiation of a provider with custom
-preprocessing and validation logic. This can be used to ensure that all
-provider instances satisfy certain invariants, or to give users a cleaner
-API for obtaining an instance.
-
-This is done by passing an `init` callback to the
-[`provider`](/rules/lib/globals/bzl.html#provider) function. If this
-callback is given, the return type of `provider()` changes to be a tuple of
-two values: the provider symbol that is the ordinary return value when
-`init` is not used, and a "raw constructor".
-
-In this case, when the provider symbol is called, instead of directly
-returning a new instance, it will forward the arguments along to the `init`
-callback. The callback's return value must be a dict mapping field names
-(strings) to values; this is used to initialize the fields of the new
-instance. Note that the callback may have any signature, and if the
-arguments don't match the signature an error is reported as if the callback
-were invoked directly.
-
-The raw constructor, by contrast, will bypass the `init` callback.
-
-The following example uses `init` to preprocess and validate its arguments:
-
-```python
-# //pkg:exampleinfo.bzl
-
-_core_headers = [...]  # private constant representing standard library files
-
-# Keyword-only arguments are preferred.
-def _exampleinfo_init(*, files_to_link, headers = None, allow_empty_files_to_link = False):
-    if not files_to_link and not allow_empty_files_to_link:
-        fail("files_to_link may not be empty")
-    all_headers = depset(_core_headers, transitive = headers)
-    return {"files_to_link": files_to_link, "headers": all_headers}
-
-ExampleInfo, _new_exampleinfo = provider(
-    fields = ["files_to_link", "headers"],
-    init = _exampleinfo_init,
-)
-```
-
-A rule implementation may then instantiate the provider as follows:
-
-```python
-ExampleInfo(
-    files_to_link = my_files_to_link,  # may not be empty
-    headers = my_headers,  # will automatically include the core headers
-)
-```
-
-The raw constructor can be used to define alternative public factory
-functions that don't go through the `init` logic. For example,
-exampleinfo.bzl could define:
-
-```python
-def make_barebones_exampleinfo(headers):
-    """Returns an ExampleInfo with no files_to_link and only the specified headers."""
-    return _new_exampleinfo(files_to_link = depset(), headers = headers)
-```
-
-Typically, the raw constructor is bound to a variable whose name begins
-with an underscore (`_new_exampleinfo` above), so that user code can't load
-it and generate arbitrary provider instances.
-
-Another use for `init` is to prevent the user from calling the provider
-symbol altogether, and force them to use a factory function instead:
-
-```python
-def _exampleinfo_init_banned(*args, **kwargs):
-    fail("Do not call ExampleInfo(). Use make_exampleinfo() instead.")
-
-ExampleInfo, _new_exampleinfo = provider(
-    ...
-    init = _exampleinfo_init_banned,
-)
-
-def make_exampleinfo(...):
-    ...
-    return _new_exampleinfo(...)
-```
-
-
-
-## Executable rules and test rules
-
-Executable rules define targets that can be invoked by a `bazel run`
-command. Test rules are a special kind of executable rule whose targets can
-also be invoked by a `bazel test` command. Executable and test rules are
-created by setting the respective
-[`executable`](/rules/lib/globals/bzl#rule.executable) or
-[`test`](/rules/lib/globals/bzl#rule.test) argument to `True` in the call
-to `rule`:
-
-```python
-example_binary = rule(
-    implementation = _example_binary_impl,
-    executable = True,
-    ...
-)
-
-example_test = rule(
-    implementation = _example_binary_impl,
-    test = True,
-    ...
-)
-```
-
-Test rules must have names that end in `_test`.
-(Test *target* names also often end in `_test` by convention, but this is
-not required.) Non-test rules must not have this suffix.
-
-Both kinds of rules must produce an executable output file (which may or
-may not be predeclared) that will be invoked by the `run` or `test`
-commands. To tell Bazel which of a rule's outputs to use as this
-executable, pass it as the `executable` argument of a returned
-[`DefaultInfo`](/rules/lib/providers/DefaultInfo) provider. That
-`executable` is added to the default outputs of the rule (so you don't need
-to pass that to both `executable` and `files`). It's also implicitly added
-to the [runfiles](#runfiles):
-
-```python
-def _example_binary_impl(ctx):
-    executable = ctx.actions.declare_file(ctx.label.name)
-    ...
-    return [
-        DefaultInfo(executable = executable, ...),
-        ...
-    ]
-```
-
-The action that generates this file must set the executable bit on the
-file. For a [`ctx.actions.run`](/rules/lib/builtins/actions#run) or
-[`ctx.actions.run_shell`](/rules/lib/builtins/actions#run_shell) action
-this should be done by the underlying tool that is invoked by the action.
-For a [`ctx.actions.write`](/rules/lib/builtins/actions#write) action, pass
-`is_executable = True`.
-
-As [legacy behavior](#deprecated_predeclared_outputs), executable rules
-have a special `ctx.outputs.executable` predeclared output. This file
-serves as the default executable if you don't specify one using
-`DefaultInfo`; it must not be used otherwise. This output mechanism is
-deprecated because it doesn't support customizing the executable file's
-name at analysis time.
-
-See examples of an
-[executable rule](https://github.com/bazelbuild/examples/blob/main/rules/executable/fortune.bzl)
-and a
-[test rule](https://github.com/bazelbuild/examples/blob/main/rules/test_rule/line_length.bzl).
-
-[Executable
-rules](/reference/be/common-definitions#common-attributes-binaries) and
-[test rules](/reference/be/common-definitions#common-attributes-tests) have
-additional attributes implicitly defined, in addition to those added for
-[all rules](/reference/be/common-definitions#common-attributes). The
-defaults of implicitly-added attributes can't be changed, though this can
-be worked around by wrapping a private rule in a [Starlark
-macro](/extending/macros) which alters the default:
-
-```python
-def example_test(size = "small", **kwargs):
-    _example_test(size = size, **kwargs)
-
-_example_test = rule(
-    ...
-)
-```
-
-### Runfiles location
-
-When an executable target is run with `bazel run` (or `test`), the root of
-the runfiles directory is adjacent to the executable. The paths relate as
-follows:
-
-```python
-# Given launcher_path and runfile_file:
-runfiles_root = launcher_path.path + ".runfiles"
-workspace_name = ctx.workspace_name
-runfile_path = runfile_file.short_path
-execution_root_relative_path = "%s/%s/%s" % (
-    runfiles_root, workspace_name, runfile_path)
-```
-
-The path to a `File` under the runfiles directory corresponds to
-[`File.short_path`](/rules/lib/builtins/File#short_path).
-
-The binary executed directly by `bazel` is adjacent to the root of the
-`runfiles` directory. However, binaries called *from* the runfiles can't
-make the same assumption. To mitigate this, each binary should provide a
-way to accept its runfiles root as a parameter using an environment
-variable, command line argument, or flag. This allows binaries to pass the
-correct canonical runfiles root to the binaries they call.
-If that's not set, a binary can guess that it was the first binary called
-and look for an adjacent runfiles directory.
-
-## Advanced topics
-
-### Requesting output files
-
-A single target can have several output files. When a `bazel build` command
-is run, some of the outputs of the targets given to the command are
-considered to be *requested*. Bazel only builds these requested files and
-the files that they directly or indirectly depend on. (In terms of the
-action graph, Bazel only executes the actions that are reachable as
-transitive dependencies of the requested files.)
-
-In addition to [default outputs](#default_outputs), any *predeclared
-output* can be explicitly requested on the command line. Rules can specify
-predeclared outputs using [output attributes](#output_attributes). In that
-case, the user explicitly chooses labels for outputs when they instantiate
-the rule. To obtain [`File`](/rules/lib/builtins/File) objects for output
-attributes, use the corresponding attribute of
-[`ctx.outputs`](/rules/lib/builtins/ctx#outputs). Rules can
-[implicitly define predeclared outputs](#deprecated_predeclared_outputs)
-based on the target name as well, but this feature is deprecated.
-
-In addition to default outputs, there are *output groups*, which are
-collections of output files that may be requested together. These can be
-requested with
-[`--output_groups`](/reference/command-line-reference#flag--output_groups).
-For example, if a target `//pkg:mytarget` is of a rule type that has a
-`debug_files` output group, these files can be built by running `bazel
-build //pkg:mytarget --output_groups=debug_files`. Since non-predeclared
-outputs don't have labels, they can only be requested by appearing in the
-default outputs or an output group.
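-
-For instance, a hypothetical invocation requesting only that output group
-(assuming such a rule type and target exist) would look like:
-
-```console
-$ bazel build //pkg:mytarget --output_groups=debug_files
-```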
-Rules may generate different actions based on the configuration, for instance
-to change the CPU architecture that is passed to the compiler. In some cases,
-the same library may be needed for different configurations. If this happens,
-it will be analyzed and potentially built multiple times.
-
-By default, Bazel builds a target's dependencies in the same configuration as
-the target itself, in other words without transitions. When a dependency is a
-tool that's needed to help build the target, the corresponding attribute should
-specify a transition to an exec configuration. This causes the tool and all its
-dependencies to build for the execution platform.
-
-For each dependency attribute, you can use `cfg` to decide if dependencies
-should build in the same configuration or transition to an exec configuration.
-If a dependency attribute has the flag `executable = True`, `cfg` must be set
-explicitly. This is to guard against accidentally building a tool for the wrong
-configuration.
-[See example](https://github.com/bazelbuild/examples/blob/main/rules/actions_run/execute.bzl)
-
-In general, sources, dependent libraries, and executables that will be needed at
-runtime can use the same configuration.
-
-Tools that are executed as part of the build (such as compilers or code generators)
-should be built for an exec configuration. In this case, specify `cfg = "exec"` in
-the attribute.
-
-Otherwise, executables that are used at runtime (such as part of a test) should
-be built for the target configuration. In this case, specify `cfg = "target"` in
-the attribute.
-
-`cfg = "target"` doesn't actually do anything: it's purely a convenience value to
-help rule designers be explicit about their intentions. When `executable = False`,
-which means `cfg` is optional, only set this when it truly helps readability.
-
-You can also use `cfg = my_transition` to use
-[user-defined transitions](/extending/config#user-defined-transitions), which allow
-rule authors a great deal of flexibility in changing configurations, with the
-drawback of
-[making the build graph larger and less comprehensible](/extending/config#memory-and-performance-considerations).
-
-**Note**: Historically, Bazel didn't have the concept of execution platforms,
-and instead all build actions were considered to run on the host machine. Bazel
-versions before 6.0 created a distinct "host" configuration to represent this.
-If you see references to "host" in code or old documentation, that's what this
-refers to. We recommend using Bazel 6.0 or newer to avoid this extra conceptual
-overhead.
-
-### Configuration fragments
-
-Rules may access
-[configuration fragments](/rules/lib/fragments) such as
-`cpp` and `java`. However, all required fragments must be declared in
-order to avoid access errors:
-
-```python
-def _impl(ctx):
-    # Using ctx.fragments.cpp leads to an error since it was not declared.
-    x = ctx.fragments.java
-    ...
-
-my_rule = rule(
-    implementation = _impl,
-    fragments = ["java"],  # Required fragments of the target configuration
-    ...
-)
-```
-
-### Runfiles symlinks
-
-Normally, the relative path of a file in the runfiles tree is the same as the
-relative path of that file in the source tree or generated output tree. If these
-need to be different for some reason, you can specify the `root_symlinks` or
-`symlinks` arguments. `root_symlinks` is a dictionary mapping paths to files,
-where the paths are relative to the root of the runfiles directory.
-The `symlinks` dictionary is the same, but paths are implicitly prefixed with
-the name of the main workspace (*not* the name of the repository containing the
-current target).
-
-```python
-    ...
-    runfiles = ctx.runfiles(
-        root_symlinks = {"some/path/here.foo": ctx.file.some_data_file2},
-        symlinks = {"some/path/here.bar": ctx.file.some_data_file3},
-    )
-    # Creates something like:
-    # sometarget.runfiles/
-    #     some/
-    #         path/
-    #             here.foo -> some_data_file2
-    #     <workspace_name>/
-    #         some/
-    #             path/
-    #                 here.bar -> some_data_file3
-```
-
-If `symlinks` or `root_symlinks` is used, be careful not to map two different
-files to the same path in the runfiles tree. This will cause the build to fail
-with an error describing the conflict. To fix, you will need to modify your
-`ctx.runfiles` arguments to remove the collision. This checking will be done for
-any targets using your rule, as well as targets of any kind that depend on those
-targets. This is especially risky if your tool is likely to be used transitively
-by another tool; symlink names must be unique across the runfiles of a tool and
-all of its dependencies.
-
-### Code coverage
-
-When the [`coverage`](/reference/command-line-reference#coverage) command is run,
-the build may need to add coverage instrumentation for certain targets. The
-build also gathers the list of source files that are instrumented. The subset of
-targets that are considered is controlled by the flag
-[`--instrumentation_filter`](/reference/command-line-reference#flag--instrumentation_filter).
-Test targets are excluded, unless
-[`--instrument_test_targets`](/reference/command-line-reference#flag--instrument_test_targets)
-is specified.
-
-If a rule implementation adds coverage instrumentation at build time, it needs
-to account for that in its implementation function.
-[ctx.coverage_instrumented](/rules/lib/builtins/ctx#coverage_instrumented) returns
-`True` in coverage mode if a target's sources should be instrumented:
-
-```python
-# Are this rule's sources instrumented?
-if ctx.coverage_instrumented():
-    # Do something to turn on coverage for this compile action
-```
-
-Logic that always needs to be on in coverage mode (whether a target's sources
-specifically are instrumented or not) can be conditioned on
-[ctx.configuration.coverage_enabled](/rules/lib/builtins/configuration#coverage_enabled).
-
-If the rule directly includes sources from its dependencies before compilation
-(such as header files), it may also need to turn on compile-time instrumentation if
-the dependencies' sources should be instrumented:
-
-```python
-# Are this rule's sources or any of the sources for its direct dependencies
-# in deps instrumented?
-if (ctx.configuration.coverage_enabled and
-    (ctx.coverage_instrumented() or
-     any([ctx.coverage_instrumented(dep) for dep in ctx.attr.deps]))):
-    # Do something to turn on coverage for this compile action
-```
-
-Rules should also provide information about which attributes are relevant for
-coverage with the `InstrumentedFilesInfo` provider, constructed using
-[`coverage_common.instrumented_files_info`](/rules/lib/toplevel/coverage_common#instrumented_files_info).
-The `dependency_attributes` parameter of `instrumented_files_info` should list
-all runtime dependency attributes, including code dependencies like `deps` and
-data dependencies like `data`. The `source_attributes` parameter should list the
-rule's source file attributes if coverage instrumentation might be added:
-
-```python
-def _example_library_impl(ctx):
-    ...
-    return [
-        ...
-        coverage_common.instrumented_files_info(
-            ctx,
-            dependency_attributes = ["deps", "data"],
-            # Omitted if coverage is not supported for this rule:
-            source_attributes = ["srcs", "hdrs"],
-        )
-        ...
-    ]
-```
-
-If `InstrumentedFilesInfo` is not returned, a default one is created with each
-non-tool [dependency attribute](#dependency_attributes) that doesn't set
-[`cfg`](#configuration) to `"exec"` in the attribute schema in
-`dependency_attributes`. (This isn't ideal behavior, since it puts attributes
-like `srcs` in `dependency_attributes` instead of `source_attributes`, but it
-avoids the need for explicit coverage configuration for all rules in the
-dependency chain.)
-
-### Validation Actions
-
-Sometimes you need to validate something about the build, and the
-information required to do that validation is available only in artifacts
-(source files or generated files). Because this information is only available
-in artifacts, rules can't do this validation at analysis time, since rules
-can't read files. Instead, actions must do this validation at execution time.
-When validation fails, the action will fail, and hence so will the build.
-
-Examples of validations that might be run are static analysis, linting,
-dependency and consistency checks, and style checks.
-
-Validation actions can also help to improve build performance by moving parts
-of actions that are not required for building artifacts into separate actions.
-For example, if a single action that does compilation and linting can be
-separated into a compilation action and a linting action, then the linting
-action can be run as a validation action and run in parallel with other actions.
-
-These "validation actions" often don't produce anything that is used elsewhere
-in the build, since they only need to assert things about their inputs. This
-presents a problem though: if a validation action doesn't produce anything that
-is used elsewhere in the build, how does a rule get the action to run?
-Historically, the approach was to have the validation action output an empty
-file, and artificially add that output to the inputs of some other important
-action in the build.
-
-This works, because Bazel will always run the validation action when the compile
-action is run, but this has significant drawbacks:
-
-1. The validation action is in the critical path of the build. Because Bazel
-thinks the empty output is required to run the compile action, it will run the
-validation action first, even though the compile action will ignore the input.
-This reduces parallelism and slows down builds.
-
-2. If other actions in the build might run instead of the
-compile action, then the empty outputs of validation actions need to be added to
-those actions as well (`java_library`'s source jar output, for example). This is
-also a problem if new actions that might run instead of the compile action are
-added later, and the empty validation output is accidentally left off.
-
-The solution to these problems is to use the Validations Output Group.
-
-#### Validations Output Group
-
-The Validations Output Group is an output group designed to hold the otherwise
-unused outputs of validation actions, so that they don't need to be artificially
-added to the inputs of other actions.
-
-This group is special in that its outputs are always requested, regardless of
-the value of the `--output_groups` flag, and regardless of how the target is
-depended upon (for example, on the command line, as a dependency, or through
-implicit outputs of the target).
Note that normal caching and incrementality -still apply: if the inputs to the validation action have not changed and the -validation action previously succeeded, then the validation action won't be -run. - - - -Using this output group still requires that validation actions output some file, -even an empty one. This might require wrapping some tools that normally don't -create outputs so that a file is created. - -A target's validation actions are not run in three cases: - -* When the target is depended upon as a tool -* When the target is depended upon as an implicit dependency (for example, an - attribute that starts with "_") -* When the target is built in the exec configuration. - -It is assumed that these targets have their own -separate builds and tests that would uncover any validation failures. - -#### Using the Validations Output Group - -The Validations Output Group is named `_validation` and is used like any other -output group: - -```python -def _rule_with_validation_impl(ctx): - - ctx.actions.write(ctx.outputs.main, "main output\n") - ctx.actions.write(ctx.outputs.implicit, "implicit output\n") - - validation_output = ctx.actions.declare_file(ctx.attr.name + ".validation") - ctx.actions.run( - outputs = [validation_output], - executable = ctx.executable._validation_tool, - arguments = [validation_output.path], - ) - - return [ - DefaultInfo(files = depset([ctx.outputs.main])), - OutputGroupInfo(_validation = depset([validation_output])), - ] - - -rule_with_validation = rule( - implementation = _rule_with_validation_impl, - outputs = { - "main": "%{name}.main", - "implicit": "%{name}.implicit", - }, - attrs = { - "_validation_tool": attr.label( - default = Label("//validation_actions:validation_tool"), - executable = True, - cfg = "exec" - ), - } -) -``` - -Notice that the validation output file is not added to the `DefaultInfo` or the -inputs to any other action. The validation action for a target of this rule kind -will still run if the target is depended upon by label, or any of the target's -implicit outputs are directly or indirectly depended upon. - -It is usually important that the outputs of validation actions only go into the -validation output group, and are not added to the inputs of other actions, as -this could defeat parallelism gains. Note however that Bazel doesn't -have any special checking to enforce this. Therefore, you should test -that validation action outputs are not added to the inputs of any actions in the -tests for Starlark rules. For example: - -```python -load("@bazel_skylib//lib:unittest.bzl", "analysistest") - -def _validation_outputs_test_impl(ctx): - env = analysistest.begin(ctx) - - actions = analysistest.target_actions(env) - target = analysistest.target_under_test(env) - validation_outputs = target.output_groups._validation.to_list() - for action in actions: - for validation_output in validation_outputs: - if validation_output in action.inputs.to_list(): - analysistest.fail(env, - "%s is a validation action output, but is an input to action %s" % ( - validation_output, action)) - - return analysistest.end(env) - -validation_outputs_test = analysistest.make(_validation_outputs_test_impl) -``` - -#### Validation Actions Flag - -Running validation actions is controlled by the `--run_validations` command line -flag, which defaults to true. 
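-
-As noted above, a validation tool that produces no output of its own may need a
-wrapper so that the action has a file to declare. A minimal sketch, assuming
-the rule defines a single-file `src` attribute and a private `_linter`
-executable attribute (both hypothetical names):
-
-```python
-def _run_lint_validation(ctx):
-    validation_output = ctx.actions.declare_file(ctx.label.name + ".validation")
-    ctx.actions.run_shell(
-        inputs = [ctx.file.src],
-        tools = [ctx.executable._linter],
-        outputs = [validation_output],
-        # The linter only checks its input, so create the declared output
-        # ourselves once it succeeds.
-        command = "%s %s && touch %s" % (
-            ctx.executable._linter.path,
-            ctx.file.src.path,
-            validation_output.path,
-        ),
-    )
-    return validation_output
-```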
-
-## Deprecated features
-
-### Deprecated predeclared outputs
-
-There are two **deprecated** ways of using predeclared outputs:
-
-* The [`outputs`](/rules/lib/globals/bzl#rule.outputs) parameter of `rule` specifies
-  a mapping between output attribute names and string templates for generating
-  predeclared output labels. Prefer using non-predeclared outputs and
-  explicitly adding outputs to `DefaultInfo.files`. Use the rule target's
-  label as input for rules which consume the output instead of a predeclared
-  output's label.
-
-* For [executable rules](#executable-rules), `ctx.outputs.executable` refers
-  to a predeclared executable output with the same name as the rule target.
-  Prefer declaring the output explicitly, for example with
-  `ctx.actions.declare_file(ctx.label.name)`, and ensure that the command that
-  generates the executable sets its permissions to allow execution. Explicitly
-  pass the executable output to the `executable` parameter of `DefaultInfo`.
-
-### Runfiles features to avoid
-
-[`ctx.runfiles`](/rules/lib/builtins/ctx#runfiles) and the [`runfiles`](/rules/lib/builtins/runfiles)
-type have a complex set of features, many of which are kept for legacy reasons.
-The following recommendations help reduce complexity:
-
-* **Avoid** use of the `collect_data` and `collect_default` modes of
-  [`ctx.runfiles`](/rules/lib/builtins/ctx#runfiles). These modes implicitly collect
-  runfiles across certain hardcoded dependency edges in confusing ways.
-  Instead, add files using the `files` or `transitive_files` parameters of
-  `ctx.runfiles`, or by merging in runfiles from dependencies with
-  `runfiles = runfiles.merge(dep[DefaultInfo].default_runfiles)`.
-
-* **Avoid** use of the `data_runfiles` and `default_runfiles` of the
-  `DefaultInfo` constructor. Specify `DefaultInfo(runfiles = ...)` instead.
-  The distinction between "default" and "data" runfiles is maintained for
-  legacy reasons. For example, some rules put their default outputs in
-  `data_runfiles`, but not `default_runfiles`. Instead of using
-  `data_runfiles`, rules should *both* include default outputs and merge in
-  `default_runfiles` from attributes which provide runfiles (often
-  [`data`](/reference/be/common-definitions#common-attributes.data)).
-
-* When retrieving `runfiles` from `DefaultInfo` (generally only for merging
-  runfiles between the current rule and its dependencies), use
-  `DefaultInfo.default_runfiles`, **not** `DefaultInfo.data_runfiles`.
-
-### Migrating from legacy providers
-
-Historically, Bazel providers were simple fields on the `Target` object. They
-were accessed using the dot operator, and they were created by putting the field
-in a [`struct`](/rules/lib/builtins/struct) returned by the rule's
-implementation function instead of a list of provider objects:
-
-```python
-return struct(example_info = struct(headers = depset(...)))
-```
-
-Such providers can be retrieved from the corresponding field of the `Target` object:
-
-```python
-transitive_headers = [hdr.example_info.headers for hdr in ctx.attr.hdrs]
-```
-
-*This style is deprecated and should not be used in new code;* see the following
-for information that may help you migrate. The new provider mechanism avoids
-name clashes. It also supports data hiding, by requiring any code accessing a
-provider instance to retrieve it using the provider symbol.
-
-For the moment, legacy providers are still supported. A rule can return both
-legacy and modern providers as follows:
-
-```python
-def _old_rule_impl(ctx):
-    ...
- legacy_data = struct(x = "foo", ...) - modern_data = MyInfo(y = "bar", ...) - # When any legacy providers are returned, the top-level returned value is a - # struct. - return struct( - # One key = value entry for each legacy provider. - legacy_info = legacy_data, - ... - # Additional modern providers: - providers = [modern_data, ...]) -``` - -If `dep` is the resulting `Target` object for an instance of this rule, the -providers and their contents can be retrieved as `dep.legacy_info.x` and -`dep[MyInfo].y`. - -In addition to `providers`, the returned struct can also take several other -fields that have special meaning (and thus don't create a corresponding legacy -provider): - -* The fields `files`, `runfiles`, `data_runfiles`, `default_runfiles`, and - `executable` correspond to the same-named fields of - [`DefaultInfo`](/rules/lib/providers/DefaultInfo). It is not allowed to specify any of - these fields while also returning a `DefaultInfo` provider. - -* The field `output_groups` takes a struct value and corresponds to an - [`OutputGroupInfo`](/rules/lib/providers/OutputGroupInfo). - -In [`provides`](/rules/lib/globals/bzl#rule.provides) declarations of rules, and in -[`providers`](/rules/lib/toplevel/attr#label_list.providers) declarations of dependency -attributes, legacy providers are passed in as strings and modern providers are -passed in by their `Info` symbol. Be sure to change from strings to symbols -when migrating. For complex or large rule sets where it is difficult to update -all rules atomically, you may have an easier time if you follow this sequence of -steps: - -1. Modify the rules that produce the legacy provider to produce both the legacy - and modern providers, using the preceding syntax. For rules that declare they - return the legacy provider, update that declaration to include both the - legacy and modern providers. - -2. Modify the rules that consume the legacy provider to instead consume the - modern provider. If any attribute declarations require the legacy provider, - also update them to instead require the modern provider. Optionally, you can - interleave this work with step 1 by having consumers accept or require either - provider: Test for the presence of the legacy provider using - `hasattr(target, 'foo')`, or the new provider using `FooInfo in target`. - -3. Fully remove the legacy provider from all rules. diff --git a/8.2.1/extending/toolchains.mdx b/8.2.1/extending/toolchains.mdx deleted file mode 100644 index b904cbe..0000000 --- a/8.2.1/extending/toolchains.mdx +++ /dev/null @@ -1,600 +0,0 @@ ---- -title: 'Toolchains' ---- - - - -This page describes the toolchain framework, which is a way for rule authors to -decouple their rule logic from platform-based selection of tools. It is -recommended to read the [rules](/extending/rules) and [platforms](/extending/platforms) -pages before continuing. This page covers why toolchains are needed, how to -define and use them, and how Bazel selects an appropriate toolchain based on -platform constraints. - -## Motivation - -Let's first look at the problem toolchains are designed to solve. Suppose you -are writing rules to support the "bar" programming language. Your `bar_binary` -rule would compile `*.bar` files using the `barc` compiler, a tool that itself -is built as another target in your workspace. Since users who write `bar_binary` -targets shouldn't have to specify a dependency on the compiler, you make it an -implicit dependency by adding it to the rule definition as a private attribute. 
- -```python -bar_binary = rule( - implementation = _bar_binary_impl, - attrs = { - "srcs": attr.label_list(allow_files = True), - ... - "_compiler": attr.label( - default = "//bar_tools:barc_linux", # the compiler running on linux - providers = [BarcInfo], - ), - }, -) -``` - -`//bar_tools:barc_linux` is now a dependency of every `bar_binary` target, so -it'll be built before any `bar_binary` target. It can be accessed by the rule's -implementation function just like any other attribute: - -```python -BarcInfo = provider( - doc = "Information about how to invoke the barc compiler.", - # In the real world, compiler_path and system_lib might hold File objects, - # but for simplicity they are strings for this example. arch_flags is a list - # of strings. - fields = ["compiler_path", "system_lib", "arch_flags"], -) - -def _bar_binary_impl(ctx): - ... - info = ctx.attr._compiler[BarcInfo] - command = "%s -l %s %s" % ( - info.compiler_path, - info.system_lib, - " ".join(info.arch_flags), - ) - ... -``` - -The issue here is that the compiler's label is hardcoded into `bar_binary`, yet -different targets may need different compilers depending on what platform they -are being built for and what platform they are being built on -- called the -*target platform* and *execution platform*, respectively. Furthermore, the rule -author does not necessarily even know all the available tools and platforms, so -it is not feasible to hardcode them in the rule's definition. - -A less-than-ideal solution would be to shift the burden onto users, by making -the `_compiler` attribute non-private. Then individual targets could be -hardcoded to build for one platform or another. - -```python -bar_binary( - name = "myprog_on_linux", - srcs = ["mysrc.bar"], - compiler = "//bar_tools:barc_linux", -) - -bar_binary( - name = "myprog_on_windows", - srcs = ["mysrc.bar"], - compiler = "//bar_tools:barc_windows", -) -``` - -You can improve on this solution by using `select` to choose the `compiler` -[based on the platform](/docs/configurable-attributes): - -```python -config_setting( - name = "on_linux", - constraint_values = [ - "@platforms//os:linux", - ], -) - -config_setting( - name = "on_windows", - constraint_values = [ - "@platforms//os:windows", - ], -) - -bar_binary( - name = "myprog", - srcs = ["mysrc.bar"], - compiler = select({ - ":on_linux": "//bar_tools:barc_linux", - ":on_windows": "//bar_tools:barc_windows", - }), -) -``` - -But this is tedious and a bit much to ask of every single `bar_binary` user. -If this style is not used consistently throughout the workspace, it leads to -builds that work fine on a single platform but fail when extended to -multi-platform scenarios. It also does not address the problem of adding support -for new platforms and compilers without modifying existing rules or targets. - -The toolchain framework solves this problem by adding an extra level of -indirection. Essentially, you declare that your rule has an abstract dependency -on *some* member of a family of targets (a toolchain type), and Bazel -automatically resolves this to a particular target (a toolchain) based on the -applicable platform constraints. Neither the rule author nor the target author -need know the complete set of available platforms and toolchains. - -## Writing rules that use toolchains - -Under the toolchain framework, instead of having rules depend directly on tools, -they instead depend on *toolchain types*. 
A toolchain type is a simple target -that represents a class of tools that serve the same role for different -platforms. For instance, you can declare a type that represents the bar -compiler: - -```python -# By convention, toolchain_type targets are named "toolchain_type" and -# distinguished by their package path. So the full path for this would be -# //bar_tools:toolchain_type. -toolchain_type(name = "toolchain_type") -``` - -The rule definition in the previous section is modified so that instead of -taking in the compiler as an attribute, it declares that it consumes a -`//bar_tools:toolchain_type` toolchain. - -```python -bar_binary = rule( - implementation = _bar_binary_impl, - attrs = { - "srcs": attr.label_list(allow_files = True), - ... - # No `_compiler` attribute anymore. - }, - toolchains = ["//bar_tools:toolchain_type"], -) -``` - -The implementation function now accesses this dependency under `ctx.toolchains` -instead of `ctx.attr`, using the toolchain type as the key. - -```python -def _bar_binary_impl(ctx): - ... - info = ctx.toolchains["//bar_tools:toolchain_type"].barcinfo - # The rest is unchanged. - command = "%s -l %s %s" % ( - info.compiler_path, - info.system_lib, - " ".join(info.arch_flags), - ) - ... -``` - -`ctx.toolchains["//bar_tools:toolchain_type"]` returns the -[`ToolchainInfo` provider](/rules/lib/toplevel/platform_common#ToolchainInfo) -of whatever target Bazel resolved the toolchain dependency to. The fields of the -`ToolchainInfo` object are set by the underlying tool's rule; in the next -section, this rule is defined such that there is a `barcinfo` field that wraps -a `BarcInfo` object. - -Bazel's procedure for resolving toolchains to targets is described -[below](#toolchain-resolution). Only the resolved toolchain target is actually -made a dependency of the `bar_binary` target, not the whole space of candidate -toolchains. - -### Mandatory and Optional Toolchains - -By default, when a rule expresses a toolchain type dependency using a bare label -(as shown above), the toolchain type is considered to be **mandatory**. If Bazel -is unable to find a matching toolchain (see -[Toolchain resolution](#toolchain-resolution) below) for a mandatory toolchain -type, this is an error and analysis halts. - -It is possible instead to declare an **optional** toolchain type dependency, as -follows: - -```python -bar_binary = rule( - ... - toolchains = [ - config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = False), - ], -) -``` - -When an optional toolchain type cannot be resolved, analysis continues, and the -result of `ctx.toolchains["//bar_tools:toolchain_type"]` is `None`. - -The [`config_common.toolchain_type`](/rules/lib/toplevel/config_common#toolchain_type) -function defaults to mandatory. - -The following forms can be used: - -- Mandatory toolchain types: - - `toolchains = ["//bar_tools:toolchain_type"]` - - `toolchains = [config_common.toolchain_type("//bar_tools:toolchain_type")]` - - `toolchains = [config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = True)]` -- Optional toolchain types: - - `toolchains = [config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = False)]` - -```python -bar_binary = rule( - ... - toolchains = [ - "//foo_tools:toolchain_type", - config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = False), - ], -) -``` - -You can mix and match forms in the same rule, also. 
-However, if the same toolchain type is listed multiple times, Bazel uses the
-strictest form listed, where mandatory is stricter than optional.
-
-### Writing aspects that use toolchains
-
-Aspects have access to the same toolchain API as rules: you can define required
-toolchain types, access toolchains via the context, and use them to generate new
-actions using the toolchain.
-
-```py
-bar_aspect = aspect(
-    implementation = _bar_aspect_impl,
-    attrs = {},
-    toolchains = ['//bar_tools:toolchain_type'],
-)
-
-def _bar_aspect_impl(target, ctx):
-    toolchain = ctx.toolchains['//bar_tools:toolchain_type']
-    # Use the toolchain provider like in a rule.
-    return []
-```
-
-## Defining toolchains
-
-To define some toolchains for a given toolchain type, you need three things:
-
-1. A language-specific rule representing the kind of tool or tool suite. By
-   convention this rule's name is suffixed with "\_toolchain".
-
-   1. **Note:** The `\_toolchain` rule cannot create any build actions.
-      Rather, it collects artifacts from other rules and forwards them to the
-      rule that uses the toolchain. That rule is responsible for creating all
-      build actions.
-
-2. Several targets of this rule type, representing versions of the tool or tool
-   suite for different platforms.
-
-3. For each such target, an associated target of the generic
-   [`toolchain`](/reference/be/platforms-and-toolchains#toolchain)
-   rule, to provide metadata used by the toolchain framework. This `toolchain`
-   target also refers to the `toolchain_type` associated with this toolchain.
-   This means that a given `_toolchain` rule could be associated with any
-   `toolchain_type`, and that it's only in a `toolchain` instance that uses
-   this `_toolchain` rule that the rule is associated with a `toolchain_type`.
-
-For our running example, here's a definition for a `bar_toolchain` rule. Our
-example has only a compiler, but other tools such as a linker could also be
-grouped underneath it.
-
-```python
-def _bar_toolchain_impl(ctx):
-    toolchain_info = platform_common.ToolchainInfo(
-        barcinfo = BarcInfo(
-            compiler_path = ctx.attr.compiler_path,
-            system_lib = ctx.attr.system_lib,
-            arch_flags = ctx.attr.arch_flags,
-        ),
-    )
-    return [toolchain_info]
-
-bar_toolchain = rule(
-    implementation = _bar_toolchain_impl,
-    attrs = {
-        "compiler_path": attr.string(),
-        "system_lib": attr.string(),
-        "arch_flags": attr.string_list(),
-    },
-)
-```
-
-The rule must return a `ToolchainInfo` provider, which becomes the object that
-the consuming rule retrieves using `ctx.toolchains` and the label of the
-toolchain type. `ToolchainInfo`, like `struct`, can hold arbitrary field-value
-pairs. The specification of exactly what fields are added to the `ToolchainInfo`
-should be clearly documented at the toolchain type. In this example, the values
-are returned wrapped in a `BarcInfo` object to reuse the schema defined above;
-this style may be useful for validation and code reuse.
-
-Now you can define targets for specific `barc` compilers.
-
-```python
-bar_toolchain(
-    name = "barc_linux",
-    arch_flags = [
-        "--arch=Linux",
-        "--debug_everything",
-    ],
-    compiler_path = "/path/to/barc/on/linux",
-    system_lib = "/usr/lib/libbarc.so",
-)
-
-bar_toolchain(
-    name = "barc_windows",
-    arch_flags = [
-        "--arch=Windows",
-        # Different flags, no debug support on windows.
-    ],
-    compiler_path = "C:\\path\\on\\windows\\barc.exe",
-    system_lib = "C:\\path\\on\\windows\\barclib.dll",
-)
-```
-
-Finally, you create `toolchain` definitions for the two `bar_toolchain` targets.
-These definitions link the language-specific targets to the toolchain type and -provide the constraint information that tells Bazel when the toolchain is -appropriate for a given platform. - -```python -toolchain( - name = "barc_linux_toolchain", - exec_compatible_with = [ - "@platforms//os:linux", - "@platforms//cpu:x86_64", - ], - target_compatible_with = [ - "@platforms//os:linux", - "@platforms//cpu:x86_64", - ], - toolchain = ":barc_linux", - toolchain_type = ":toolchain_type", -) - -toolchain( - name = "barc_windows_toolchain", - exec_compatible_with = [ - "@platforms//os:windows", - "@platforms//cpu:x86_64", - ], - target_compatible_with = [ - "@platforms//os:windows", - "@platforms//cpu:x86_64", - ], - toolchain = ":barc_windows", - toolchain_type = ":toolchain_type", -) -``` - -The use of relative path syntax above suggests these definitions are all in the -same package, but there's no reason the toolchain type, language-specific -toolchain targets, and `toolchain` definition targets can't all be in separate -packages. - -See the [`go_toolchain`](https://github.com/bazelbuild/rules_go/blob/master/go/private/go_toolchain.bzl) -for a real-world example. - -### Toolchains and configurations - -An important question for rule authors is, when a `bar_toolchain` target is -analyzed, what [configuration](/reference/glossary#configuration) does it see, and what transitions -should be used for dependencies? The example above uses string attributes, but -what would happen for a more complicated toolchain that depends on other targets -in the Bazel repository? - -Let's see a more complex version of `bar_toolchain`: - -```python -def _bar_toolchain_impl(ctx): - # The implementation is mostly the same as above, so skipping. - pass - -bar_toolchain = rule( - implementation = _bar_toolchain_impl, - attrs = { - "compiler": attr.label( - executable = True, - mandatory = True, - cfg = "exec", - ), - "system_lib": attr.label( - mandatory = True, - cfg = "target", - ), - "arch_flags": attr.string_list(), - }, -) -``` - -The use of [`attr.label`](/rules/lib/toplevel/attr#label) is the same as for a standard rule, -but the meaning of the `cfg` parameter is slightly different. - -The dependency from a target (called the "parent") to a toolchain via toolchain -resolution uses a special configuration transition called the "toolchain -transition". The toolchain transition keeps the configuration the same, except -that it forces the execution platform to be the same for the toolchain as for -the parent (otherwise, toolchain resolution for the toolchain could pick any -execution platform, and wouldn't necessarily be the same as for parent). This -allows any `exec` dependencies of the toolchain to also be executable for the -parent's build actions. Any of the toolchain's dependencies which use `cfg = -"target"` (or which don't specify `cfg`, since "target" is the default) are -built for the same target platform as the parent. This allows toolchain rules to -contribute both libraries (the `system_lib` attribute above) and tools (the -`compiler` attribute) to the build rules which need them. The system libraries -are linked into the final artifact, and so need to be built for the same -platform, whereas the compiler is a tool invoked during the build, and needs to -be able to run on the execution platform. - -## Registering and building with toolchains - -At this point all the building blocks are assembled, and you just need to make -the toolchains available to Bazel's resolution procedure. 
This is done by -registering the toolchain, either in a `MODULE.bazel` file using -`register_toolchains()`, or by passing the toolchains' labels on the command -line using the `--extra_toolchains` flag. - -```python -register_toolchains( - "//bar_tools:barc_linux_toolchain", - "//bar_tools:barc_windows_toolchain", - # Target patterns are also permitted, so you could have also written: - # "//bar_tools:all", - # or even - # "//bar_tools/...", -) -``` - -When using target patterns to register toolchains, the order in which the -individual toolchains are registered is determined by the following rules: - -* The toolchains defined in a subpackage of a package are registered before the - toolchains defined in the package itself. -* Within a package, toolchains are registered in the lexicographical order of - their names. - -Now when you build a target that depends on a toolchain type, an appropriate -toolchain will be selected based on the target and execution platforms. - -```python -# my_pkg/BUILD - -platform( - name = "my_target_platform", - constraint_values = [ - "@platforms//os:linux", - ], -) - -bar_binary( - name = "my_bar_binary", - ... -) -``` - -```sh -bazel build //my_pkg:my_bar_binary --platforms=//my_pkg:my_target_platform -``` - -Bazel will see that `//my_pkg:my_bar_binary` is being built with a platform that -has `@platforms//os:linux` and therefore resolve the -`//bar_tools:toolchain_type` reference to `//bar_tools:barc_linux_toolchain`. -This will end up building `//bar_tools:barc_linux` but not -`//bar_tools:barc_windows`. - -## Toolchain resolution - -Note: [Some Bazel rules](/concepts/platforms#status) do not yet support -toolchain resolution. - -For each target that uses toolchains, Bazel's toolchain resolution procedure -determines the target's concrete toolchain dependencies. The procedure takes as -input a set of required toolchain types, the target platform, the list of -available execution platforms, and the list of available toolchains. Its outputs -are a selected toolchain for each toolchain type as well as a selected execution -platform for the current target. - -The available execution platforms and toolchains are gathered from the -external dependency graph via -[`register_execution_platforms`](/rules/lib/globals/module#register_execution_platforms) -and -[`register_toolchains`](/rules/lib/globals/module#register_toolchains) calls in -`MODULE.bazel` files. -Additional execution platforms and toolchains may also be specified on the -command line via -[`--extra_execution_platforms`](/reference/command-line-reference#flag--extra_execution_platforms) -and -[`--extra_toolchains`](/reference/command-line-reference#flag--extra_toolchains). -The host platform is automatically included as an available execution platform. -Available platforms and toolchains are tracked as ordered lists for determinism, -with preference given to earlier items in the list. - -The set of available toolchains, in priority order, is created from -`--extra_toolchains` and `register_toolchains`: - -1. Toolchains registered using `--extra_toolchains` are added first. (Within - these, the **last** toolchain has highest priority.) -2. Toolchains registered using `register_toolchains` in the transitive external - dependency graph, in the following order: (Within these, the **first** - mentioned toolchain has highest priority.) - 1. Toolchains registered by the root module (as in, the `MODULE.bazel` at the - workspace root); - 2. 
Toolchains registered in the user's `WORKSPACE` file, including in any - macros invoked from there; - 3. Toolchains registered by non-root modules (as in, dependencies specified by - the root module, and their dependencies, and so forth); - 4. Toolchains registered in the "WORKSPACE suffix"; this is only used by - certain native rules bundled with the Bazel installation. - -**NOTE:** [Pseudo-targets like `:all`, `:*`, and -`/...`](/run/build#specifying-build-targets) are ordered by Bazel's package -loading mechanism, which uses a lexicographic ordering. - -The resolution steps are as follows. - -1. A `target_compatible_with` or `exec_compatible_with` clause *matches* a - platform if, for each `constraint_value` in its list, the platform also has - that `constraint_value` (either explicitly or as a default). - - If the platform has `constraint_value`s from `constraint_setting`s not - referenced by the clause, these do not affect matching. - -1. If the target being built specifies the - [`exec_compatible_with` attribute](/reference/be/common-definitions#common.exec_compatible_with) - (or its rule definition specifies the - [`exec_compatible_with` argument](/rules/lib/globals/bzl#rule.exec_compatible_with)), - the list of available execution platforms is filtered to remove - any that do not match the execution constraints. - -1. The list of available toolchains is filtered to remove any toolchains - specifying `target_settings` that don't match the current configuration. - -1. For each available execution platform, you associate each toolchain type with - the first available toolchain, if any, that is compatible with this execution - platform and the target platform. - -1. Any execution platform that failed to find a compatible mandatory toolchain - for one of its toolchain types is ruled out. Of the remaining platforms, the - first one becomes the current target's execution platform, and its associated - toolchains (if any) become dependencies of the target. - -The chosen execution platform is used to run all actions that the target -generates. - -In cases where the same target can be built in multiple configurations (such as -for different CPUs) within the same build, the resolution procedure is applied -independently to each version of the target. - -If the rule uses [execution groups](/extending/exec-groups), each execution -group performs toolchain resolution separately, and each has its own execution -platform and toolchains. - -## Debugging toolchains - -If you are adding toolchain support to an existing rule, use the -`--toolchain_resolution_debug=regex` flag. During toolchain resolution, the flag -provides verbose output for toolchain types or target names that match the regex variable. You -can use `.*` to output all information. Bazel will output names of toolchains it -checks and skips during the resolution process. - -If you'd like to see which [`cquery`](/query/cquery) dependencies are from toolchain -resolution, use `cquery`'s [`--transitions`](/query/cquery#transitions) flag: - -``` -# Find all direct dependencies of //cc:my_cc_lib. This includes explicitly -# declared dependencies, implicit dependencies, and toolchain dependencies. 
-$ bazel cquery 'deps(//cc:my_cc_lib, 1)'
-//cc:my_cc_lib (96d6638)
-@bazel_tools//tools/cpp:toolchain (96d6638)
-@bazel_tools//tools/def_parser:def_parser (HOST)
-//cc:my_cc_dep (96d6638)
-@local_config_platform//:host (96d6638)
-@bazel_tools//tools/cpp:toolchain_type (96d6638)
-//:default_host_platform (96d6638)
-@local_config_cc//:cc-compiler-k8 (HOST)
-//cc:my_cc_lib.cc (null)
-@bazel_tools//tools/cpp:grep-includes (HOST)
-
-# Which of these are from toolchain resolution?
-$ bazel cquery 'deps(//cc:my_cc_lib, 1)' --transitions=lite | grep "toolchain dependency"
-  [toolchain dependency]#@local_config_cc//:cc-compiler-k8#HostTransition -> b6df211
-```
diff --git a/8.2.1/external/advanced.mdx b/8.2.1/external/advanced.mdx
deleted file mode 100644
index 26ece4d..0000000
--- a/8.2.1/external/advanced.mdx
+++ /dev/null
@@ -1,183 +0,0 @@
----
-title: 'Advanced topics on external dependencies'
----
-
-
-
-## Shadowing dependencies in WORKSPACE
-
-Note: This section applies to the [WORKSPACE
-system](/external/overview#workspace-system) only. For
-[Bzlmod](/external/overview#bzlmod), use a [multiple-version
-override](/external/module#multiple-version_override).
-
-Whenever possible, keep a single version policy in your project. This is
-required for dependencies that you compile against and that end up in your
-final binary. For other cases, you can shadow dependencies:
-
-myproject/WORKSPACE
-
-```python
-workspace(name = "myproject")
-
-local_repository(
-    name = "A",
-    path = "../A",
-)
-local_repository(
-    name = "B",
-    path = "../B",
-)
-```
-
-A/WORKSPACE
-
-```python
-workspace(name = "A")
-
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-http_archive(
    name = "testrunner",
-    urls = ["https://github.com/testrunner/v1.zip"],
-    sha256 = "...",
-)
-```
-
-B/WORKSPACE
-
-```python
-workspace(name = "B")
-
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-http_archive(
-    name = "testrunner",
-    urls = ["https://github.com/testrunner/v2.zip"],
-    sha256 = "...",
-)
-```
-
-Both dependencies `A` and `B` depend on different versions of `testrunner`.
-Include both in `myproject` without conflict by giving them distinct names in
-`myproject/WORKSPACE`:
-
-```python
-workspace(name = "myproject")
-
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-http_archive(
-    name = "testrunner-v1",
-    urls = ["https://github.com/testrunner/v1.zip"],
-    sha256 = "...",
-)
-http_archive(
-    name = "testrunner-v2",
-    urls = ["https://github.com/testrunner/v2.zip"],
-    sha256 = "...",
-)
-local_repository(
-    name = "A",
-    path = "../A",
-    repo_mapping = {"@testrunner": "@testrunner-v1"},
-)
-local_repository(
-    name = "B",
-    path = "../B",
-    repo_mapping = {"@testrunner": "@testrunner-v2"},
-)
-```
-
-You can also use this mechanism to join diamonds. For example, if `A` and `B`
-have the same dependency but call it by different names, join those dependencies
-in `myproject/WORKSPACE`.
-
-## Overriding repositories from the command line
-
-To override a declared repository with a local repository from the command line,
-use the
-[`--override_repository`](/reference/command-line-reference#flag--override_repository)
-flag. Using this flag changes the contents of external repositories without
-changing your source code.
-
-For example, to override `@foo` with the local directory `/path/to/local/foo`,
-pass the `--override_repository=foo=/path/to/local/foo` flag.
-
-Use cases include:
-
-* Debugging issues.
-  For example, you can override an `http_archive` repository to point to a
-  local directory where you can make changes more easily.
-* Vendoring. If you are in an environment where you cannot make network calls,
-  override the network-based repository rules to point to local directories
-  instead.
-
-Note: With [Bzlmod](/external/overview#bzlmod), remember to use canonical repo
-names here. Alternatively, use the
-[`--override_module`](/reference/command-line-reference#flag--override_module)
-flag to override a module to a local directory, similar to the
-[`local_path_override`](/rules/lib/globals/module#local_path_override) directive in
-`MODULE.bazel`.
-
-## Using proxies
-
-Bazel picks up proxy addresses from the `HTTPS_PROXY` and `HTTP_PROXY`
-environment variables and uses these to download `HTTP` and `HTTPS` files (if
-specified).
-
-## Support for IPv6
-
-On IPv6-only machines, Bazel can download dependencies with no changes. However,
-on dual-stack IPv4/IPv6 machines Bazel follows the same convention as Java,
-preferring IPv4 if enabled. In some situations, for example when the IPv4
-network cannot resolve/reach external addresses, this can cause `Network
-unreachable` exceptions and build failures. In these cases, you can override
-Bazel's behavior to prefer IPv6 by using the
-[`java.net.preferIPv6Addresses=true` system
-property](https://docs.oracle.com/javase/8/docs/api/java/net/doc-files/net-properties.html).
-Specifically:
-
-* Use the `--host_jvm_args=-Djava.net.preferIPv6Addresses=true` [startup
-  option](/docs/user-manual#startup-options), for example by adding the
-  following line in your [`.bazelrc` file](/run/bazelrc):
-
-  `startup --host_jvm_args=-Djava.net.preferIPv6Addresses=true`
-
-* When running Java build targets that need to connect to the internet (such
-  as for integration tests), use the
-  `--jvmopt=-Djava.net.preferIPv6Addresses=true` [tool
-  flag](/docs/user-manual#jvmopt). For example, include in your [`.bazelrc`
-  file](/run/bazelrc):
-
-  `build --jvmopt=-Djava.net.preferIPv6Addresses=true`
-
-* If you are using [`rules_jvm_external`](https://github.com/bazelbuild/rules_jvm_external)
-  for dependency version resolution, also add
-  `-Djava.net.preferIPv6Addresses=true` to the `COURSIER_OPTS` environment
-  variable to [provide JVM options for
-  Coursier](https://github.com/bazelbuild/rules_jvm_external#provide-jvm-options-for-coursier-with-coursier_opts).
-
-## Offline builds
-
-Sometimes you may wish to run a build offline, such as when traveling on an
-airplane. For such simple use cases, prefetch the needed repositories with
-`bazel fetch` or `bazel sync`. To disable fetching further repositories during
-the build, use the option `--nofetch`.
-
-For true offline builds, where a different entity supplies all needed files,
-Bazel supports the option `--distdir`. This flag tells Bazel to look first into
-the directories specified by that option when a repository rule asks Bazel to
-fetch a file with [`ctx.download`](/rules/lib/builtins/repository_ctx#download) or
-[`ctx.download_and_extract`](/rules/lib/builtins/repository_ctx#download_and_extract).
-When the repository rule provides a hash of the needed file, Bazel looks for a
-file matching the basename of the first URL, and uses the local copy if the
-hash matches.
-
-Bazel itself uses this technique to bootstrap offline from the [distribution
-artifact](https://github.com/bazelbuild/bazel-website/blob/master/designs/_posts/2016-10-11-distribution-artifact.md).
-It does so by [collecting all the needed external
-dependencies](https://github.com/bazelbuild/bazel/blob/5cfa0303d6ac3b5bd031ff60272ce80a704af8c2/WORKSPACE#L116)
-in an internal
-[`distdir_tar`](https://github.com/bazelbuild/bazel/blob/5cfa0303d6ac3b5bd031ff60272ce80a704af8c2/distdir.bzl#L44).
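-
-As an illustration of the hash matching described above, a repository rule
-fetch along these lines (a sketch; the URL is illustrative and the hash is a
-placeholder) can be served from a `--distdir` directory that contains a file
-named `v1.zip` with a matching hash:
-
-```python
-def _my_repo_impl(repository_ctx):
-    # With --distdir=/my/distdir, Bazel first checks /my/distdir/v1.zip (the
-    # basename of the URL) and uses it if its SHA-256 matches, instead of
-    # downloading.
-    repository_ctx.download_and_extract(
-        url = "https://example.com/testrunner/v1.zip",
-        sha256 = "...",  # placeholder: a real hash is required for matching
-    )
-
-my_repo = repository_rule(implementation = _my_repo_impl)
-```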
-
-Bazel allows execution of arbitrary commands in repository rules without knowing
-if they call out to the network, and so cannot enforce fully offline builds. To
-test if a build works correctly offline, manually block off the network (as
-Bazel does in its [bootstrap
-test](https://cs.opensource.google/bazel/bazel/+/master:src/test/shell/bazel/BUILD;l=1073;drc=88c426e73cc0eb0a41c0d7995e36acd94e7c9a48)).
diff --git a/8.2.1/external/lockfile.mdx b/8.2.1/external/lockfile.mdx
deleted file mode 100644
index f2a75b2..0000000
--- a/8.2.1/external/lockfile.mdx
+++ /dev/null
@@ -1,277 +0,0 @@
----
-keywords: product:Bazel,lockfile,Bzlmod
-title: 'Bazel Lockfile'
----
-
-
-
-The lockfile feature in Bazel records the specific versions of the software
-libraries and packages that a project depends on. It achieves this by storing
-the result of module resolution and extension evaluation. The lockfile promotes
-reproducible builds, ensuring consistent development environments. Additionally,
-it enhances build efficiency by allowing Bazel to skip the parts of the
-resolution process that are unaffected by changes in project dependencies.
-Furthermore, the lockfile improves stability by preventing unexpected updates or
-breaking changes in external libraries, thereby reducing the risk of introducing
-bugs.
-
-## Lockfile Generation
-
-The lockfile is generated under the workspace root with the name
-`MODULE.bazel.lock`. It is created or updated during the build process,
-specifically after module resolution and extension evaluation. Importantly, it
-only includes dependencies that are part of the current build invocation.
-
-When changes occur in the project that affect its dependencies, the lockfile is
-automatically updated to reflect the new state. This ensures that the lockfile
-remains focused on the specific set of dependencies required for the current
-build, providing an accurate representation of the project's resolved
-dependencies.
-
-## Lockfile Usage
-
-The lockfile can be controlled by the flag
-[`--lockfile_mode`](/reference/command-line-reference#flag--lockfile_mode) to
-customize the behavior of Bazel when the project state differs from the
-lockfile. The available modes are:
-
-* `update` (Default): Use the information that is present in the lockfile to
-  skip downloads of known registry files and to avoid re-evaluating extensions
-  whose results are still up-to-date. If information is missing, it will
-  be added to the lockfile. In this mode, Bazel also avoids refreshing
-  mutable information, such as yanked versions, for dependencies that haven't
-  changed.
-* `refresh`: Like `update`, but mutable information is always refreshed when
-  switching to this mode and roughly every hour while in this mode.
-* `error`: Like `update`, but if any information is missing or out-of-date,
-  Bazel will fail with an error. This mode never changes the lockfile or
-  performs network requests during resolution. Module extensions that marked
-  themselves as `reproducible` may still perform network requests, but are
-  expected to always produce the same result.
-* `off`: The lockfile is neither checked nor updated.
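-
-For example, the `reproducible` opt-out mentioned under the `error` mode above
-is declared by returning extension metadata (a minimal sketch; the extension
-name is made up):
-
-```python
-def _my_ext_impl(module_ctx):
-    # ... define repositories here ...
-    # Promise that the same inputs always produce the same repositories,
-    # so this extension's results need not be recorded in the lockfile.
-    return module_ctx.extension_metadata(reproducible = True)
-
-my_ext = module_extension(implementation = _my_ext_impl)
-```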
- -## Lockfile Benefits - -The lockfile offers several benefits and can be utilized in various ways: - -- **Reproducible builds.** By capturing the specific versions or dependencies - of software libraries, the lockfile ensures that builds are reproducible - across different environments and over time. Developers can rely on - consistent and predictable results when building their projects. - -- **Fast incremental resolutions.** The lockfile enables Bazel to avoid - downloading registry files that were already used in a previous build. - This significantly improves build efficiency, especially in scenarios where - resolution can be time-consuming. - -- **Stability and risk reduction.** The lockfile helps maintain stability by - preventing unexpected updates or breaking changes in external libraries. By - locking the dependencies to specific versions, the risk of introducing bugs - due to incompatible or untested updates is reduced. - -## Lockfile Contents - -The lockfile contains all the necessary information to determine whether the -project state has changed. It also includes the result of building the project -in the current state. The lockfile consists of two main parts: - -1. Hashes of all remote files that are inputs to module resolution. -2. For each module extension, the lockfile includes inputs that affect it, - represented by `bzlTransitiveDigest`, `usagesDigest` and other fields, as - well as the output of running that extension, referred to as - `generatedRepoSpecs` - -Here is an example that demonstrates the structure of the lockfile, along with -explanations for each section: - -```json -{ - "lockFileVersion": 10, - "registryFileHashes": { - "https://bcr.bazel.build/bazel_registry.json": "8a28e4af...5d5b3497", - "https://bcr.bazel.build/modules/foo/1.0/MODULE.bazel": "7cd0312e...5c96ace2", - "https://bcr.bazel.build/modules/foo/2.0/MODULE.bazel": "70390338... 9fc57589", - "https://bcr.bazel.build/modules/foo/2.0/source.json": "7e3a9adf...170d94ad", - "https://registry.mycorp.com/modules/foo/1.0/MODULE.bazel": "not found", - ... - }, - "selectedYankedVersions": { - "foo@2.0": "Yanked for demo purposes" - }, - "moduleExtensions": { - "//:extension.bzl%lockfile_ext": { - "general": { - "bzlTransitiveDigest": "oWDzxG/aLnyY6Ubrfy....+Jp6maQvEPxn0pBM=", - "usagesDigest": "aLmqbvowmHkkBPve05yyDNGN7oh7QE9kBADr3QIZTZs=", - ..., - "generatedRepoSpecs": { - "hello": { - "bzlFile": "@@//:extension.bzl", - ... - } - } - } - }, - "//:extension.bzl%lockfile_ext2": { - "os:macos": { - "bzlTransitiveDigest": "oWDzxG/aLnyY6Ubrfy....+Jp6maQvEPxn0pBM=", - "usagesDigest": "aLmqbvowmHkkBPve05y....yDNGN7oh7r3QIZTZs=", - ..., - "generatedRepoSpecs": { - "hello": { - "bzlFile": "@@//:extension.bzl", - ... - } - } - }, - "os:linux": { - "bzlTransitiveDigest": "eWDzxG/aLsyY3Ubrto....+Jp4maQvEPxn0pLK=", - "usagesDigest": "aLmqbvowmHkkBPve05y....yDNGN7oh7r3QIZTZs=", - ..., - "generatedRepoSpecs": { - "hello": { - "bzlFile": "@@//:extension.bzl", - ... - } - } - } - } - } -} -``` - -### Registry File Hashes - -The `registryFileHashes` section contains the hashes of all files from -remote registries accessed during module resolution. Since the resolution -algorithm is fully deterministic when given the same inputs and all remote -inputs are hashed, this ensures a fully reproducible resolution result while -avoiding excessive duplication of remote information in the lockfile. 
-Note that this also requires recording when a particular registry didn't contain
-a certain module, but a registry with lower precedence did (see the "not found"
-entry in the example). This inherently mutable information can be updated via
-`bazel mod deps --lockfile_mode=refresh`.
-
-Bazel uses the hashes from the lockfile to look up registry files in the
-repository cache before downloading them, which speeds up subsequent
-resolutions.
-
-### Selected Yanked Versions
-
-The `selectedYankedVersions` section contains the yanked versions of modules
-that were selected by module resolution. Since this usually results in an error
-when trying to build, this section is only non-empty when yanked versions are
-explicitly allowed via `--allow_yanked_versions` or
-`BZLMOD_ALLOW_YANKED_VERSIONS`.
-
-This field is needed since, compared to module files, yanked version information
-is inherently mutable and thus can't be referenced by a hash. This information
-can be updated via `bazel mod deps --lockfile_mode=refresh`.
-
-### Module Extensions
-
-The `moduleExtensions` section is a map that includes only the extensions used
-in the current or a previous invocation, while excluding any extensions that
-are no longer used. In other words, if an extension is no longer used anywhere
-in the dependency graph, it is removed from the `moduleExtensions` map.
-
-If an extension is independent of the operating system or architecture type,
-this section features only a single "general" entry. Otherwise, multiple
-entries are included, named after the OS, architecture, or both, with each
-corresponding to the result of evaluating the extension on those specifics.
-
-Each entry in the extension map corresponds to a used extension and is
-identified by its containing file and name. The corresponding value for each
-entry contains the relevant information associated with that extension:
-
-1. The `bzlTransitiveDigest` is the digest of the extension implementation
-   and the .bzl files transitively loaded by it.
-2. The `usagesDigest` is the digest of the _usages_ of the extension in the
-   dependency graph, which includes all tags.
-3. Further unspecified fields that track other inputs to the extension,
-   such as contents of files or directories it reads or environment
-   variables it uses.
-4. The `generatedRepoSpecs` encode the repositories created by the
-   extension with the current input.
-5. The optional `moduleExtensionMetadata` field contains metadata provided by
-   the extension such as whether certain repositories it created should be
-   imported via `use_repo` by the root module. This information powers the
-   `bazel mod tidy` command.
-
-Module extensions can opt out of being included in the lockfile by setting
-`reproducible = True` on the metadata they return. By doing so, they promise
-that they will always create the same repositories when given the same inputs.
-
-## Best Practices
-
-To maximize the benefits of the lockfile feature, consider the following best
-practices:
-
-* Regularly update the lockfile to reflect changes in project dependencies or
-  configuration. This ensures that subsequent builds are based on the most
-  up-to-date and accurate set of dependencies. To lock down all extensions
-  at once, run `bazel mod deps --lockfile_mode=update`.
-
-* Include the lockfile in version control to facilitate collaboration and
-  ensure that all team members have access to the same lockfile, promoting
-  consistent development environments across the project.
- -* Use [`bazelisk`](/install/bazelisk) to run Bazel, and include a - `.bazelversion` file in version control that specifies the Bazel version - corresponding to the lockfile. Because Bazel itself is a dependency of - your build, the lockfile is specific to the Bazel version, and will - change even between [backwards compatible](/release/backward-compatibility) - Bazel releases. Using `bazelisk` ensures that all developers are using - a Bazel version that matches the lockfile. - -By following these best practices, you can effectively utilize the lockfile -feature in Bazel, leading to more efficient, reliable, and collaborative -software development workflows. - -## Merge Conflicts - -The lockfile format is designed to minimize merge conflicts, but they can still -happen. - -### Automatic Resolution - -Bazel provides a custom -[git merge driver](https://git-scm.com/docs/gitattributes#_defining_a_custom_merge_driver) -to help resolve these conflicts automatically. - -Set up the driver by adding this line to a `.gitattributes` file in the root of -your git repository: - -```gitattributes -# A custom merge driver for the Bazel lockfile. -# https://bazel.build/external/lockfile#automatic-resolution -MODULE.bazel.lock merge=bazel-lockfile-merge -``` - -Then each developer who wants to use the driver has to register it once by -following these steps: - -1. Install [jq](https://jqlang.github.io/jq/download/) (1.5 or higher). -2. Run the following commands: - -```bash -jq_script=$(curl https://raw.githubusercontent.com/bazelbuild/bazel/master/scripts/bazel-lockfile-merge.jq) -printf '%s\n' "${jq_script}" | less # to optionally inspect the jq script -git config --global merge.bazel-lockfile-merge.name "Merge driver for the Bazel lockfile (MODULE.bazel.lock)" -git config --global merge.bazel-lockfile-merge.driver "jq -s '${jq_script}' -- %O %A %B > %A.jq_tmp && mv %A.jq_tmp %A" -``` - -### Manual Resolution - -Simple merge conflicts in the `registryFileHashes` and `selectedYankedVersions` -fields can be safely resolved by keeping all the entries from both sides of the -conflict. - -Other types of merge conflicts should not be resolved manually. Instead: - -1. Restore the previous state of the lockfile - via `git reset MODULE.bazel.lock && git checkout MODULE.bazel.lock`. -2. Resolve any conflicts in the `MODULE.bazel` file. -3. Run `bazel mod deps` to update the lockfile. diff --git a/8.2.1/external/module.mdx b/8.2.1/external/module.mdx deleted file mode 100644 index 6a9cf13..0000000 --- a/8.2.1/external/module.mdx +++ /dev/null @@ -1,223 +0,0 @@ ---- -title: 'Bazel modules' ---- - - - -A Bazel **module** is a Bazel project that can have multiple versions, each of -which publishes metadata about other modules that it depends on. This is -analogous to familiar concepts in other dependency management systems, such as a -Maven *artifact*, an npm *package*, a Go *module*, or a Cargo *crate*. - -A module must have a `MODULE.bazel` file at its repo root. This file is the -module's manifest, declaring its name, version, list of direct dependencies, and -other information. For a basic example: - -```python -module(name = "my-module", version = "1.0") - -bazel_dep(name = "rules_cc", version = "0.0.1") -bazel_dep(name = "protobuf", version = "3.19.0") -``` - -See the [full list](/rules/lib/globals/module) of directives available in -`MODULE.bazel` files. 
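-
-As a slightly fuller sketch, a `MODULE.bazel` file can also rename the repo a
-dependency is exposed under and mark dependencies that are only needed while
-developing the module itself. The `rules_cc` and `protobuf` entries repeat the
-example above; the last dependency name and version are hypothetical:
-
-```python
-module(name = "my-module", version = "1.0")
-
-bazel_dep(name = "rules_cc", version = "0.0.1")
-
-# Expose this dependency to this module under a custom apparent repo name.
-bazel_dep(name = "protobuf", version = "3.19.0", repo_name = "com_google_protobuf")
-
-# Only honored when this module is the root module; ignored when this
-# module is used as a dependency.
-bazel_dep(name = "my-test-rules", version = "1.2.3", dev_dependency = True)
-```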
-
-To perform module resolution, Bazel starts by reading the root module's
-`MODULE.bazel` file, and then repeatedly requests any dependency's
-`MODULE.bazel` file from a [Bazel registry](/external/registry) until it
-discovers the entire dependency graph.
-
-By default, Bazel then [selects](#version-selection) one version of each module
-to use. Bazel represents each module with a repo, and consults the registry
-again to learn how to define each of the repos.
-
-## Version format
-
-Bazel has a diverse ecosystem and projects use various versioning schemes. The
-most popular by far is [SemVer](https://semver.org), but there are
-also prominent projects using different schemes, such as
-[Abseil](https://github.com/abseil/abseil-cpp/releases), whose
-versions are date-based (for example, `20210324.2`).
-
-For this reason, Bzlmod adopts a more relaxed version of the SemVer spec. The
-differences include:
-
-* SemVer prescribes that the "release" part of the version must consist of 3
-  segments: `MAJOR.MINOR.PATCH`. In Bazel, this requirement is loosened so
-  that any number of segments is allowed.
-* In SemVer, each of the segments in the "release" part must be digits only.
-  In Bazel, this is loosened to allow letters too, and the comparison
-  semantics match the "identifiers" in the "prerelease" part.
-* Additionally, the semantics of major, minor, and patch version increases are
-  not enforced. However, see [compatibility level](#compatibility_level) for
-  details on how we denote backwards compatibility.
-
-Any valid SemVer version is a valid Bazel module version. Additionally, two
-SemVer versions `a` and `b` compare `a < b` if and only if the same holds when
-they're compared as Bazel module versions.
-
-## Version selection
-
-Consider the diamond dependency problem, a staple in the versioned dependency
-management space. Suppose you have the dependency graph:
-
-```
-       A 1.0
-      /     \
-   B 1.0   C 1.1
-     |       |
-   D 1.0   D 1.1
-```
-
-Which version of `D` should be used? To resolve this question, Bzlmod uses the
-[Minimal Version Selection](https://research.swtch.com/vgo-mvs)
-(MVS) algorithm introduced in the Go module system. MVS assumes that all new
-versions of a module are backwards compatible, and so picks the highest version
-specified by any dependent (`D 1.1` in our example). It's called "minimal"
-because `D 1.1` is the earliest version that could satisfy our requirements —
-even if `D 1.2` or newer exists, we don't select them. Using MVS creates a
-version selection process that is *high-fidelity* and *reproducible*.
-
-### Yanked versions
-
-The registry can declare certain versions as *yanked* if they should be avoided
-(such as for security vulnerabilities). Bazel throws an error when selecting a
-yanked version of a module. To fix this error, either upgrade to a newer,
-non-yanked version, or use the
-[`--allow_yanked_versions`](/reference/command-line-reference#flag--allow_yanked_versions)
-flag to explicitly allow the yanked version.
-
-## Compatibility level
-
-In Go, MVS's assumption about backwards compatibility works because it treats
-backwards incompatible versions of a module as a separate module. In terms of
-SemVer, that means `A 1.x` and `A 2.x` are considered distinct modules, and can
-coexist in the resolved dependency graph. This is, in turn, made possible by
-encoding the major version in the package path in Go, so there aren't any
-compile-time or linking-time conflicts.
- -Bazel, however, cannot provide such guarantees, so it needs the "major version" -number in order to detect backwards incompatible versions. This number is called -the *compatibility level*, and is specified by each module version in its -`module()` directive. With this information, Bazel can throw an error when it -detects that versions of the same module with different compatibility levels -exist in the resolved dependency graph. - -## Overrides - -Specify overrides in the `MODULE.bazel` file to alter the behavior of Bazel -module resolution. Only the root module's overrides take effect — if a module is -used as a dependency, its overrides are ignored. - -Each override is specified for a certain module name, affecting all of its -versions in the dependency graph. Although only the root module's overrides take -effect, they can be for transitive dependencies that the root module does not -directly depend on. - -### Single-version override - -The [`single_version_override`](/rules/lib/globals/module#single_version_override) -serves multiple purposes: - -* With the `version` attribute, you can pin a dependency to a specific - version, regardless of which versions of the dependency are requested in the - dependency graph. -* With the `registry` attribute, you can force this dependency to come from a - specific registry, instead of following the normal [registry - selection](/external/registry#selecting_registries) process. -* With the `patch*` attributes, you can specify a set of patches to apply to - the downloaded module. - -These attributes are all optional and can be mixed and matched with each other. - -### Multiple-version override - -A [`multiple_version_override`](/rules/lib/globals/module#multiple_version_override) -can be specified to allow multiple versions of the same module to coexist in the -resolved dependency graph. - -You can specify an explicit list of allowed versions for the module, which must -all be present in the dependency graph before resolution — there must exist -*some* transitive dependency depending on each allowed version. After -resolution, only the allowed versions of the module remain, while Bazel upgrades -other versions of the module to the nearest higher allowed version at the same -compatibility level. If no higher allowed version at the same compatibility -level exists, Bazel throws an error. - -For example, if versions `1.1`, `1.3`, `1.5`, `1.7`, and `2.0` exist in the -dependency graph before resolution and the major version is the compatibility -level: - -* A multiple-version override allowing `1.3`, `1.7`, and `2.0` results in - `1.1` being upgraded to `1.3`, `1.5` being upgraded to `1.7`, and other - versions remaining the same. -* A multiple-version override allowing `1.5` and `2.0` results in an error, as - `1.7` has no higher version at the same compatibility level to upgrade to. -* A multiple-version override allowing `1.9` and `2.0` results in an error, as - `1.9` is not present in the dependency graph before resolution. - -Additionally, users can also override the registry using the `registry` -attribute, similarly to single-version overrides. - -### Non-registry overrides - -Non-registry overrides completely remove a module from version resolution. Bazel -does not request these `MODULE.bazel` files from a registry, but instead from -the repo itself. 
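-
-For a sense of how overrides read in practice, here is a sketch of a root
-module's `MODULE.bazel` combining a single-version override with the
-non-registry directives listed just below. All module names, versions, labels,
-commits, and URLs here are hypothetical:
-
-```python
-# Pin "foo" to 2.1 and apply a local patch, regardless of which
-# versions of "foo" are requested elsewhere in the dependency graph.
-single_version_override(
-    module_name = "foo",
-    version = "2.1",
-    patches = ["//patches:foo.patch"],
-)
-
-# Fetch "bar" from a specific Git commit instead of a registry.
-git_override(
-    module_name = "bar",
-    remote = "https://github.com/example/bar.git",
-    commit = "abcdef1234567890abcdef1234567890abcdef12",
-)
-
-# Use a local checkout of "baz", e.g. while developing it alongside this module.
-local_path_override(
-    module_name = "baz",
-    path = "../baz",
-)
-```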
-
-Bazel supports the following non-registry overrides:
-
-* [`archive_override`](/rules/lib/globals/module#archive_override)
-* [`git_override`](/rules/lib/globals/module#git_override)
-* [`local_path_override`](/rules/lib/globals/module#local_path_override)
-
-## Define repos that don't represent Bazel modules
-
-With `bazel_dep`, you can define repos that represent other Bazel modules.
-Sometimes there is a need to define a repo that does _not_ represent a Bazel
-module; for example, one that contains a plain JSON file to be read as data.
-
-In this case, you could use the [`use_repo_rule`
-directive](/rules/lib/globals/module#use_repo_rule) to directly define a repo
-by invoking a repo rule. This repo will only be visible to the module it's
-defined in.
-
-Under the hood, this is implemented using the same mechanism as [module
-extensions](/external/extension), which lets you define repos with more
-flexibility.
-
-## Repository names and strict deps
-
-The [apparent name](/external/overview#apparent-repo-name) of a repo backing a
-module to its direct dependents defaults to its module name, unless the
-`repo_name` attribute of the [`bazel_dep`](/rules/lib/globals/module#bazel_dep)
-directive says otherwise. Note that this means a module can only find its direct
-dependencies. This helps prevent accidental breakages due to changes in
-transitive dependencies.
-
-The [canonical name](/external/overview#canonical-repo-name) of a repo backing a
-module is either `module_name+version` (for example, `bazel_skylib+1.0.3`) or
-`module_name+` (for example, `bazel_features+`), depending on whether there are
-multiple versions of the module in the entire dependency graph (see
-[`multiple_version_override`](/rules/lib/globals/module#multiple_version_override)).
-Note that **the canonical name format** is not an API you should depend on and
-**is subject to change at any time**. Instead of hard-coding the canonical name,
-use a supported way to get it directly from Bazel:
-
-* In BUILD and `.bzl` files, use
-  [`Label.repo_name`](/rules/lib/builtins/Label#repo_name) on a `Label` instance
-  constructed from a label string given by the apparent name of the repo, e.g.,
-  `Label("@bazel_skylib").repo_name`.
-* When looking up runfiles, use
-  [`$(rlocationpath ...)`](https://bazel.build/reference/be/make-variables#predefined_label_variables)
-  or one of the runfiles libraries in
-  `@bazel_tools//tools/{bash,cpp,java}/runfiles` or, for a ruleset `rules_foo`,
-  in `@rules_foo//foo/runfiles`.
-* When interacting with Bazel from an external tool such as an IDE or language
-  server, use the `bazel mod dump_repo_mapping` command to get the mapping from
-  apparent names to canonical names for a given set of repositories.
-
-[Module extensions](/external/extension) can also introduce additional repos
-into the visible scope of a module.
diff --git a/8.2.1/help.mdx b/8.2.1/help.mdx
deleted file mode 100644
index b2976e6..0000000
--- a/8.2.1/help.mdx
+++ /dev/null
@@ -1,53 +0,0 @@
----
-title: 'Getting Help'
----
-
-
-
-This page lists Bazel resources beyond the documentation and covers how to get
-support from the Bazel team and community.
- -## Search existing material - -In addition to the documentation, you can find helpful information by searching: - -* [Bazel user group](https://groups.google.com/g/bazel-discuss) -* [Bazel GitHub Discussions](https://github.com/bazelbuild/bazel/discussions) -* [Bazel blog](https://blog.bazel.build/) -* [Stack Overflow](https://stackoverflow.com/questions/tagged/bazel) -* [`awesome-bazel` resources](https://github.com/jin/awesome-bazel) - -## Watch videos - -There are recordings of Bazel talks at various conferences, such as: - -* Bazel’s annual conference, BazelCon: - * [BazelCon 2023](https://www.youtube.com/playlist?list=PLxNYxgaZ8Rsefrwb_ySGRi_bvQejpO_Tj) - * [BazelCon 2022](https://youtube.com/playlist?list=PLxNYxgaZ8RsdH4GCIZ69dzxQCOPyuNlpF) - * [BazelCon 2021](https://www.youtube.com/playlist?list=PLxNYxgaZ8Rsc3auKhtfIB4qXAYf7whEux) - * [BazelCon 2020](https://www.youtube.com/playlist?list=PLxNYxgaZ8RseRybXNbopHRv6-wGmFr04n) - * [BazelCon 2019](https://youtu.be/eymphDN7No4?t=PLxNYxgaZ8Rsf-7g43Z8LyXct9ax6egdSj) - * [BazelCon 2018](https://youtu.be/DVYRg6b2UBo?t=PLxNYxgaZ8Rsd3Nmvl1W1B4I6nK1674ezp) - * [BazelCon 2017](https://youtu.be/3eFllvz8_0k?t=PLxNYxgaZ8RseY0KmkXQSt0StE71E7yizG) -* Bazel day on [Google Open Source Live](https://opensourcelive.withgoogle.com/events/bazel) - - -## Ask the Bazel community - -If there are no existing answers, you can ask the community by: - -* Emailing the [Bazel user group](https://groups.google.com/g/bazel-discuss) -* Starting a discussion on [GitHub](https://github.com/bazelbuild/bazel/discussions) -* Asking a question on [Stack Overflow](https://stackoverflow.com/questions/tagged/bazel) -* Chatting with other Bazel contributors on [Slack](https://slack.bazel.build/) -* Consulting a [Bazel community expert](/community/experts) - -## Understand Bazel's support level - -Please read the [release page](/release) to understand Bazel's release model and -what level of support Bazel provides. - -## File a bug - -If you encounter a bug or want to request a feature, file a [GitHub -Issue](https://github.com/bazelbuild/bazel/issues). diff --git a/8.2.1/install/bazelisk.mdx b/8.2.1/install/bazelisk.mdx deleted file mode 100644 index a3189cb..0000000 --- a/8.2.1/install/bazelisk.mdx +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: 'Installing / Updating Bazel using Bazelisk' ---- - - - -## Installing Bazel - -[Bazelisk](https://github.com/bazelbuild/bazelisk) is the -recommended way to install Bazel on Ubuntu, Windows, and macOS. It automatically -downloads and installs the appropriate version of Bazel. Use Bazelisk if you -need to switch between different versions of Bazel depending on the current -working directory, or to always keep Bazel updated to the latest release. - -For more details, see -[the official README](https://github.com/bazelbuild/bazelisk/blob/master/README.md). - -## Updating Bazel - -Bazel has a [backward compatibility policy](/release/backward-compatibility) -(see [guidance for rolling out incompatible -changes](/contribute/breaking-changes) if you -are the author of one). That page summarizes best practices on how to test and -migrate your project with upcoming incompatible changes and how to provide -feedback to the incompatible change authors. - -### Managing Bazel versions with Bazelisk - -[Bazelisk](https://github.com/bazelbuild/bazelisk) helps you manage -Bazel versions. - -Bazelisk can: - -* Auto-update Bazel to the latest LTS or rolling release. -* Build the project with a Bazel version specified in the .bazelversion - file. 
Check that file into your version control to ensure reproducibility
-  of your builds.
-* Help migrate your project for incompatible changes (see above).
-* Easily try release candidates.
-
-### Recommended migration process
-
-Within minor updates to any LTS release, any
-project can be prepared for the next release without breaking
-compatibility with the current release. However, there may be
-backward-incompatible changes between major LTS versions.
-
-Follow this process to migrate from one major version to another:
-
-1. Read the release notes to get advice on how to migrate to the next version.
-1. Major incompatible changes should have an associated `--incompatible_*` flag
-   and a corresponding GitHub issue:
-   * Migration guidance is available in the associated GitHub issue.
-   * Tooling is available to help you migrate some incompatible changes. For
-     example, [buildifier](https://github.com/bazelbuild/buildtools/releases).
-   * Report migration problems by commenting on the associated GitHub issue.
-
-After migration, you can continue to build your projects without worrying about
-backward-compatibility until the next major release.
diff --git a/8.2.1/install/compile-source.mdx b/8.2.1/install/compile-source.mdx
deleted file mode 100644
index a228b22..0000000
--- a/8.2.1/install/compile-source.mdx
+++ /dev/null
@@ -1,299 +0,0 @@
----
-title: 'Compiling Bazel from Source'
----
-
-
-
-This page describes how to install Bazel from source and provides
-troubleshooting tips for common issues.
-
-To build Bazel from source, you can do one of the following:
-
-* Build it [using an existing Bazel binary](#build-bazel-using-bazel)
-
-* Build it [without an existing Bazel binary](#bootstrap-bazel), which is
-  known as _bootstrapping_.
-
-## Build Bazel using Bazel
-
-### Summary
-
-1. Get the latest Bazel release from the
-   [GitHub release page](https://github.com/bazelbuild/bazel/releases) or with
-   [Bazelisk](https://github.com/bazelbuild/bazelisk).
-
-2. [Download Bazel's sources from GitHub](https://github.com/bazelbuild/bazel/archive/master.zip)
-   and extract them somewhere.
-   Alternatively, you can `git clone` the source tree from https://github.com/bazelbuild/bazel
-
-3. Install the same prerequisites as for bootstrapping (see
-   [for Unix-like systems](#bootstrap-unix-prereq) or
-   [for Windows](#bootstrap-windows-prereq)).
-
-4. Build a development build of Bazel using Bazel:
-   `bazel build //src:bazel-dev` (or `bazel build //src:bazel-dev.exe` on
-   Windows).
-
-5. The resulting binary is at `bazel-bin/src/bazel-dev`
-   (or `bazel-bin\src\bazel-dev.exe` on Windows). You can copy it wherever you
-   like and use it immediately without further installation.
-
-Detailed instructions follow below.
-
-### Step 1: Get the latest Bazel release
-
-**Goal**: Install or download a release version of Bazel. Make sure you can run
-it by typing `bazel` in a terminal.
-
-**Reason**: To build Bazel from a GitHub source tree, you need a pre-existing
-Bazel binary. You can install one from a package manager or download one from
-GitHub. See [Installing Bazel](/install). (Or you can [build from
-scratch (bootstrap)](#bootstrap-bazel).)
-
-**Troubleshooting**:
-
-* If you cannot run Bazel by typing `bazel` in a terminal:
-
-  * Maybe your Bazel binary's directory is not on the PATH.
-
-    This is not a big problem. Instead of typing `bazel`, you will need to
-    type the full path.
-
-  * Maybe the Bazel binary itself is not called `bazel` (on Unixes) or
-    `bazel.exe` (on Windows).
-
-    This is not a big problem.
You can either rename the binary, or type the
-    binary's name instead of `bazel`.
-
-  * Maybe the binary is not executable (on Unixes).
-
-    You must make the binary executable by running `chmod +x /path/to/bazel`.
-
-### Step 2: Download Bazel's sources from GitHub
-
-If you are familiar with Git, then just run `git clone https://github.com/bazelbuild/bazel`.
-
-Otherwise:
-
-1. Download the
-   [latest sources as a zip file](https://github.com/bazelbuild/bazel/archive/master.zip).
-
-2. Extract the contents somewhere.
-
-   For example, create a `bazel-src` directory under your home directory and
-   extract there.
-
-### Step 3: Install prerequisites
-
-Install the same prerequisites as for bootstrapping (see below) -- JDK, C++
-compiler, MSYS2 (if you are building on Windows), etc.
-
-### Step 4a: Build Bazel on Ubuntu Linux, macOS, and other Unix-like systems
-
-For instructions for Windows, see [Build Bazel on Windows](#build-bazel-on-windows).
-
-**Goal**: Run Bazel to build a custom Bazel binary (`bazel-bin/src/bazel-dev`).
-
-**Instructions**:
-
-1. Start a Bash terminal.
-
-2. `cd` into the directory where you extracted (or cloned) Bazel's sources.
-
-   For example, if you extracted the sources under your home directory, run:
-
-       cd ~/bazel-src
-
-3. Build Bazel from source:
-
-       bazel build //src:bazel-dev
-
-   Alternatively, you can run `bazel build //src:bazel --compilation_mode=opt`
-   to yield a smaller binary, but it's slower to build.
-
-   You can build with the `--stamp --embed_label=X.Y.Z` flags to embed a Bazel
-   version for the binary so that `bazel --version` outputs the given version.
-
-4. The output will be at `bazel-bin/src/bazel-dev` (or `bazel-bin/src/bazel`).
-
-### Step 4b: Build Bazel on Windows
-
-For instructions for Unix-like systems, see
-[Ubuntu Linux, macOS, and other Unix-like systems](#build-bazel-on-unixes).
-
-**Goal**: Run Bazel to build a custom Bazel binary
-(`bazel-bin\src\bazel-dev.exe`).
-
-**Instructions**:
-
-1. Start Command Prompt (Start Menu > Run > "cmd.exe").
-
-2. `cd` into the directory where you extracted (or cloned) Bazel's sources.
-
-   For example, if you extracted the sources under your home directory, run:
-
-       cd %USERPROFILE%\bazel-src
-
-3. Build Bazel from source:
-
-       bazel build //src:bazel-dev.exe
-
-   Alternatively, you can run `bazel build //src:bazel.exe
-   --compilation_mode=opt` to yield a smaller binary, but it's slower to build.
-
-   You can build with the `--stamp --embed_label=X.Y.Z` flags to embed a Bazel
-   version for the binary so that `bazel --version` outputs the given version.
-
-4. The output will be at `bazel-bin\src\bazel-dev.exe` (or
-   `bazel-bin\src\bazel.exe`).
-
-### Step 5: Install the built binary
-
-Actually, there's nothing to install.
-
-The output of the previous step is a self-contained Bazel binary. You can copy
-it to any directory and use it immediately. (It's useful if that directory is
-on your PATH so that you can run "bazel" everywhere.)
-
----
-
-## Build Bazel from scratch (bootstrapping)
-
-You can also build Bazel from scratch, without using an existing Bazel binary.
-
-### Step 1: Download Bazel's sources (distribution archive)
-
-(This step is the same for all platforms.)
-
-1. Download `bazel-<version>-dist.zip` from
-   [GitHub](https://github.com/bazelbuild/bazel/releases), for example
-   `bazel-0.28.1-dist.zip`.
-
-   **Attention**:
-
-   - There is a **single, architecture-independent** distribution archive.
-     There are no architecture-specific or OS-specific distribution archives.
-
-   - These sources are **not the same as the GitHub source tree**. You
-     have to use the distribution archive to bootstrap Bazel. You cannot
-     use a source tree cloned from GitHub. (The distribution archive
-     contains generated source files that are required for bootstrapping
-     and are not part of the normal Git source tree.)
-
-2. Unpack the distribution archive somewhere on disk.
-
-   You should verify the signature made by Bazel's
-   [release key](https://bazel.build/bazel-release.pub.gpg) 3D5919B448457EE0.
-
-### Step 2a: Bootstrap Bazel on Ubuntu Linux, macOS, and other Unix-like systems
-
-For instructions for Windows, see [Bootstrap Bazel on Windows](#bootstrap-windows).
-
-#### 2.1. Install the prerequisites
-
-* **Bash**
-
-* **zip, unzip**
-
-* **C++ build toolchain**
-
-* **JDK.** Version 21 is required.
-
-* **Python**. Versions 2 and 3 are supported; installing one of them is
-  enough.
-
-For example, on Ubuntu Linux you can install these requirements using the
-following command:
-
-```sh
-sudo apt-get install build-essential openjdk-21-jdk python zip unzip
-```
-
-#### 2.2. Bootstrap Bazel on Unix
-
-1. Open a shell or Terminal window.
-
-2. `cd` to the directory where you unpacked the distribution archive.
-
-3. Run the compilation script: `env EXTRA_BAZEL_ARGS="--tool_java_runtime_version=local_jdk" bash ./compile.sh`.
-
-The compiled output is placed into `output/bazel`. This is a self-contained
-Bazel binary, without an embedded JDK. You can copy it anywhere or use it
-in-place. For convenience, copy this binary to a directory that's on your
-`PATH` (such as `/usr/local/bin` on Linux).
-
-To build the `bazel` binary in a reproducible way, also set
-[`SOURCE_DATE_EPOCH`](https://reproducible-builds.org/specs/source-date-epoch/)
-in the "Run the compilation script" step.
-
-### Step 2b: Bootstrap Bazel on Windows
-
-For instructions for Unix-like systems, see
-[Bootstrap Bazel on Ubuntu Linux, macOS, and other Unix-like systems](#bootstrap-unix).
-
-#### 2.1. Install the prerequisites
-
-* [MSYS2 shell](https://msys2.github.io/)
-
-* **The MSYS2 packages for zip and unzip.** Run the following command in the MSYS2 shell:
-
-  ```
-  pacman -S zip unzip patch
-  ```
-
-* **The Visual C++ compiler.** Install the Visual C++ compiler either as part
-  of Visual Studio 2015 or newer, or by installing the latest [Build Tools
-  for Visual Studio 2017](https://aka.ms/BuildTools).
-
-* **JDK.** Version 21 is required.
-
-* **Python**. Versions 2 and 3 are supported; installing one of them is
-  enough. You need the Windows-native version (downloadable from
-  [https://www.python.org](https://www.python.org)). Versions installed via
-  pacman in MSYS2 will not work.
-
-#### 2.2. Bootstrap Bazel on Windows
-
-1. Open the MSYS2 shell.
-
-2. Set the following environment variables:
-   * Either `BAZEL_VS` or `BAZEL_VC` (they are *not* the same): Set to the
-     path to the Visual Studio directory (BAZEL\_VS) or to the Visual
-     C++ directory (BAZEL\_VC). Setting one of them is enough.
-   * `BAZEL_SH`: Path of the MSYS2 `bash.exe`. See the command in the
-     examples below.
-
-     Do not set this to `C:\Windows\System32\bash.exe`. (You have that file
-     if you installed Windows Subsystem for Linux.) Bazel does not support
-     this version of `bash.exe`.
-   * `PATH`: Add the Python directory.
-   * `JAVA_HOME`: Set to the JDK directory.
- - **Example** (using BAZEL\_VS): - - export BAZEL_VS="C:/Program Files (x86)/Microsoft Visual Studio/2017/BuildTools" - export BAZEL_SH="$(cygpath -m $(realpath $(which bash)))" - export PATH="/c/python27:$PATH" - export JAVA_HOME="C:/Program Files/Java/jdk-21" - - or (using BAZEL\_VC): - - export BAZEL_VC="C:/Program Files (x86)/Microsoft Visual Studio/2017/BuildTools/VC" - export BAZEL_SH="$(cygpath -m $(realpath $(which bash)))" - export PATH="/c/python27:$PATH" - export JAVA_HOME="C:/Program Files/Java/jdk-21" - -3. `cd` to the directory where you unpacked the distribution archive. - -4. Run the compilation script: `env EXTRA_BAZEL_ARGS="--tool_java_runtime_version=local_jdk" ./compile.sh` - -The compiled output is placed into `output/bazel.exe`. This is a self-contained -Bazel binary, without an embedded JDK. You can copy it anywhere or use it -in-place. For convenience, copy this binary to a directory that's on -your `PATH`. - -To build the `bazel.exe` binary in a reproducible way, also set -[`SOURCE_DATE_EPOCH`](https://reproducible-builds.org/specs/source-date-epoch/) -in the "Run the compilation script" step. - -You don't need to run Bazel from the MSYS2 shell. You can run Bazel from the -Command Prompt (`cmd.exe`) or PowerShell. diff --git a/8.2.1/install/completion.mdx b/8.2.1/install/completion.mdx deleted file mode 100644 index 856784c..0000000 --- a/8.2.1/install/completion.mdx +++ /dev/null @@ -1,129 +0,0 @@ ---- -title: 'Command-Line Completion' ---- - - - -You can enable command-line completion (also known as tab-completion) in Bash -and Zsh. This lets you tab-complete command names, flags names and flag values, -and target names. - -## Bash - -Bazel comes with a Bash completion script. - -If you installed Bazel: - -* From the APT repository, then you're done -- the Bash completion script is - already installed in `/etc/bash_completion.d`. - -* From Homebrew, then you're done -- the Bash completion script is - already installed in `$(brew --prefix)/etc/bash_completion.d`. - -* From the installer downloaded from GitHub, then: - 1. Locate the absolute path of the completion file. The installer copied it - to the `bin` directory. - - Example: if you ran the installer with `--user`, this will be - `$HOME/.bazel/bin`. If you ran the installer as root, this will be - `/usr/local/lib/bazel/bin`. - 2. Do one of the following: - * Either copy this file to your completion directory (if you have - one). - - Example: on Ubuntu this is the `/etc/bash_completion.d` directory. - * Or source the completion file from Bash's RC file. - - Add a line similar to the one below to your `~/.bashrc` (on Ubuntu) - or `~/.bash_profile` (on macOS), using the path to your completion - file's absolute path: - - ``` - source /path/to/bazel-complete.bash - ``` - -* Via [bootstrapping](/install/compile-source), then: - 1. Build the completion script: - - ``` - bazel build //scripts:bazel-complete.bash - ``` - 2. The completion file is built under - `bazel-bin/scripts/bazel-complete.bash`. - - Do one of the following: - * Copy this file to your completion directory, if you have - one. - - Example: on Ubuntu this is the `/etc/bash_completion.d` directory - * Copy it somewhere on your local disk, such as to `$HOME`, and - source the completion file from Bash's RC file. 
- - Add a line similar to the one below to your `~/.bashrc` (on Ubuntu) - or `~/.bash_profile` (on macOS), using the path to your completion - file's absolute path: - - ``` - source /path/to/bazel-complete.bash - ``` - -## Zsh - -Bazel comes with a Zsh completion script. - -If you installed Bazel: - -* From the APT repository, then you're done -- the Zsh completion script is - already installed in `/usr/share/zsh/vendor-completions`. - - > If you have a heavily customized `.zshrc` and the autocomplete - > does not function, try one of the following solutions: - > - > Add the following to your `.zshrc`: - > - > ``` - > zstyle :compinstall filename '/home/tradical/.zshrc' - > - > autoload -Uz compinit - > compinit - > ``` - > - > or - > - > Follow the instructions - > [here](https://stackoverflow.com/questions/58331977/bazel-tab-auto-complete-in-zsh-not-working) - > - > If you are using `oh-my-zsh`, you may want to install and enable - > the `zsh-autocomplete` plugin. If you'd prefer not to, use one of the - > solutions described above. - -* From Homebrew, then you're done -- the Zsh completion script is - already installed in `$(brew --prefix)/share/zsh/site-functions`. - -* From the installer downloaded from GitHub, then: - 1. Locate the absolute path of the completion file. The installer copied it - to the `bin` directory. - - Example: if you ran the installer with `--user`, this will be - `$HOME/.bazel/bin`. If you ran the installer as root, this will be - `/usr/local/lib/bazel/bin`. - - 2. Add this script to a directory on your `$fpath`: - - ``` - fpath[1,0]=~/.zsh/completion/ - mkdir -p ~/.zsh/completion/ - cp /path/from/above/step/_bazel ~/.zsh/completion - ``` - - You may have to call `rm -f ~/.zcompdump; compinit` - the first time to make it work. - - 3. Optionally, add the following to your .zshrc. - - ``` - # This way the completion script does not have to parse Bazel's options - # repeatedly. The directory in cache-path must be created manually. - zstyle ':completion:*' use-cache on - zstyle ':completion:*' cache-path ~/.zsh/cache - ``` diff --git a/8.2.1/install/docker-container.mdx b/8.2.1/install/docker-container.mdx deleted file mode 100644 index 3a5d017..0000000 --- a/8.2.1/install/docker-container.mdx +++ /dev/null @@ -1,135 +0,0 @@ ---- -title: 'Getting Started with Bazel Docker Container' ---- - - - -This page provides details on the contents of the Bazel container, how to build -the [abseil-cpp](https://github.com/abseil/abseil-cpp) project using Bazel -inside the Bazel container, and how to build this project directly -from the host machine using the Bazel container with directory mounting. - -## Build Abseil project from your host machine with directory mounting - -The instructions in this section allow you to build using the Bazel container -with the sources checked out in your host environment. A container is started up -for each build command you execute. Build results are cached in your host -environment so they can be reused across builds. - -Clone the project to a directory in your host machine. - -```posix-terminal -git clone --depth 1 --branch 20220623.1 https://github.com/abseil/abseil-cpp.git /src/workspace -``` - -Create a folder that will have cached results to be shared across builds. - -```posix-terminal -mkdir -p /tmp/build_output/ -``` - -Use the Bazel container to build the project and make the build -outputs available in the output folder in your host machine. 
-
-```posix-terminal
-docker run \
-  -e USER="$(id -u)" \
-  -u="$(id -u)" \
-  -v /src/workspace:/src/workspace \
-  -v /tmp/build_output:/tmp/build_output \
-  -w /src/workspace \
-  gcr.io/bazel-public/bazel:latest \
-  --output_user_root=/tmp/build_output \
-  build //absl/...
-```
-
-Build the project with sanitizers by adding the `--config={asan | tsan | msan}`
-build flag to select AddressSanitizer (asan), ThreadSanitizer (tsan) or
-MemorySanitizer (msan) accordingly.
-
-```posix-terminal
-docker run \
-  -e USER="$(id -u)" \
-  -u="$(id -u)" \
-  -v /src/workspace:/src/workspace \
-  -v /tmp/build_output:/tmp/build_output \
-  -w /src/workspace \
-  gcr.io/bazel-public/bazel:latest \
-  --output_user_root=/tmp/build_output \
-  build --config={asan | tsan | msan} -- //absl/... -//absl/types:variant_test
-```
-
-## Build Abseil project from inside the container
-
-The instructions in this section allow you to build using the Bazel container
-with the sources inside the container. By starting a container at the beginning
-of your development workflow and making changes in the workspace within the
-container, build results will be cached.
-
-Start a shell in the Bazel container:
-
-```posix-terminal
-docker run --interactive --entrypoint=/bin/bash gcr.io/bazel-public/bazel:latest
-```
-
-Each container ID is unique. In the instructions below, the container ID is
-5a99103747c6.
-
-Clone the project.
-
-```posix-terminal
-ubuntu@5a99103747c6:~$ git clone --depth 1 --branch 20220623.1 https://github.com/abseil/abseil-cpp.git && cd abseil-cpp/
-```
-
-Do a regular build.
-
-```posix-terminal
-ubuntu@5a99103747c6:~/abseil-cpp$ bazel build //absl/...
-```
-
-Build the project with sanitizers by adding the `--config={asan | tsan | msan}`
-build flag to select AddressSanitizer (asan), ThreadSanitizer (tsan) or
-MemorySanitizer (msan) accordingly.
-
-```posix-terminal
-ubuntu@5a99103747c6:~/abseil-cpp$ bazel build --config={asan | tsan | msan} -- //absl/... -//absl/types:variant_test
-```
-
-## Explore the Bazel container
-
-If you haven't already, start an interactive shell inside the Bazel container.
-
-```posix-terminal
-docker run -it --entrypoint=/bin/bash gcr.io/bazel-public/bazel:latest
-ubuntu@5a99103747c6:~$
-```
-
-Explore the container contents.
-
-```posix-terminal
-ubuntu@5a99103747c6:~$ gcc --version
-gcc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0
-Copyright (C) 2019 Free Software Foundation, Inc.
-This is free software; see the source for copying conditions. There is NO
-warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-
-ubuntu@5a99103747c6:~$ java -version
-openjdk version "1.8.0_362"
-OpenJDK Runtime Environment (build 1.8.0_362-8u372-ga~us1-0ubuntu1~20.04-b09)
-OpenJDK 64-Bit Server VM (build 25.362-b09, mixed mode)
-
-ubuntu@5a99103747c6:~$ python -V
-Python 3.8.10
-
-ubuntu@5a99103747c6:~$ bazel version
-WARNING: Invoking Bazel in batch mode since it is not invoked from within a workspace (below a directory having a WORKSPACE file).
-Extracting Bazel installation...
-Build label: 6.2.1 -Build target: bazel-out/k8-opt/bin/src/main/java/com/google/devtools/build/lib/bazel/BazelServer_deploy.jar -Build time: Fri Jun 2 16:59:58 2023 (1685725198) -Build timestamp: 1685725198 -Build timestamp as int: 1685725198 -``` - -## Explore the Bazel Dockerfile - -If you want to check how the Bazel Docker image is built, you can find its Dockerfile at [bazelbuild/continuous-integration/bazel/oci](https://github.com/bazelbuild/continuous-integration/tree/master/bazel/oci). diff --git a/8.2.1/install/ide.mdx b/8.2.1/install/ide.mdx deleted file mode 100644 index f70919b..0000000 --- a/8.2.1/install/ide.mdx +++ /dev/null @@ -1,122 +0,0 @@ ---- -title: 'Integrating Bazel with IDEs' ---- - - - -This page covers how to integrate Bazel with IDEs, such as IntelliJ, Android -Studio, and CLion (or build your own IDE plugin). It also includes links to -installation and plugin details. - -IDEs integrate with Bazel in a variety of ways, from features that allow Bazel -executions from within the IDE, to awareness of Bazel structures such as syntax -highlighting of the `BUILD` files. - -If you are interested in developing an editor or IDE plugin for Bazel, please -join the `#ide` channel on the [Bazel Slack](https://slack.bazel.build) or start -a discussion on [GitHub](https://github.com/bazelbuild/bazel/discussions). - -## IDEs and editors - -### IntelliJ, Android Studio, and CLion - -[Official plugin](http://ij.bazel.build) for IntelliJ, Android Studio, and -CLion. The plugin is [open source](https://github.com/bazelbuild/intellij). - -This is the open source version of the plugin used internally at Google. - -Features: - -* Interop with language-specific plugins. Supported languages include Java, - Scala, and Python. -* Import `BUILD` files into the IDE with semantic awareness of Bazel targets. -* Make your IDE aware of Starlark, the language used for Bazel's `BUILD` and - `.bzl`files -* Build, test, and execute binaries directly from the IDE -* Create configurations for debugging and running binaries. - -To install, go to the IDE's plugin browser and search for `Bazel`. - -To manually install older versions, download the zip files from JetBrains' -Plugin Repository and install the zip file from the IDE's plugin browser: - -* [Android Studio - plugin](https://plugins.jetbrains.com/plugin/9185-android-studio-with-bazel) -* [IntelliJ - plugin](https://plugins.jetbrains.com/plugin/8609-intellij-with-bazel) -* [CLion plugin](https://plugins.jetbrains.com/plugin/9554-clion-with-bazel) - -### Xcode - -[rules_xcodeproj](https://github.com/buildbuddy-io/rules_xcodeproj), -[Tulsi](https://tulsi.bazel.build), and -[XCHammer](https://github.com/pinterest/xchammer) generate Xcode -projects from Bazel `BUILD` files. - -### Visual Studio Code - -Official plugin for VS Code. - -Features: - -* Bazel Build Targets tree -* Starlark debugger for `.bzl` files during a build (set breakpoints, step - through code, inspect variables, and so on) - -Find [the plugin on the Visual Studio -marketplace](https://marketplace.visualstudio.com/items?itemName=BazelBuild.vscode-bazel). -The plugin is [open source](https://github.com/bazelbuild/vscode-bazel). - -See also: [Autocomplete for Source Code](#autocomplete-for-source-code) - -### Atom - -Find the [`language-bazel` package](https://atom.io/packages/language-bazel) -on the Atom package manager. 
- -See also: [Autocomplete for Source Code](#autocomplete-for-source-code) - -### Vim - -See [`bazelbuild/vim-bazel` on GitHub](https://github.com/bazelbuild/vim-bazel) - -See also: [Autocomplete for Source Code](#autocomplete-for-source-code) - -### Emacs - -See [`bazelbuild/bazel-emacs-mode` on -GitHub](https://github.com/bazelbuild/emacs-bazel-mode) - -See also: [Autocomplete for Source Code](#autocomplete-for-source-code) - -### Visual Studio - -[Lavender](https://github.com/tmandry/lavender) is an experimental project for -generating Visual Studio projects that use Bazel for building. - -### Eclipse - -[Bazel Eclipse Feature](https://github.com/salesforce/bazel-eclipse) -is a set of plugins for importing Bazel packages into an Eclipse workspace as -Eclipse projects. - -## Autocomplete for Source Code - -### C Language Family (C++, C, Objective-C, and Objective-C++) - -[`hedronvision/bazel-compile-commands-extractor`](https://github.com/hedronvision/bazel-compile-commands-extractor) enables autocomplete, smart navigation, quick fixes, and more in a wide variety of extensible editors, including VSCode, Vim, Emacs, Atom, and Sublime. It lets language servers, like clangd and ccls, and other types of tooling, draw upon Bazel's understanding of how `cc` and `objc` code will be compiled, including how it configures cross-compilation for other platforms. - -### Java - -[`georgewfraser/java-language-server`](https://github.com/georgewfraser/java-language-server) - Java Language Server (LSP) with support for Bazel-built projects - -## Automatically run build and test on file change - -[Bazel watcher](https://github.com/bazelbuild/bazel-watcher) is a -tool for building Bazel targets when source files change. - -## Building your own IDE plugin - -Read the [**IDE support** blog -post](https://blog.bazel.build/2016/06/10/ide-support.html) to learn more about -the Bazel APIs to use when building an IDE plugin. diff --git a/8.2.1/install/index.mdx b/8.2.1/install/index.mdx deleted file mode 100644 index 10f53c4..0000000 --- a/8.2.1/install/index.mdx +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: 'Installing Bazel' ---- - - - -This page describes the various platforms supported by Bazel and links -to the packages for more details. - -[Bazelisk](/install/bazelisk) is the recommended way to install Bazel on [Ubuntu Linux](/install/ubuntu), [macOS](/install/os-x), and [Windows](/install/windows). - -You can find available Bazel releases on our [release page](/release). - -## Community-supported packages - -Bazel community members maintain these packages. The Bazel team doesn't -officially support them. Contact the package maintainers for support. 
- -* [Arch Linux][arch] -* [CentOS 6](https://github.com/sub-mod/bazel-builds) -* [Debian](https://qa.debian.org/developer.php?email=team%2Bbazel%40tracker.debian.org) -* [FreeBSD](https://www.freshports.org/devel/bazel) -* [Gentoo](https://packages.gentoo.org/packages/dev-util/bazel) -* [Homebrew](https://formulae.brew.sh/formula/bazel) -* [Nixpkgs](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/tools/build-managers/bazel) -* [openSUSE](/install/suse) -* [Parabola](https://www.parabola.nu/packages/?q=bazel) -* [Scoop](https://github.com/scoopinstaller/scoop-main/blob/master/bucket/bazel.json) -* [Raspberry Pi](https://github.com/koenvervloesem/bazel-on-arm/blob/master/README.md) - -## Community-supported architectures - -* [ppc64el](https://ftp2.osuosl.org/pub/ppc64el/bazel/) - -For other platforms, you can try to [compile from source](/install/compile-source). - -[arch]: https://archlinux.org/packages/extra/x86_64/bazel/ diff --git a/8.2.1/install/os-x.mdx b/8.2.1/install/os-x.mdx deleted file mode 100644 index 9a0f3f8..0000000 --- a/8.2.1/install/os-x.mdx +++ /dev/null @@ -1,141 +0,0 @@ ---- -title: 'Installing Bazel on macOS' ---- - - - -This page describes how to install Bazel on macOS and set up your environment. - -You can install Bazel on macOS using one of the following methods: - -* *Recommended*: [Use Bazelisk](/install/bazelisk) -* [Use Homebrew](#install-on-mac-os-x-homebrew) -* [Use the binary installer](#install-with-installer-mac-os-x) -* [Compile Bazel from source](/install/compile-source) - -Bazel comes with two completion scripts. After installing Bazel, you can: - -* Access the [bash completion script](/install/completion#bash) -* Install the [zsh completion script](/install/completion#zsh) - -
-## Installing using Homebrew
- -### Step 1: Install Homebrew on macOS - -Install [Homebrew](https://brew.sh/) (a one-time step): - -```posix-terminal -/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" -``` - -### Step 2: Install Bazel via Homebrew - -Install the Bazel package via Homebrew as follows: - -```posix-terminal -brew install bazel -``` - -All set! You can confirm Bazel is installed successfully by running the -following command: - -```posix-terminal -bazel --version -``` - -Once installed, you can upgrade to a newer version of Bazel using the -following command: - -```posix-terminal -brew upgrade bazel -``` - -
-## Installing using the binary installer
-
-The binary installers are on Bazel's
-[GitHub releases page](https://github.com/bazelbuild/bazel/releases).
-
-The installer contains the Bazel binary. Some additional libraries
-must also be installed for Bazel to work.
-
-### Step 1: Install Xcode command line tools
-
-If you don't intend to use `ios_*` rules, it is sufficient to install the Xcode
-command line tools package by using `xcode-select`:
-
-```posix-terminal
-xcode-select --install
-```
-
-Otherwise, for `ios_*` rule support, you must have Xcode 6.1 or later with iOS
-SDK 8.1 installed on your system.
-
-Download Xcode from the
-[App Store](https://apps.apple.com/us/app/xcode/id497799835) or the
-[Apple Developer site](https://developer.apple.com/download/more/?=xcode).
-
-Once Xcode is installed, accept the license agreement for all users with the
-following command:
-
-```posix-terminal
-sudo xcodebuild -license accept
-```
-
-### Step 2: Download the Bazel installer
-
-Next, download the Bazel binary installer named
-`bazel-<version>-installer-darwin-x86_64.sh` from the
-[Bazel releases page on GitHub](https://github.com/bazelbuild/bazel/releases).
-
-**On macOS Catalina or newer (macOS >= 10.15)**, due to Apple's app signing
-requirements, you need to download the installer from the terminal using
-`curl`, replacing the version variable with the Bazel version you want to
-download:
-
-```posix-terminal
-export BAZEL_VERSION=5.2.0
-
-curl -fLO "https://github.com/bazelbuild/bazel/releases/download/$BAZEL_VERSION/bazel-$BAZEL_VERSION-installer-darwin-x86_64.sh"
-```
-
-This is a temporary workaround until the macOS release flow supports
-signing ([#9304](https://github.com/bazelbuild/bazel/issues/9304)).
-
-### Step 3: Run the installer
-
-Run the Bazel installer as follows:
-
-```posix-terminal
-chmod +x "bazel-$BAZEL_VERSION-installer-darwin-x86_64.sh"
-
-./bazel-$BAZEL_VERSION-installer-darwin-x86_64.sh --user
-```
-
-The `--user` flag installs Bazel to the `$HOME/bin` directory on your system and
-sets the `.bazelrc` path to `$HOME/.bazelrc`. Use the `--help` command to see
-additional installation options.
-
-If you are **on macOS Catalina or newer (macOS >= 10.15)** and get an error that
-_**“bazel-real” cannot be opened because the developer cannot be verified**_,
-you need to re-download the installer from the terminal using `curl` as a
-workaround; see Step 2 above.
-
-### Step 4: Set up your environment
-
-If you ran the Bazel installer with the `--user` flag as above, the Bazel
-executable is installed in your `$HOME/bin` directory.
-It's a good idea to add this directory to your default paths, as follows:
-
-```posix-terminal
-export PATH="$PATH:$HOME/bin"
-```
-
-You can also add this command to your `~/.bashrc`, `~/.zshrc`, or `~/.profile`
-file.
-
-All set! You can confirm Bazel is installed successfully by running the
-following command:
-
-```posix-terminal
-bazel --version
-```
-
-To update to a newer release of Bazel, download and install the desired version.
-
diff --git a/8.2.1/install/suse.mdx b/8.2.1/install/suse.mdx
deleted file mode 100644
index a4d2e9e..0000000
--- a/8.2.1/install/suse.mdx
+++ /dev/null
@@ -1,35 +0,0 @@
----
-title: 'Installing Bazel on openSUSE Tumbleweed & Leap'
----
-
-
-
-This page describes how to install Bazel on openSUSE Tumbleweed and Leap.
-
-**Note:** The Bazel team does not officially maintain openSUSE support.
-For issues using Bazel on openSUSE, please file a ticket at
-[bugzilla.opensuse.org](https://bugzilla.opensuse.org/).
-
-Packages are provided for openSUSE Tumbleweed and Leap. You can find all
-available Bazel versions via openSUSE's [software search](https://software.opensuse.org/search?utf8=%E2%9C%93&baseproject=ALL&q=bazel).
-
-The commands below must be run either via `sudo` or while logged in as `root`.
-
-## Installing Bazel on openSUSE
-
-Run the following commands to install the package. If you need a specific
-version, you can install it via the specific `bazelXXX` package; otherwise,
-just `bazel` is enough.
-
-To install the latest version of Bazel, run:
-
-```posix-terminal
-zypper install bazel
-```
-
-You can also install a specific version of Bazel by specifying the package
-version with `bazel<version>`. For example, to install
-Bazel 4.2, run:
-
-```posix-terminal
-zypper install bazel4.2
-```
diff --git a/8.2.1/install/ubuntu.mdx b/8.2.1/install/ubuntu.mdx
deleted file mode 100644
index a31bd2f..0000000
--- a/8.2.1/install/ubuntu.mdx
+++ /dev/null
@@ -1,166 +0,0 @@
----
-title: 'Installing Bazel on Ubuntu'
----
-
-
-
-This page describes the options for installing Bazel on Ubuntu.
-It also provides links to the Bazel completion scripts and the binary installer,
-if needed as a backup option (for example, if you don't have admin access).
-
-Supported Ubuntu Linux platforms:
-
-* 22.04 (LTS)
-* 20.04 (LTS)
-* 18.04 (LTS)
-
-Bazel should be compatible with other Ubuntu releases and Debian
-"stretch" and above, but this is untested and not guaranteed to work.
-
-Install Bazel on Ubuntu using one of the following methods:
-
-* *Recommended*: [Use Bazelisk](/install/bazelisk)
-* [Use our custom APT repository](#install-on-ubuntu)
-* [Use the binary installer](#binary-installer)
-* [Use the Bazel Docker container](#docker-container)
-* [Compile Bazel from source](/install/compile-source)
-
-**Note:** For Arm-based systems, the APT repository does not contain an `arm64`
-release, and there is no binary installer available. Either use Bazelisk or
-compile from source.
-
-Bazel comes with two completion scripts. After installing Bazel, you can:
-
-* Access the [bash completion script](/install/completion#bash)
-* Install the [zsh completion script](/install/completion#zsh)
-
-## Using Bazel's apt repository
-
-### Step 1: Add Bazel distribution URI as a package source
-
-**Note:** This is a one-time setup step.
-
-```posix-terminal
-sudo apt install apt-transport-https curl gnupg -y
-
-curl -fsSL https://bazel.build/bazel-release.pub.gpg | gpg --dearmor >bazel-archive-keyring.gpg
-
-sudo mv bazel-archive-keyring.gpg /usr/share/keyrings
-
-echo "deb [arch=amd64 signed-by=/usr/share/keyrings/bazel-archive-keyring.gpg] https://storage.googleapis.com/bazel-apt stable jdk1.8" | sudo tee /etc/apt/sources.list.d/bazel.list
-```
-
-The component name "jdk1.8" is kept only for legacy reasons and doesn't relate
-to supported or included JDK versions. Bazel releases are Java-version agnostic.
-Changing the "jdk1.8" component name would break existing users of the repo.
-
-### Step 2: Install and update Bazel
-
-```posix-terminal
-sudo apt update && sudo apt install bazel
-```
-
-Once installed, you can upgrade to a newer version of Bazel as part of your normal system updates:
-
-```posix-terminal
-sudo apt update && sudo apt full-upgrade
-```
-
-The `bazel` package always installs the latest stable version of Bazel.
-You can install specific, older versions of Bazel in addition to the latest
-one, such as this:
-
-```posix-terminal
-sudo apt install bazel-1.0.0
-```
-
-This installs Bazel 1.0.0 as `/usr/bin/bazel-1.0.0` on your system. This
-can be useful if you need a specific version of Bazel to build a project, for
-example because it uses a `.bazelversion` file to explicitly state with which
-Bazel version it should be built.
-
-Optionally, you can set `bazel` to a specific version by creating a symlink:
-
-```posix-terminal
-sudo ln -s /usr/bin/bazel-1.0.0 /usr/bin/bazel
-
-bazel --version  # 1.0.0
-```
-
-### Step 3: Install a JDK (optional)
-
-Bazel includes a private, bundled JRE as its runtime and doesn't require you to
-install any specific version of Java.
-
-However, if you want to build Java code using Bazel, you have to install a JDK.
-
-```posix-terminal
-sudo apt install default-jdk
-```
-
-## Using the binary installer
-
-Generally, you should use the apt repository, but the binary installer
-can be useful if you don't have admin permissions on your machine or
-can't add custom repositories.
-
-The binary installers can be downloaded from Bazel's [GitHub releases page](https://github.com/bazelbuild/bazel/releases).
-
-The installer contains the Bazel binary and extracts it into your `$HOME/bin`
-folder. Some additional libraries must be installed manually for Bazel to work.
-
-### Step 1: Install required packages
-
-Bazel needs a C++ compiler and unzip / zip in order to work:
-
-```posix-terminal
-sudo apt install g++ unzip zip
-```
-
-If you want to build Java code using Bazel, install a JDK:
-
-```posix-terminal
-sudo apt-get install default-jdk
-```
-
-### Step 2: Run the installer
-
-Next, download the Bazel binary installer named `bazel-<version>-installer-linux-x86_64.sh`
-from the [Bazel releases page on GitHub](https://github.com/bazelbuild/bazel/releases).
-
-Run it as follows:
-
-```posix-terminal
-chmod +x bazel-<version>-installer-linux-x86_64.sh
-
-./bazel-<version>-installer-linux-x86_64.sh --user
-```
-
-The `--user` flag installs Bazel to the `$HOME/bin` directory on your system and
-sets the `.bazelrc` path to `$HOME/.bazelrc`. Use the `--help` command to see
-additional installation options.
-
-### Step 3: Set up your environment
-
-If you ran the Bazel installer with the `--user` flag as above, the Bazel
-executable is installed in your `$HOME/bin` directory.
-It's a good idea to add this directory to your default paths, as follows:
-
-```posix-terminal
-export PATH="$PATH:$HOME/bin"
-```
-
-You can also add this command to your `~/.bashrc` or `~/.zshrc` file to make it
-permanent.
-
-## Using the Bazel Docker container
-
-We publish a Docker container with Bazel installed for each Bazel version at
-`gcr.io/bazel-public/bazel`. You can use the Docker container as follows:
-
-```
-$ docker pull gcr.io/bazel-public/bazel:<version>
-```
-
-The Docker container is built by [these steps](https://github.com/bazelbuild/continuous-integration/tree/master/bazel/oci).
-
diff --git a/8.2.1/migrate/index.mdx b/8.2.1/migrate/index.mdx
deleted file mode 100644
index 5d96c4a..0000000
--- a/8.2.1/migrate/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: 'Migrating to Bazel'
----
-
-
-
-This page links to migration guides for Bazel.
- -* [Maven](/migrate/maven) -* [Xcode](/migrate/xcode) -* [CocoaPods](/migrate/cocoapods) diff --git a/8.2.1/migrate/maven.mdx b/8.2.1/migrate/maven.mdx deleted file mode 100644 index 38aaffc..0000000 --- a/8.2.1/migrate/maven.mdx +++ /dev/null @@ -1,241 +0,0 @@ ---- -title: 'Migrating from Maven to Bazel' ---- - - - -This page describes how to migrate from Maven to Bazel, including the -prerequisites and installation steps. It describes the differences between Maven -and Bazel, and provides a migration example using the Guava project. - -When migrating from any build tool to Bazel, it's best to have both build tools -running in parallel until you have fully migrated your development team, CI -system, and any other relevant systems. You can run Maven and Bazel in the same -repository. - -Note: While Bazel supports downloading and publishing Maven artifacts with -[rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external) -, it does not directly support Maven-based plugins. Maven plugins can't be -directly run by Bazel since there's no Maven compatibility layer. - -## Before you begin - -* [Install Bazel](/install) if it's not yet installed. -* If you're new to Bazel, go through the tutorial [Introduction to Bazel: - Build Java](/start/java) before you start migrating. The tutorial explains - Bazel's concepts, structure, and label syntax. - -## Differences between Maven and Bazel - -* Maven uses top-level `pom.xml` file(s). Bazel supports multiple build files - and multiple targets per `BUILD` file, allowing for builds that are more - incremental than Maven's. -* Maven takes charge of steps for the deployment process. Bazel does not - automate deployment. -* Bazel enables you to express dependencies between languages. -* As you add new sections to the project, with Bazel you may need to add new - `BUILD` files. Best practice is to add a `BUILD` file to each new Java - package. - -## Migrate from Maven to Bazel - -The steps below describe how to migrate your project to Bazel: - -1. [Create the MODULE.bazel file](#1-build) -2. [Create one BUILD file](#2-build) -3. [Create more BUILD files](#3-build) -4. [Build using Bazel](#4-build) - -Examples below come from a migration of the [Guava -project](https://github.com/google/guava) from Maven to Bazel. The -Guava project used is release `v31.1`. The examples using Guava do not walk -through each step in the migration, but they do show the files and contents that -are generated or added manually for the migration. - -``` -$ git clone https://github.com/google/guava.git && cd guava -$ git checkout v31.1 -``` - -### 1. Create the MODULE.bazel file - -Create a file named `MODULE.bazel` at the root of your project. If your project -has no external dependencies, this file can be empty. - -If your project depends on files or packages that are not in one of the -project's directories, specify these external dependencies in the MODULE.bazel -file. You can use `rules_jvm_external` to manage dependencies from Maven. For -instructions about using this ruleset, see [the -README](https://github.com/bazelbuild/rules_jvm_external/#rules_jvm_external) -. - -#### Guava project example: external dependencies - -You can list the external dependencies of the [Guava -project](https://github.com/google/guava) with the -[`rules_jvm_external`](https://github.com/bazelbuild/rules_jvm_external) -ruleset. 
- -Add the following snippet to the `MODULE.bazel` file: - -```python -bazel_dep(name = "rules_jvm_external", version = "6.2") -maven = use_extension("@rules_jvm_external//:extensions.bzl", "maven") -maven.install( - artifacts = [ - "com.google.code.findbugs:jsr305:3.0.2", - "com.google.errorprone:error_prone_annotations:2.11.0", - "com.google.j2objc:j2objc-annotations:1.3", - "org.codehaus.mojo:animal-sniffer-annotations:1.20", - "org.checkerframework:checker-qual:3.12.0", - ], - repositories = [ - "https://repo1.maven.org/maven2", - ], -) -use_repo(maven, "maven") -``` - -### 2. Create one BUILD file - -Now that you have your workspace defined and external dependencies (if -applicable) listed, you need to create `BUILD` files to describe how your -project should be built. Unlike Maven with its one `pom.xml` file, Bazel can use -many `BUILD` files to build a project. These files specify multiple build -targets, which allow Bazel to produce incremental builds. - -Add `BUILD` files in stages. Start with adding one `BUILD` file at the root of -your project and using it to do an initial build using Bazel. Then, you refine -your build by adding more `BUILD` files with more granular targets. - -1. In the same directory as your `MODULE.bazel` file, create a text file and - name it `BUILD`. - -2. In this `BUILD` file, use the appropriate rule to create one target to build - your project. Here are some tips: - - * Use the appropriate rule: - * To build projects with a single Maven module, use the - `java_library` rule as follows: - - ```python - java_library( - name = "everything", - srcs = glob(["src/main/java/**/*.java"]), - resources = glob(["src/main/resources/**"]), - deps = ["//:all-external-targets"], - ) - ``` - - * To build projects with multiple Maven modules, use the - `java_library` rule as follows: - - ```python - java_library( - name = "everything", - srcs = glob([ - "Module1/src/main/java/**/*.java", - "Module2/src/main/java/**/*.java", - ... - ]), - resources = glob([ - "Module1/src/main/resources/**", - "Module2/src/main/resources/**", - ... - ]), - deps = ["//:all-external-targets"], - ) - ``` - - * To build binaries, use the `java_binary` rule: - - ```python - java_binary( - name = "everything", - srcs = glob(["src/main/java/**/*.java"]), - resources = glob(["src/main/resources/**"]), - deps = ["//:all-external-targets"], - main_class = "com.example.Main" - ) - ``` - - * Specify the attributes: - * `name`: Give the target a meaningful name. In the examples - above, the target is called "everything." - * `srcs`: Use globbing to list all .java files in your project. - * `resources`: Use globbing to list all resources in your project. - * `deps`: You need to determine which external dependencies your - project needs. - * Take a look at the [example below of this top-level BUILD - file](#guava-2) from the migration of the Guava project. - -3. Now that you have a `BUILD` file at the root of your project, build your - project to ensure that it works. On the command line, from your workspace - directory, use `bazel build //:everything` to build your project with Bazel. - - The project has now been successfully built with Bazel. You will need to add - more `BUILD` files to allow incremental builds of the project. - -#### Guava project example: start with one BUILD file - -When migrating the Guava project to Bazel, initially one `BUILD` file is used to -build the entire project. 
Here are the contents of this initial `BUILD` file in
-the workspace directory:
-
-```python
-java_library(
-    name = "everything",
-    srcs = glob([
-        "guava/src/**/*.java",
-        "futures/failureaccess/src/**/*.java",
-    ]),
-    javacopts = ["-XepDisableAllChecks"],
-    deps = [
-        "@maven//:com_google_code_findbugs_jsr305",
-        "@maven//:com_google_errorprone_error_prone_annotations",
-        "@maven//:com_google_j2objc_j2objc_annotations",
-        "@maven//:org_checkerframework_checker_qual",
-        "@maven//:org_codehaus_mojo_animal_sniffer_annotations",
-    ],
-)
-```
-
-### 3. Create more BUILD files (optional)
-
-Bazel does work with just one `BUILD` file, as you saw after completing your
-first build. You should still consider breaking the build into smaller chunks by
-adding more `BUILD` files with granular targets.
-
-Multiple `BUILD` files with multiple targets will give the build increased
-granularity, allowing:
-
-* increased incremental builds of the project,
-* increased parallel execution of the build,
-* better maintainability of the build for future users, and
-* control over visibility of targets between packages, which can prevent
-  issues such as libraries containing implementation details leaking into
-  public APIs.
-
-Tips for adding more `BUILD` files:
-
-* You can start by adding a `BUILD` file to each Java package. Start with Java
-  packages that have the fewest dependencies and work your way up to packages
-  with the most dependencies.
-* As you add `BUILD` files and specify targets, add these new targets to the
-  `deps` sections of targets that depend on them. Note that the `glob()`
-  function does not cross package boundaries, so as the number of packages
-  grows the files matched by `glob()` will shrink.
-* Any time you add a `BUILD` file to a `main` directory, ensure that you add a
-  `BUILD` file to the corresponding `test` directory.
-* Take care to limit visibility properly between packages.
-* To simplify troubleshooting errors in your setup of `BUILD` files, ensure
-  that the project continues to build with Bazel as you add each build file.
-  Run `bazel build //...` to ensure all of your targets still build.
-
-### 4. Build using Bazel
-
-You've been building using Bazel as you add `BUILD` files to validate the setup
-of the build.
-
-When you have `BUILD` files at the desired granularity, you can use Bazel to
-produce all of your builds.
diff --git a/8.2.1/migrate/xcode.mdx b/8.2.1/migrate/xcode.mdx
deleted file mode 100644
index 986cd11..0000000
--- a/8.2.1/migrate/xcode.mdx
+++ /dev/null
@@ -1,280 +0,0 @@
----
-title: 'Migrating from Xcode to Bazel'
----
-
-
-
-This page describes how to build or test an Xcode project with Bazel. It
-describes the differences between Xcode and Bazel, and provides the steps for
-converting an Xcode project to a Bazel project. It also provides troubleshooting
-solutions to address common errors.
-
-## Differences between Xcode and Bazel
-
-* Bazel requires you to explicitly specify every build target and its
-  dependencies, plus the corresponding build settings via build rules.
-
-* Bazel requires all files on which the project depends to be present within
-  the workspace directory or specified as dependencies in the `MODULE.bazel`
-  file.
-
-* When building Xcode projects with Bazel, the `BUILD` file(s) become the
-  source of truth.
If you work on the project in Xcode, you must generate a
-new version of the Xcode project that matches the `BUILD` files using
-[rules_xcodeproj](https://github.com/buildbuddy-io/rules_xcodeproj/)
-whenever you update the `BUILD` files. Certain changes to the `BUILD` files,
-such as adding dependencies to a target, don't require regenerating the
-project, which can speed up development. If you're not using Xcode, the
-`bazel build` and `bazel test` commands provide build and test capabilities
-with certain limitations described later in this guide.
-
-## Before you begin
-
-Before you begin, do the following:
-
-1. [Install Bazel](/install) if you have not already done so.
-
-2. If you're not familiar with Bazel and its concepts, complete the [iOS app
-   tutorial](/start/ios-app). You should understand the Bazel workspace,
-   including the `MODULE.bazel` and `BUILD` files, as well as the concepts of
-   targets, build rules, and Bazel packages.
-
-3. Analyze and understand the project's dependencies.
-
-### Analyze project dependencies
-
-Unlike Xcode, Bazel requires you to explicitly declare all dependencies for
-every target in the `BUILD` file.
-
-For more information on external dependencies, see [Working with external
-dependencies](/docs/external).
-
-## Build or test an Xcode project with Bazel
-
-To build or test an Xcode project with Bazel, do the following:
-
-1. [Create the `MODULE.bazel` file](#create-workspace)
-
-2. [(Experimental) Integrate SwiftPM dependencies](#integrate-swiftpm)
-
-3. [Create a `BUILD` file:](#create-build-file)
-
-   a. [Add the application target](#add-app-target)
-
-   b. [(Optional) Add the test target(s)](#add-test-target)
-
-   c. [Add the library target(s)](#add-library-target)
-
-4. [(Optional) Granularize the build](#granularize-build)
-
-5. [Run the build](#run-build)
-
-6. [Generate the Xcode project with rules_xcodeproj](#generate-the-xcode-project-with-rules_xcodeproj)
-
-### Step 1: Create the `MODULE.bazel` file
-
-Create a `MODULE.bazel` file in a new directory. This directory becomes the
-Bazel workspace root. If the project uses no external dependencies, this file
-can be empty. If the project depends on files or packages that are not in one of
-the project's directories, specify these external dependencies in the
-`MODULE.bazel` file.
-
-Note: Place the project source code within the directory tree containing the
-`MODULE.bazel` file.
-
-### Step 2: (Experimental) Integrate SwiftPM dependencies
-
-To integrate SwiftPM dependencies into the Bazel workspace with
-[swift_bazel](https://github.com/cgrindel/swift_bazel), you must
-convert them into Bazel packages as described in the [following
-tutorial](https://chuckgrindel.com/swift-packages-in-bazel-using-swift_bazel/).
-
-Note: SwiftPM support is a manual process with many variables. SwiftPM
-integration with Bazel has not been fully verified and is not officially
-supported.
-
-### Step 3: Create a `BUILD` file
-
-Once you have defined the workspace and external dependencies, you need to
-create a `BUILD` file that tells Bazel how the project is structured.
Create the
-`BUILD` file at the root of the Bazel workspace and configure it to do an
-initial build of the project as follows:
-
-* [Step 3a: Add the application target](#step-3a-add-the-application-target)
-* [Step 3b: (Optional) Add the test target(s)](#step-3b-optional-add-the-test-target-s)
-* [Step 3c: Add the library target(s)](#step-3c-add-the-library-target-s)
-
-**Tip:** To learn more about packages and other Bazel concepts, see [Workspaces,
-packages, and targets](/concepts/build-ref).
-
-#### Step 3a: Add the application target
-
-Add a
-[`macos_application`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-macos.md#macos_application)
-or an
-[`ios_application`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-ios.md#ios_application)
-rule target. This target builds a macOS or iOS application bundle, respectively.
-In the target, specify the following at the minimum:
-
-* `bundle_id` - the bundle ID (reverse-DNS path followed by app name) of the
-  binary.
-
-* `provisioning_profile` - provisioning profile from your Apple Developer
-  account (if building for an iOS device).
-
-* `families` (iOS only) - whether to build the application for iPhone, iPad,
-  or both.
-
-* `infoplists` - list of .plist files to merge into the final Info.plist file.
-
-* `minimum_os_version` - the minimum version of macOS or iOS that the
-  application supports. This ensures Bazel builds the application with the
-  correct API levels.
-
-#### Step 3b: (Optional) Add the test target(s)
-
-Bazel's [Apple build
-rules](https://github.com/bazelbuild/rules_apple) support running
-unit and UI tests on all Apple platforms. Add test targets as follows:
-
-* [`macos_unit_test`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-macos.md#macos_unit_test)
-  to run library-based and application-based unit tests on macOS.
-
-* [`ios_unit_test`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-ios.md#ios_unit_test)
-  to build and run library-based unit tests on iOS.
-
-* [`ios_ui_test`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-ios.md#ios_ui_test)
-  to build and run user interface tests in the iOS simulator.
-
-* Similar test rules exist for
-  [tvOS](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-tvos.md),
-  [watchOS](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-watchos.md)
-  and
-  [visionOS](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-visionos.md).
-
-At the minimum, specify a value for the `minimum_os_version` attribute. While
-other packaging attributes, such as `bundle_identifier` and `infoplists`,
-default to the most commonly used values, ensure that those defaults are compatible
-with the project and adjust them as necessary. For tests that require the iOS
-simulator, also specify the `ios_application` target name as the value of the
-`test_host` attribute.
-
-#### Step 3c: Add the library target(s)
-
-Add an [`objc_library`](/reference/be/objective-c#objc_library) target for each
-Objective-C library and a
-[`swift_library`](https://github.com/bazelbuild/rules_swift/blob/master/doc/rules.md#swift_library)
-target for each Swift library on which the application and/or tests depend.
-
-Add the library targets as follows:
-
-* Add the application library targets as dependencies to the application
-  targets.
-
-* Add the test library targets as dependencies to the test targets.
-
-* List the implementation sources in the `srcs` attribute.
-
-* List the headers in the `hdrs` attribute.
-
-Note: You can use the [`glob`](/reference/be/functions#glob) function to include
-all sources and/or headers of a certain type. Use it carefully as it might
-include files you do not want Bazel to build.
-
-You can browse existing examples for various types of applications directly in
-the [rules_apple examples
-directory](https://github.com/bazelbuild/rules_apple/tree/master/examples/). For
-example:
-
-* [macOS application targets](https://github.com/bazelbuild/rules_apple/tree/master/examples/macos)
-
-* [iOS application targets](https://github.com/bazelbuild/rules_apple/tree/master/examples/ios)
-
-* [Multi platform applications (macOS, iOS, watchOS, tvOS)](https://github.com/bazelbuild/rules_apple/tree/master/examples/multi_platform)
-
-For more information on build rules, see [Apple Rules for
-Bazel](https://github.com/bazelbuild/rules_apple).
-
-At this point, it is a good idea to test the build:
-
-`bazel build //:`
-
-### Step 4: (Optional) Granularize the build
-
-If the project is large, or as it grows, consider chunking it into multiple
-Bazel packages. This increased granularity provides:
-
-* Increased incrementality of builds,
-
-* Increased parallelization of build tasks,
-
-* Better maintainability for future users,
-
-* Better control over source code visibility across targets and packages. This
-  prevents issues such as libraries containing implementation details leaking
-  into public APIs.
-
-Tips for granularizing the project:
-
-* Put each library in its own Bazel package. Start with those requiring the
-  fewest dependencies and work your way up the dependency tree.
-
-* As you add `BUILD` files and specify targets, add these new targets to the
-  `deps` attributes of targets that depend on them.
-
-* The `glob()` function does not cross package boundaries, so as the number of
-  packages grows the files matched by `glob()` will shrink.
-
-* When adding a `BUILD` file to a `main` directory, also add a `BUILD` file to
-  the corresponding `test` directory.
-
-* Enforce healthy visibility limits across packages.
-
-* Build the project after each major change to the `BUILD` files and fix build
-  errors as you encounter them.
-
-### Step 5: Run the build
-
-Run the fully migrated build to ensure it completes with no errors or warnings.
-Run every application and test target individually to more easily find sources
-of any errors that occur.
-
-For example:
-
-```posix-terminal
-bazel build //:my-target
-```
-
-### Step 6: Generate the Xcode project with rules_xcodeproj
-
-When building with Bazel, the `MODULE.bazel` and `BUILD` files become the source
-of truth about the build. To make Xcode aware of this, you must generate a
-Bazel-compatible Xcode project using
-[rules_xcodeproj](https://github.com/buildbuddy-io/rules_xcodeproj#features).
-
-### Troubleshooting
-
-Bazel errors can arise when it gets out of sync with the selected Xcode version,
-like when you apply an update. Here are some things to try if you're
-experiencing errors with Xcode, for example, "Xcode version must be specified to
-use an Apple CROSSTOOL".
-
-* Manually run Xcode and accept any terms and conditions.
-
-* Use `xcode-select` to indicate the correct version, accept the license, and
-  clear Bazel's state.
-
-```posix-terminal
-  sudo xcode-select -s /Applications/Xcode.app/Contents/Developer
-
-  sudo xcodebuild -license
-
-  bazel sync --configure
-```
-
-* If this does not work, you may also try running `bazel clean --expunge`.
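-
-As a quick sanity check (standard Xcode tooling, not Bazel-specific), you can
-confirm which Xcode installation and version are currently selected before
-re-running Bazel:
-
-```posix-terminal
-xcode-select -p
-
-xcodebuild -version
-```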
-
-Note: If you've saved your Xcode to a different path, you can use `xcode-select
--s` to point to that path.
diff --git a/8.2.1/query/aquery.mdx b/8.2.1/query/aquery.mdx
deleted file mode 100644
index 2176ff6..0000000
--- a/8.2.1/query/aquery.mdx
+++ /dev/null
@@ -1,385 +0,0 @@
----
-title: 'Action Graph Query (aquery)'
----
-
-
-
-The `aquery` command allows you to query for actions in your build graph.
-It operates on the post-analysis Configured Target Graph and exposes
-information about **Actions, Artifacts and their relationships.**
-
-`aquery` is useful when you are interested in the properties of the Actions/Artifacts
-generated from the Configured Target Graph. For example, the actual commands run
-and their inputs/outputs/mnemonics.
-
-The tool accepts several command-line [options](#command-options).
-Notably, the aquery command runs on top of a regular Bazel build and inherits
-the set of options available during a build.
-
-It supports the same set of functions that is also available to traditional
-`query`, except `siblings`, `buildfiles` and
-`tests`.
-
-An example `aquery` output (without specific details):
-
-```
-$ bazel aquery 'deps(//some:label)'
-action 'Writing file some_file_name'
-  Mnemonic: ...
-  Target: ...
-  Configuration: ...
-  ActionKey: ...
-  Inputs: [...]
-  Outputs: [...]
-```
-
-## Basic syntax
-
-A simple example of the syntax for `aquery` is as follows:
-
-`bazel aquery "aquery_function(function(//target))"`
-
-The query expression (in quotes) consists of the following:
-
-* `aquery_function(...)`: functions specific to `aquery`.
-  More details [below](#using-aquery-functions).
-* `function(...)`: the standard [functions](/query/language#functions)
-  available to traditional `query`.
-* `//target`: the label of the target of interest.
-
-```
-# aquery examples:
-# Get the action graph generated while building //src/target_a
-$ bazel aquery '//src/target_a'
-
-# Get the action graph generated while building all dependencies of //src/target_a
-$ bazel aquery 'deps(//src/target_a)'
-
-# Get the action graph generated while building all dependencies of //src/target_a
-# whose inputs filenames match the regex ".*cpp".
-$ bazel aquery 'inputs(".*cpp", deps(//src/target_a))'
-```
-
-## Using aquery functions
-
-There are three `aquery` functions:
-
-* `inputs`: filter actions by inputs.
-* `outputs`: filter actions by outputs.
-* `mnemonic`: filter actions by mnemonic.
-
-`expr ::= inputs(word, expr)`
-
-  The `inputs` operator returns the actions generated from building `expr`,
-  whose input filenames match the regex provided by `word`.
-
-`$ bazel aquery 'inputs(".*cpp", deps(//src/target_a))'`
-
-`outputs` and `mnemonic` functions share a similar syntax.
-
-You can also combine functions to achieve the AND operation. For example:
-
-```
-  $ bazel aquery 'mnemonic("Cpp.*", (inputs(".*cpp", inputs("foo.*", //src/target_a))))'
-```
-
-  The above command would find all actions involved in building `//src/target_a`,
-  whose mnemonics match `"Cpp.*"` and inputs match the patterns
-  `".*cpp"` and `"foo.*"`.
-
-Important: aquery functions can't be nested inside non-aquery functions.
-Conceptually, this makes sense since the output of aquery functions is Actions,
-not Configured Targets.
-
-An example of the syntax error produced:
-
-```
-  $ bazel aquery 'deps(inputs(".*cpp", //src/target_a))'
-  ERROR: aquery filter functions (inputs, outputs, mnemonic) produce actions,
-  and therefore can't be the input of other function types: deps
-        deps(inputs(".*cpp", //src/target_a))
-```
-
-## Options
-
-### Build options
-
-`aquery` runs on top of a regular Bazel build and thus inherits the set of
-[options](/reference/command-line-reference#build-options)
-available during a build.
-
-### Aquery options
-
-#### `--output=(text|summary|proto|jsonproto|textproto), default=text`
-
-The default output format (`text`) is human-readable;
-use `proto`, `textproto`, or `jsonproto` for machine-readable formats.
-The proto message is `analysis.ActionGraphContainer`.
-
-#### `--include_commandline, default=true`
-
-Includes the content of the action command lines in the output (potentially large).
-
-#### `--include_artifacts, default=true`
-
-Includes names of the action inputs and outputs in the output (potentially large).
-
-#### `--include_aspects, default=true`
-
-Whether to include Aspect-generated actions in the output.
-
-#### `--include_param_files, default=false`
-
-Include the content of the param files used in the command (potentially large).
-
-Warning: Enabling this flag will automatically enable the `--include_commandline` flag.
-
-#### `--include_file_write_contents, default=false`
-
-Include file contents for the `actions.write()` action and the contents of the
-manifest file for the `SourceSymlinkManifest` action. The file contents are
-returned in the `file_contents` field with the proto output formats
-(`--output=proto|textproto|jsonproto`).
-With `--output=text`, the output includes a line of the form:
-```
-FileWriteContents: []
-```
-
-#### `--skyframe_state, default=false`
-
-Without performing extra analysis, dump the Action Graph from Skyframe.
-
-Note: Specifying a target with `--skyframe_state` is currently not supported.
-This flag is only available with `--output=proto` or `--output=textproto`.
-
-## Other tools and features
-
-### Querying against the state of Skyframe
-
-[Skyframe](/reference/skyframe) is the evaluation and
-incrementality model of Bazel. On each instance of the Bazel server, Skyframe stores the dependency graph
-constructed from the previous runs of the [Analysis phase](/run/build#analysis).
-
-In some cases, it is useful to query the Action Graph on Skyframe.
-An example use case would be:
-
-1. Run `bazel build //target_a`
-2. Run `bazel build //target_b`
-3. File `foo.out` was generated.
-
-_As a Bazel user, I want to determine if `foo.out` was generated from building
-`//target_a` or `//target_b`_.
-
-One could run `bazel aquery 'outputs("foo.out", //target_a)'` and
-`bazel aquery 'outputs("foo.out", //target_b)'` to figure out the action responsible
-for creating `foo.out`, and in turn the target. However, the number of different
-targets previously built can be larger than 2, which makes running multiple `aquery`
-commands a hassle.
-
-As an alternative, the `--skyframe_state` flag can be used:
-
-```
-  # List all actions on Skyframe's action graph
-  $ bazel aquery --output=proto --skyframe_state
-
-  # or
-
-  # List all actions on Skyframe's action graph, whose output matches "foo.out"
-  $ bazel aquery --output=proto --skyframe_state 'outputs("foo.out")'
-```
-
-With `--skyframe_state` mode, `aquery` takes the content of the Action Graph
-that Skyframe keeps on the instance of Bazel, (optionally) performs filtering on it and
-outputs the content, without re-running the analysis phase.
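-
-For instance (a sketch, assuming `jq` is installed and `//src/target_a` is any
-buildable target), the machine-readable formats are convenient for
-post-processing the dumped action graph, such as counting actions per mnemonic:
-
-```
-# Count the actions generated for //src/target_a's dependencies, by mnemonic.
-$ bazel aquery --output=jsonproto 'deps(//src/target_a)' \
-    | jq -r '.actions[].mnemonic' | sort | uniq -c
-```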
-
-#### Special considerations
-
-##### Output format
-
-`--skyframe_state` is currently only available for `--output=proto`
-and `--output=textproto`.
-
-##### Non-inclusion of target labels in the query expression
-
-Currently, `--skyframe_state` queries the whole action graph that exists on Skyframe,
-regardless of the targets. Having the target label specified in the query together with
-`--skyframe_state` is considered a syntax error:
-
-```
-  # WRONG: Target Included
-  $ bazel aquery --output=proto --skyframe_state //target_a
-  ERROR: Error while parsing '//target_a)': Specifying build target(s) [//target_a] with --skyframe_state is currently not supported.
-
-  # WRONG: Target Included
-  $ bazel aquery --output=proto --skyframe_state 'inputs(".*.java", //target_a)'
-  ERROR: Error while parsing '//target_a)': Specifying build target(s) [//target_a] with --skyframe_state is currently not supported.
-
-  # CORRECT: Without Target
-  $ bazel aquery --output=proto --skyframe_state
-  $ bazel aquery --output=proto --skyframe_state 'inputs(".*.java")'
-```
-
-### Comparing aquery outputs
-
-You can compare the outputs of two different aquery invocations using the `aquery_differ`
-tool, for instance, when you make some changes to your rule definition and want to
-verify that the command lines being run did not change.
-
-The tool is available in the [bazelbuild/bazel](https://github.com/bazelbuild/bazel/tree/master/tools/aquery_differ) repository.
-To use it, clone the repository to your local machine. An example usage:
-
-```
-  $ bazel run //tools/aquery_differ -- \
-  --before=/path/to/before.proto \
-  --after=/path/to/after.proto \
-  --input_type=proto \
-  --attrs=cmdline \
-  --attrs=inputs
-```
-
-The above command returns the difference between the `before` and `after` aquery outputs:
-which actions were present in one but not the other, which actions have different
-command lines/inputs in each aquery output, and so on. The result of running the above command would be:
-
-```
-  Aquery output 'after' change contains an action that generates the following outputs that aquery output 'before' change doesn't:
-  ...
-  /list of output files/
-  ...
-
-  [cmdline]
-  Difference in the action that generates the following output(s):
-    /path/to/abc.out
-  --- /path/to/before.proto
-  +++ /path/to/after.proto
-  @@ -1,3 +1,3 @@
-    ...
-    /cmdline diff, in unified diff format/
-    ...
-```
-
-#### Command options
-
-`--before, --after`: The aquery output files to be compared
-
-`--input_type=(proto|text_proto), default=proto`: the format of the input
-files. Support is provided for `proto` and `textproto` aquery output.
-
-`--attrs=(cmdline|inputs), default=cmdline`: the attributes of actions
-to be compared.
-
-### Aspect-on-aspect
-
-It is possible for [Aspects](/extending/aspects)
-to be applied on top of each other. The aquery output of the action generated by
-these Aspects would then include the _Aspect path_, which is the sequence of
-Aspects applied to the target which generated the action.
-
-An example of Aspect-on-Aspect:
-
-```
-  t0
-  ^
-  | <- a1
-  t1
-  ^
-  | <- a2
-  t2
-```
-
-Let t_i be a target of rule r_i, which applies an Aspect a_i
-to its dependencies.
-
-Assume that a2 generates an action X when applied to target t0. The text output of
-`bazel aquery --include_aspects 'deps(//t2)'` for action X would be:
-
-```
-  action ...
-  Mnemonic: ...
-  Target: //my_pkg:t0
-  Configuration: ...
-  AspectDescriptors: [//my_pkg:rule.bzl%a2(foo=...)
-
-  -> //my_pkg:rule.bzl%a1(bar=...)]
-  ...
-```
-
-This means that action `X` was generated by Aspect `a2` applied onto
-`a1(t0)`, where `a1(t0)` is the result of Aspect `a1` applied
-onto target `t0`.
-
-Each `AspectDescriptor` has the following format:
-
-```
-  AspectClass([param=value,...])
-```
-
-`AspectClass` could be the name of the Aspect class (for native Aspects) or
-`bzl_file%aspect_name` (for Starlark Aspects). `AspectDescriptor`s are
-sorted in topological order of the
-[dependency graph](/extending/aspects#aspect_basics).
-
-### Linking with the JSON profile
-
-While aquery provides information about the actions being run in a build (why they're being run,
-their inputs/outputs), the [JSON profile](/rules/performance#performance-profiling)
-tells us the timing and duration of their execution.
-It is possible to combine these two sets of information via a common denominator: an action's primary output.
-
-To include actions' outputs in the JSON profile, generate the profile with
-`--experimental_include_primary_output --noslim_profile`.
-Slim profiles are incompatible with the inclusion of primary outputs. An action's primary output
-is included by default by aquery.
-
-We don't currently provide a canonical tool to combine these two data sources, but you should be
-able to build your own script with the above information.
-
-## Known issues
-
-### Handling shared actions
-
-Sometimes actions are
-[shared](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/actions/Actions.java;l=59;drc=146d51aa1ec9dcb721a7483479ef0b1ac21d39f1)
-between configured targets.
-
-In the execution phase, those shared actions are
-[simply considered as one](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/actions/Actions.java;l=241;drc=003b8734036a07b496012730964ac220f486b61f) and only executed once.
-However, aquery operates on the pre-execution, post-analysis action graph, and hence treats these
-like separate actions whose output Artifacts have the exact same `execPath`. As a result,
-equivalent Artifacts appear duplicated.
-
-The list of aquery issues/planned features can be found on
-[GitHub](https://github.com/bazelbuild/bazel/labels/team-Performance).
-
-## FAQs
-
-### The ActionKey remains the same even though the content of an input file changed.
-
-In the context of aquery, the `ActionKey` refers to the string returned by
-[ActionAnalysisMetadata#getKey](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/actions/ActionAnalysisMetadata.java;l=89;drc=8b856f5484f0117b2aebc302f849c2a15f273310):
-
-```
-  Returns a string encoding all of the significant behaviour of this Action that might affect the
-  output. The general contract of `getKey` is this: if the work to be performed by the
-  execution of this action changes, the key must change.
-
-  ...
-
-  Examples of changes that should affect the key are:
-
-  - Changes to the BUILD file that materially affect the rule which gave rise to this Action.
-  - Changes to the command-line options, environment, or other global configuration resources
-    which affect the behaviour of this kind of Action (other than changes to the names of the
-    input/output files, which are handled externally).
-  - An upgrade to the build tools which changes the program logic of this kind of Action
-    (typically this is achieved by incorporating a UUID into the key, which is changed each
-    time the program logic of this action changes).
-  Note the following exception: for actions that discover inputs, the key must change if any
-  input names change or else action validation may falsely validate.
-```
-
-This excludes the changes to the content of the input files, and is not to be confused with
-[RemoteCacheClient#ActionKey](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/remote/common/RemoteCacheClient.java;l=38;drc=21577f202eb90ce94a337ebd2ede824d609537b6).
-
-## Updates
-
-For any issues/feature requests, please file an issue [here](https://github.com/bazelbuild/bazel/issues/new).
diff --git a/8.2.1/query/cquery.mdx b/8.2.1/query/cquery.mdx
deleted file mode 100644
index bd829c8..0000000
--- a/8.2.1/query/cquery.mdx
+++ /dev/null
@@ -1,646 +0,0 @@
----
-title: 'Configurable Query (cquery)'
----
-
-
-
-`cquery` is a variant of [`query`](/query/language) that correctly handles
-[`select()`](/docs/configurable-attributes) and build options' effects on the build
-graph.
-
-It achieves this by running over the results of Bazel's [analysis
-phase](/extending/concepts#evaluation-model),
-which integrates these effects. `query`, by contrast, runs over the results of
-Bazel's loading phase, before options are evaluated.

-For example:
-
-```
-$ cat > tree/BUILD <<EOF
-sh_library(
-    name = "ash",
-    deps = select({
-        ":excelsior": [":manna-ash"],
-        ":americana": [":white-ash"],
-        "//conditions:default": [":common-ash"],
-    }),
-)
-sh_library(name = "manna-ash")
-sh_library(name = "white-ash")
-sh_library(name = "common-ash")
-config_setting(
-    name = "excelsior",
-    values = {"define": "species=excelsior"},
-)
-config_setting(
-    name = "americana",
-    values = {"define": "species=americana"},
-)
-EOF
-```
-
-```
-# Traditional query: query doesn't know which select() branch you will choose,
-# so it conservatively lists all possible choices, including all used config_settings.
-$ bazel query "deps(//tree:ash)" --noimplicit_deps
-//tree:americana
-//tree:ash
-//tree:common-ash
-//tree:excelsior
-//tree:manna-ash
-//tree:white-ash
-
-# cquery: cquery lets you set build options at the command line and chooses
-# the exact dependencies those options imply (and also the config_setting targets).
-$ bazel cquery "deps(//tree:ash)" --define species=excelsior --noimplicit_deps
-//tree:ash (9f87702)
-//tree:manna-ash (9f87702)
-//tree:americana (9f87702)
-//tree:excelsior (9f87702)
-```
-
-Each result includes a [unique identifier](#configurations) `(9f87702)` of
-the [configuration](/reference/glossary#configuration) the
-target is built with.
-
-Since `cquery` runs over the configured target graph, it doesn't have insight
-into artifacts like build actions nor access to [`test_suite`](/reference/be/general#test_suite)
-rules as they are not configured targets. For the former, see [`aquery`](/query/aquery).
-
-## Basic syntax
-
-A simple `cquery` call looks like:
-
-`bazel cquery "function(//target)"`
-
-The query expression `"function(//target)"` consists of the following:
-
-* **`function(...)`** is the function to run on the target. `cquery`
-  supports most
-  of `query`'s [functions](/query/language#functions), plus a
-  few new ones.
-* **`//target`** is the expression fed to the function. In this example, the
-  expression is a simple target. But the query language also allows nesting of functions.
-  See the [Query guide](/query/guide) for examples.
-
-
-`cquery` requires a target to run through the [loading and analysis](/extending/concepts#evaluation-model)
-phases.
Unless otherwise specified, `cquery` parses the target(s) listed in the -query expression. See [`--universe_scope`](#universe-scope) -for querying dependencies of top-level build targets. - -## Configurations - -The line: - -``` -//tree:ash (9f87702) -``` - -means `//tree:ash` was built in a configuration with ID `9f87702`. For most -targets, this is an opaque hash of the build option values defining the -configuration. - -To see the configuration's complete contents, run: - -``` -$ bazel config 9f87702 -``` - -`9f87702` is a prefix of the complete ID. This is because complete IDs are -SHA-256 hashes, which are long and hard to follow. `cquery` understands any valid -prefix of a complete ID, similar to -[Git short hashes](https://git-scm.com/book/en/v2/Git-Tools-Revision-Selection#_revision_selection). - To see complete IDs, run `$ bazel config`. - -## Target pattern evaluation - -`//foo` has a different meaning for `cquery` than for `query`. This is because -`cquery` evaluates _configured_ targets and the build graph may have multiple -configured versions of `//foo`. - -For `cquery`, a target pattern in the query expression evaluates -to every configured target with a label that matches that pattern. Output is -deterministic, but `cquery` makes no ordering guarantee beyond the -[core query ordering contract](/query/language#graph-order). - -This produces subtler results for query expressions than with `query`. -For example, the following can produce multiple results: - -``` -# Analyzes //foo in the target configuration, but also analyzes -# //genrule_with_foo_as_tool which depends on an exec-configured -# //foo. So there are two configured target instances of //foo in -# the build graph. -$ bazel cquery //foo --universe_scope=//foo,//genrule_with_foo_as_tool -//foo (9f87702) -//foo (exec) -``` - -If you want to precisely declare which instance to query over, use -the [`config`](#config) function. - -See `query`'s [target pattern -documentation](/query/language#target-patterns) for more information on target patterns. - -## Functions - -Of the [set of functions](/query/language#functions "list of query functions") -supported by `query`, `cquery` supports all but -[`allrdeps`](/query/language#allrdeps), -[`buildfiles`](/query/language#buildfiles), -[`rbuildfiles`](/query/language#rbuildfiles), -[`siblings`](/query/language#siblings), [`tests`](/query/language#tests), and -[`visible`](/query/language#visible). - -`cquery` also introduces the following new functions: - -### config - -`expr ::= config(expr, word)` - -The `config` operator attempts to find the configured target for -the label denoted by the first argument and configuration specified by the -second argument. - -Valid values for the second argument are `null` or a -[custom configuration hash](#configurations). Hashes can be retrieved from `$ -bazel config` or a previous `cquery`'s output. - -Examples: - -``` -$ bazel cquery "config(//bar, 3732cc8)" --universe_scope=//foo -``` - -``` -$ bazel cquery "deps(//foo)" -//bar (exec) -//baz (exec) - -$ bazel cquery "config(//baz, 3732cc8)" -``` - -If not all results of the first argument can be found in the specified -configuration, only those that can be found are returned. If no results -can be found in the specified configuration, the query fails. - -## Options - -### Build options - -`cquery` runs over a regular Bazel build and thus inherits the set of -[options](/reference/command-line-reference#build-options) available during a build. 
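-
-For example (a sketch with a hypothetical target `//foo` and a truncated
-configuration hash), changing build options changes which configured targets the
-query returns, and any printed hash can then be inspected with `bazel config`:
-
-```
-$ bazel cquery "deps(//foo)" --compilation_mode=opt
-//foo (a1b2c3d)
-...
-
-$ bazel config a1b2c3d
-```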
-
-### Using cquery options
-
-#### `--universe_scope` (comma-separated list)
-
-Often, the dependencies of configured targets go through
-[transitions](/extending/rules#configurations),
-which causes their configuration to differ from their dependent. This flag
-allows you to query a target as if it were built as a dependency or a transitive
-dependency of another target. For example:
-
-```
-# x/BUILD
-genrule(
-    name = "my_gen",
-    srcs = ["x.in"],
-    outs = ["x.cc"],
-    cmd = "$(locations :tool) $< >$@",
-    tools = [":tool"],
-)
-cc_binary(
-    name = "tool",
-    srcs = ["tool.cpp"],
-)
-```
-
-Genrules configure their tools in the
-[exec configuration](/extending/rules#configurations)
-so the following queries would produce the following outputs:
-
-| Query | Target Built | Output |
-| ----- | ------------ | ------ |
-| `bazel cquery "//x:tool"` | `//x:tool` | `//x:tool (target config)` |
-| `bazel cquery "//x:tool" --universe_scope="//x:my_gen"` | `//x:my_gen` | `//x:tool (exec config)` |
-
-If this flag is set, its contents are built. _If it's not set, all targets
-mentioned in the query expression are built_ instead. The transitive closure of the
-built targets is used as the universe of the query. Either way, the targets to
-be built must be buildable at the top level (that is, compatible with top-level
-options). `cquery` returns results in the transitive closure of these
-top-level targets.
-
-Even if it's possible to build all targets in a query expression at the top
-level, it may be beneficial to not do so. For example, explicitly setting
-`--universe_scope` could prevent building targets multiple times in
-configurations you don't care about. It could also help specify which configuration version of a
-target you're looking for (since it's not currently possible
-to fully specify this any other way). You should set this flag
-if your query expression is more complex than `deps(//foo)`.
-
-#### `--implicit_deps` (boolean, default=True)
-
-Setting this flag to false filters out all results that aren't explicitly set in
-the BUILD file but are instead set elsewhere by Bazel. This includes filtering resolved
-toolchains.
-
-#### `--tool_deps` (boolean, default=True)
-
-Setting this flag to false filters out all configured targets for which the
-path from the queried target to them crosses a transition between the target
-configuration and the
-[non-target configurations](/extending/rules#configurations).
-If the queried target is in the target configuration, setting `--notool_deps` will
-only return targets that also are in the target configuration. If the queried
-target is in a non-target configuration, setting `--notool_deps` will only return
-targets also in non-target configurations. This setting generally does not affect filtering
-of resolved toolchains.
-
-#### `--include_aspects` (boolean, default=True)
-
-Include dependencies added by [aspects](/extending/aspects).
-
-If this flag is disabled, `cquery somepath(X, Y)` and
-`cquery deps(X) | grep 'Y'` omit Y if X only depends on it through an aspect.
-
-## Output formats
-
-By default, cquery outputs results in a dependency-ordered list of label and configuration pairs.
-There are other options for exposing the results as well.
-
-### Transitions
-
-```
---transitions=lite
---transitions=full
-```
-
-Configuration [transitions](/extending/rules#configurations)
-are used to build targets underneath the top-level targets in different
-configurations than the top-level targets.
-
-For example, a target might impose a transition to the exec configuration on all
-dependencies in its `tools` attribute. These are known as attribute
-transitions. Rules can also impose transitions on their own configurations,
-known as rule class transitions. This output format outputs information about
-these transitions such as what type they are and the effect they have on build
-options.
-
-This output format is triggered by the `--transitions` flag which by default is
-set to `NONE`. It can be set to `FULL` or `LITE` mode. `FULL` mode outputs
-information about rule class transitions and attribute transitions including a
-detailed diff of the options before and after the transition. `LITE` mode
-outputs the same information without the options diff.
-
-### Protocol message output
-
-```
---output=proto
-```
-
-This option causes the resulting targets to be printed in a binary protocol
-buffer form.
The definition of the protocol buffer can be found at
-[src/main/protobuf/analysis_v2.proto](https://github.com/bazelbuild/bazel/blob/master/src/main/protobuf/analysis_v2.proto).
-
-`CqueryResult` is the top-level message containing the results of the cquery. It
-has a list of `ConfiguredTarget` messages and a list of `Configuration`
-messages. Each `ConfiguredTarget` has a `configuration_id` whose value is equal
-to that of the `id` field from the corresponding `Configuration` message.
-
-#### --[no]proto:include_configurations
-
-By default, cquery results return configuration information as part of each
-configured target. If you'd like to omit this information and get proto output
-that is formatted exactly like query's proto output, set this flag to false.
-
-See [query's proto output documentation](/query/language#output-formats)
-for more proto output-related options.
-
-Note: While selects are resolved both at the top level of returned
-targets and within attributes, all possible inputs for selects are still
-included as `rule_input` fields.
-
-### Graph output
-
-```
---output=graph
-```
-
-This option generates output as a Graphviz-compatible .dot file. See `query`'s
-[graph output documentation](/query/language#display-result-graph) for details. `cquery`
-also supports [`--graph:node_limit`](/query/language#graph-nodelimit) and
-[`--graph:factored`](/query/language#graph-factored).
-
-### Files output
-
-```
---output=files
-```
-
-This option prints a list of the output files produced by each target matched
-by the query similar to the list printed at the end of a `bazel build`
-invocation. The output contains only the files advertised in the requested
-output groups as determined by the
-[`--output_groups`](/reference/command-line-reference#flag--output_groups) flag.
-It does include source files.
-
-All paths emitted by this output format are relative to the
-[execroot](https://bazel.build/remote/output-directories), which can be obtained
-via `bazel info execution_root`. If the `bazel-out` convenience symlink exists,
-paths to files in the main repository also resolve relative to the workspace
-directory.
-
-Note: The output of `bazel cquery --output=files //pkg:foo` contains the output
-files of `//pkg:foo` in *all* configurations that occur in the build (also see
-the [section on target pattern evaluation](#target-pattern-evaluation)). If that
-is not desired, wrap your query in [`config(..., target)`](#config).
-
-### Defining the output format using Starlark
-
-```
---output=starlark
-```
-
-This output format calls a [Starlark](/rules/language)
-function for each configured target in the query result, and prints the value
-returned by the call. The `--starlark:file` flag specifies the location of a
-Starlark file that defines a function named `format` with a single parameter,
-`target`. This function is called for each [Target](/rules/lib/builtins/Target)
-in the query result. Alternatively, for convenience, you may specify just the
-body of a function declared as `def format(target): return expr` by using the
-`--starlark:expr` flag.
-
-#### 'cquery' Starlark dialect
-
-The cquery Starlark environment differs from a BUILD or .bzl file. It includes
-all core Starlark
-[built-in constants and functions](https://github.com/bazelbuild/starlark/blob/master/spec.md#built-in-constants-and-functions),
-plus a few cquery-specific ones described below, but not (for example) `glob`,
-`native`, or `rule`, and it does not support load statements.
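-
-As a minimal illustration (assuming any buildable target `//pkg:foo`; the file
-name `my_format.cquery` is arbitrary), the following two invocations print the
-same thing, each target's label:
-
-```
-  bazel cquery //pkg:foo --output=starlark \
-    --starlark:expr="str(target.label)"
-
-  # my_format.cquery contains: def format(target): return str(target.label)
-  bazel cquery //pkg:foo --output=starlark --starlark:file=my_format.cquery
-```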
-
-##### build_options(target)
-
-`build_options(target)` returns a map whose keys are build option identifiers (see
-[Configurations](/extending/config))
-and whose values are their Starlark values. Build options whose values are not legal Starlark
-values are omitted from this map.
-
-If the target is an input file, `build_options(target)` returns None, as input file
-targets have a null configuration.
-
-##### providers(target)
-
-`providers(target)` returns a map whose keys are names of
-[providers](/extending/rules#providers)
-(for example, `"DefaultInfo"`) and whose values are their Starlark values. Providers
-whose values are not legal Starlark values are omitted from this map.
-
-#### Examples
-
-Print a space-separated list of the base names of all files produced by `//foo`:
-
-```
-  bazel cquery //foo --output=starlark \
-    --starlark:expr="' '.join([f.basename for f in target.files.to_list()])"
-```
-
-Print a space-separated list of the paths of all files produced by **rule** targets in
-`//bar` and its subpackages:
-
-```
-  bazel cquery 'kind(rule, //bar/...)' --output=starlark \
-    --starlark:expr="' '.join([f.path for f in target.files.to_list()])"
-```
-
-Print a list of the mnemonics of all actions registered by `//foo`.
-
-```
-  bazel cquery //foo --output=starlark \
-    --starlark:expr="[a.mnemonic for a in target.actions]"
-```
-
-Print a list of compilation outputs registered by a `cc_library` `//baz`.
-
-```
-  bazel cquery //baz --output=starlark \
-    --starlark:expr="[f.path for f in target.output_groups.compilation_outputs.to_list()]"
-```
-
-Print the value of the command line option `--javacopt` when building `//foo`.
-
-```
-  bazel cquery //foo --output=starlark \
-    --starlark:expr="build_options(target)['//command_line_option:javacopt']"
-```
-
-Print the label of each target with exactly one output. This example uses
-Starlark functions defined in a file.
-
-```
-  $ cat example.cquery
-
-  def has_one_output(target):
-    return len(target.files.to_list()) == 1
-
-  def format(target):
-    if has_one_output(target):
-      return target.label
-    else:
-      return ""
-
-  $ bazel cquery //baz --output=starlark --starlark:file=example.cquery
-```
-
-Print the label of each target which is strictly Python 3. This example uses
-Starlark functions defined in a file.
-
-```
-  $ cat example.cquery
-
-  def format(target):
-    p = providers(target)
-    py_info = p.get("PyInfo")
-    if py_info and py_info.has_py3_only_sources:
-      return target.label
-    else:
-      return ""
-
-  $ bazel cquery //baz --output=starlark --starlark:file=example.cquery
-```
-
-Extract a value from a user-defined Provider.
-
-```
-  $ cat some_package/my_rule.bzl
-
-  MyRuleInfo = provider(fields={"color": "the name of a color"})
-
-  def _my_rule_impl(ctx):
-    ...
-    return [MyRuleInfo(color="red")]
-
-  my_rule = rule(
-    implementation = _my_rule_impl,
-    attrs = {...},
-  )
-
-  $ cat example.cquery
-
-  def format(target):
-    p = providers(target)
-    my_rule_info = p.get("//some_package:my_rule.bzl%MyRuleInfo")
-    if my_rule_info:
-      return my_rule_info.color
-    return ""
-
-  $ bazel cquery //baz --output=starlark --starlark:file=example.cquery
-```
-
-## cquery vs. query
-
-`cquery` and `query` complement each other and excel in
-different niches. Consider the following to decide which is right for you:
-
-* `cquery` follows specific `select()` branches to
-  model the exact graph you build. `query` doesn't know which
-  branch the build chooses, so overapproximates by including all branches.
-* `cquery`'s precision requires building more of the graph than
-  `query` does. Specifically, `cquery`
-  evaluates _configured targets_ while `query` only
-  evaluates _targets_. This takes more time and uses more memory.
-* `cquery`'s interpretation of
-  the [query language](/query/language) introduces ambiguity
-  that `query` avoids. For example,
-  if `"//foo"` exists in two configurations, which one
-  should `cquery "deps(//foo)"` use?
-  The [`config`](#config) function can help with this.
-* As a newer tool, `cquery` lacks support for certain use
-  cases. See [Known issues](#known-issues) for details.
-
-## Known issues
-
-**All targets that `cquery` "builds" must have the same configuration.**
-
-Before evaluating queries, `cquery` triggers a build up to just
-before the point where build actions would execute. The targets it
-"builds" are by default selected from all labels that appear in the query
-expression (this can be overridden
-with [`--universe_scope`](#universe-scope)). These
-must have the same configuration.
-
-While these generally share the top-level "target" configuration,
-rules can change their own configuration with
-[incoming edge transitions](/extending/config#incoming-edge-transitions).
-This is where `cquery` falls short.
-
-Workaround: If possible, set `--universe_scope` to a stricter
-scope. For example:
-
-```
-# This command attempts to build the transitive closures of both //foo and
-# //bar. //bar uses an incoming edge transition to change its --cpu flag.
-$ bazel cquery 'somepath(//foo, //bar)'
-ERROR: Error doing post analysis query: Top-level targets //foo and //bar
-have different configurations (top-level targets with different
-configurations is not supported)
-
-# This command only builds the transitive closure of //foo, under which
-# //bar should exist in the correct configuration.
-$ bazel cquery 'somepath(//foo, //bar)' --universe_scope=//foo
-```
-
-**No support for [`--output=xml`](/query/language#xml).**
-
-**Non-deterministic output.**
-
-`cquery` does not automatically wipe the build graph from
-previous commands and is therefore prone to picking up results from past
-queries. For example, `genrule` exerts an exec transition on
-its `tools` attribute - that is, it configures its tools in the
-[exec configuration](/extending/rules#configurations).
-
-You can see the lingering effects of that transition below.
-
-```
-$ cat > foo/BUILD <<EOF
-genrule(
-    name = "my_gen",
-    srcs = ["x.in"],
-    outs = ["x.cc"],
-    cmd = "$(locations :tool) $< >$@",
-    tools = [":tool"],
-)
-cc_library(
-    name = "tool",
-)
-EOF
-
- $ bazel cquery "//foo:tool"
-tool(target_config)
-
- $ bazel cquery "deps(//foo:my_gen)"
-my_gen (target_config)
-tool (exec_config)
-...
-
- $ bazel cquery "//foo:tool"
-tool(exec_config)
-```
-
-Workaround: change any startup option to force re-analysis of configured targets.
-For example, add `--test_arg=` to your build command.
-
-## Troubleshooting
-
-### Recursive target patterns (`/...`)
-
-If you encounter:
-
-```
-$ bazel cquery --universe_scope=//foo:app "somepath(//foo:app, //foo/...)"
-ERROR: Error doing post analysis query: Evaluation failed: Unable to load package '[foo]'
-because package is not in scope. Check that all target patterns in query expression are within the
---universe_scope of this query.
-```
-
-this incorrectly suggests package `//foo` isn't in scope even though
-`--universe_scope=//foo:app` includes it. This is due to design limitations in
-`cquery`.
As a workaround, explicitly include `//foo/...` in the universe -scope: - -``` -$ bazel cquery --universe_scope=//foo:app,//foo/... "somepath(//foo:app, //foo/...)" -``` - -If that doesn't work (for example, because some target in `//foo/...` can't -build with the chosen build flags), manually unwrap the pattern into its -constituent packages with a pre-processing query: - -``` -# Replace "//foo/..." with a subshell query call (not cquery!) outputting each package, piped into -# a sed call converting "<pkg>" to "//<pkg>:*", piped into a "+"-delimited line merge. -# Output looks like "//foo:*+//foo/bar:*+//foo/baz". -# -$ bazel cquery --universe_scope=//foo:app "somepath(//foo:app, $(bazel query //foo/... ---output=package | sed -e 's/^/\/\//' -e 's/$/:*/' | paste -sd "+" -))" -``` diff --git a/8.2.1/reference/glossary.mdx b/8.2.1/reference/glossary.mdx deleted file mode 100644 index 3b0b497..0000000 --- a/8.2.1/reference/glossary.mdx +++ /dev/null @@ -1,715 +0,0 @@ ---- -title: 'Bazel Glossary' ---- - - - -### Action - -A command to run during the build, for example, a call to a compiler that takes -[artifacts](#artifact) as inputs and produces other artifacts as outputs. -Includes metadata like the command line arguments, action key, environment -variables, and declared input/output artifacts. - -**See also:** [Rules documentation](/extending/rules#actions) - -### Action cache - -An on-disk cache that stores a mapping of executed [actions](#action) to the -outputs they created. The cache key is known as the [action key](#action-key). A -core component for Bazel's incrementality model. The cache is stored in the -output base directory and thus survives Bazel server restarts. - -### Action graph - -An in-memory graph of [actions](#action) and the [artifacts](#artifact) that -these actions read and generate. The graph might include artifacts that exist as -source files (for example, in the file system) as well as generated -intermediate/final artifacts that are not mentioned in `BUILD` files. Produced -during the [analysis phase](#analysis-phase) and used during the [execution -phase](#execution-phase). - -### Action graph query (aquery) - -A [query](#query-concept) tool that can query over build [actions](#action). -This provides the ability to analyze how [build rules](#rule) translate into the -actual work builds do. - -### Action key - -The cache key of an [action](#action). Computed based on action metadata, which -might include the command to be executed in the action, compiler flags, library -locations, or system headers, depending on the action. Enables Bazel to cache or -invalidate individual actions deterministically. - -### Analysis phase - -The second phase of a build. Processes the [target graph](#target-graph) -specified in [`BUILD` files](#build-file) to produce an in-memory [action -graph](#action-graph) that determines the order of actions to run during the -[execution phase](#execution-phase). This is the phase in which rule -implementations are evaluated. - -### Artifact - -A source file or a generated file. Can also be a directory of files, known as -[tree artifacts](#tree-artifact). - -An artifact may be an input to multiple actions, but must only be generated by -at most one action. - -An artifact that corresponds to a [file target](#target) can be addressed by a -label. - -### Aspect - -A mechanism for rules to create additional [actions](#action) in their -dependencies. 
For example, if target A depends on B, one can apply an aspect on -A that traverses *up* a dependency edge to B, and runs additional actions in B -to generate and collect additional output files. These additional actions are -cached and reused between targets requiring the same aspect. Created with the -`aspect()` Starlark Build API function. Can be used, for example, to generate -metadata for IDEs, and create actions for linting. - -**See also:** [Aspects documentation](/extending/aspects) - -### Aspect-on-aspect - -A composition mechanism whereby aspects can be applied to the results -of other aspects. For example, an aspect that generates information for use by -IDEs can be applied on top of an aspect that generates `.java` files from a -proto. - -For an aspect `A` to apply on top of aspect `B`, the [providers](#provider) that -`B` advertises in its [`provides`](/rules/lib/globals#aspect.provides) attribute -must match what `A` declares it wants in its [`required_aspect_providers`](/rules/lib/globals#aspect.required_aspect_providers) -attribute. - -### Attribute - -A parameter to a [rule](#rule), used to express per-target build information. -Examples include `srcs`, `deps`, and `copts`, which respectively declare a -target's source files, dependencies, and custom compiler options. The particular -attributes available for a given target depend on its rule type. - -### .bazelrc - -Bazel’s configuration file used to change the default values for [startup -flags](#startup-flags) and [command flags](#command-flags), and to define common -groups of options that can then be set together on the Bazel command line using -a `--config` flag. Bazel can combine settings from multiple bazelrc files -(systemwide, per-workspace, per-user, or from a custom location), and a -`bazelrc` file may also import settings from other `bazelrc` files. - -### Blaze - -The Google-internal version of Bazel. Google’s main build system for its -mono-repository. - -### BUILD File - -A `BUILD` file is the main configuration file that tells Bazel what software -outputs to build, what their dependencies are, and how to build them. Bazel -takes a `BUILD` file as input and uses the file to create a graph of dependencies -and to derive the actions that must be completed to build intermediate and final -software outputs. A `BUILD` file marks a directory and any sub-directories not -containing a `BUILD` file as a [package](#package), and can contain -[targets](#target) created by [rules](#rule). The file can also be named -`BUILD.bazel`. - -### BUILD.bazel File - -See [`BUILD` File](#build-file). Takes precedence over a `BUILD` file in the same -directory. - -### .bzl File - -A file that defines rules, [macros](#macro), and constants written in -[Starlark](#starlark). These can then be imported into [`BUILD` -files](#build-file) using the `load()` function. - -// TODO: ### Build event protocol - -// TODO: ### Build flag - -### Build graph - -The dependency graph that Bazel constructs and traverses to perform a build. -Includes nodes like [targets](#target), [configured -targets](#configured-target), [actions](#action), and [artifacts](#artifact). A -build is considered complete when all [artifacts](#artifact) on which a set of -requested targets depend are verified as up-to-date. - -### Build setting - -A Starlark-defined piece of [configuration](#configuration). -[Transitions](#transition) can set build settings to change a subgraph's -configuration. 
-also known as a build flag.
-
-### Clean build
-
-A build that doesn't use the results of earlier builds. This is generally slower
-than an [incremental build](#incremental-build) but commonly considered to be
-more [correct](#correctness). Bazel guarantees both clean and incremental builds
-are always correct.
-
-### Client-server model
-
-The `bazel` command-line client automatically starts a background server on the
-local machine to execute Bazel [commands](#command). The server persists across
-commands but automatically stops after a period of inactivity (or explicitly via
-`bazel shutdown`). Splitting Bazel into a server and client helps amortize JVM
-startup time and supports faster [incremental builds](#incremental-build)
-because the [action graph](#action-graph) remains in memory across commands.
-
-### Command
-
-Used on the command line to invoke different Bazel functions, like `bazel
-build`, `bazel test`, `bazel run`, and `bazel query`.
-
-### Command flags
-
-A set of flags specific to a [command](#command). Command flags are specified
-*after* the command (`bazel build <command flags>`). Flags can be applicable to
-one or more commands. For example, `--configure` is a flag exclusively for the
-`bazel sync` command, but `--keep_going` is applicable to `sync`, `build`,
-`test` and more. Flags are often used for [configuration](#configuration)
-purposes, so changes in flag values can cause Bazel to invalidate in-memory
-graphs and restart the [analysis phase](#analysis-phase).
-
-### Configuration
-
-Information outside of [rule](#rule) definitions that impacts how rules generate
-[actions](#action). Every build has at least one configuration specifying the
-target platform, action environment variables, and command-line [build
-flags](#command-flags). [Transitions](#transition) may create additional
-configurations, such as for host tools or cross-compilation.
-
-**See also:** [Configurations](/extending/rules#configurations)
-
-// TODO: ### Configuration fragment
-
-### Configuration trimming
-
-The process of only including the pieces of [configuration](#configuration) a
-target actually needs. For example, if you build Java binary `//:j` with C++
-dependency `//:c`, it's wasteful to include the value of `--javacopt` in the
-configuration of `//:c` because changing `--javacopt` unnecessarily breaks C++
-build cacheability.
-
-### Configured query (cquery)
-
-A [query](#query-concept) tool that queries over [configured
-targets](#configured-target) (after the [analysis phase](#analysis-phase)
-completes). This means `select()` and [build flags](#command-flags) (such as
-`--platforms`) are accurately reflected in the results.
-
-**See also:** [cquery documentation](/query/cquery)
-
-### Configured target
-
-The result of evaluating a [target](#target) with a
-[configuration](#configuration). The [analysis phase](#analysis-phase) produces
-this by combining the build's options with the targets that need to be built.
-For example, if `//:foo` builds for two different architectures in the same
-build, it has two configured targets: `<//:foo, x86>` and `<//:foo, arm>`.
-
-### Correctness
-
-A build is correct when its output faithfully reflects the state of its
-transitive inputs. To achieve correct builds, Bazel strives to be
-[hermetic](#hermeticity) and reproducible, and to make [build
-analysis](#analysis-phase) and [action execution](#execution-phase)
-deterministic.
-
-### Dependency
-
-A directed edge between two [targets](#target). A target `//:foo` has a *target
-dependency* on target `//:bar` if `//:foo`'s attribute values contain a
-reference to `//:bar`. `//:foo` has an *action dependency* on `//:bar` if an
-action in `//:foo` depends on an input [artifact](#artifact) created by an
-action in `//:bar`.
-
-In certain contexts, it could also refer to an _external dependency_; see
-[modules](#module).
-
-### Depset
-
-A data structure for collecting data on transitive dependencies. Optimized so
-that merging depsets is time and space efficient, because it’s common to have
-very large depsets (hundreds of thousands of files). Implemented to
-recursively refer to other depsets for space efficiency reasons. [Rule](#rule)
-implementations should not "flatten" depsets by converting them to lists unless
-the rule is at the top level of the build graph. Flattening large depsets incurs
-huge memory consumption. Also known as *nested sets* in Bazel's internal
-implementation.
-
-**See also:** [Depset documentation](/extending/depsets)
-
-### Disk cache
-
-A local on-disk blob store for the remote caching feature. Can be used in
-conjunction with an actual remote blob store.
-
-### Distdir
-
-A read-only directory containing files that Bazel would otherwise fetch from the
-internet using repository rules. Enables builds to run fully offline.
-
-### Dynamic execution
-
-An execution strategy that selects between local and remote execution based on
-various heuristics, and uses the execution results of the faster successful
-method. Certain [actions](#action) are executed faster locally (for example,
-linking) and others are faster remotely (for example, highly parallelizable
-compilation). A dynamic execution strategy can provide the best possible
-incremental and clean build times.
-
-### Execution phase
-
-The third phase of a build. Executes the [actions](#action) in the [action
-graph](#action-graph) created during the [analysis phase](#analysis-phase).
-These actions invoke executables (compilers, scripts) to read and write
-[artifacts](#artifact). *Spawn strategies* control how these actions are
-executed: locally, remotely, dynamically, sandboxed, docker, and so on.
-
-### Execution root
-
-A directory in the [workspace](#workspace)’s [output base](#output-base)
-directory where local [actions](#action) are executed in
-non-[sandboxed](#sandboxing) builds. The directory contents are mostly symlinks
-of input [artifacts](#artifact) from the workspace. The execution root also
-contains symlinks to external repositories as other inputs and the `bazel-out`
-directory to store outputs. Prepared during the [loading phase](#loading-phase)
-by creating a *symlink forest* of the directories that represent the transitive
-closure of packages on which a build depends. Accessible with `bazel info
-execution_root` on the command line.
-
-### File
-
-See [Artifact](#artifact).
-
-### Hermeticity
-
-A build is hermetic if there are no external influences on its build and test
-operations, which helps to make sure that results are deterministic and
-[correct](#correctness). For example, hermetic builds typically disallow network
-access to actions, restrict access to declared inputs, use fixed timestamps and
-timezones, restrict access to environment variables, and use fixed seeds for
-random number generators.
-
-### Incremental build
-
-An incremental build reuses the results of earlier builds to reduce build time
-and resource usage. Dependency checking and caching aim to produce correct
-results for this type of build. An incremental build is the opposite of a clean
-build.
-
-// TODO: ### Install base
-
-### Label
-
-An identifier for a [target](#target). Generally has the form
-`@repo//path/to/package:target`, where `repo` is the (apparent) name of the
-[repository](#repository) containing the target, `path/to/package` is the path
-to the directory that contains the [`BUILD` file](#build-file) declaring the
-target (this directory is also known as the [package](#package)), and `target`
-is the name of the target itself. Depending on the situation, parts of this
-syntax may be omitted.
-
-**See also**: [Labels](/concepts/labels)
-
-### Loading phase
-
-The first phase of a build where Bazel executes [`BUILD` files](#build-file) to
-create [packages](#package). [Macros](#macro) and certain functions like
-`glob()` are evaluated in this phase. Interleaved with the second phase of the
-build, the [analysis phase](#analysis-phase), to build up a [target
-graph](#target-graph).
-
-### Legacy macro
-
-A flavor of [macro](#macro) which is declared as an ordinary
-[Starlark](#starlark) function, and which runs as a side effect of executing a
-`BUILD` file.
-
-Legacy macros can do anything a function can. This means they can be convenient,
-but they can also be harder to read, write, and use. A legacy macro might
-unexpectedly mutate its arguments or fail when given a `select()` or ill-typed
-argument.
-
-Contrast with [symbolic macros](#symbolic-macro).
-
-**See also:** [Legacy macro documentation](/extending/legacy-macros)
-
-### Macro
-
-A mechanism to compose multiple [rule](#rule) target declarations together under
-a single [Starlark](#starlark) callable. Enables reusing common rule declaration
-patterns across `BUILD` files. Expanded to the underlying rule target
-declarations during the [loading phase](#loading-phase).
-
-Comes in two flavors: [symbolic macros](#symbolic-macro) (since Bazel 8) and
-[legacy macros](#legacy-macro).
-
-### Mnemonic
-
-A short, human-readable string selected by a rule author to quickly understand
-what an [action](#action) in the rule is doing. Mnemonics can be used as
-identifiers for *spawn strategy* selections. Some examples of action mnemonics
-are `Javac` from Java rules, `CppCompile` from C++ rules, and
-`AndroidManifestMerger` from Android rules.
-
-### Module
-
-A Bazel project that can have multiple versions, each of which can have
-dependencies on other modules. This is analogous to familiar concepts in other
-dependency management systems, such as a Maven _artifact_, an npm _package_, a
-Go _module_, or a Cargo _crate_. Modules form the backbone of Bazel's external
-dependency management system.
-
-Each module is backed by a [repo](#repository) with a `MODULE.bazel` file at its
-root. This file contains metadata about the module itself (such as its name and
-version), its direct dependencies, and various other data including toolchain
-registrations and [module extension](#module-extension) input.
-
-Module metadata is hosted in Bazel registries.
-
-**See also:** [Bazel modules](/external/module)
-
-### Module Extension
-
-A piece of logic that can be run to generate [repos](#repository) by reading
-inputs from across the [module](#module) dependency graph and invoking [repo
-rules](#repository-rule). Module extensions have capabilities similar to repo
-rules, allowing them to access the internet, perform file I/O, and so on.
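-
-As a minimal illustration (a sketch, not part of the original glossary; the
-extension, tag class, and `my_repo_rule` names are invented), a module
-extension might look like this:
-
-```python
-# Sketch of a module extension. `my_repo_rule` is a hypothetical repo rule.
-def _my_deps_impl(ctx):
-    # Read the "dep" tags declared by modules across the dependency graph...
-    for mod in ctx.modules:
-        for dep in mod.tags.dep:
-            # ...and invoke a repo rule to materialize one repo per tag.
-            my_repo_rule(name = dep.name, url = dep.url)
-
-my_deps = module_extension(
-    implementation = _my_deps_impl,
-    tag_classes = {
-        "dep": tag_class(attrs = {"name": attr.string(), "url": attr.string()}),
-    },
-)
-```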
-
-**See also:** [Module extensions](/external/extension)
-
-### Native rules
-
-[Rules](#rule) that are built into Bazel and implemented in Java. Such rules
-appear in [`.bzl` files](#bzl-file) as functions in the native module (for
-example, `native.cc_library` or `native.java_library`). User-defined rules
-(non-native) are created using [Starlark](#starlark).
-
-### Output base
-
-A [workspace](#workspace)-specific directory to store Bazel output files. Used
-to separate outputs from the *workspace*'s source tree (the [main
-repo](#repository)). Located in the [output user root](#output-user-root).
-
-### Output groups
-
-A group of files that is expected to be built when Bazel finishes building a
-target. [Rules](#rule) put their usual outputs in the "default output group"
-(e.g. the `.jar` file of a `java_library`, `.a` and `.so` for `cc_library`
-targets). The default output group is the output group whose
-[artifacts](#artifact) are built when a target is requested on the command line.
-Rules can define more named output groups that can be explicitly specified in
-[`BUILD` files](#build-file) (`filegroup` rule) or the command line
-(`--output_groups` flag).
-
-### Output user root
-
-A user-specific directory to store Bazel's outputs. The directory name is
-derived from the user's system username. Prevents output file collisions if
-multiple users are building the same project on the system at the same time.
-Contains subdirectories corresponding to build outputs of individual workspaces,
-also known as [output bases](#output-base).
-
-### Package
-
-The set of [targets](#target) defined by a [`BUILD` file](#build-file). A
-package's name is the `BUILD` file's path relative to the [repo](#repository)
-root. A package can contain subpackages, or subdirectories containing `BUILD`
-files, thus forming a package hierarchy.
-
-### Package group
-
-A [target](#target) representing a set of packages. Often used in `visibility`
-attribute values.
-
-### Platform
-
-A "machine type" involved in a build. This includes the machine Bazel runs on
-(the "host" platform), the machines build tools execute on ("exec" platforms),
-and the machines targets are built for ("target platforms").
-
-### Provider
-
-A schema describing a unit of information to pass between
-[rule targets](#rule-target) along dependency relationships. Typically this
-contains information like compiler options, transitive source or output files,
-and build metadata. Frequently used in conjunction with [depsets](#depset) to
-efficiently store accumulated transitive data. An example of a built-in provider
-is `DefaultInfo`.
-
-Note: The object holding specific data for a given rule target is
-referred to as a "provider instance", although sometimes this is conflated with
-"provider".
-
-**See also:** [Provider documentation](/extending/rules#providers)
-
-### Query (concept)
-
-The process of analyzing a [build graph](#build-graph) to understand
-[target](#target) properties and dependency structures. Bazel supports three
-query variants: [query](#query-command), [cquery](#configured-query), and
-[aquery](#action-graph-query).
-
-### query (command)
-
-A [query](#query-concept) tool that operates over the build's post-[loading
-phase](#loading-phase) [target graph](#target-graph). This is relatively fast,
-but can't analyze the effects of `select()`, [build flags](#command-flags),
-[artifacts](#artifact), or build [actions](#action).
-
-**See also:** [Query how-to](/query/guide), [Query reference](/query/language)
-
-### Repository
-
-A directory tree with a boundary marker file at its root, containing source
-files that can be used in a Bazel build. Often shortened to just **repo**.
-
-A repo boundary marker file can be `MODULE.bazel` (signaling that this repo
-represents a Bazel module), `REPO.bazel`, or in legacy contexts, `WORKSPACE` or
-`WORKSPACE.bazel`. Any repo boundary marker file will signify the boundary of a
-repo; multiple such files can coexist in a directory.
-
-The *main repo* is the repo in which the current Bazel command is being run.
-
-*External repos* are defined by specifying [modules](#module) in `MODULE.bazel`
-files, or invoking [repo rules](#repository-rule) in [module
-extensions](#module-extension). They can be fetched on demand to a predetermined
-"magical" location on disk.
-
-Each repo has a unique, constant *canonical* name, and potentially different
-*apparent* names when viewed from other repos.
-
-**See also**: [External dependencies overview](/external/overview)
-
-### Repository cache
-
-A shared content-addressable cache of files downloaded by Bazel for builds,
-shareable across [workspaces](#workspace). Enables offline builds after the
-initial download. Commonly used to cache files downloaded through [repository
-rules](#repository-rule) like `http_archive` and repository rule APIs like
-`repository_ctx.download`. Files are cached only if their SHA-256 checksums are
-specified for the download.
-
-### Repository rule
-
-A schema for repository definitions that tells Bazel how to materialize (or
-"fetch") a [repository](#repository). Often shortened to just **repo rule**.
-Repo rules are invoked by Bazel internally to define repos backed by
-[modules](#module), or can be invoked by [module extensions](#module-extension).
-Repo rules can access the internet or perform file I/O; the most common repo
-rule is `http_archive` to download an archive containing source files from the
-internet.
-
-**See also:** [Repo rule documentation](/extending/repo)
-
-### Reproducibility
-
-The property of a build or test that a set of inputs to the build or test will
-always produce the same set of outputs every time, regardless of time, method,
-or environment. Note that this does not necessarily imply that the outputs are
-[correct](#correctness) or the desired outputs.
-
-### Rule
-
-A schema for defining [rule targets](#rule-target) in a `BUILD` file, such as
-`cc_library`. From the perspective of a `BUILD` file author, a rule consists of
-a set of [attributes](#attribute) and black box logic. The logic tells the
-rule target how to produce output [artifacts](#artifact) and pass information to
-other rule targets. From the perspective of `.bzl` authors, rules are the
-primary way to extend Bazel to support new programming languages and
-environments.
-
-Rules are instantiated to produce rule targets in the
-[loading phase](#loading-phase). In the [analysis phase](#analysis-phase) rule
-targets communicate information to their downstream dependencies in the form of
-[providers](#provider), and register [actions](#action) describing how to
-generate their output artifacts. These actions are run in the [execution
-phase](#execution-phase).
-
-Note: Historically the term "rule" has been used to refer to a rule target.
-This usage was inherited from tools like Make, but causes confusion and should
-be avoided for Bazel.
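-
-As a minimal illustration (a sketch, not part of the original glossary; the
-`hello` rule is invented for this example), a rule that registers a single
-file-writing action:
-
-```python
-# Sketch of a Starlark rule: declares one output file and registers the
-# action that produces it during the analysis phase.
-def _hello_impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name + ".txt")
-    ctx.actions.write(output = out, content = "hello\n")
-    # Providers pass information to downstream rule targets.
-    return [DefaultInfo(files = depset([out]))]
-
-hello = rule(implementation = _hello_impl)
-```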
-
-**See also:** [Rules documentation](/extending/rules)
-
-### Rule target
-
-A [target](#target) that is an instance of a rule. Contrasts with file targets
-and package groups. Not to be confused with [rule](#rule).
-
-### Runfiles
-
-The runtime dependencies of an executable [target](#target). Most commonly, the
-executable is the executable output of a test rule, and the runfiles are runtime
-data dependencies of the test. Before the invocation of the executable (during
-`bazel test`), Bazel prepares the tree of runfiles alongside the test executable
-according to their source directory structure.
-
-**See also:** [Runfiles documentation](/extending/rules#runfiles)
-
-### Sandboxing
-
-A technique to isolate a running [action](#action) inside a restricted and
-temporary [execution root](#execution-root), helping to ensure that it doesn’t
-read undeclared inputs or write undeclared outputs. Sandboxing greatly improves
-[hermeticity](#hermeticity), but usually has a performance cost, and requires
-support from the operating system. The performance cost depends on the platform.
-On Linux, it's not significant, but on macOS it can make sandboxing unusable.
-
-### Skyframe
-
-[Skyframe](/reference/skyframe) is the core parallel, functional, and incremental evaluation framework of Bazel.
-
-// TODO: ### Spawn strategy
-
-### Stamping
-
-A feature to embed additional information into Bazel-built
-[artifacts](#artifact). For example, this can be used for source control, build
-time and other workspace or environment-related information for release builds.
-Enabled through the `--workspace_status_command` flag and [rules](/extending/rules) that
-support the stamp attribute.
-
-### Starlark
-
-The extension language for writing [rules](/extending/rules) and [macros](#macro). A
-restricted subset of Python (syntactically and grammatically) aimed at
-configuration and better performance. Uses the [`.bzl`
-file](#bzl-file) extension. [`BUILD` files](#build-file) use an even more
-restricted version of Starlark (such as no `def` function definitions). Starlark
-was formerly known as Skylark.
-
-**See also:** [Starlark language documentation](/rules/language)
-
-// TODO: ### Starlark rules
-
-// TODO: ### Starlark rule sandwich
-
-### Startup flags
-
-The set of flags specified between `bazel` and the [command](#command),
-for example, `bazel --host_jvm_debug build`. These flags modify the
-[configuration](#configuration) of the Bazel server, so any modification to
-startup flags causes a server restart. Startup flags are not specific to any
-command.
-
-### Symbolic macro
-
-A flavor of [macro](#macro) which is declared with a [rule](#rule)-like
-[attribute](#attribute) schema, allows hiding internal declared
-[targets](#target) from their own package, and enforces a predictable naming
-pattern on the targets that the macro declares. Designed to avoid some of the
-problems seen in large [legacy macro](#legacy-macro) codebases.
-
-**See also:** [Symbolic macro documentation](/extending/macros)
-
-### Target
-
-An object that is defined in a [`BUILD` file](#build-file) and identified by a
-[label](#label). Targets represent the buildable units of a workspace from
-the perspective of the end user.
-
-A target that is declared by instantiating a [rule](#rule) is called a [rule
-target](#rule-target). Depending on the rule, these may be runnable (like
-`cc_binary`) or testable (like `cc_test`). Rule targets typically depend on
-other targets via their [attributes](#attribute) (such as `deps`); these
-dependencies form the basis of the [target graph](#target-graph).
-
-Aside from rule targets, there are also file targets and [package group](#package-group)
-targets. File targets correspond to [artifacts](#artifact) that are referenced
-within a `BUILD` file. As a special case, the `BUILD` file of any package is
-always considered a source file target in that package.
-
-Targets are discovered during the [loading phase](#loading-phase). During the
-[analysis phase](#analysis-phase), targets are associated with [build
-configurations](#configuration) to form [configured
-targets](#configured-target).
-
-### Target graph
-
-An in-memory graph of [targets](#target) and their dependencies. Produced during
-the [loading phase](#loading-phase) and used as an input to the [analysis
-phase](#analysis-phase).
-
-### Target pattern
-
-A way to specify a group of [targets](#target) on the command line. Commonly
-used patterns are `:all` (all rule targets), `:*` (all rule + file targets),
-`...` (current [package](#package) and all subpackages recursively). Can be used
-in combination, for example, `//...:*` means all rule and file targets in all
-packages recursively from the root of the [workspace](#workspace).
-
-### Tests
-
-Rule [targets](#target) instantiated from test rules, and therefore containing a
-test executable. A return code of zero from the completion of the executable
-indicates test success. The exact contract between Bazel and tests (such as test
-environment variables, test result collection methods) is specified in the [Test
-Encyclopedia](/reference/test-encyclopedia).
-
-### Toolchain
-
-A set of tools to build outputs for a language. Typically, a toolchain includes
-compilers, linkers, interpreters, and/or linters. A toolchain can also vary by
-platform, that is, a Unix compiler toolchain's components may differ for the
-Windows variant, even though the toolchain is for the same language. Selecting
-the right toolchain for the platform is known as toolchain resolution.
-
-### Top-level target
-
-A build [target](#target) is top-level if it’s requested on the Bazel command
-line. For example, if `//:foo` depends on `//:bar`, and `bazel build //:foo` is
-called, then for this build, `//:foo` is top-level, and `//:bar` isn’t
-top-level, although both targets will need to be built. An important difference
-between top-level and non-top-level targets is that [command
-flags](#command-flags) set on the Bazel command line (or via
-[.bazelrc](#bazelrc)) will set the [configuration](#configuration) for top-level
-targets, but might be modified by a [transition](#transition) for non-top-level
-targets.
-
-### Transition
-
-A mapping of [configuration](#configuration) state from one value to another.
-Enables [targets](#target) in the [build graph](#build-graph) to have different
-configurations, even if they were instantiated from the same [rule](#rule). A
-common usage of transitions is with *split* transitions, where certain parts of
-the [target graph](#target-graph) are forked with distinct configurations for
-each fork. For example, one can build an Android APK with native binaries
-compiled for ARM and x86 using split transitions in a single build.
-
-**See also:** [User-defined transitions](/extending/config#user-defined-transitions)
-
-### Tree artifact
-
-An [artifact](#artifact) that represents a collection of files. Since these
-files are not themselves artifacts, an [action](#action) operating on them must
-instead register the tree artifact as its input or output.
-
-### Visibility
-
-One of two mechanisms for preventing unwanted dependencies in the build system:
-*target visibility* for controlling whether a [target](#target) can be depended
-upon by other targets; and *load visibility* for controlling whether a `BUILD`
-or `.bzl` file may load a given `.bzl` file. Without context, usually
-"visibility" refers to target visibility.
-
-**See also:** [Visibility documentation](/concepts/visibility)
-
-### Workspace
-
-The environment shared by all Bazel commands run from the same [main
-repository](#repository).
-
-Note that historically the concepts of "repository" and "workspace" have been
-conflated; the term "workspace" has often been used to refer to the main
-repository, and sometimes even used as a synonym of "repository". Such usage
-should be avoided for clarity.
diff --git a/8.2.1/reference/skyframe.mdx b/8.2.1/reference/skyframe.mdx
deleted file mode 100644
index ba9149f..0000000
--- a/8.2.1/reference/skyframe.mdx
+++ /dev/null
@@ -1,198 +0,0 @@
----
-title: 'Skyframe'
----
-
-
-The parallel evaluation and incrementality model of Bazel.
-
-## Data model
-
-The data model consists of the following items:
-
-* `SkyValue`. Also called nodes. `SkyValues` are immutable objects that
-  contain all the data built over the course of the build and the inputs of
-  the build. Examples are: input files, output files, targets and configured
-  targets.
-* `SkyKey`. A short immutable name to reference a `SkyValue`, for example,
-  `FILECONTENTS:/tmp/foo` or `PACKAGE://foo`.
-* `SkyFunction`. Builds nodes based on their keys and dependent nodes.
-* Node graph. A data structure containing the dependency relationship between
-  nodes.
-* `Skyframe`. Code name for the incremental evaluation framework Bazel is
-  based on.
-
-## Evaluation
-
-A build is achieved by evaluating the node that represents the build request.
-
-First, Bazel finds the `SkyFunction` corresponding to the key of the top-level
-`SkyKey`. The function then requests the evaluation of the nodes it needs to
-evaluate the top-level node, which in turn results in other `SkyFunction` calls,
-until the leaf nodes are reached. Leaf nodes are usually ones that represent
-input files in the file system. Finally, Bazel ends up with the value of the
-top-level `SkyValue`, some side effects (such as output files in the file
-system) and a directed acyclic graph of the dependencies between the nodes
-involved in the build.
-
-A `SkyFunction` can request `SkyKeys` in multiple passes if it cannot tell in
-advance all of the nodes it needs to do its job. A simple example is evaluating
-an input file node that turns out to be a symlink: the function tries to read
-the file, realizes that it is a symlink, and thus fetches the file system node
-representing the target of the symlink. But that itself can be a symlink, in
-which case the original function will need to fetch its target, too.
-
-The functions are represented in the code by the interface `SkyFunction` and the
-services provided to it by an interface called `SkyFunction.Environment`. These
-are the things functions can do:
-
-* Request the evaluation of another node by way of calling `env.getValue`. If
-  the node is available, its value is returned; otherwise, `null` is returned
-  and the function itself is expected to return `null`. In the latter case,
-  the dependent node is evaluated, and then the original node builder is
-  invoked again, but this time the same `env.getValue` call will return a
-  non-`null` value.
-* Request the evaluation of multiple other nodes by calling `env.getValues()`.
-  This does essentially the same, except that the dependent nodes are
-  evaluated in parallel.
-* Do computation during their invocation.
-* Have side effects, for example, writing files to the file system. Care needs
-  to be taken that two different functions avoid stepping on each other's
-  toes. In general, write side effects (where data flows outwards from Bazel)
-  are okay; read side effects (where data flows inwards into Bazel without a
-  registered dependency) are not, because they are an unregistered dependency
-  and as such, can cause incorrect incremental builds.
-
-Well-behaved `SkyFunction` implementations avoid accessing data in any other way
-than requesting dependencies (such as by directly reading the file system),
-because that results in Bazel not registering the data dependency on the file
-that was read, thus resulting in incorrect incremental builds.
-
-Once a function has enough data to do its job, it should return a non-`null`
-value indicating completion.
-
-This evaluation strategy has a number of benefits:
-
-* Hermeticity. If functions only request input data by way of depending on
-  other nodes, Bazel can guarantee that if the input state is the same, the
-  same data is returned. If all sky functions are deterministic, this means
-  that the whole build will also be deterministic.
-* Correct and perfect incrementality. If all the input data of all functions
-  is recorded, Bazel can invalidate only the exact set of nodes that need to
-  be invalidated when the input data changes.
-* Parallelism. Since functions can only interact with each other by way of
-  requesting dependencies, functions that don't depend on each other can be
-  run in parallel and Bazel can guarantee that the result is the same as if
-  they were run sequentially.
-
-## Incrementality
-
-Since functions can only access input data by depending on other nodes, Bazel
-can build up a complete data flow graph from the input files to the output
-files, and use this information to only rebuild those nodes that actually need
-to be rebuilt: the reverse transitive closure of the set of changed input files.
-
-In particular, two possible incrementality strategies exist: the bottom-up one
-and the top-down one. Which one is optimal depends on what the dependency graph
-looks like.
-
-* During bottom-up invalidation, after a graph is built and the set of changed
-  inputs is known, all nodes that transitively depend on changed files are
-  invalidated. This is optimal if the same top-level node will be built
-  again. Note that bottom-up invalidation requires running `stat()` on all
-  input files of the previous build to determine if they were changed. This
-  can be improved by using `inotify` or a similar mechanism to learn about
-  changed files.
-
-* During top-down invalidation, the transitive closure of the top-level node
-  is checked and only those nodes are kept whose transitive closure is clean.
-  This is better if the node graph is large, but the next build only needs a
-  small subset of it: bottom-up invalidation would invalidate the larger graph
-  of the first build, unlike top-down invalidation, which just walks the small
-  graph of the second build.
-
-Bazel only does bottom-up invalidation.
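-
-As an illustrative sketch (not Bazel's actual implementation), bottom-up
-invalidation amounts to dirtying the reverse transitive closure of the changed
-input nodes:
-
-```python
-# Sketch: dirty every node that transitively depends on a changed input.
-def invalidate(changed_inputs, reverse_deps):
-    dirty = set()
-    stack = list(changed_inputs)
-    while stack:
-        node = stack.pop()
-        if node in dirty:
-            continue
-        dirty.add(node)
-        stack.extend(reverse_deps.get(node, ()))
-    return dirty
-```
-
-Change pruning, described next, can "resurrect" nodes dirtied this way when
-re-evaluation produces a value identical to the old one.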
-
-To get further incrementality, Bazel uses _change pruning_: if a node is
-invalidated, but upon rebuild, it is discovered that its new value is the same
-as its old value, the nodes that were invalidated due to a change in this node
-are "resurrected".
-
-This is useful, for example, if one changes a comment in a C++ file: then the
-`.o` file generated from it will be the same, thus, it is unnecessary to call
-the linker again.
-
-## Incremental Linking / Compilation
-
-The main limitation of this model is that the invalidation of a node is an
-all-or-nothing affair: when a dependency changes, the dependent node is always
-rebuilt from scratch, even if a better algorithm exists that would mutate
-the old value of the node based on the changes. A few examples where this would
-be useful:
-
-* Incremental linking
-* When a single class file changes in a JAR file, it is possible to
-  modify the JAR file in place instead of building it from scratch again.
-
-The reason why Bazel does not support these things in a principled way
-is twofold:
-
-* There were limited performance gains.
-* It is difficult to validate that the result of the mutation is the same as
-  that of a clean rebuild, and Google values builds that are bit-for-bit
-  repeatable.
-
-Until now, it was possible to achieve good enough performance by decomposing an
-expensive build step and achieving partial re-evaluation that way. For example,
-in an Android app, you can split all the classes into multiple groups and dex
-them separately. This way, if classes in a group are unchanged, the dexing does
-not have to be redone.
-
-## Mapping to Bazel concepts
-
-This is a high-level summary of the key `SkyFunction` and `SkyValue`
-implementations Bazel uses to perform a build:
-
-* **FileStateValue**. The result of an `lstat()`. For existent files, the
-  function also computes additional information in order to detect changes to
-  the file. This is the lowest level node in the Skyframe graph and has no
-  dependencies.
-* **FileValue**. Used by anything that cares about the actual contents or
-  resolved path of a file. Depends on the corresponding `FileStateValue` and
-  any symlinks that need to be resolved (for example, the `FileValue` for `a/b`
-  needs the resolved path of `a` and the resolved path of `a/b`). The
-  distinction between `FileValue` and `FileStateValue` is important because
-  the latter can be used in cases where the contents of the file are not
-  actually needed. For example, the file contents are irrelevant when
-  evaluating file system globs (such as `srcs=glob(["*/*.java"])`).
-* **DirectoryListingStateValue**. The result of `readdir()`. Like
-  `FileStateValue`, this is the lowest level node and has no dependencies.
-* **DirectoryListingValue**. Used by anything that cares about the entries of
-  a directory. Depends on the corresponding `DirectoryListingStateValue`, as
-  well as the associated `FileValue` of the directory.
-* **PackageValue**. Represents the parsed version of a BUILD file. Depends on
-  the `FileValue` of the associated `BUILD` file, and also transitively on any
-  `DirectoryListingValue` that is used to resolve the globs in the package
-  (the data structure representing the contents of a `BUILD` file internally).
-* **ConfiguredTargetValue**. Represents a configured target, which is a tuple
-  of the set of actions generated during the analysis of a target and
-  information provided to dependent configured targets. Depends on the
-  `PackageValue` the corresponding target is in, the `ConfiguredTargetValues`
-  of direct dependencies, and a special node representing the build
-  configuration.
-* **ArtifactValue**. Represents a file in the build, be it a source or an
-  output artifact. Artifacts are almost equivalent to files, and are used to
-  refer to files during the actual execution of build steps. Source files
-  depend on the `FileValue` of the associated node, and output artifacts
-  depend on the `ActionExecutionValue` of whatever action generates the
-  artifact.
-* **ActionExecutionValue**. Represents the execution of an action. Depends on
-  the `ArtifactValues` of its input files. The action it executes is contained
-  within its SkyKey, which is contrary to the concept that SkyKeys should be
-  small. Note that `ActionExecutionValue` and `ArtifactValue` are unused if
-  the execution phase does not run.
-
-As a visual aid, this diagram shows the relationships between
-SkyFunction implementations after a build of Bazel itself:
-
-![A graph of SkyFunction implementation relationships](/reference/skyframe.png)
diff --git a/8.2.1/release/backward-compatibility.mdx b/8.2.1/release/backward-compatibility.mdx
deleted file mode 100644
index af653cc..0000000
--- a/8.2.1/release/backward-compatibility.mdx
+++ /dev/null
@@ -1,76 +0,0 @@
----
-title: 'Backward Compatibility'
----
-
-
-This page provides information about how to handle backward compatibility,
-including migrating from one release to another and how to communicate
-incompatible changes.
-
-Bazel is evolving. Minor versions released as part of an [LTS major
-version](/release#bazel-versioning) are fully backward-compatible. New major LTS
-releases may contain incompatible changes that require some migration effort.
-For more information about Bazel's release model, please check out the [Release
-Model](/release) page.
-
-## Summary
-
-1. It is recommended to use `--incompatible_*` flags for breaking changes.
-1. For every `--incompatible_*` flag, a GitHub issue explains the change in
-   behavior and aims to provide a migration recipe.
-1. Incompatible flags are recommended to be back-ported to the latest LTS
-   release without enabling the flag by default.
-1. APIs and behavior guarded by an `--experimental_*` flag can change at any
-   time.
-1. Never run production builds with `--experimental_*` or `--incompatible_*`
-   flags.
-
-## How to follow this policy
-
-* [For Bazel users - how to update Bazel](/install/bazelisk)
-* [For contributors - best practices for incompatible changes](/contribute/breaking-changes)
-* [For release managers - how to update issue labels and release](https://github.com/bazelbuild/continuous-integration/tree/master/docs/release-playbook.md)
-
-## What is stable functionality?
-
-In general, APIs or behaviors without `--experimental_...` flags are considered
-stable, supported features in Bazel.
-
-This includes:
-
-* Starlark language and APIs
-* Rules bundled with Bazel
-* Bazel APIs such as Remote Execution APIs or Build Event Protocol
-* Flags and their semantics
-
-## Incompatible changes and migration recipes
-
-For every incompatible change in a new release, the Bazel team aims to provide a
-_migration recipe_ that helps you update your code (`BUILD` and `.bzl` files, as
-well as any Bazel usage in scripts, usage of Bazel API, and so on).
-
-Incompatible changes should have an associated `--incompatible_*` flag and a
-corresponding GitHub issue.
-
-The incompatible flag and relevant changes are recommended to be back-ported to
-the latest LTS release without enabling the flag by default. This allows users
-to migrate for the incompatible changes before the next LTS release is
-available.
-
-## Communicating incompatible changes
-
-The primary source of information about incompatible changes are GitHub issues
-marked with an ["incompatible-change"
-label](https://github.com/bazelbuild/bazel/issues?q=label%3Aincompatible-change).
-
-For every incompatible change, the issue specifies the following:
-
-* Name of the flag controlling the incompatible change
-* Description of the changed functionality
-* Migration recipe
-
-When an incompatible change is ready for migration with Bazel at HEAD
-(therefore, also with the next Bazel rolling release), it should be marked with
-the `migration-ready` label. The incompatible change issue is closed when the
-incompatible flag is flipped at HEAD.
diff --git a/8.2.1/release/index.mdx b/8.2.1/release/index.mdx
deleted file mode 100644
index a3cc526..0000000
--- a/8.2.1/release/index.mdx
+++ /dev/null
@@ -1,216 +0,0 @@
----
-title: 'Release Model'
----
-
-
-As announced in [the original blog
-post](https://blog.bazel.build/2020/11/10/long-term-support-release.html), Bazel
-4.0 and higher versions provide support for two release tracks: rolling
-releases and long term support (LTS) releases. This page covers the latest
-information about Bazel's release model.
-
-## Support matrix
-
-| LTS release | Support stage | Latest version | End of support |
-| ----------- | ------------- | -------------- | -------------- |
-| Bazel 9 | Rolling | [Check rolling release page](/release/rolling) | N/A |
-| Bazel 8 | Active | [8.0.0](https://github.com/bazelbuild/bazel/releases/tag/8.0.0) | December 2027 |
-| Bazel 7 | Maintenance | [7.4.1](https://github.com/bazelbuild/bazel/releases/tag/7.4.1) | December 2026 |
-| Bazel 6 | Maintenance | [6.5.0](https://github.com/bazelbuild/bazel/releases/tag/6.5.0) | December 2025 |
-| Bazel 5 | Maintenance | [5.4.1](https://github.com/bazelbuild/bazel/releases/tag/5.4.1) | January 2025 |
-| Bazel 4 | Deprecated | [4.2.4](https://github.com/bazelbuild/bazel/releases/tag/4.2.4) | January 2024 |
-
-All Bazel LTS releases can be found on the [release
-page](https://github.com/bazelbuild/bazel/releases) on GitHub.
-
-Note: Bazel versions older than Bazel 5 are no longer supported. Bazel users are
-recommended to upgrade to the latest LTS release, or to use rolling releases to
-keep up with the latest changes at HEAD.
-
-## Release versioning
-
-Bazel uses a _major.minor.patch_ [Semantic
-Versioning](https://semver.org/) scheme.
-
-* A _major release_ contains features that are not backward compatible with
-  the previous release. Each major Bazel version is an LTS release.
-* A _minor release_ contains backward-compatible bug fixes and features
-  back-ported from the main branch.
-* A _patch release_ contains critical bug fixes.
-
-Additionally, pre-release versions are indicated by appending a hyphen and a
-date suffix to the next major version number.
-
-For example, a new release of each type would result in these version numbers:
-
-* Major: 6.0.0
-* Minor: 6.1.0
-* Patch: 6.1.2
-* Pre-release: 7.0.0-pre.20230502.1
-
-## Support stages
-
-For each major Bazel version, there are four support stages:
-
-* **Rolling**: This major version is still in pre-release; the Bazel team
-  publishes rolling releases from HEAD.
-* **Active**: This major version is the current active LTS release. The Bazel
-  team backports important features and bug fixes into its minor releases.
-* **Maintenance**: This major version is an old LTS release in maintenance
-  mode. The Bazel team only promises to backport critical bug fixes for
-  security issues and OS-compatibility issues into this LTS release.
-* **Deprecated**: The Bazel team no longer provides support for this major
-  version; all users should migrate to newer Bazel LTS releases.
-
-## Release cadence
-
-Bazel regularly publishes releases for two release tracks.
-
-### Rolling releases
-
-* Rolling releases are coordinated with the Google-internal Blaze release and
-  are released from HEAD around every two weeks. Each rolling release is a
-  preview of the next Bazel LTS release.
-* Rolling releases can ship incompatible changes. Incompatible flags are
-  recommended for major breaking changes, and rolling out incompatible changes
-  should follow our [backward compatibility
-  policy](/release/backward-compatibility).
-
-### LTS releases
-
-* _Major release_: A new LTS release is expected to be cut from HEAD roughly
-  every 12 months. Once a new LTS release is out, it immediately enters the
-  Active stage, and the previous LTS release enters the Maintenance stage.
-* _Minor release_: New minor versions on the Active LTS track are expected to
-  be released once every 2 months.
-* _Patch release_: New patch versions for LTS releases in Active and
-  Maintenance stages are expected to be released on demand for critical bug
-  fixes.
-* A Bazel LTS release enters the Deprecated stage after being in the
-  Maintenance stage for 2 years.
-
-For planned releases, please check our [release
-issues](https://github.com/bazelbuild/bazel/issues?q=is%3Aopen+is%3Aissue+label%3Arelease)
-on GitHub.
-
-## Release procedure & policies
-
-For rolling releases, the process is straightforward: about every two weeks, a
-new release is created, aligning with the same baseline as the Google internal
-Blaze release. Due to the rapid release schedule, we don't backport any changes
-to rolling releases.
-
-For LTS releases, the procedure and policies below are followed:
-
-1. Determine a baseline commit for the release.
-    * For a new major LTS release, the baseline commit is the HEAD of the main
-      branch.
-    * For a minor or patch release, the baseline commit is the HEAD of the
-      current latest version of the same LTS release.
-1. Create a release branch in the name of `release-<version>` from the baseline
-   commit.
-1. Backport changes via PRs to the release branch.
-    * The community can suggest certain commits to be back-ported by replying
-      "`@bazel-io flag`" on relevant GitHub issues or PRs to mark them as potential
-      release blockers; the Bazel team triages them and decides whether to
-      back-port the commits.
-    * Only backward-compatible commits on the main branch can be back-ported;
-      additional minor changes to resolve merge conflicts are acceptable.
-1. Backport changes using Cherry-Pick Request Issue for Bazel maintainers.
-    * Bazel maintainers can request to cherry-pick specific commit(s)
-      to a release branch. This process is initiated by creating a
-      cherry-pick request on GitHub. Here's how to do it.
-      1. Open the [cherry-pick request](https://github.com/bazelbuild/bazel/issues/new?assignees=&labels=&projects=&template=cherry_pick_request.yml)
-      2. Fill in the request details
-          * Title: Provide a concise and descriptive title for the request.
-          * Commit ID(s): Enter the ID(s) of the commit(s) you want to
-            cherry-pick. If there are multiple commits, then separate
-            them with commas.
-          * Category: Specify the category of the request.
-          * Reviewer(s): For multiple reviewers, separate their GitHub
-            IDs with commas.
-      3. Set the milestone
-          * Find the "Milestone" section and click the setting.
-          * Select the appropriate X.Y.Z release blockers. This action
-            triggers the cherry-pick bot to process your request
-            for the "release-X.Y.Z" branch.
-      4. Submit the Issue
-          * Once all details are filled in and the milestone is set,
-            submit the issue.
-
-    * The cherry-pick bot will process the request and notify you
-      whether the commit(s) are eligible for cherry-picking. If
-      the commits are cherry-pickable, which means there's no
-      merge conflict while cherry-picking the commit, then
-      the bot will create a new pull request. When the pull
-      request is approved by a member of the Bazel team, the
-      commits are cherry-picked and merged to the release branch.
-      For a visual example of a completed cherry-pick request,
-      refer to this
-      [example](https://github.com/bazelbuild/bazel/issues/20230).
-
-1. Identify release blockers and fix issues found on the release branch.
-    * The release branch is tested with the same test suite in
-      [postsubmit](https://buildkite.com/bazel/bazel-bazel) and the
-      [downstream test pipeline](https://buildkite.com/bazel/bazel-at-head-plus-downstream)
-      on Bazel CI. The Bazel team monitors testing results of the release
-      branch and fixes any regressions found.
-1. Create a new release candidate from the release branch when all known
-   release blockers are resolved.
-    * The release candidate is announced on
-      [bazel-discuss](https://groups.google.com/g/bazel-discuss);
-      the Bazel team monitors community bug reports for the candidate.
-    * If new release blockers are identified, go back to the last step and
-      create a new release candidate after resolving all the issues.
-    * New features are not allowed to be added to the release branch after the
-      first release candidate is created; cherry-picks are limited to critical
-      fixes only. If a cherry-pick is needed, the requester must answer the
-      following questions: Why is this change critical, and what benefits does
-      it provide? What is the likelihood of this change introducing a
-      regression?
-1. Push the release candidate as the official release if no further release
-   blockers are found.
-    * For patch releases, push the release at least two business days after
-      the last release candidate is out.
-    * For major and minor releases, push the release two business days after
-      the last release candidate is out, but not earlier than one week after
-      the first release candidate is out.
-    * The release is only pushed on a day when the next day is a business
-      day.
-    * The release is announced on
-      [bazel-discuss](https://groups.google.com/g/bazel-discuss);
-      the Bazel team monitors and addresses community bug reports for the new
-      release.
-
-## Report regressions
-
-If a user finds a regression in a new Bazel release, release candidate, or even
-Bazel at HEAD, please file a bug on
-[GitHub](https://github.com/bazelbuild/bazel/issues). You can use
-Bazelisk to bisect the culprit commit and include this information in the bug
-report.
-
-For example, if your build succeeds with Bazel 6.1.0 but fails with the second
-release candidate of 6.2.0, you can bisect via
-
-```bash
-bazelisk --bisect=6.1.0..release-6.2.0rc2 build //foo:bar
-```
-
-You can set the `BAZELISK_SHUTDOWN` or `BAZELISK_CLEAN` environment variable to
-run the corresponding bazel commands to reset the build state if that's needed
-to reproduce the issue.
-For more details, check out the documentation about the Bazelisk
-[bisect feature](https://github.com/bazelbuild/bazelisk#--bisect).
-
-Remember to upgrade Bazelisk to the latest version to use the bisect
-feature.
-
-## Rule compatibility
-
-If you are a rule author and want to maintain compatibility with different
-Bazel versions, please check out the [Rule
-Compatibility](/release/rule-compatibility) page.
diff --git a/8.2.1/release/rule-compatibility.mdx b/8.2.1/release/rule-compatibility.mdx
deleted file mode 100644
index 05a8a95..0000000
--- a/8.2.1/release/rule-compatibility.mdx
+++ /dev/null
@@ -1,90 +0,0 @@
----
-title: 'Rule Compatibility'
----
-
-
-Bazel Starlark rules can break compatibility with Bazel LTS releases in the
-following two scenarios:
-
-1. The rule breaks compatibility with future LTS releases because a feature it
-   depends on is removed from Bazel at HEAD.
-1. The rule breaks compatibility with the current or older LTS releases because
-   a feature it depends on is only available in newer Bazel LTS releases.
-
-Meanwhile, the rule itself can ship incompatible changes for its users as
-well. When combined with breaking changes in Bazel, upgrading the rule version
-and Bazel version can often be a source of frustration for Bazel users. This
-page covers how rule authors should maintain rule compatibility with Bazel to
-make it easier for users to upgrade Bazel and rules.
-
-## Manageable migration process
-
-While it's obviously not feasible to guarantee compatibility between every
-version of Bazel and every version of the rule, our aim is to ensure that the
-migration process remains manageable for Bazel users. A manageable migration
-process is defined as a process where **users are not forced to upgrade the
-rule's major version and Bazel's major version simultaneously**, thereby
-allowing users to handle incompatible changes from one source at a time.
-
-For example, with the following compatibility matrix:
-
-* Migrating from rules_foo 1.x + Bazel 4.x to rules_foo 2.x + Bazel 5.x is not
-  considered manageable, as the users need to upgrade the major version of
-  rules_foo and Bazel at the same time.
-* Migrating from rules_foo 2.x + Bazel 5.x to rules_foo 3.x + Bazel 6.x is
-  considered manageable, as the users can first upgrade rules_foo from 2.x to
-  3.x without changing the major Bazel version, then upgrade Bazel from 5.x to
-  6.x.
-
-| | rules_foo 1.x | rules_foo 2.x | rules_foo 3.x | HEAD |
-| --- | --- | --- | --- | --- |
-| Bazel 4.x | ✅ | ❌ | ❌ | ❌ |
-| Bazel 5.x | ❌ | ✅ | ✅ | ❌ |
-| Bazel 6.x | ❌ | ❌ | ✅ | ✅ |
-| HEAD | ❌ | ❌ | ❌ | ✅ |
-
-❌: No release of that major rule version is compatible with the Bazel LTS
-release.
-
-✅: At least one version of the rule is compatible with the latest version of the
-Bazel LTS release.
-
-## Best practices
-
-As Bazel rules authors, you can ensure a manageable migration process for users
-by following these best practices:
-
-1. The rule should follow [Semantic
-   Versioning](https://semver.org/): minor versions of the same
-   major version are backward compatible.
-1. The rule at HEAD should be compatible with the latest Bazel LTS release.
-1. The rule at HEAD should be compatible with Bazel at HEAD. To achieve this,
-   you can
-   * Set up your own CI testing with Bazel at HEAD
-   * Add your project to [Bazel downstream
-     testing](https://github.com/bazelbuild/continuous-integration/blob/master/docs/downstream-testing.md);
-     the Bazel team files issues against your project if breaking changes in
-     Bazel affect your project, and you must follow our [downstream project
-     policies](https://github.com/bazelbuild/continuous-integration/blob/master/docs/downstream-testing.md#downstream-project-policies)
-     to address issues in a timely manner.
-1. The latest major version of the rule must be compatible with the latest
-   Bazel LTS release.
-1. A new major version of the rule should be compatible with the last Bazel LTS
-   release supported by the previous major version of the rule.
-
-Achieving 2 and 3 is the most important task, since it allows achieving 4 and
-5 naturally.
-
-To make it easier to keep compatibility with both Bazel at HEAD and the latest
-Bazel LTS release, rules authors can:
-
-* Request backward-compatible features to be back-ported to the latest LTS
-  release; check out the [release process](/release#release-procedure-policies)
-  for more details.
-* Use [bazel_features](https://github.com/bazel-contrib/bazel_features)
-  to do Bazel feature detection.
-
-In general, with the recommended approaches, rules should be able to migrate for
-incompatible Bazel changes and make use of new Bazel features at HEAD without
-dropping compatibility with the latest Bazel LTS release.
diff --git a/8.2.1/remote/bep-examples.mdx b/8.2.1/remote/bep-examples.mdx
deleted file mode 100644
index faf11bf..0000000
--- a/8.2.1/remote/bep-examples.mdx
+++ /dev/null
@@ -1,200 +0,0 @@
----
-title: 'Build Event Protocol Examples'
----
-
-
-The full specification of the Build Event Protocol can be found in its protocol
-buffer definition. However, it might be helpful to build up some intuition
-before looking at the specification.
-
-Consider a simple Bazel workspace that consists of two empty shell scripts
-`foo.sh` and `foo_test.sh` and the following `BUILD` file:
-
-```python
-sh_library(
-    name = "foo_lib",
-    srcs = ["foo.sh"],
-)
-
-sh_test(
-    name = "foo_test",
-    srcs = ["foo_test.sh"],
-    deps = [":foo_lib"],
-)
-```
-
-When running `bazel test ...` on this project, the graph of generated
-build events will resemble the graph below. The arrows indicate the
-aforementioned parent and child relationship. Note that some build events and
-most fields have been omitted for brevity.
-
-![bep-graph](/docs/images/bep-graph.png "BEP graph")
-
-**Figure 1.** BEP graph.
-
-Initially, a `BuildStarted` event is published. The event informs us that the
-build was invoked through the `bazel test` command and announces child events:
-
-* `OptionsParsed`
-* `WorkspaceStatus`
-* `CommandLine`
-* `UnstructuredCommandLine`
-* `BuildMetadata`
-* `BuildFinished`
-* `PatternExpanded`
-* `Progress`
-
-The first three events provide information about how Bazel was invoked.
-
-The `PatternExpanded` build event provides insight
-into which specific targets the `...` pattern expanded to:
-`//foo:foo_lib` and `//foo:foo_test`. It does so by declaring two
-`TargetConfigured` events as children. Note that the `TargetConfigured` event
-declares the `Configuration` event as a child event, even though `Configuration`
-has been posted before the `TargetConfigured` event.
-
-Besides the parent and child relationship, events may also refer to each other
-using their build event identifiers. For example, in the above graph the
-`TargetComplete` event refers to the `NamedSetOfFiles` event in its `fileSets`
-field.
-
-Build events that refer to files don’t usually embed the file
-names and paths in the event. Instead, they contain the build event identifier
-of a `NamedSetOfFiles` event, which will then contain the actual file names and
-paths. The `NamedSetOfFiles` event allows a set of files to be reported once and
-referred to by many targets. This structure is necessary because otherwise in
-some cases the Build Event Protocol output size would grow quadratically with
-the number of files. A `NamedSetOfFiles` event may also not have all its files
-embedded, but instead refer to other `NamedSetOfFiles` events through their
-build event identifiers.
-
-Below is an instance of the `TargetComplete` event for the `//foo:foo_lib`
-target from the above graph, printed in the protocol buffer’s JSON representation.
-The build event identifier contains the target as an opaque string and refers to
-the `Configuration` event using its build event identifier. The event does not
-announce any child events. The payload contains information about whether the
-target was built successfully, the set of output files, and the kind of target
-built.
-
-```json
-{
-  "id": {
-    "targetCompleted": {
-      "label": "//foo:foo_lib",
-      "configuration": {
-        "id": "544e39a7f0abdb3efdd29d675a48bc6a"
-      }
-    }
-  },
-  "completed": {
-    "success": true,
-    "outputGroup": [{
-      "name": "default",
-      "fileSets": [{
-        "id": "0"
-      }]
-    }],
-    "targetKind": "sh_library rule"
-  }
-}
-```
-
-## Aspect Results in BEP
-
-Ordinary builds evaluate actions associated with `(target, configuration)`
-pairs. When building with [aspects](/extending/aspects) enabled, Bazel
-additionally evaluates targets associated with `(target, configuration,
-aspect)` triples, for each target affected by a given enabled aspect.
-
-Evaluation results for aspects are available in BEP despite the absence of
-aspect-specific event types. For each `(target, configuration)` pair with an
-applicable aspect, Bazel publishes an additional `TargetConfigured` and
-`TargetComplete` event bearing the result from applying the aspect to the
-target. For example, if `//foo:foo_lib` is built with
-`--aspects=aspects/myaspect.bzl%custom_aspect`, this event would also appear in
-the BEP:
-
-```json
-{
-  "id": {
-    "targetCompleted": {
-      "label": "//foo:foo_lib",
-      "configuration": {
-        "id": "544e39a7f0abdb3efdd29d675a48bc6a"
-      },
-      "aspect": "aspects/myaspect.bzl%custom_aspect"
-    }
-  },
-  "completed": {
-    "success": true,
-    "outputGroup": [{
-      "name": "default",
-      "fileSets": [{
-        "id": "1"
-      }]
-    }]
-  }
-}
-```
-
-Note: The only difference between the IDs is the presence of the `aspect`
-field. A tool that does not check the `aspect` ID field and accumulates output
-files by target may conflate target outputs with aspect outputs.
-
-## Consuming `NamedSetOfFiles`
-
-Determining the artifacts produced by a given target (or aspect) is a common
-BEP use-case that can be done efficiently with some preparation. This section
-discusses the recursive, shared structure offered by the `NamedSetOfFiles`
-event, which matches the structure of a Starlark [Depset](/extending/depsets).
-
-Consumers must take care to avoid quadratic algorithms when processing
-`NamedSetOfFiles` events because large builds can contain tens of thousands of
-such events, requiring hundreds of millions of operations in a traversal with
-quadratic complexity.
-
-![namedsetoffiles-bep-graph](/docs/images/namedsetoffiles-bep-graph.png "NamedSetOfFiles BEP graph")
-
-**Figure 2.** `NamedSetOfFiles` BEP graph.
-
-A `NamedSetOfFiles` event always appears in the BEP stream *before* a
-`TargetComplete` or `NamedSetOfFiles` event that references it. This is the
-inverse of the "parent-child" event relationship, where all but the first event
-appear after at least one event announcing them. A `NamedSetOfFiles` event is
-announced by a `Progress` event with no semantics.
-
-Given these ordering and sharing constraints, a typical consumer must buffer all
-`NamedSetOfFiles` events until the BEP stream is exhausted. The following JSON
-event stream and Python code demonstrate how to populate a map from
-target/aspect to built artifacts in the "default" output group, and how to
-process the outputs for a subset of built targets/aspects:
-
-```python
-named_sets = {}  # type: dict[str, NamedSetOfFiles]
-outputs = {}     # type: dict[str, dict[str, set[str]]]
-
-for event in stream:
-    kind = event.id.WhichOneof("id")
-    if kind == "named_set":
-        named_sets[event.id.named_set.id] = event.named_set_of_files
-    elif kind == "target_completed":
-        tc = event.id.target_completed
-        target_id = (tc.label, tc.configuration.id, tc.aspect)
-        outputs[target_id] = {}
-        for group in event.completed.output_group:
-            outputs[target_id][group.name] = {fs.id for fs in group.file_sets}
-
-for result_id in relevant_subset(outputs.keys()):
-    # Copy the set so the traversal doesn't mutate the entries in `outputs`.
-    visit = set(outputs[result_id].get("default", set()))
-    seen_sets = set(visit)
-    while visit:
-        set_name = visit.pop()
-        s = named_sets[set_name]
-        for f in s.files:
-            process_file(result_id, f)
-        for fs in s.file_sets:
-            if fs.id not in seen_sets:
-                visit.add(fs.id)
-                seen_sets.add(fs.id)
-```
diff --git a/8.2.1/remote/bep-glossary.mdx b/8.2.1/remote/bep-glossary.mdx
deleted file mode 100644
index 3bd11ee..0000000
--- a/8.2.1/remote/bep-glossary.mdx
+++ /dev/null
@@ -1,416 +0,0 @@
----
-title: 'Build Event Protocol Glossary'
----
-
-
-Each BEP event type has its own semantics, minimally documented in
-[build\_event\_stream.proto](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto).
-The following glossary describes each event type.
-
-## Aborted
-
-Unlike other events, `Aborted` does not have a corresponding ID type, because
-the `Aborted` event *replaces* events of other types. This event indicates that
-the build terminated early and the event ID it appears under was not produced
-normally. `Aborted` contains an enum and a human-friendly description to explain
-why the build did not complete.
-
-For example, if a build is evaluating a target when the user interrupts Bazel,
-BEP contains an event like the following:
-
-```json
-{
-  "id": {
-    "targetCompleted": {
-      "label": "//:foo",
-      "configuration": {
-        "id": "544e39a7f0abdb3efdd29d675a48bc6a"
-      }
-    }
-  },
-  "aborted": {
-    "reason": "USER_INTERRUPTED"
-  }
-}
-```
-
-## ActionExecuted
-
-Provides details about the execution of a specific
-[Action](/rules/lib/actions) in a build. By default, this event is
-included in the BEP only for failed actions, to support identifying the root cause
-of build failures. Users may set the `--build_event_publish_all_actions` flag
-to include all `ActionExecuted` events.
-
-## BuildFinished
-
-A single `BuildFinished` event is sent after the command is complete and
-includes the exit code for the command. This event provides authoritative
-success/failure information.
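-
-As an illustration, a `BuildFinished` event for a successful command might look
-like the following in the JSON representation (the field values shown here are
-illustrative):
-
-```json
-{
-  "id": {
-    "buildFinished": {}
-  },
-  "finished": {
-    "overallSuccess": true,
-    "exitCode": {
-      "name": "SUCCESS"
-    },
-    "finishTimeMillis": "1687458114000"
-  }
-}
-```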
-
-## BuildMetadata
-
-Contains the parsed contents of the `--build_metadata` flag. This event exists
-to support Bazel integration with other tooling by plumbing external data (such as
-identifiers).
-
-## BuildMetrics
-
-A single `BuildMetrics` event is sent at the end of every command and includes
-counters/gauges useful for quantifying the build tool's behavior during the
-command. These metrics indicate work actually done and do not count cached
-work that is reused.
-
-Note that `memory_metrics` may not be populated if there was no Java garbage
-collection during the command's execution. Users may set the
-`--memory_profile=/dev/null` option, which forces the garbage
-collector to run at the end of the command to populate `memory_metrics`.
-
-```json
-{
-  "id": {
-    "buildMetrics": {}
-  },
-  "buildMetrics": {
-    "actionSummary": {
-      "actionsExecuted": "1"
-    },
-    "memoryMetrics": {},
-    "targetMetrics": {
-      "targetsLoaded": "9",
-      "targetsConfigured": "19"
-    },
-    "packageMetrics": {
-      "packagesLoaded": "5"
-    },
-    "timingMetrics": {
-      "cpuTimeInMs": "1590",
-      "wallTimeInMs": "359"
-    }
-  }
-}
-```
-
-## BuildStarted
-
-The first event in a BEP stream, `BuildStarted` includes metadata describing the
-command before any meaningful work begins.
-
-## BuildToolLogs
-
-A single `BuildToolLogs` event is sent at the end of a command, including URIs
-of files generated by the build tool that may aid in understanding or debugging
-build tool behavior. Some information may be included inline.
-
-```json
-{
-  "id": {
-    "buildToolLogs": {}
-  },
-  "lastMessage": true,
-  "buildToolLogs": {
-    "log": [
-      {
-        "name": "elapsed time",
-        "contents": "MC4xMjEwMDA="
-      },
-      {
-        "name": "process stats",
-        "contents": "MSBwcm9jZXNzOiAxIGludGVybmFsLg=="
-      },
-      {
-        "name": "command.profile.gz",
-        "uri": "file:///tmp/.cache/bazel/_bazel_foo/cde87985ad0bfef34eacae575224b8d1/command.profile.gz"
-      }
-    ]
-  }
-}
-```
-
-## CommandLine
-
-The BEP contains multiple `CommandLine` events containing representations of all
-command-line arguments (including options and uninterpreted arguments).
-Each `CommandLine` event has a label in its `StructuredCommandLineId` that
-indicates which representation it conveys; three such events appear in the BEP:
-
-* `"original"`: Reconstructed commandline as Bazel received it from the Bazel
-  client, without startup options sourced from .rc files.
-* `"canonical"`: The effective commandline with .rc files expanded and
-  invocation policy applied.
-* `"tool"`: Populated from the `--experimental_tool_command_line` option. This
-  is useful to convey the command-line of a tool wrapping Bazel through the BEP.
-  This could be a base64-encoded `CommandLine` binary protocol buffer message
-  which is used directly, or a string which is parsed but not interpreted (as
-  the tool's options may differ from Bazel's).
-
-## Configuration
-
-A `Configuration` event is sent for every [`configuration`](/extending/config)
-used in the top-level targets in a build. At least one configuration event is
-always present. The `id` is reused by the `TargetConfigured` and
-`TargetComplete` event IDs and is necessary to disambiguate those events in
-multi-configuration builds.
-
-```json
-{
-  "id": {
-    "configuration": {
-      "id": "a5d130b0966b4a9ca2d32725aa5baf40e215bcfc4d5cdcdc60f5cc5b4918903b"
-    }
-  },
-  "configuration": {
-    "mnemonic": "k8-fastbuild",
-    "platformName": "k8",
-    "cpu": "k8",
-    "makeVariable": {
-      "COMPILATION_MODE": "fastbuild",
-      "TARGET_CPU": "k8",
-      "GENDIR": "bazel-out/k8-fastbuild/bin",
-      "BINDIR": "bazel-out/k8-fastbuild/bin"
-    }
-  }
-}
-```
-
-## ConvenienceSymlinksIdentified
-
-**Experimental.** If the `--experimental_convenience_symlinks_bep_event`
-option is set, a single `ConvenienceSymlinksIdentified` event is produced by
-`build` commands to indicate how symlinks in the workspace should be managed.
-This enables building tools that invoke Bazel remotely and then arrange the local
-workspace as if Bazel had been run locally.
-
-```json
-{
-  "id": {
-    "convenienceSymlinksIdentified":{}
-  },
-  "convenienceSymlinksIdentified": {
-    "convenienceSymlinks": [
-      {
-        "path": "bazel-bin",
-        "action": "CREATE",
-        "target": "execroot/google3/bazel-out/k8-fastbuild/bin"
-      },
-      {
-        "path": "bazel-genfiles",
-        "action": "CREATE",
-        "target": "execroot/google3/bazel-out/k8-fastbuild/genfiles"
-      },
-      {
-        "path": "bazel-out",
-        "action": "CREATE",
-        "target": "execroot/google3/bazel-out"
-      }
-    ]
-  }
-}
-```
-
-## Fetch
-
-Indicates that a Fetch operation occurred as a part of the command execution.
-Unlike other events, this event does not appear in the BEP stream if a cached
-fetch result is re-used.
-
-## NamedSetOfFiles
-
-`NamedSetOfFiles` events report a structure matching a
-[`depset`](/extending/depsets) of files produced during command evaluation.
-Transitively included depsets are identified by `NamedSetOfFilesId`.
-
-For more information on interpreting a stream's `NamedSetOfFiles` events, see the
-[BEP examples page](/remote/bep-examples#consuming-namedsetoffiles).
-
-## OptionsParsed
-
-A single `OptionsParsed` event lists all options applied to the command,
-separating startup options from command options. It also includes the
-[InvocationPolicy](/reference/command-line-reference#flag--invocation_policy), if any.
-
-```json
-{
-  "id": {
-    "optionsParsed": {}
-  },
-  "optionsParsed": {
-    "startupOptions": [
-      "--max_idle_secs=10800",
-      "--noshutdown_on_low_sys_mem",
-      "--connect_timeout_secs=30",
-      "--output_user_root=/tmp/.cache/bazel/_bazel_foo",
-      "--output_base=/tmp/.cache/bazel/_bazel_foo/a61fd0fbee3f9d6c1e30d54b68655d35",
-      "--deep_execroot",
-      "--idle_server_tasks",
-      "--write_command_log",
-      "--nowatchfs",
-      "--nofatal_event_bus_exceptions",
-      "--nowindows_enable_symlinks",
-      "--noclient_debug"
-    ],
-    "cmdLine": [
-      "--enable_platform_specific_config",
-      "--build_event_json_file=/tmp/bep.json"
-    ],
-    "explicitCmdLine": [
-      "--build_event_json_file=/tmp/bep.json"
-    ],
-    "invocationPolicy": {}
-  }
-}
-```
-
-## PatternExpanded
-
-`PatternExpanded` events indicate the set of all targets that match the patterns
-supplied on the commandline. For successful commands, a single event is present
-with all patterns in the `PatternExpandedId` and all targets in the
-`PatternExpanded` event's *children*. If a pattern expands to any
-`test_suite`s, the children also include the test targets contained in the
-`test_suite`. For each pattern that fails to resolve, BEP contains an additional
-[`Aborted`](#aborted) event with a `PatternExpandedId` identifying the pattern.
- -```json -{ - "id": { - "pattern": { - "pattern":["//base:all"] - } - }, - "children": [ - {"targetConfigured":{"label":"//base:foo"}}, - {"targetConfigured":{"label":"//base:foobar"}} - ], - "expanded": { - "testSuiteExpansions": { - "suiteLabel": "//base:suite", - "testLabels": "//base:foo_test" - } - } -} -``` - -## Progress - -Progress events contain the standard output and standard error produced by Bazel -during command execution. These events are also auto-generated as needed to -announce events that have not been announced by a logical "parent" event (in -particular, [NamedSetOfFiles](#namedsetoffiles).) - -## TargetComplete - -For each `(target, configuration, aspect)` combination that completes the -execution phase, a `TargetComplete` event is included in BEP. The event contains -the target's success/failure and the target's requested output groups. - -```json -{ - "id": { - "targetCompleted": { - "label": "//examples/py:bep", - "configuration": { - "id": "a5d130b0966b4a9ca2d32725aa5baf40e215bcfc4d5cdcdc60f5cc5b4918903b" - } - } - }, - "completed": { - "success": true, - "outputGroup": [ - { - "name": "default", - "fileSets": [ - { - "id": "0" - } - ] - } - ] - } -} -``` - -## TargetConfigured - -For each Target that completes the analysis phase, a `TargetConfigured` event is -included in BEP. This is the authoritative source for a target's "rule kind" -attribute. The configuration(s) applied to the target appear in the announced -*children* of the event. - -For example, building with the `--experimental_multi_cpu` options may produce -the following `TargetConfigured` event for a single target with two -configurations: - -```json -{ - "id": { - "targetConfigured": { - "label": "//starlark_configurations/multi_arch_binary:foo" - } - }, - "children": [ - { - "targetCompleted": { - "label": "//starlark_configurations/multi_arch_binary:foo", - "configuration": { - "id": "c62b30c8ab7b9fc51a05848af9276529842a11a7655c71327ade26d7c894c818" - } - } - }, - { - "targetCompleted": { - "label": "//starlark_configurations/multi_arch_binary:foo", - "configuration": { - "id": "eae0379b65abce68d54e0924c0ebcbf3d3df26c6e84ef7b2be51e8dc5b513c99" - } - } - } - ], - "configured": { - "targetKind": "foo_binary rule" - } -} -``` - -## TargetSummary - -For each `(target, configuration)` pair that is executed, a `TargetSummary` -event is included with an aggregate success result encompassing the configured -target's execution and all aspects applied to that configured target. - -## TestResult - -If testing is requested, a `TestResult` event is sent for each test attempt, -shard, and run per test. This allows BEP consumers to identify precisely which -test actions failed their tests and identify the test outputs (such as logs, -test.xml files) for each test action. - -## TestSummary - -If testing is requested, a `TestSummary` event is sent for each test `(target, -configuration)`, containing information necessary to interpret the test's -results. The number of attempts, shards and runs per test are included to enable -BEP consumers to differentiate artifacts across these dimensions. The attempts -and runs per test are considered while producing the aggregate `TestStatus` to -differentiate `FLAKY` tests from `FAILED` tests. - -## UnstructuredCommandLine - -Unlike [CommandLine](#commandline), this event carries the unparsed commandline -flags in string form as encountered by the build tool after expanding all -[`.bazelrc`](/run/bazelrc) files and -considering the `--config` flag. 
- -The `UnstructuredCommandLine` event may be relied upon to precisely reproduce a -given command execution. - -## WorkspaceConfig - -A single `WorkspaceConfig` event contains configuration information regarding the -workspace, such as the execution root. - -## WorkspaceStatus - -A single `WorkspaceStatus` event contains the result of the [workspace status -command](/docs/user-manual#workspace-status). diff --git a/8.2.1/remote/bep.mdx b/8.2.1/remote/bep.mdx deleted file mode 100644 index bafdaa9..0000000 --- a/8.2.1/remote/bep.mdx +++ /dev/null @@ -1,148 +0,0 @@ ---- -title: 'Build Event Protocol' ---- - - - -The [Build Event -Protocol](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto) -(BEP) allows third-party programs to gain insight into a Bazel invocation. For -example, you could use the BEP to gather information for an IDE -plugin or a dashboard that displays build results. - -The protocol is a set of [protocol -buffer](https://developers.google.com/protocol-buffers/) messages with some -semantics defined on top of it. It includes information about build and test -results, build progress, the build configuration and much more. The BEP is -intended to be consumed programmatically and makes parsing Bazel’s -command line output a thing of the past. - -The Build Event Protocol represents information about a build as events. A -build event is a protocol buffer message consisting of a build event identifier, -a set of child event identifiers, and a payload. - -* __Build Event Identifier:__ Depending on the kind of build event, it might be -an [opaque -string](https://github.com/bazelbuild/bazel/blob/7.1.0/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto#L131-L140) -or [structured -information](https://github.com/bazelbuild/bazel/blob/7.1.0/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto#L194-L205) -revealing more about the build event. A build event identifier is unique within -a build. - -* __Children:__ A build event may announce other build events, by including -their build event identifiers in its [children -field](https://github.com/bazelbuild/bazel/blob/7.1.0/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto#L1276). -For example, the `PatternExpanded` build event announces the targets it expands -to as children. The protocol guarantees that all events, except for the first -event, are announced by a previous event. - -* __Payload:__ The payload contains structured information about a build event, -encoded as a protocol buffer message specific to that event. Note that the -payload might not be the expected type, but could be an `Aborted` message -if the build aborted prematurely. - -### Build event graph - -All build events form a directed acyclic graph through their parent and child -relationship. Every build event except for the initial build event has one or -more parent events. Please note that not all parent events of a child event must -necessarily be posted before it. When a build is complete (succeeded or failed) -all announced events will have been posted. In case of a Bazel crash or a failed -network transport, some announced build events may never be posted. - -The event graph's structure reflects the lifecycle of a command. Every BEP -graph has the following characteristic shape: - -1. 
The root event is always a [`BuildStarted`](/remote/bep-glossary#buildstarted) - event. All other events are its descendants. -1. Immediate children of the BuildStarted event contain metadata about the - command. -1. Events containing data produced by the command, such as files built and test - results, appear before the [`BuildFinished`](/remote/bep-glossary#buildfinished) - event. -1. The [`BuildFinished`](/remote/bep-glossary#buildfinished) event *may* be followed - by events containing summary information about the build (for example, metric - or profiling data). - -## Consuming Build Event Protocol - -### Consume in binary format - -To consume the BEP in a binary format: - -1. Have Bazel serialize the protocol buffer messages to a file by specifying the - option `--build_event_binary_file=/path/to/file`. The file will contain - serialized protocol buffer messages with each message being length delimited. - Each message is prefixed with its length encoded as a variable length integer. - This format can be read using the protocol buffer library’s - [`parseDelimitedFrom(InputStream)`](https://developers.google.com/protocol-buffers/docs/reference/java/com/google/protobuf/AbstractParser#parseDelimitedFrom-java.io.InputStream-) - method. - -2. Then, write a program that extracts the relevant information from the - serialized protocol buffer message. - -### Consume in text or JSON formats - -The following Bazel command line flags will output the BEP in -human-readable formats, such as text and JSON: - -``` ---build_event_text_file ---build_event_json_file -``` - -## Build Event Service - -The [Build Event -Service](https://github.com/googleapis/googleapis/blob/master/google/devtools/build/v1/publish_build_event.proto) -Protocol is a generic [gRPC](https://www.grpc.io) service for publishing build events. The Build Event -Service protocol is independent of the BEP and treats BEP events as opaque bytes. -Bazel ships with a gRPC client implementation of the Build Event Service protocol that -publishes Build Event Protocol events. One can specify the endpoint to send the -events to using the `--bes_backend=HOST:PORT` flag. If your backend uses gRPC, -you must prefix the address with the appropriate scheme: `grpc://` for plaintext -gRPC and `grpcs://` for gRPC with TLS enabled. - -### Build Event Service flags - -Bazel has several flags related to the Build Event Service protocol, including: - -* `--bes_backend` -* `--[no]bes_lifecycle_events` -* `--bes_results_url` -* `--bes_timeout` -* `--bes_instance_name` - -For a description of each of these flags, see the -[Command-Line Reference](/reference/command-line-reference). - -### Authentication and security - -Bazel’s Build Event Service implementation also supports authentication and TLS. -These settings can be controlled using the below flags. Please note that these -flags are also used for Bazel’s Remote Execution. This implies that the Build -Event Service and Remote Execution Endpoints need to share the same -authentication and TLS infrastructure. - -* `--[no]google_default_credentials` -* `--google_credentials` -* `--google_auth_scopes` -* `--tls_certificate` -* `--[no]tls_enabled` - -For a description of each of these flags, see the -[Command-Line Reference](/reference/command-line-reference). - -### Build Event Service and remote caching - -The BEP typically contains many references to log files (test.log, test.xml, -etc. ) stored on the machine where Bazel is running. 
A remote BES server -typically can't access these files as they are on different machines. A way to -work around this issue is to use Bazel with [remote -caching](/remote/caching). -Bazel will upload all output files to the remote cache (including files -referenced in the BEP) and the BES server can then fetch the referenced files -from the cache. - -See [GitHub issue 3689](https://github.com/bazelbuild/bazel/issues/3689) for -more details. diff --git a/8.2.1/remote/cache-local.mdx b/8.2.1/remote/cache-local.mdx deleted file mode 100644 index e6dc0c0..0000000 --- a/8.2.1/remote/cache-local.mdx +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: 'Debugging Remote Cache Hits for Local Execution' ---- - - - -This page describes how to investigate cache misses in the context of local -execution. - -This page assumes that you have a build and/or test that successfully builds -locally and is set up to utilize remote caching, and that you want to ensure -that the remote cache is being effectively utilized. - -For tips on how to check your cache hit rate and how to compare the execution -logs between two Bazel invocations, see -[Debugging Remote Cache Hits for Remote Execution](/remote/cache-remote). -Everything presented in that guide also applies to remote caching with local -execution. However, local execution presents some additional challenges. - -## Checking your cache hit rate - -Successful remote cache hits will show up in the status line, similar to -[Cache Hits rate with Remote -Execution](/remote/cache-remote#check-cache-hits). - -In the standard output of your Bazel run, you will see something like the -following: - -```none {:.devsite-disable-click-to-copy} - INFO: 7 processes: 3 remote cache hit, 4 linux-sandbox. -``` - -This means that out of 7 attempted actions, 3 got a remote cache hit and 4 -actions did not have cache hits and were executed locally using `linux-sandbox` -strategy. Local cache hits are not included in this summary. If you are getting -0 processes (or a number lower than expected), run `bazel clean` followed by -your build/test command. - -## Troubleshooting cache hits - -If you are not getting the cache hit rate you are expecting, do the following: - -### Ensure successful communication with the remote endpoint - -To ensure your build is successfully communicating with the remote cache, follow -the steps in this section. - -1. Check your output for warnings - - With remote execution, a failure to talk to the remote endpoint would fail - your build. On the other hand, a cacheable local build would not fail if it - cannot cache. Check the output of your Bazel invocation for warnings, such - as: - - ```none {:.devsite-disable-click-to-copy} - WARNING: Error reading from the remote cache: - ``` - - - or - - ```none {:.devsite-disable-click-to-copy} - WARNING: Error writing to the remote cache: - ``` - - - Such warnings will be followed by the error message detailing the connection - problem that should help you debug: for example, mistyped endpoint name or - incorrectly set credentials. Find and address any such errors. If the error - message you see does not give you enough information, try adding - `--verbose_failures`. - -2. Follow the steps from [Troubleshooting cache hits for remote - execution](/remote/cache-remote#troubleshooting_cache_hits) to - ensure that your cache-writing Bazel invocations are able to get cache hits - on the same machine and across machines. - -3. Ensure your cache-reading Bazel invocations can get cache hits. - - a. 
Since cache-reading Bazel invocations will have a different command-line setup,
-      take additional care to ensure that they are properly set up to
-      communicate with the remote cache. Ensure the `--remote_cache` flag is set
-      and there are no warnings in the output.
-
-   b. Ensure your cache-reading Bazel invocations build the same targets as the
-      cache-writing Bazel invocations.
-
-   c. Follow the same steps as in [Ensure caching across
-      machines](/remote/cache-remote#caching-across-machines)
-      to ensure caching from your cache-writing Bazel invocation to your
-      cache-reading Bazel invocation.
diff --git a/8.2.1/remote/cache-remote.mdx b/8.2.1/remote/cache-remote.mdx
deleted file mode 100644
index a614f4f..0000000
--- a/8.2.1/remote/cache-remote.mdx
+++ /dev/null
@@ -1,179 +0,0 @@
----
-title: 'Debugging Remote Cache Hits for Remote Execution'
----
-
-
-This page describes how to check your cache hit rate and how to investigate
-cache misses in the context of remote execution.
-
-This page assumes that you have a build and/or test that successfully
-utilizes remote execution, and you want to ensure that you are effectively
-utilizing the remote cache.
-
-## Checking your cache hit rate
-
-In the standard output of your Bazel run, look at the `INFO` line that lists
-processes, which roughly correspond to Bazel actions. That line details
-where the action was run. Look for the `remote` label, which indicates an action
-executed remotely, `linux-sandbox` for actions executed in a local sandbox,
-and other values for other execution strategies. An action whose result came
-from a remote cache is displayed as `remote cache hit`.
-
-For example:
-
-```none {:.devsite-disable-click-to-copy}
-INFO: 11 processes: 6 remote cache hit, 3 internal, 2 remote.
-```
-
-In this example, there were 6 remote cache hits, and 2 actions did not have
-cache hits and were executed remotely. The 3 `internal` processes can be
-ignored; these are typically tiny internal actions, such as creating symbolic
-links. Local cache hits are not included in this summary. If you are getting
-0 processes (or a number lower than expected), run `bazel clean` followed by
-your build/test command.
-
-## Troubleshooting cache hits
-
-If you are not getting the cache hit rate you are expecting, do the following:
-
-### Ensure re-running the same build/test command produces cache hits
-
-1. Run the build(s) and/or test(s) that you expect to populate the cache. The
-   first time a new build is run on a particular stack, you can expect no remote
-   cache hits. As part of remote execution, action results are stored in the
-   cache and a subsequent run should pick them up.
-
-2. Run `bazel clean`. This command cleans your local cache, which allows
-   you to investigate remote cache hits without the results being masked by
-   local cache hits.
-
-3. Run the build(s) and test(s) that you are investigating again (on the same
-   machine).
-
-4. Check the `INFO` line for the cache hit rate. If you see no processes except
-   `remote cache hit` and `internal`, then your cache is being correctly populated
-   and accessed. In that case, skip to the next section.
-
-5. A likely source of discrepancy is something non-hermetic in the build causing
-   the actions to receive different action keys across the two runs. To find
-   those actions, do the following:
-   a. Re-run the build(s) or test(s) in question to obtain execution logs:
-
-      ```posix-terminal
-      bazel clean
-
-      bazel --optional-flags build //your:target --execution_log_compact_file=/tmp/exec1.log
-      ```
-
-   b. [Compare the execution logs](#comparing-the-execution-logs) between the
-      two runs. Ensure that the actions are identical across the two log files.
-      Discrepancies provide a clue about the changes that occurred between the
-      runs. Update your build to eliminate those discrepancies.
-
-   If you are able to resolve the caching problems and now the repeated run
-   produces all cache hits, skip to the next section.
-
-   If your action IDs are identical but there are no cache hits, then something
-   in your configuration is preventing caching. Continue with this section to
-   check for common problems.
-
-6. Check that all actions in the execution log have `cacheable` set to true. If
-   `cacheable` does not appear in the execution log for a given action, that
-   means that the corresponding rule may have a `no-cache` tag in its
-   definition in the `BUILD` file. Look at the `mnemonic` and `target_label`
-   fields in the execution log to help determine where the action is coming
-   from.
-
-7. If the actions are identical and `cacheable` but there are no cache hits, it
-   is possible that your command line includes `--noremote_accept_cached`, which
-   would disable cache lookups for a build.
-
-   If figuring out the actual command line is difficult, use the canonical
-   command line from the
-   [Build Event Protocol](/remote/bep)
-   as follows:
-
-   a. Add `--build_event_text_file=/tmp/bep.txt` to your Bazel command to get
-      the text version of the log.
-
-   b. Open the text version of the log and search for the
-      `structured_command_line` message with `command_line_label: "canonical"`.
-      It will list all the options after expansion.
-
-   c. Search for `remote_accept_cached` and check whether it's set to `false`.
-
-   d. If `remote_accept_cached` is `false`, determine where it is being
-      set to `false`: either at the command line or in a
-      [bazelrc](/run/bazelrc#bazelrc-file-locations) file.
-
-### Ensure caching across machines
-
-After cache hits are happening as expected on the same machine, run the
-same build(s)/test(s) on a different machine. If you suspect that caching is
-not happening across machines, do the following:
-
-1. Make a small modification to your build to avoid hitting existing caches.
-
-2. Run the build on the first machine:
-
-   ```posix-terminal
-   bazel clean
-
-   bazel ... build ... --execution_log_compact_file=/tmp/exec1.log
-   ```
-
-3. Run the build on the second machine, ensuring the modification from step 1
-   is included:
-
-   ```posix-terminal
-   bazel clean
-
-   bazel ... build ... --execution_log_compact_file=/tmp/exec2.log
-   ```
-
-4. [Compare the execution logs](#comparing-the-execution-logs) for the two
-   runs. If the logs are not identical, investigate your build configurations
-   for discrepancies as well as properties from the host environment leaking
-   into either of the builds.
-
-## Comparing the execution logs
-
-The execution log contains records of actions executed during the build.
-Each record describes both the inputs (not only files, but also command line
-arguments, environment variables, etc.) and the outputs of the action. Thus,
-examination of the log can reveal why an action was reexecuted.
- -The execution log can be produced in one of three formats: -compact (`--execution_log_compact_file`), -binary (`--execution_log_binary_file`) or JSON (`--execution_log_json_file`). -The compact format is recommended, as it produces much smaller files with very -little runtime overhead. The following instructions work for any format. You -can also convert between them using the `//src/tools/execlog:converter` tool. - -To compare logs for two builds that are not sharing cache hits as expected, -do the following: - -1. Get the execution logs from each build and store them as `/tmp/exec1.log` and - `/tmp/exec2.log`. - -2. Download the Bazel source code and build the `//src/tools/execlog:parser` - tool: - - git clone https://github.com/bazelbuild/bazel.git - cd bazel - bazel build //src/tools/execlog:parser - -3. Use the `//src/tools/execlog:parser` tool to convert the logs into a - human-readable text format. In this format, the actions in the second log are - sorted to match the order in the first log, making a comparison easier. - - bazel-bin/src/tools/execlog/parser \ - --log_path=/tmp/exec1.log \ - --log_path=/tmp/exec2.log \ - --output_path=/tmp/exec1.log.txt \ - --output_path=/tmp/exec2.log.txt - -4. Use your favourite text differ to diff `/tmp/exec1.log.txt` and - `/tmp/exec2.log.txt`. diff --git a/8.2.1/remote/caching.mdx b/8.2.1/remote/caching.mdx deleted file mode 100644 index 8fd6adc..0000000 --- a/8.2.1/remote/caching.mdx +++ /dev/null @@ -1,380 +0,0 @@ ---- -title: 'Remote Caching' ---- - - - -This page covers remote caching, setting up a server to host the cache, and -running builds using the remote cache. - -A remote cache is used by a team of developers and/or a continuous integration -(CI) system to share build outputs. If your build is reproducible, the -outputs from one machine can be safely reused on another machine, which can -make builds significantly faster. - -## Overview - -Bazel breaks a build into discrete steps, which are called actions. Each action -has inputs, output names, a command line, and environment variables. Required -inputs and expected outputs are declared explicitly for each action. - -You can set up a server to be a remote cache for build outputs, which are these -action outputs. These outputs consist of a list of output file names and the -hashes of their contents. With a remote cache, you can reuse build outputs -from another user's build rather than building each new output locally. - -To use remote caching: - -* Set up a server as the cache's backend -* Configure the Bazel build to use the remote cache -* Use Bazel version 0.10.0 or later - -The remote cache stores two types of data: - -* The action cache, which is a map of action hashes to action result metadata. -* A content-addressable store (CAS) of output files. - -Note that the remote cache additionally stores the stdout and stderr for every -action. Inspecting the stdout/stderr of Bazel thus is not a good signal for -[estimating cache hits](/remote/cache-local). - -### How a build uses remote caching - -Once a server is set up as the remote cache, you use the cache in multiple -ways: - -* Read and write to the remote cache -* Read and/or write to the remote cache except for specific targets -* Only read from the remote cache -* Not use the remote cache at all - -When you run a Bazel build that can read and write to the remote cache, -the build follows these steps: - -1. Bazel creates the graph of targets that need to be built, and then creates -a list of required actions. 
Each of these actions has declared inputs
-and output filenames.
-2. Bazel checks your local machine for existing build outputs and reuses any
-that it finds.
-3. Bazel checks the cache for existing build outputs. If the output is found,
-Bazel retrieves the output. This is a cache hit.
-4. For required actions where the outputs were not found, Bazel executes the
-actions locally and creates the required build outputs.
-5. New build outputs are uploaded to the remote cache.
-
-## Setting up a server as the cache's backend
-
-You need to set up a server to act as the cache's backend. An HTTP/1.1
-server can treat Bazel's data as opaque bytes, so many existing servers
-can be used as a remote caching backend. Bazel's
-[HTTP caching protocol](#http-caching-protocol) is what supports remote
-caching.
-
-You are responsible for choosing, setting up, and maintaining the backend
-server that will store the cached outputs. When choosing a server, consider:
-
-* Networking speed. For example, if your team is in the same office, you may
-want to run your own local server.
-* Security. The remote cache will have your binaries and so needs to be secure.
-* Ease of management. For example, Google Cloud Storage is a fully managed service.
-
-There are many backends that can be used for a remote cache. Some options
-include:
-
-* [nginx](#nginx)
-* [bazel-remote](#bazel-remote)
-* [Google Cloud Storage](#cloud-storage)
-
-### nginx
-
-nginx is an open source web server. With its
-[WebDAV module](https://nginx.org/en/docs/http/ngx_http_dav_module.html), it can
-be used as a remote cache for Bazel. On Debian and Ubuntu you can install the
-`nginx-extras` package. On macOS nginx is available via Homebrew:
-
-```posix-terminal
-brew tap denji/nginx
-
-brew install nginx-full --with-webdav
-```
-
-Below is an example configuration for nginx. Note that you will need to
-change `/path/to/cache/dir` to a valid directory where nginx has permission
-to write and read. You may need to change the `client_max_body_size` option to a
-larger value if you have larger output files. The server will require other
-configuration such as authentication.
-
-
-Example configuration for `server` section in `nginx.conf`:
-
-```nginx
-location /cache/ {
-  # The path to the directory where nginx should store the cache contents.
-  root /path/to/cache/dir;
-  # Allow PUT
-  dav_methods PUT;
-  # Allow nginx to create the /ac and /cas subdirectories.
-  create_full_put_path on;
-  # The maximum size of a single file.
-  client_max_body_size 1G;
-  allow all;
-}
-```
-
-### bazel-remote
-
-bazel-remote is an open source remote build cache that you can use on
-your infrastructure. It has been successfully used in production at
-several companies since early 2018. Note that the Bazel project does
-not provide technical support for bazel-remote.
-
-This cache stores contents on disk and also provides garbage collection
-to enforce an upper storage limit and clean unused artifacts. The cache is
-available as a
-[Docker image](https://hub.docker.com/r/buchgr/bazel-remote-cache/) and its code
-is available on [GitHub](https://github.com/buchgr/bazel-remote/).
-Both the REST and gRPC remote cache APIs are supported.
-
-Refer to the [GitHub](https://github.com/buchgr/bazel-remote/)
-page for instructions on how to use it.
-
-### Google Cloud Storage
-
-[Google Cloud Storage](https://cloud.google.com/storage) is a fully managed
-object store which provides an HTTP API that is compatible with Bazel's remote
-caching protocol. It requires that you have a Google Cloud account with billing
-enabled.
-
-To use Cloud Storage as the cache:
-
-1. [Create a storage bucket](https://cloud.google.com/storage/docs/creating-buckets).
-Ensure that you select a bucket location that's closest to you, as network bandwidth
-is important for the remote cache.
-
-2. Create a service account for Bazel to authenticate to Cloud Storage. See
-[Creating a service account](https://cloud.google.com/iam/docs/creating-managing-service-accounts#creating_a_service_account).
-
-3. Generate a secret JSON key and then pass it to Bazel for authentication. Store
-the key securely, as anyone with the key can read and write arbitrary data
-to/from your GCS bucket.
-
-4. Connect to Cloud Storage by adding the following flags to your Bazel command:
-   * Pass the following URL to Bazel by using the flag:
-     `--remote_cache=https://storage.googleapis.com/bucket-name` where
-     `bucket-name` is the name of your storage bucket.
-   * Pass the authentication key using the flag:
-     `--google_credentials=/path/to/your/secret-key.json`, or
-     `--google_default_credentials` to use
-     [Application Authentication](https://cloud.google.com/docs/authentication/production).
-
-5. You can configure Cloud Storage to automatically delete old files. To do so, see
-[Managing Object Lifecycles](https://cloud.google.com/storage/docs/managing-lifecycles).
-
-### Other servers
-
-You can set up any HTTP/1.1 server that supports PUT and GET as the cache's
-backend. Users have reported success with caching backends such as [Hazelcast](https://hazelcast.com),
-[Apache httpd](http://httpd.apache.org), and [AWS S3](https://aws.amazon.com/s3).
-
-## Authentication
-
-As of version 0.11.0, Bazel supports HTTP Basic Authentication.
-You can pass a username and password to Bazel via the remote cache URL. The
-syntax is `https://username:password@hostname.com:port/path`. Note that
-HTTP Basic Authentication transmits the username and password in plaintext over
-the network, so it's critical to always use it with HTTPS.
-
-## HTTP caching protocol
-
-Bazel supports remote caching via HTTP/1.1. The protocol is conceptually simple:
-binary data (BLOBs) is uploaded via PUT requests and downloaded via GET requests.
-Action result metadata is stored under the path `/ac/` and output files are stored
-under the path `/cas/`.
-
-For example, consider a remote cache running under `http://localhost:8080/cache`.
-A Bazel request to download action result metadata for an action with the SHA256
-hash `01ba4719...` will look as follows:
-
-```http
-GET /cache/ac/01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b HTTP/1.1
-Host: localhost:8080
-Accept: */*
-Connection: Keep-Alive
-```
-
-A Bazel request to upload an output file with the SHA256 hash `15e2b0d3...` to
-the CAS will look as follows:
-
-```http
-PUT /cache/cas/15e2b0d3c33891ebb0f1ef609ec419420c20e320ce94c65fbc8c3312448eb225 HTTP/1.1
-Host: localhost:8080
-Accept: */*
-Content-Length: 9
-Connection: Keep-Alive

-0x310x320x330x340x350x360x370x380x39
-```
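-
-Because the protocol is plain HTTP, you can sanity-check a cache endpoint
-outside of Bazel with a generic client such as `curl`. A sketch (the host,
-file, and hashes below are illustrative):
-
-```posix-terminal
-curl -X PUT --data-binary @foo.o http://localhost:8080/cache/cas/15e2b0d3c33891ebb0f1ef609ec419420c20e320ce94c65fbc8c3312448eb225
-
-curl http://localhost:8080/cache/ac/01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b
-```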
-## Run Bazel using the remote cache
-
-Once a server is set up as the remote cache, to use the remote cache you
-need to add flags to your Bazel command. See the list of configurations and
-their flags below.
-
-You may also need to configure authentication, which is specific to your
-chosen server.
-
-You may want to add these flags in a `.bazelrc` file so that you don't
-need to specify them every time you run Bazel. Depending on your project and
-team dynamics, you can add flags to a `.bazelrc` file that is:
-
-* On your local machine
-* In your project's workspace, shared with the team
-* On the CI system
-
-### Read from and write to the remote cache
-
-Take care in deciding who has the ability to write to the remote cache. You may
-want only your CI system to be able to write to the remote cache.
-
-Use the following flag to read from and write to the remote cache:
-
-```posix-terminal
-build --remote_cache=http://your.host:port
-```
-
-Besides `HTTP`, the following protocols are also supported: `HTTPS`, `grpc`, `grpcs`.
-
-Use the following flag in addition to the one above to only read from the
-remote cache:
-
-```posix-terminal
-build --remote_upload_local_results=false
-```
-
-### Exclude specific targets from using the remote cache
-
-To exclude specific targets from using the remote cache, tag the target with
-`no-remote-cache`. For example:
-
-```starlark
-java_library(
-    name = "target",
-    tags = ["no-remote-cache"],
-)
-```
-
-### Delete content from the remote cache
-
-Deleting content from the remote cache is part of managing your server.
-How you delete content from the remote cache depends on the server you have
-set up as the cache. When deleting outputs, either delete the entire cache,
-or delete old outputs.
-
-The cached outputs are stored as a set of names and hashes. When deleting
-content, there's no way to distinguish which output belongs to a specific
-build.
-
-You may want to delete content from the cache to:
-
-* Create a clean cache after a cache was poisoned
-* Reduce the amount of storage used by deleting old outputs
-
-### Unix sockets
-
-The remote HTTP cache supports connecting over unix domain sockets. The behavior
-is similar to curl's `--unix-socket` flag. Use the following to configure a unix
-domain socket:
-
-```posix-terminal
-   build --remote_cache=http://your.host:port
-   build --remote_proxy=unix:/path/to/socket
-```
-
-This feature is unsupported on Windows.
-
-## Disk cache
-
-Bazel can use a directory on the file system as a remote cache. This is
-useful for sharing build artifacts when switching branches and/or working
-on multiple workspaces of the same project, such as multiple checkouts.
-Enable the disk cache as follows:
-
-```posix-terminal
-build --disk_cache=path/to/build/cache
-```
-
-You can pass a user-specific path to the `--disk_cache` flag using the `~` alias
-(Bazel will substitute the current user's home directory). This comes in handy
-when enabling the disk cache for all developers of a project via the project's
-checked-in `.bazelrc` file.
-
-### Garbage collection
-
-Starting with Bazel 7.4, you can use `--experimental_disk_cache_gc_max_size` and
-`--experimental_disk_cache_gc_max_age` to set a maximum size for the disk cache
-or for the age of individual cache entries. Bazel will automatically garbage
-collect the disk cache while idling between builds; the idle timer can be set
-with `--experimental_disk_cache_gc_idle_delay` (defaulting to 5 minutes).
-
-As an alternative to automatic garbage collection, we also provide a [tool](
-https://github.com/bazelbuild/bazel/tree/master/src/tools/diskcache) to run a
-garbage collection on demand.
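-
-For example, a project's checked-in `.bazelrc` might combine the disk cache
-with garbage-collection limits like this (the path and limits are illustrative;
-check the command-line reference for the accepted units):
-
-```posix-terminal
-build --disk_cache=~/.cache/shared-bazel-disk-cache
-build --experimental_disk_cache_gc_max_size=50G
-build --experimental_disk_cache_gc_max_age=7d
-```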
-## Known issues
-
-**Input file modification during a build**
-
-When an input file is modified during a build, Bazel might upload invalid
-results to the remote cache. You can enable change detection with
-the `--experimental_guard_against_concurrent_changes` flag. There
-are no known issues and it will be enabled by default in a future release.
-See [issue #3360](https://github.com/bazelbuild/bazel/issues/3360) for updates.
-Generally, avoid modifying source files during a build.
-
-**Environment variables leaking into an action**
-
-An action definition contains environment variables. This can be a problem for
-sharing remote cache hits across machines. For example, environments with
-different `$PATH` variables won't share cache hits. Only environment variables
-explicitly whitelisted via `--action_env` are included in an action
-definition. Bazel's Debian/Ubuntu package used to install `/etc/bazel.bazelrc`
-with a whitelist of environment variables including `$PATH`. If you are getting
-fewer cache hits than expected, check that your environment doesn't have an old
-`/etc/bazel.bazelrc` file.
-
-**Bazel does not track tools outside a workspace**
-
-Bazel currently does not track tools outside a workspace. This can be a
-problem if, for example, an action uses a compiler from `/usr/bin/`. Then,
-two users with different compilers installed will wrongly share cache hits
-because the outputs are different but they have the same action hash. See
-[issue #4558](https://github.com/bazelbuild/bazel/issues/4558) for updates.
-
-**Incremental in-memory state is lost when running builds inside docker containers**
-
-Bazel uses a client/server architecture even when running in a single Docker
-container. On the server side, Bazel maintains an in-memory state which speeds
-up builds. When running builds inside Docker containers, such as in CI, the
-in-memory state is lost and Bazel must rebuild it before using the remote cache.
-
-## External links
-
-* **Your Build in a Datacenter:** The Bazel team gave a [talk](https://fosdem.org/2018/schedule/event/datacenter_build/) about remote caching and execution at FOSDEM 2018.
-
-* **Faster Bazel builds with remote caching: a benchmark:** Nicolò Valigi wrote a [blog post](https://nicolovaligi.com/faster-bazel-remote-caching-benchmark.html)
-in which he benchmarks remote caching in Bazel.
- -* [Adapting Rules for Remote Execution](/remote/rules) -* [Troubleshooting Remote Execution](/remote/sandbox) -* [WebDAV module](https://nginx.org/en/docs/http/ngx_http_dav_module.html) -* [Docker image](https://hub.docker.com/r/buchgr/bazel-remote-cache/) -* [bazel-remote](https://github.com/buchgr/bazel-remote/) -* [Google Cloud Storage](https://cloud.google.com/storage) -* [Google Cloud Console](https://cloud.google.com/console) -* [Bucket locations](https://cloud.google.com/storage/docs/bucket-locations) -* [Hazelcast](https://hazelcast.com) -* [Apache httpd](http://httpd.apache.org) -* [AWS S3](https://aws.amazon.com/s3) -* [issue #3360](https://github.com/bazelbuild/bazel/issues/3360) -* [gRPC](https://grpc.io/) -* [gRPC protocol](https://github.com/bazelbuild/remote-apis/blob/main/build/bazel/remote/execution/v2/remote_execution.proto) -* [Buildbarn](https://github.com/buildbarn) -* [Buildfarm](https://github.com/bazelbuild/bazel-buildfarm) -* [BuildGrid](https://gitlab.com/BuildGrid/buildgrid) -* [issue #4558](https://github.com/bazelbuild/bazel/issues/4558) -* [Application Authentication](https://cloud.google.com/docs/authentication/production) -* [NativeLink](https://github.com/TraceMachina/nativelink) diff --git a/8.2.1/remote/creating.mdx b/8.2.1/remote/creating.mdx deleted file mode 100644 index 0e46a07..0000000 --- a/8.2.1/remote/creating.mdx +++ /dev/null @@ -1,261 +0,0 @@ ---- -title: 'Creating Persistent Workers' ---- - - - -[Persistent workers](/remote/persistent) can make your build faster. If -you have repeated actions in your build that have a high startup cost or would -benefit from cross-action caching, you may want to implement your own persistent -worker to perform these actions. - -The Bazel server communicates with the worker using `stdin`/`stdout`. It -supports the use of protocol buffers or JSON strings. - -The worker implementation has two parts: - -* The [worker](#making-worker). -* The [rule that uses the worker](#rule-uses-worker). - -## Making the worker - -A persistent worker upholds a few requirements: - -* It reads - [WorkRequests](https://github.com/bazelbuild/bazel/blob/54a547f30fd582933889b961df1d6e37a3e33d85/src/main/protobuf/worker_protocol.proto#L36) - from its `stdin`. -* It writes - [WorkResponses](https://github.com/bazelbuild/bazel/blob/54a547f30fd582933889b961df1d6e37a3e33d85/src/main/protobuf/worker_protocol.proto#L77) - (and only `WorkResponse`s) to its `stdout`. -* It accepts the `--persistent_worker` flag. The wrapper must recognize the - `--persistent_worker` command-line flag and only make itself persistent if - that flag is passed, otherwise it must do a one-shot compilation and exit. - -If your program upholds these requirements, it can be used as a persistent -worker! - -### Work requests - -A `WorkRequest` contains a list of arguments to the worker, a list of -path-digest pairs representing the inputs the worker can access (this isn’t -enforced, but you can use this info for caching), and a request id, which is 0 -for singleplex workers. - -NOTE: While the protocol buffer specification uses "snake case" (`request_id`), -the JSON protocol uses "camel case" (`requestId`). This document uses camel case -in the JSON examples, but snake case when talking about the field regardless of -protocol. 
-
-```json
-{
-  "arguments" : ["--some_argument"],
-  "inputs" : [
-    { "path": "/path/to/my/file/1", "digest": "fdk3e2ml23d"},
-    { "path": "/path/to/my/file/2", "digest": "1fwqd4qdd" }
-  ],
-  "requestId" : 12
-}
-```
-
-The optional `verbosity` field can be used to request extra debugging output
-from the worker. It is entirely up to the worker what and how to output. Higher
-values indicate more verbose output. Passing the `--worker_verbose` flag to
-Bazel sets the `verbosity` field to 10, but smaller or larger values can be used
-manually for different amounts of output.
-
-The optional `sandbox_dir` field is used only by workers that support
-[multiplex sandboxing](/remote/multiplex).
-
-### Work responses
-
-A `WorkResponse` contains a request id, a zero or nonzero exit code, and an
-output message describing any errors encountered in processing or executing
-the request. A worker should capture the `stdout` and `stderr` of any tool it
-calls and report them through the `WorkResponse`. Writing it to the `stdout` of
-the worker process is unsafe, as it will interfere with the worker protocol.
-Writing it to the `stderr` of the worker process is safe, but the result is
-collected in a per-worker log file instead of ascribed to individual actions.
-
-```json
-{
-  "exitCode" : 1,
-  "output" : "Action failed with the following message:\nCould not find input file \"/path/to/my/file/1\"",
-  "requestId" : 12
-}
-```
-
-As per the norm for protobufs, all fields are optional. However, Bazel requires
-the `WorkRequest` and the corresponding `WorkResponse` to have the same request
-id, so the request id must be specified if it is nonzero. This is a valid
-`WorkResponse`:
-
-```json
-{
-  "requestId" : 12
-}
-```
-
-A `request_id` of 0 indicates a "singleplex" request, used when this request
-cannot be processed in parallel with other requests. The server guarantees that
-a given worker receives requests with either only `request_id` 0 or only
-`request_id` greater than zero. Singleplex requests are sent serially: the
-server doesn't send another request until it has received a response (except
-for cancel requests, see below).
-
-**Notes**
-
-* Each protocol buffer is preceded by its length in `varint` format (see
-  [`MessageLite.writeDelimitedTo()`](https://developers.google.com/protocol-buffers/docs/reference/java/com/google/protobuf/MessageLite.html#writeDelimitedTo-java.io.OutputStream-)).
-* JSON requests and responses are not preceded by a size indicator.
-* JSON requests uphold the same structure as the protobuf, but use standard
-  JSON and use camel case for all field names.
-* In order to maintain the same backward and forward compatibility properties
-  as protobuf, JSON workers must tolerate unknown fields in these messages,
-  and use the protobuf defaults for missing values.
-* Bazel stores requests as protobufs and converts them to JSON using
-  [protobuf's JSON format](https://cs.opensource.google/protobuf/protobuf/+/master:java/util/src/main/java/com/google/protobuf/util/JsonFormat.java).
-
-### Cancellation
-
-Workers can optionally allow work requests to be cancelled before they finish.
-This is particularly useful in connection with dynamic execution, where local
-execution can regularly be interrupted by a faster remote execution. To allow
-cancellation, add `supports-worker-cancellation: 1` to the
-`execution-requirements` field (see below) and set the
-`--experimental_worker_cancellation` flag.
-
-A **cancel request** is a `WorkRequest` with the `cancel` field set (and
-similarly a **cancel response** is a `WorkResponse` with the `was_cancelled`
-field set). The only other field that must be in a cancel request or cancel
-response is `request_id`, indicating which request to cancel. The `request_id`
-field will be 0 for singleplex workers or the non-0 `request_id` of a previously
-sent `WorkRequest` for multiplex workers. The server may send cancel requests
-for requests that the worker has already responded to, in which case the cancel
-request must be ignored.
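-
-For example, a cancel exchange in the JSON protocol might look like the
-following (the request id is illustrative). Bazel sends:
-
-```json
-{
-  "requestId" : 12,
-  "cancel" : true
-}
-```
-
-and the worker, once it has stopped (or finished) the work, replies:
-
-```json
-{
-  "requestId" : 12,
-  "wasCancelled" : true
-}
-```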
-
-Each non-cancel `WorkRequest` message must be answered exactly once, whether or
-not it was cancelled. Once the server has sent a cancel request, the worker may
-respond with a `WorkResponse` with the `request_id` set and the `was_cancelled`
-field set to true. Sending a regular `WorkResponse` is also accepted, but the
-`output` and `exit_code` fields will be ignored.
-
-Once a response has been sent for a `WorkRequest`, the worker must not touch the
-files in its working directory. The server is free to clean up the files,
-including temporary files.
-
-## Making the rule that uses the worker
-
-You'll also need to create a rule that generates actions to be performed by the
-worker. Making a Starlark rule that uses a worker is just like
-[creating any other rule](https://github.com/bazelbuild/examples/tree/master/rules).
-
-In addition, the rule needs to contain a reference to the worker itself, and
-there are some requirements for the actions it produces.
-
-### Referring to the worker
-
-The rule that uses the worker needs to contain a field that refers to the worker
-itself, so you'll need to create an instance of a `*_binary` rule to define
-your worker. If your worker is called `MyWorker.java`, this might be the
-associated rule:
-
-```python
-java_binary(
-    name = "worker",
-    srcs = ["MyWorker.java"],
-)
-```
-
-This creates the "worker" label, which refers to the worker binary. You'll then
-define a rule that *uses* the worker. This rule should define an attribute that
-refers to the worker binary.
-
-If the worker binary you built is in a package named "work", which is at the top
-level of the build, this might be the attribute definition:
-
-```python
-"worker": attr.label(
-    default = Label("//work:worker"),
-    executable = True,
-    cfg = "exec",
-)
-```
-
-`cfg = "exec"` indicates that the worker should be built to run on your
-execution platform rather than on the target platform (i.e., the worker is used
-as a tool during the build).
-
-### Work action requirements
-
-The rule that uses the worker creates actions for the worker to perform. These
-actions have a couple of requirements.
-
-* The *"arguments"* field. This takes a list of strings, all but the last of
-  which are arguments passed to the worker upon startup. The last element in
-  the "arguments" list is a `flag-file` (@-preceded) argument. Workers read
-  the arguments from the specified flagfile on a per-WorkRequest basis. Your
-  rule can write non-startup arguments for the worker to this flagfile.
-
-* The *"execution-requirements"* field, which takes a dictionary containing
-  `"supports-workers" : "1"`, `"supports-multiplex-workers" : "1"`, or both.
-
-  The "arguments" and "execution-requirements" fields are required for all
-  actions sent to workers. Additionally, actions that should be executed by
-  JSON workers need to include `"requires-worker-protocol" : "json"` in the
-  execution requirements field.
`"requires-worker-protocol" : "proto"` is also - a valid execution requirement, though it’s not required for proto workers, - since they are the default. - - You can also set a `worker-key-mnemonic` in the execution requirements. This - may be useful if you're reusing the executable for multiple action types and - want to distinguish actions by this worker. - -* Temporary files generated in the course of the action should be saved to the - worker's directory. This enables sandboxing. - -Note: To pass an argument starting with a literal `@`, start the argument with -`@@` instead. If an argument is also an external repository label, it will not -be considered a flagfile argument. - -Assuming a rule definition with "worker" attribute described above, in addition -to a "srcs" attribute representing the inputs, an "output" attribute -representing the outputs, and an "args" attribute representing the worker -startup args, the call to `ctx.actions.run` might be: - -```python -ctx.actions.run( - inputs=ctx.files.srcs, - outputs=[ctx.outputs.output], - executable=ctx.executable.worker, - mnemonic="someMnemonic", - execution_requirements={ - "supports-workers" : "1", - "requires-worker-protocol" : "json"}, - arguments=ctx.attr.args + ["@flagfile"] - ) -``` - -For another example, see -[Implementing persistent workers](/remote/persistent#implementation). - -## Examples - -The Bazel code base uses -[Java compiler workers](https://github.com/bazelbuild/bazel/blob/a4251eab6988d6cf4f5e35681fbe2c1b0abe48ef/src/java_tools/buildjar/java/com/google/devtools/build/buildjar/BazelJavaBuilder.java), -in addition to an -[example JSON worker](https://github.com/bazelbuild/bazel/blob/c65f768fec9889bbf1ee934c61d0dc061ea54ca2/src/test/java/com/google/devtools/build/lib/worker/ExampleWorker.java) -that is used in our integration tests. - -You can use their -[scaffolding](https://github.com/bazelbuild/bazel/blob/a4251eab6988d6cf4f5e35681fbe2c1b0abe48ef/src/main/java/com/google/devtools/build/lib/worker/WorkRequestHandler.java) -to make any Java-based tool into a worker by passing in the correct callback. - -For an example of a rule that uses a worker, take a look at Bazel's -[worker integration test](https://github.com/bazelbuild/bazel/blob/22b4dbcaf05756d506de346728db3846da56b775/src/test/shell/integration/bazel_worker_test.sh#L106). - -External contributors have implemented workers in a variety of languages; take a -look at -[Polyglot implementations of Bazel persistent workers](https://github.com/Ubehebe/bazel-worker-examples). -You can -[find many more examples on GitHub](https://github.com/search?q=bazel+workrequest&type=Code)! diff --git a/8.2.1/remote/multiplex.mdx b/8.2.1/remote/multiplex.mdx deleted file mode 100644 index b4b0a0d..0000000 --- a/8.2.1/remote/multiplex.mdx +++ /dev/null @@ -1,113 +0,0 @@ ---- -title: 'Multiplex Workers (Experimental Feature)' ---- - - - -This page describes multiplex workers, how to write multiplex-compatible -rules, and workarounds for certain limitations. - -Caution: Experimental features are subject to change at any time. - -_Multiplex workers_ allow Bazel to handle multiple requests with a single worker -process. For multi-threaded workers, Bazel can use fewer resources to -achieve the same, or better performance. For example, instead of having one -worker process per worker, Bazel can have four multiplexed workers talking to -the same worker process, which can then handle requests in parallel. 
For
-languages like Java and Scala, this saves JVM warm-up time and JIT compilation
-time, and in general it allows using one shared cache between all workers of
-the same type.
-
-## Overview
-
-There are two layers between the Bazel server and the worker process. For certain
-mnemonics that can run processes in parallel, Bazel gets a `WorkerProxy` from
-the worker pool. The `WorkerProxy` forwards requests to the worker process
-sequentially along with a `request_id`; the worker process processes each request
-and sends responses to the `WorkerMultiplexer`. When the `WorkerMultiplexer`
-receives a response, it parses the `request_id` and then forwards the response
-back to the correct `WorkerProxy`. Just as with non-multiplexed workers, all
-communication is done over standard in/out, but the tool cannot just use
-`stderr` for user-visible output ([see below](#output)).
-
-Each worker has a key. Bazel uses the key's hash code (composed of environment
-variables, the execution root, and the mnemonic) to determine which
-`WorkerMultiplexer` to use. `WorkerProxy`s communicate with the same
-`WorkerMultiplexer` if they have the same hash code. Therefore, assuming
-environment variables and the execution root are the same in a single Bazel
-invocation, each unique mnemonic can only have one `WorkerMultiplexer` and one
-worker process. The total number of workers, including regular workers and
-`WorkerProxy`s, is still limited by `--worker_max_instances`.
-
-## Writing multiplex-compatible rules
-
-The rule's worker process should be multi-threaded to take advantage of
-multiplex workers. Protobuf allows a ruleset to parse a single request even
-though there might be multiple requests piling up in the stream. Whenever the
-worker process parses a request from the stream, it should handle the request in
-a new thread. Because different threads could complete and write to the stream
-at the same time, the worker process needs to make sure the responses are
-written atomically (messages don't overlap). Responses must contain the
-`request_id` of the request they're handling; a minimal worker loop along these
-lines is sketched below.
-
-### Handling multiplex output
-
-Multiplex workers need to be more careful about handling their output than
-singleplex workers. Anything sent to `stderr` will go into a single log file
-shared among all `WorkerProxy`s of the same type,
-randomly interleaved between concurrent requests. While redirecting `stdout`
-into `stderr` is a good idea, do not collect that output into the `output`
-field of `WorkResponse`, as that could show the user mangled pieces of output.
-If your tool only sends user-oriented output to `stdout` or `stderr`, you will
-need to change that behaviour before you can enable multiplex workers.
-
-## Enabling multiplex workers
-
-Multiplex workers are not enabled by default. A ruleset can turn on multiplex
-workers by using the `supports-multiplex-workers` tag in the
-`execution_requirements` of an action (just like the `supports-workers` tag
-enables regular workers). As is the case when using regular workers, a worker
-strategy needs to be specified, either at the ruleset level (for example,
-`--strategy=[some_mnemonic]=worker`) or generally at the strategy level (for
-example, `--dynamic_local_strategy=worker,standalone`). No additional flags are
-necessary, and `supports-multiplex-workers` takes precedence over
-`supports-workers` if both are set. You can turn off multiplex workers
-globally by passing `--noworker_multiplex`.
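-
-The sketch below illustrates the threading requirements described above: a JSON
-multiplex worker loop that handles each request on its own thread and
-serializes writes with a lock. It is a minimal illustration, not a production
-implementation; the actual tool invocation is elided.
-
-```python
-import json
-import sys
-import threading
-
-_write_lock = threading.Lock()
-
-def _handle(request):
-    # Run the actual tool on request["arguments"] here (elided) and
-    # collect its output for the response.
-    response = {"exitCode": 0, "requestId": request.get("requestId", 0)}
-    # Write the whole response while holding the lock so that concurrent
-    # responses never interleave on stdout.
-    with _write_lock:
-        sys.stdout.write(json.dumps(response) + "\n")
-        sys.stdout.flush()
-
-def main():
-    # Each line on stdin is one newline-delimited JSON WorkRequest.
-    for line in sys.stdin:
-        request = json.loads(line)
-        # Handle every request on its own thread so that a slow request
-        # doesn't block the requests queued up behind it.
-        threading.Thread(target=_handle, args=(request,)).start()
-
-if __name__ == "__main__":
-    main()
-```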
- -A ruleset is encouraged to use multiplex workers if possible, to reduce memory -pressure and improve performance. However, multiplex workers are not currently -compatible with [dynamic execution](/remote/dynamic) unless they -implement multiplex sandboxing. Attempting to run non-sandboxed multiplex -workers with dynamic execution will silently use sandboxed -singleplex workers instead. - -## Multiplex sandboxing - -Multiplex workers can be sandboxed by adding explicit support for it in the -worker implementations. While singleplex worker sandboxing can be done by -running each worker process in its own sandbox, multiplex workers share the -process working directory between multiple parallel requests. To allow -sandboxing of multiplex workers, the worker must support reading from and -writing to a subdirectory specified in each request, instead of directly in -its working directory. - -To support multiplex sandboxing, the worker must use the `sandbox_dir` field -from the `WorkRequest` and use that as a prefix for all file reads and writes. -While the `arguments` and `inputs` fields remain unchanged from an unsandboxed -request, the actual inputs are relative to the `sandbox_dir`. The worker must -translate file paths found in `arguments` and `inputs` to read from this -modified path, and must also write all outputs relative to the `sandbox_dir`. -This includes paths such as '.', as well as paths found in files specified -in the arguments (such as ["argfile"](https://docs.oracle.com/javase/7/docs/technotes/tools/windows/javac.html#commandlineargfile) arguments). - -Once a worker supports multiplex sandboxing, the ruleset can declare this -support by adding `supports-multiplex-sandboxing` to the -`execution_requirements` of an action. Bazel will then use multiplex sandboxing -if the `--experimental_worker_multiplex_sandboxing` flag is passed, or if -the worker is used with dynamic execution. - -The worker files of a sandboxed multiplex worker are still relative to the -working directory of the worker process. Thus, if a file is -used both for running the worker and as an input, it must be specified both as -an input in the flagfile argument as well as in `tools`, `executable`, or -`runfiles`. diff --git a/8.2.1/remote/output-directories.mdx b/8.2.1/remote/output-directories.mdx deleted file mode 100644 index bdbe029..0000000 --- a/8.2.1/remote/output-directories.mdx +++ /dev/null @@ -1,144 +0,0 @@ ---- -title: 'Output Directory Layout' ---- - - - -This page covers requirements and layout for output directories. - -## Requirements - -Requirements for an output directory layout: - -* Doesn't collide if multiple users are building on the same box. -* Supports building in multiple workspaces at the same time. -* Supports building for multiple target configurations in the same workspace. -* Doesn't collide with any other tools. -* Is easy to access. -* Is easy to clean, even selectively. -* Is unambiguous, even if the user relies on symbolic links when changing into - their client directory. -* All the build state per user should be underneath one directory ("I'd like to - clean all the .o files from all my clients.") - -## Current layout - -The solution that's currently implemented: - -* Bazel must be invoked from a directory containing a repo boundary file, or a - subdirectory thereof. In other words, Bazel must be invoked from inside a - [repository](../external/overview#repository). Otherwise, an error is - reported. 
-* The _outputRoot_ directory defaults to `${XDG_CACHE_HOME}/bazel` (or - `~/.cache/bazel`, if the `XDG_CACHE_HOME` environment variable is not set) on - Linux, `/private/var/tmp` on macOS, and on Windows it defaults to `%HOME%` if - set, else `%USERPROFILE%` if set, else the result of calling - `SHGetKnownFolderPath()` with the `FOLDERID_Profile` flag set. If the - environment variable `$TEST_TMPDIR` is set, as in a test of Bazel itself, - then that value overrides the default. -* The Bazel user's build state is located beneath `outputRoot/_bazel_$USER`. - This is called the _outputUserRoot_ directory. -* Beneath the `outputUserRoot` directory there is an `install` directory, and in - it is an `installBase` directory whose name is the MD5 hash of the Bazel - installation manifest. -* Beneath the `outputUserRoot` directory, an `outputBase` directory - is also created whose name is the MD5 hash of the path name of the workspace - root. So, for example, if Bazel is running in the workspace root - `/home/user/src/my-project` (or in a directory symlinked to that one), then - an output base directory is created called: - `/home/user/.cache/bazel/_bazel_user/7ffd56a6e4cb724ea575aba15733d113`. You - can also run `echo -n $(pwd) | md5sum` in the workspace root to get the MD5. -* You can use Bazel's `--output_base` startup option to override the default - output base directory. For example, - `bazel --output_base=/tmp/bazel/output build x/y:z`. -* You can also use Bazel's `--output_user_root` startup option to override the - default install base and output base directories. For example: - `bazel --output_user_root=/tmp/bazel build x/y:z`. - -The symlinks for "bazel-<workspace-name>", "bazel-out", "bazel-testlogs", -and "bazel-bin" are put in the workspace directory; these symlinks point to some -directories inside a target-specific directory inside the output directory. -These symlinks are only for the user's convenience, as Bazel itself does not -use them. Also, this is done only if the workspace root is writable. - -## Layout diagram - -The directories are laid out as follows: - -``` -<workspace-name>/ <== The workspace root - bazel-my-project => <..._main> <== Symlink to execRoot - bazel-out => <...bazel-out> <== Convenience symlink to outputPath - bazel-bin => <...bin> <== Convenience symlink to most recent written bin dir $(BINDIR) - bazel-testlogs => <...testlogs> <== Convenience symlink to the test logs directory - -/home/user/.cache/bazel/ <== Root for all Bazel output on a machine: outputRoot - _bazel_$USER/ <== Top level directory for a given user depends on the user name: - outputUserRoot - install/ - fba9a2c87ee9589d72889caf082f1029/ <== Hash of the Bazel install manifest: installBase - _embedded_binaries/ <== Contains binaries and scripts unpacked from the data section of - the bazel executable on first run (such as helper scripts and the - main Java file BazelServer_deploy.jar) - 7ffd56a6e4cb724ea575aba15733d113/ <== Hash of the client's workspace root (such as - /home/user/src/my-project): outputBase - action_cache/ <== Action cache directory hierarchy - This contains the persistent record of the file - metadata (timestamps, and perhaps eventually also MD5 - sums) used by the FilesystemValueChecker. - command.log <== A copy of the stdout/stderr output from the most - recent bazel command. - external/ <== The directory that remote repositories are - downloaded/symlinked into. - server/ <== The Bazel server puts all server-related files (such - as socket file, logs, etc) here. 
- jvm.out <== The debugging output for the server. - execroot/ <== The working directory for all actions. For special - cases such as sandboxing and remote execution, the - actions run in a directory that mimics execroot. - Implementation details, such as where the directories - are created, are intentionally hidden from the action. - Every action can access its inputs and outputs relative - to the execroot directory. - _main/ <== Working tree for the Bazel build & root of symlink forest: execRoot - _bin/ <== Helper tools are linked from or copied to here. - - bazel-out/ <== All actual output of the build is under here: outputPath - _tmp/actions/ <== Action output directory. This contains a file with the - stdout/stderr for every action from the most recent - bazel run that produced output. - local_linux-fastbuild/ <== one subdirectory per unique target BuildConfiguration instance; - this is currently encoded - bin/ <== Bazel outputs binaries for target configuration here: $(BINDIR) - foo/bar/_objs/baz/ <== Object files for a cc_* rule named //foo/bar:baz - foo/bar/baz1.o <== Object files from source //foo/bar:baz1.cc - other_package/other.o <== Object files from source //other_package:other.cc - foo/bar/baz <== foo/bar/baz might be the artifact generated by a cc_binary named - //foo/bar:baz - foo/bar/baz.runfiles/ <== The runfiles symlink farm for the //foo/bar:baz executable. - MANIFEST - _main/ - ... - genfiles/ <== Bazel puts generated source for the target configuration here: - $(GENDIR) - foo/bar.h such as foo/bar.h might be a headerfile generated by //foo:bargen - testlogs/ <== Bazel internal test runner puts test log files here - foo/bartest.log such as foo/bar.log might be an output of the //foo:bartest test with - foo/bartest.status foo/bartest.status containing exit status of the test (such as - PASSED or FAILED (Exit 1), etc) - include/ <== a tree with include symlinks, generated as needed. The - bazel-include symlinks point to here. This is used for - linkstamp stuff, etc. - host/ <== BuildConfiguration for build host (user's workstation), for - building prerequisite tools, that will be used in later stages - of the build (ex: Protocol Compiler) - <packages>/ <== Packages referenced in the build appear as if under a regular workspace -``` - -The layout of the \*.runfiles directories is documented in more detail in the places pointed to by RunfilesSupport. - -## `bazel clean` - -`bazel clean` does an `rm -rf` on the `outputPath` and the `action_cache` -directory. It also removes the workspace symlinks. The `--expunge` option -will clean the entire outputBase. diff --git a/8.2.1/remote/persistent.mdx b/8.2.1/remote/persistent.mdx deleted file mode 100644 index 1a56946..0000000 --- a/8.2.1/remote/persistent.mdx +++ /dev/null @@ -1,265 +0,0 @@ ---- -title: 'Persistent Workers' ---- - - - -This page covers how to use persistent workers, the benefits, requirements, and -how workers affect sandboxing. - -A persistent worker is a long-running process started by the Bazel server, which -functions as a *wrapper* around the actual *tool* (typically a compiler), or is -the *tool* itself. In order to benefit from persistent workers, the tool must -support doing a sequence of compilations, and the wrapper needs to translate -between the tool's API and the request/response format described below. 
The same
-worker might be called with and without the `--persistent_worker` flag in the
-same build, and is responsible for appropriately starting and talking to the
-tool, as well as shutting down workers on exit. Each worker instance is assigned
-(but not chrooted to) a separate working directory under
-`<outputBase>/bazel-workers`.
-
-Using persistent workers is an
-[execution strategy](/docs/user-manual#execution-strategy) that decreases
-start-up overhead, allows more JIT compilation, and enables caching of, for
-example, the abstract syntax trees in the action execution. This strategy
-achieves these improvements by sending multiple requests to a long-running
-process.
-
-Persistent workers are implemented for multiple languages, including Java,
-[Scala](https://github.com/bazelbuild/rules_scala),
-[Kotlin](https://github.com/bazelbuild/rules_kotlin), and more.
-
-Programs using a NodeJS runtime can use the
-[@bazel/worker](https://www.npmjs.com/package/@bazel/worker) helper library to
-implement the worker protocol.
-
-## Using persistent workers
-
-[Bazel 0.27 and higher](https://blog.bazel.build/2019/06/19/list-strategy.html)
-uses persistent workers by default when executing builds, though remote
-execution takes precedence. For actions that do not support persistent workers,
-Bazel falls back to starting a tool instance for each action. You can explicitly
-set your build to use persistent workers by setting the `worker`
-[strategy](/docs/user-manual#execution-strategy) for the applicable tool
-mnemonics. As a best practice, this example includes specifying `local` as a
-fallback to the `worker` strategy:
-
-```posix-terminal
-bazel build //my:target --strategy=Javac=worker,local
-```
-
-Using the worker strategy instead of the local strategy can boost compilation
-speed significantly, depending on the implementation. For Java, builds can be
-2–4 times faster, sometimes more for incremental compilation. Compiling Bazel is
-about 2.5 times as fast with workers. For more details, see the
-"[Choosing number of workers](#number-of-workers)" section.
-
-If you also have a remote build environment that matches your local build
-environment, you can use the experimental
-[*dynamic* strategy](https://blog.bazel.build/2019/02/01/dynamic-spawn-scheduler.html),
-which races a remote execution and a worker execution. To enable the dynamic
-strategy, pass the
-[--experimental_spawn_scheduler](/reference/command-line-reference#flag--experimental_spawn_scheduler)
-flag. This strategy automatically enables workers, so there is no need to
-specify the `worker` strategy, but you can still use `local` or `sandboxed` as
-fallbacks.
-
-## Choosing number of workers
-
-The default number of worker instances per mnemonic is 4, but it can be adjusted
-with the
-[`worker_max_instances`](/reference/command-line-reference#flag--worker_max_instances)
-flag. There is a trade-off between making good use of the available CPUs and the
-amount of JIT compilation and cache hits you get. With more workers, more
-targets will pay start-up costs of running non-JITted code and hitting cold
-caches. If you have a small number of targets to build, a single worker may give
-the best trade-off between compilation speed and resource usage (for example,
-see [issue #8586](https://github.com/bazelbuild/bazel/issues/8586)).
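-
-For example, to experiment with this trade-off, you could cap a single mnemonic
-at two instances while leaving everything else at the default (the target name
-is a placeholder; `--worker_max_instances` also accepts a plain value that
-applies to all mnemonics):
-
-```posix-terminal
-bazel build //my:target --worker_max_instances=Javac=2
-```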
-The `worker_max_instances` flag sets the maximum number of worker instances per -mnemonic and flag set (see below), so in a mixed system you could end up using -quite a lot of memory if you keep the default value. For incremental builds the -benefit of multiple worker instances is even smaller. - -This graph shows the from-scratch compilation times for Bazel (target -`//src:bazel`) on a 6-core hyper-threaded Intel Xeon 3.5 GHz Linux workstation -with 64 GB of RAM. For each worker configuration, five clean builds are run and -the average of the last four are taken. - -![Graph of performance improvements of clean builds](/docs/images/workers-clean-chart.png "Performance improvements of clean builds") - -**Figure 1.** Graph of performance improvements of clean builds. - -For this configuration, two workers give the fastest compile, though at only 14% -improvement compared to one worker. One worker is a good option if you want to -use less memory. - -Incremental compilation typically benefits even more. Clean builds are -relatively rare, but changing a single file between compiles is common, in -particular in test-driven development. The above example also has some non-Java -packaging actions to it that can overshadow the incremental compile time. - -Recompiling the Java sources only -(`//src/main/java/com/google/devtools/build/lib/bazel:BazelServer_deploy.jar`) -after changing an internal string constant in -[AbstractContainerizingSandboxedSpawn.java](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/sandbox/AbstractContainerizingSandboxedSpawn.java) -gives a 3x speed-up (average of 20 incremental builds with one warmup build -discarded): - -![Graph of performance improvements of incremental builds](/docs/images/workers-incremental-chart.png "Performance improvements of incremental builds") - -**Figure 2.** Graph of performance improvements of incremental builds. - -The speed-up depends on the change being made. A speed-up of a factor 6 is -measured in the above situation when a commonly used constant is changed. - -## Modifying persistent workers - -You can pass the -[`--worker_extra_flag`](/reference/command-line-reference#flag--worker_extra_flag) -flag to specify start-up flags to workers, keyed by mnemonic. For instance, -passing `--worker_extra_flag=javac=--debug` turns on debugging for Javac only. -Only one worker flag can be set per use of this flag, and only for one mnemonic. -Workers are not just created separately for each mnemonic, but also for -variations in their start-up flags. Each combination of mnemonic and start-up -flags is combined into a `WorkerKey`, and for each `WorkerKey` up to -`worker_max_instances` workers may be created. See the next section for how the -action configuration can also specify set-up flags. - -Passing the -[`--worker_sandboxing`](/reference/command-line-reference#flag--worker_sandboxing) -flag makes each worker request use a separate sandbox directory for all its -inputs. Setting up the [sandbox](/docs/sandboxing) takes some extra time, -especially on macOS, but gives a better correctness guarantee. - -The -[`--worker_quit_after_build`](/reference/command-line-reference#flag--worker_quit_after_build) -flag is mainly useful for debugging and profiling. This flag forces all workers -to quit once a build is done. You can also pass -[`--worker_verbose`](/reference/command-line-reference#flag--worker_verbose) to -get more output about what the workers are doing. 
This flag is reflected in the -`verbosity` field in `WorkRequest`, allowing worker implementations to also be -more verbose. - -Workers store their logs in the `/bazel-workers` directory, for -example -`/tmp/_bazel_larsrc/191013354bebe14fdddae77f2679c3ef/bazel-workers/worker-1-Javac.log`. -The file name includes the worker id and the mnemonic. Since there can be more -than one `WorkerKey` per mnemonic, you may see more than `worker_max_instances` -log files for a given mnemonic. - -For Android builds, see details at the -[Android Build Performance page](/docs/android-build-performance). - -## Implementing persistent workers - -See the [creating persistent workers](/remote/creating) page for more -information on how to make a worker. - -This example shows a Starlark configuration for a worker that uses JSON: - -```python -args_file = ctx.actions.declare_file(ctx.label.name + "_args_file") -ctx.actions.write( - output = args_file, - content = "\n".join(["-g", "-source", "1.5"] + ctx.files.srcs), -) -ctx.actions.run( - mnemonic = "SomeCompiler", - executable = "bin/some_compiler_wrapper", - inputs = inputs, - outputs = outputs, - arguments = [ "-max_mem=4G", "@%s" % args_file.path], - execution_requirements = { - "supports-workers" : "1", "requires-worker-protocol" : "json" } -) -``` - -With this definition, the first use of this action would start with executing -the command line `/bin/some_compiler -max_mem=4G --persistent_worker`. A request -to compile `Foo.java` would then look like: - -NOTE: While the protocol buffer specification uses "snake case" (`request_id`), -the JSON protocol uses "camel case" (`requestId`). In this document, we will use -camel case in the JSON examples, but snake case when talking about the field -regardless of protocol. - -```json -{ - "arguments": [ "-g", "-source", "1.5", "Foo.java" ] - "inputs": [ - { "path": "symlinkfarm/input1", "digest": "d49a..." }, - { "path": "symlinkfarm/input2", "digest": "093d..." }, - ], -} -``` - -The worker receives this on `stdin` in newline-delimited JSON format (because -`requires-worker-protocol` is set to JSON). The worker then performs the action, -and sends a JSON-formatted `WorkResponse` to Bazel on its stdout. Bazel then -parses this response and manually converts it to a `WorkResponse` proto. To -communicate with the associated worker using binary-encoded protobuf instead of -JSON, `requires-worker-protocol` would be set to `proto`, like this: - -``` - execution_requirements = { - "supports-workers" : "1" , - "requires-worker-protocol" : "proto" - } -``` - -If you do not include `requires-worker-protocol` in the execution requirements, -Bazel will default the worker communication to use protobuf. - -Bazel derives the `WorkerKey` from the mnemonic and the shared flags, so if this -configuration allowed changing the `max_mem` parameter, a separate worker would -be spawned for each value used. This can lead to excessive memory consumption if -too many variations are used. - -Each worker can currently only process one request at a time. The experimental -[multiplex workers](/remote/multiplex) feature allows using multiple -threads, if the underlying tool is multithreaded and the wrapper is set up to -understand this. - -In -[this GitHub repo](https://github.com/Ubehebe/bazel-worker-examples), -you can see example worker wrappers written in Java as well as in Python. 
If you
-are working in JavaScript or TypeScript, the
-[@bazel/worker package](https://www.npmjs.com/package/@bazel/worker)
-and
-[nodejs worker example](https://github.com/bazelbuild/rules_nodejs/tree/stable/examples/worker)
-might be helpful.
-
-## How do workers affect sandboxing?
-
-By default, the `worker` strategy does not run the action in a
-[sandbox](/docs/sandboxing), similar to the `local` strategy. You can set the
-`--worker_sandboxing` flag to run all workers inside sandboxes, making sure each
-execution of the tool only sees the input files it's supposed to have. The tool
-may still leak information between requests internally, for instance through a
-cache. Using the `dynamic` strategy
-[requires workers to be sandboxed](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/exec/SpawnStrategyRegistry.java).
-
-To allow correct use of compiler caches with workers, a digest is passed along
-with each input file. Thus the compiler or the wrapper can check if the input is
-still valid without having to read the file.
-
-Even when using the input digests to guard against unwanted caching, sandboxed
-workers offer less strict sandboxing than a pure sandbox, because the tool may
-keep other internal state that has been affected by previous requests.
-
-Multiplex workers can only be sandboxed if the worker implementation supports
-it, and this sandboxing must be separately enabled with the
-`--experimental_worker_multiplex_sandboxing` flag. See more details in
-[the design doc](https://docs.google.com/document/d/1ncLW0hz6uDhNvci1dpzfEoifwTiNTqiBEm1vi-bIIRM/edit).
-
-## Further reading
-
-For more information on persistent workers, see:
-
-* [Original persistent workers blog post](https://blog.bazel.build/2015/12/10/java-workers.html)
-* [Haskell implementation description](https://www.tweag.io/blog/2019-09-25-bazel-ghc-persistent-worker-internship/)
-* [Blog post by Mike Morearty](https://medium.com/@mmorearty/how-to-create-a-persistent-worker-for-bazel-7738bba2cabb)
-* [Front End Development with Bazel: Angular/TypeScript and Persistent Workers
-  w/ Asana](https://www.youtube.com/watch?v=0pgERydGyqo)
-* [Bazel strategies explained](https://jmmv.dev/2019/12/bazel-strategies.html)
-* [Informative worker strategy discussion on the bazel-discuss mailing list](https://groups.google.com/forum/#!msg/bazel-discuss/oAEnuhYOPm8/ol7hf4KWJgAJ)
diff --git a/8.2.1/remote/rbe.mdx b/8.2.1/remote/rbe.mdx
deleted file mode 100644
index 75d4a15..0000000
--- a/8.2.1/remote/rbe.mdx
+++ /dev/null
@@ -1,33 +0,0 @@
----
-title: 'Remote Execution Overview'
----
-
-
-
-This page covers the benefits, requirements, and options for running Bazel
-with remote execution.
-
-By default, Bazel executes builds and tests on your local machine. Remote
-execution of a Bazel build allows you to distribute build and test actions
-across multiple machines, such as a datacenter.
-
-Remote execution provides the following benefits:
-
-* Faster build and test execution through scaling of nodes available
-  for parallel actions
-* A consistent execution environment for a development team
-* Reuse of build outputs across a development team
-
-Bazel uses an open-source
-[gRPC protocol](https://github.com/bazelbuild/remote-apis)
-to allow for remote execution and remote caching.
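-
-As a sketch, a typical invocation points Bazel at a remote execution service
-with a pair of flags (the endpoint below is a placeholder; consult your
-service's documentation for the exact values):
-
-```posix-terminal
-bazel build //my:target \
-  --remote_executor=grpcs://remote.example.com \
-  --remote_cache=grpcs://remote.example.com
-```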
- -For a list of commercially supported remote execution services as well as -self-service tools, see -[Remote Execution Services](https://www.bazel.build/remote-execution-services.html) - -## Requirements - -Remote execution of Bazel builds imposes a set of mandatory configuration -constraints on the build. For more information, see -[Adapting Bazel Rules for Remote Execution](/remote/rules). diff --git a/8.2.1/remote/rules.mdx b/8.2.1/remote/rules.mdx deleted file mode 100644 index 340ab02..0000000 --- a/8.2.1/remote/rules.mdx +++ /dev/null @@ -1,180 +0,0 @@ ---- -title: 'Adapting Bazel Rules for Remote Execution' ---- - - - -This page is intended for Bazel users writing custom build and test rules -who want to understand the requirements for Bazel rules in the context of -remote execution. - -Remote execution allows Bazel to execute actions on a separate platform, such as -a datacenter. Bazel uses a -[gRPC protocol](https://github.com/bazelbuild/remote-apis/blob/main/build/bazel/remote/execution/v2/remote_execution.proto) -for its remote execution. You can try remote execution with -[bazel-buildfarm](https://github.com/bazelbuild/bazel-buildfarm), -an open-source project that aims to provide a distributed remote execution -platform. - -This page uses the following terminology when referring to different -environment types or *platforms*: - -* **Host platform** - where Bazel runs. -* **Execution platform** - where Bazel actions run. -* **Target platform** - where the build outputs (and some actions) run. - -## Overview - -When configuring a Bazel build for remote execution, you must follow the -guidelines described in this page to ensure the build executes remotely -error-free. This is due to the nature of remote execution, namely: - -* **Isolated build actions.** Build tools do not retain state and dependencies - cannot leak between them. - -* **Diverse execution environments.** Local build configuration is not always - suitable for remote execution environments. - -This page describes the issues that can arise when implementing custom Bazel -build and test rules for remote execution and how to avoid them. It covers the -following topics: - -* [Invoking build tools through toolchain rules](#toolchain-rules) -* [Managing implicit dependencies](#manage-dependencies) -* [Managing platform-dependent binaries](#manage-binaries) -* [Managing configure-style WORKSPACE rules](#manage-workspace-rules) - -## Invoking build tools through toolchain rules - -A Bazel toolchain rule is a configuration provider that tells a build rule what -build tools, such as compilers and linkers, to use and how to configure them -using parameters defined by the rule's creator. A toolchain rule allows build -and test rules to invoke build tools in a predictable, preconfigured manner -that's compatible with remote execution. For example, use a toolchain rule -instead of invoking build tools via the `PATH`, `JAVA_HOME`, or other local -variables that may not be set to equivalent values (or at all) in the remote -execution environment. 
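-
-As a minimal sketch (the toolchain type and the `compiler` field on its
-`ToolchainInfo` are hypothetical), a rule might resolve its tool like this
-instead of reaching for `PATH`:
-
-```python
-def _impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name + ".out")
-    # Resolve the compiler through the registered toolchain rather than
-    # through PATH, JAVA_HOME, or other host-specific variables.
-    info = ctx.toolchains["//tools:my_toolchain_type"]
-    ctx.actions.run(
-        executable = info.compiler,
-        arguments = [src.path for src in ctx.files.srcs] + ["-o", out.path],
-        inputs = ctx.files.srcs,
-        # Make the tool itself available to the action.
-        tools = [info.compiler],
-        outputs = [out],
-    )
-    return [DefaultInfo(files = depset([out]))]
-
-my_compile = rule(
-    implementation = _impl,
-    attrs = {"srcs": attr.label_list(allow_files = True)},
-    toolchains = ["//tools:my_toolchain_type"],
-)
-```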
- -Toolchain rules currently exist for Bazel build and test rules for -[Scala](https://github.com/bazelbuild/rules_scala/blob/master/scala/scala_toolch -ain.bzl), -[Rust](https://github.com/bazelbuild/rules_rust/blob/main/rust/toolchain.bzl), -and [Go](https://github.com/bazelbuild/rules_go/blob/master/go/toolchains.rst), -and new toolchain rules are under way for other languages and tools such as -[bash](https://docs.google.com/document/d/e/2PACX-1vRCSB_n3vctL6bKiPkIa_RN_ybzoAccSe0ic8mxdFNZGNBJ3QGhcKjsL7YKf-ngVyjRZwCmhi_5KhcX/pub). -If a toolchain rule does not exist for the tool your rule uses, consider -[creating a toolchain rule](/extending/toolchains#creating-a-toolchain-rule). - -## Managing implicit dependencies - -If a build tool can access dependencies across build actions, those actions will -fail when remotely executed because each remote build action is executed -separately from others. Some build tools retain state across build actions and -access dependencies that have not been explicitly included in the tool -invocation, which will cause remotely executed build actions to fail. - -For example, when Bazel instructs a stateful compiler to locally build _foo_, -the compiler retains references to foo's build outputs. When Bazel then -instructs the compiler to build _bar_, which depends on _foo_, without -explicitly stating that dependency in the BUILD file for inclusion in the -compiler invocation, the action executes successfully as long as the same -compiler instance executes for both actions (as is typical for local execution). -However, since in a remote execution scenario each build action executes a -separate compiler instance, compiler state and _bar_'s implicit dependency on -_foo_ will be lost and the build will fail. - -To help detect and eliminate these dependency problems, Bazel 0.14.1 offers the -local Docker sandbox, which has the same restrictions for dependencies as remote -execution. Use the sandbox to prepare your build for remote execution by -identifying and resolving dependency-related build errors. See [Troubleshooting Bazel Remote Execution with Docker Sandbox](/remote/sandbox) -for more information. - -## Managing platform-dependent binaries - -Typically, a binary built on the host platform cannot safely execute on an -arbitrary remote execution platform due to potentially mismatched dependencies. -For example, the SingleJar binary supplied with Bazel targets the host platform. -However, for remote execution, SingleJar must be compiled as part of the process -of building your code so that it targets the remote execution platform. (See the -[target selection logic](https://github.com/bazelbuild/bazel/blob/130aeadfd660336572c3da397f1f107f0c89aa8d/tools/jdk/BUILD#L115).) - -Do not ship binaries of build tools required by your build with your source code -unless you are sure they will safely run in your execution platform. Instead, do -one of the following: - -* Ship or externally reference the source code for the tool so that it can be - built for the remote execution platform. - -* Pre-install the tool into the remote execution environment (for example, a - toolchain container) if it's stable enough and use toolchain rules to run it - in your build. - -## Managing configure-style WORKSPACE rules - -Bazel's `WORKSPACE` rules can be used for probing the host platform for tools -and libraries required by the build, which, for local builds, is also Bazel's -execution platform. 
If the build explicitly depends on local build tools and -artifacts, it will fail during remote execution if the remote execution platform -is not identical to the host platform. - -The following actions performed by `WORKSPACE` rules are not compatible with -remote execution: - -* **Building binaries.** Executing compilation actions in `WORKSPACE` rules - results in binaries that are incompatible with the remote execution platform - if different from the host platform. - -* **Installing `pip` packages.** `pip` packages installed via `WORKSPACE` - rules require that their dependencies be pre-installed on the host platform. - Such packages, built specifically for the host platform, will be - incompatible with the remote execution platform if different from the host - platform. - -* **Symlinking to local tools or artifacts.** Symlinks to tools or libraries - installed on the host platform created via `WORKSPACE` rules will cause the - build to fail on the remote execution platform as Bazel will not be able to - locate them. Instead, create symlinks using standard build actions so that - the symlinked tools and libraries are accessible from Bazel's `runfiles` - tree. Do not use [`repository_ctx.symlink`](/rules/lib/builtins/repository_ctx#symlink) - to symlink target files outside of the external repo directory. - -* **Mutating the host platform.** Avoid creating files outside of the Bazel - `runfiles` tree, creating environment variables, and similar actions, as - they may behave unexpectedly on the remote execution platform. - -To help find potential non-hermetic behavior you can use [Workspace rules log](/remote/workspace). - -If an external dependency executes specific operations dependent on the host -platform, you should split those operations between `WORKSPACE` and build -rules as follows: - -* **Platform inspection and dependency enumeration.** These operations are - safe to execute locally via `WORKSPACE` rules, which can check which - libraries are installed, download packages that must be built, and prepare - required artifacts for compilation. For remote execution, these rules must - also support using pre-checked artifacts to provide the information that - would normally be obtained during host platform inspection. Pre-checked - artifacts allow Bazel to describe dependencies as if they were local. Use - conditional statements or the `--override_repository` flag for this. - -* **Generating or compiling target-specific artifacts and platform mutation**. - These operations must be executed via regular build rules. Actions that - produce target-specific artifacts for external dependencies must execute - during the build. - -To more easily generate pre-checked artifacts for remote execution, you can use -`WORKSPACE` rules to emit generated files. You can run those rules on each new -execution environment, such as inside each toolchain container, and check the -outputs of your remote execution build in to your source repo to reference. - -For example, for Tensorflow's rules for [`cuda`](https://github.com/tensorflow/tensorflow/blob/master/third_party/gpus/cuda_configure.bzl) -and [`python`](https://github.com/tensorflow/tensorflow/blob/master/third_party/py/python_configure.bzl), -the `WORKSPACE` rules produce the following [`BUILD files`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/third_party/toolchains/cpus/py). -For local execution, files produced by checking the host environment are used. 
-For remote execution, a [conditional statement](https://github.com/tensorflow/tensorflow/blob/master/third_party/py/python_configure.bzl#L304) -on an environment variable allows the rule to use files that are checked into -the repo. - -The `BUILD` files declare [`genrules`](https://github.com/tensorflow/tensorflow/blob/master/third_party/py/python_configure.bzl#L84) -that can run both locally and remotely, and perform the necessary processing -that was previously done via `repository_ctx.symlink` as shown [here](https://github.com/tensorflow/tensorflow/blob/d1ba01f81d8fa1d0171ba9ce871599063d5c7eb9/third_party/gpus/cuda_configure.bzl#L730). diff --git a/8.2.1/remote/sandbox.mdx b/8.2.1/remote/sandbox.mdx deleted file mode 100644 index cfb9be4..0000000 --- a/8.2.1/remote/sandbox.mdx +++ /dev/null @@ -1,260 +0,0 @@ ---- -title: 'Troubleshooting Bazel Remote Execution with Docker Sandbox' ---- - - - -Bazel builds that succeed locally may fail when executed remotely due to -restrictions and requirements that do not affect local builds. The most common -causes of such failures are described in [Adapting Bazel Rules for Remote Execution](/remote/rules). - -This page describes how to identify and resolve the most common issues that -arise with remote execution using the Docker sandbox feature, which imposes -restrictions upon the build equal to those of remote execution. This allows you -to troubleshoot your build without the need for a remote execution service. - -The Docker sandbox feature mimics the restrictions of remote execution as -follows: - -* **Build actions execute in toolchain containers.** You can use the same - toolchain containers to run your build locally and remotely via a service - supporting containerized remote execution. - -* **No extraneous data crosses the container boundary.** Only explicitly - declared inputs and outputs enter and leave the container, and only after - the associated build action successfully completes. - -* **Each action executes in a fresh container.** A new, unique container is - created for each spawned build action. - -Note: Builds take noticeably more time to complete when the Docker sandbox -feature is enabled. This is normal. - -You can troubleshoot these issues using one of the following methods: - -* **[Troubleshooting natively.](#troubleshooting-natively)** With this method, - Bazel and its build actions run natively on your local machine. The Docker - sandbox feature imposes restrictions upon the build equal to those of remote - execution. However, this method will not detect local tools, states, and - data leaking into your build, which will cause problems with remote execution. - -* **[Troubleshooting in a Docker container.](#troubleshooting-docker-container)** - With this method, Bazel and its build actions run inside a Docker container, - which allows you to detect tools, states, and data leaking from the local - machine into the build in addition to imposing restrictions - equal to those of remote execution. This method provides insight into your - build even if portions of the build are failing. This method is experimental - and not officially supported. - -## Prerequisites - -Before you begin troubleshooting, do the following if you have not already done so: - -* Install Docker and configure the permissions required to run it. -* Install Bazel 0.14.1 or later. Earlier versions do not support the Docker - sandbox feature. 
-* Add the [bazel-toolchains](https://releases.bazel.build/bazel-toolchains.html) - repo, pinned to the latest release version, to your build's `WORKSPACE` file - as described [here](https://releases.bazel.build/bazel-toolchains.html). -* Add flags to your `.bazelrc` file to enable the feature. Create the file in - the root directory of your Bazel project if it does not exist. Flags below - are a reference sample. Please see the latest - [`.bazelrc`](https://github.com/bazelbuild/bazel-toolchains/tree/master/bazelrc) - file in the bazel-toolchains repo and copy the values of the flags defined - there for config `docker-sandbox`. - -``` -# Docker Sandbox Mode -build:docker-sandbox --host_javabase=<...> -build:docker-sandbox --javabase=<...> -build:docker-sandbox --crosstool_top=<...> -build:docker-sandbox --experimental_docker_image=<...> -build:docker-sandbox --spawn_strategy=docker --strategy=Javac=docker --genrule_strategy=docker -build:docker-sandbox --define=EXECUTOR=remote -build:docker-sandbox --experimental_docker_verbose -build:docker-sandbox --experimental_enable_docker_sandbox -``` - -Note: The flags referenced in the `.bazelrc` file shown above are configured -to run within the [`rbe-ubuntu16-04`](https://console.cloud.google.com/launcher/details/google/rbe-ubuntu16-04) -container. - -If your rules require additional tools, do the following: - -1. Create a custom Docker container by installing tools using a [Dockerfile](https://docs.docker.com/engine/reference/builder/) - and [building](https://docs.docker.com/engine/reference/commandline/build/) - the image locally. - -2. Replace the value of the `--experimental_docker_image` flag above with the - name of your custom container image. - - -## Troubleshooting natively - -This method executes Bazel and all of its build actions directly on the local -machine and is a reliable way to confirm whether your build will succeed when -executed remotely. - -However, with this method, locally installed tools, binaries, and data may leak -into into your build, especially if it uses [configure-style WORKSPACE rules](/remote/rules#manage-workspace-rules). -Such leaks will cause problems with remote execution; to detect them, [troubleshoot in a Docker container](#troubleshooting-docker-container) -in addition to troubleshooting natively. - -### Step 1: Run the build - -1. Add the `--config=docker-sandbox` flag to the Bazel command that executes - your build. For example: - - ```posix-terminal - bazel --bazelrc=.bazelrc build --config=docker-sandbox {{ '' }}target{{ '' }} - ``` - -2. Run the build and wait for it to complete. The build will run up to four - times slower than normal due to the Docker sandbox feature. - -You may encounter the following error: - -```none {:.devsite-disable-click-to-copy} -ERROR: 'docker' is an invalid value for docker spawn strategy. -``` - -If you do, run the build again with the `--experimental_docker_verbose` flag. -This flag enables verbose error messages. This error is typically caused by a -faulty Docker installation or lack of permissions to execute it under the -current user account. See the [Docker documentation](https://docs.docker.com/install/linux/linux-postinstall/) -for more information. If problems persist, skip ahead to [Troubleshooting in a Docker container](#troubleshooting-docker-container). - -### Step 2: Resolve detected issues - -The following are the most commonly encountered issues and their workarounds. 
- -* **A file, tool, binary, or resource referenced by the Bazel runfiles tree is - missing.**. Confirm that all dependencies of the affected targets have been - [explicitly declared](/concepts/dependencies). See - [Managing implicit dependencies](/remote/rules#manage-dependencies) - for more information. - -* **A file, tool, binary, or resource referenced by an absolute path or the `PATH` - variable is missing.** Confirm that all required tools are installed within - the toolchain container and use [toolchain rules](/extending/toolchains) to properly - declare dependencies pointing to the missing resource. See - [Invoking build tools through toolchain rules](/remote/rules#invoking-build-tools-through-toolchain-rules) - for more information. - -* **A binary execution fails.** One of the build rules is referencing a binary - incompatible with the execution environment (the Docker container). See - [Managing platform-dependent binaries](/remote/rules#manage-binaries) - for more information. If you cannot resolve the issue, contact [bazel-discuss@google.com](mailto:bazel-discuss@google.com) - for help. - -* **A file from `@local-jdk` is missing or causing errors.** The Java binaries - on your local machine are leaking into the build while being incompatible with - it. Use [`java_toolchain`](/reference/be/java#java_toolchain) - in your rules and targets instead of `@local_jdk`. Contact [bazel-discuss@google.com](mailto:bazel-discuss@google.com) if you need further help. - -* **Other errors.** Contact [bazel-discuss@google.com](mailto:bazel-discuss@google.com) for help. - -## Troubleshooting in a Docker container - -With this method, Bazel runs inside a host Docker container, and Bazel's build -actions execute inside individual toolchain containers spawned by the Docker -sandbox feature. The sandbox spawns a brand new toolchain container for each -build action and only one action executes in each toolchain container. - -This method provides more granular control of tools installed in the host -environment. By separating the execution of the build from the execution of its -build actions and keeping the installed tooling to a minimum, you can verify -whether your build has any dependencies on the local execution environment. - -### Step 1: Build the container - -Note: The commands below are tailored specifically for a `debian:stretch` base. -For other bases, modify them as necessary. - -1. Create a `Dockerfile` that creates the Docker container and installs Bazel - with a minimal set of build tools: - - ``` - FROM debian:stretch - - RUN apt-get update && apt-get install -y apt-transport-https curl software-properties-common git gcc gnupg2 g++ openjdk-8-jdk-headless python-dev zip wget vim - - RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - - - RUN add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian $(lsb_release -cs) stable" - - RUN apt-get update && apt-get install -y docker-ce - - RUN wget https://releases.bazel.build//release/bazel--installer-linux-x86_64.sh -O ./bazel-installer.sh && chmod 755 ./bazel-installer.sh - - RUN ./bazel-installer.sh - ``` - -2. Build the container as `bazel_container`: - - ```posix-terminal - docker build -t bazel_container - < Dockerfile - ``` - -### Step 2: Start the container - -Start the Docker container using the command shown below. In the command, -substitute the path to the source code on your host that you want to build. 
- -```posix-terminal -docker run -it \ - -v /var/run/docker.sock:/var/run/docker.sock \ - -v /tmp:/tmp \ - -v {{ '' }}your source code directory{{ '' }}:/src \ - -w /src \ - bazel_container \ - /bin/bash -``` - -This command runs the container as root, mapping the docker socket, and mounting -the `/tmp` directory. This allows Bazel to spawn other Docker containers and to -use directories under `/tmp` to share files with those containers. Your source -code is available at `/src` inside the container. - -The command intentionally starts from a `debian:stretch` base container that -includes binaries incompatible with the `rbe-ubuntu16-04` container used as a -toolchain container. If binaries from the local environment are leaking into the -toolchain container, they will cause build errors. - -### Step 3: Test the container - -Run the following commands from inside the Docker container to test it: - -```posix-terminal -docker ps - -bazel version -``` - -### Step 4: Run the build - -Run the build as shown below. The output user is root so that it corresponds to -a directory that is accessible with the same absolute path from inside the host -container in which Bazel runs, from the toolchain containers spawned by the Docker -sandbox feature in which Bazel's build actions are running, and from the local -machine on which the host and action containers run. - -```posix-terminal -bazel --output_user_root=/tmp/bazel_docker_root --bazelrc=.bazelrc \ build --config=docker-sandbox {{ '' }}target{{ '' }} -``` - -### Step 5: Resolve detected issues - -You can resolve build failures as follows: - -* If the build fails with an "out of disk space" error, you can increase this - limit by starting the host container with the flag `--memory=XX` where `XX` - is the allocated disk space in gigabytes. This is experimental and may - result in unpredictable behavior. - -* If the build fails during the analysis or loading phases, one or more of - your build rules declared in the WORKSPACE file are not compatible with - remote execution. See [Adapting Bazel Rules for Remote Execution](/remote/rules) - for possible causes and workarounds. - -* If the build fails for any other reason, see the troubleshooting steps in [Step 2: Resolve detected issues](#start-container). diff --git a/8.2.1/remote/workspace.mdx b/8.2.1/remote/workspace.mdx deleted file mode 100644 index ae0aea5..0000000 --- a/8.2.1/remote/workspace.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: 'Finding Non-Hermetic Behavior in WORKSPACE Rules' ---- - - - -In the following, a host machine is the machine where Bazel runs. - -When using remote execution, the actual build and/or test steps are not -happening on the host machine, but are instead sent off to the remote execution -system. However, the steps involved in resolving workspace rules are happening -on the host machine. If your workspace rules access information about the -host machine for use during execution, your build is likely to break due to -incompatibilities between the environments. - -As part of [adapting Bazel rules for remote -execution](/remote/rules), you need to find such workspace rules -and fix them. This page describes how to find potentially problematic workspace -rules using the workspace log. - - -## Finding non-hermetic rules - -[Workspace rules](/reference/be/workspace) allow the developer to add dependencies to -external workspaces, but they are rich enough to allow arbitrary processing to -happen in the process. 
All related commands are happening locally and can be a -potential source of non-hermeticity. Usually non-hermetic behavior is -introduced through -[`repository_ctx`](/rules/lib/builtins/repository_ctx) which allows interacting -with the host machine. - -Starting with Bazel 0.18, you can get a log of some potentially non-hermetic -actions by adding the flag `--experimental_workspace_rules_log_file=[PATH]` to -your Bazel command. Here `[PATH]` is a filename under which the log will be -created. - -Things to note: - -* the log captures the events as they are executed. If some steps are - cached, they will not show up in the log, so to get a full result, don't - forget to run `bazel clean --expunge` beforehand. - -* Sometimes functions might be re-executed, in which case the related - events will show up in the log multiple times. - -* Workspace rules currently only log Starlark events. - - Note: These particular rules do not cause hermiticity concerns as long - as a hash is specified. - -To find what was executed during workspace initialization: - -1. Run `bazel clean --expunge`. This command will clean your local cache and - any cached repositories, ensuring that all initialization will be re-run. - -2. Add `--experimental_workspace_rules_log_file=/tmp/workspacelog` to your - Bazel command and run the build. - - This produces a binary proto file listing messages of type - [WorkspaceEvent](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/bazel/debug/workspace_log.proto?q=WorkspaceEvent) - -3. Download the Bazel source code and navigate to the Bazel folder by using - the command below. You need the source code to be able to parse the - workspace log with the - [workspacelog parser](https://source.bazel.build/bazel/+/master:src/tools/workspacelog/). - - ```posix-terminal - git clone https://github.com/bazelbuild/bazel.git - - cd bazel - ``` - -4. In the Bazel source code repo, convert the whole workspace log to text. - - ```posix-terminal - bazel build src/tools/workspacelog:parser - - bazel-bin/src/tools/workspacelog/parser --log_path=/tmp/workspacelog > /tmp/workspacelog.txt - ``` - -5. The output may be quite verbose and include output from built in Bazel - rules. - - To exclude specific rules from the output, use `--exclude_rule` option. - For example: - - ```posix-terminal - bazel build src/tools/workspacelog:parser - - bazel-bin/src/tools/workspacelog/parser --log_path=/tmp/workspacelog \ - --exclude_rule "//external:local_config_cc" \ - --exclude_rule "//external:dep" > /tmp/workspacelog.txt - ``` - -5. Open `/tmp/workspacelog.txt` and check for unsafe operations. - -The log consists of -[WorkspaceEvent](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/bazel/debug/workspace_log.proto?q=WorkspaceEvent) -messages outlining certain potentially non-hermetic actions performed on a -[`repository_ctx`](/rules/lib/builtins/repository_ctx). - -The actions that have been highlighted as potentially non-hermetic are as follows: - -* `execute`: executes an arbitrary command on the host environment. Check if - these may introduce any dependencies on the host environment. - -* `download`, `download_and_extract`: to ensure hermetic builds, make sure - that sha256 is specified - -* `file`, `template`: this is not non-hermetic in itself, but may be a mechanism - for introducing dependencies on the host environment into the repository. 
- Ensure that you understand where the input comes from, and that it does not - depend on the host environment. - -* `os`: this is not non-hermetic in itself, but an easy way to get dependencies - on the host environment. A hermetic build would generally not call this. - In evaluating whether your usage is hermetic, keep in mind that this is - running on the host and not on the workers. Getting environment specifics - from the host is generally not a good idea for remote builds. - -* `symlink`: this is normally safe, but look for red flags. Any symlinks to - outside the repository or to an absolute path would cause problems on the - remote worker. If the symlink is created based on host machine properties - it would probably be problematic as well. - -* `which`: checking for programs installed on the host is usually problematic - since the workers may have different configurations. diff --git a/8.2.1/rules/bzl-style.mdx b/8.2.1/rules/bzl-style.mdx deleted file mode 100644 index 941028a..0000000 --- a/8.2.1/rules/bzl-style.mdx +++ /dev/null @@ -1,212 +0,0 @@ ---- -title: '.bzl style guide' ---- - - - -This page covers basic style guidelines for Starlark and also includes -information on macros and rules. - -[Starlark](/rules/language) is a -language that defines how software is built, and as such it is both a -programming and a configuration language. - -You will use Starlark to write `BUILD` files, macros, and build rules. Macros and -rules are essentially meta-languages - they define how `BUILD` files are written. -`BUILD` files are intended to be simple and repetitive. - -All software is read more often than it is written. This is especially true for -Starlark, as engineers read `BUILD` files to understand dependencies of their -targets and details of their builds. This reading will often happen in passing, -in a hurry, or in parallel to accomplishing some other task. Consequently, -simplicity and readability are very important so that users can parse and -comprehend `BUILD` files quickly. - -When a user opens a `BUILD` file, they quickly want to know the list of targets in -the file; or review the list of sources of that C++ library; or remove a -dependency from that Java binary. Each time you add a layer of abstraction, you -make it harder for a user to do these tasks. - -`BUILD` files are also analyzed and updated by many different tools. Tools may not -be able to edit your `BUILD` file if it uses abstractions. Keeping your `BUILD` -files simple will allow you to get better tooling. As a code base grows, it -becomes more and more frequent to do changes across many `BUILD` files in order to -update a library or do a cleanup. - -Important: Do not create a variable or macro just to avoid some amount of -repetition in `BUILD` files. Your `BUILD` file should be easily readable both by -developers and tools. The -[DRY](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself) principle doesn't -really apply here. - -## General advice - -* Use [Buildifier](https://github.com/bazelbuild/buildtools/tree/master/buildifier#linter) - as a formatter and linter. -* Follow [testing guidelines](/rules/testing). - -## Style - -### Python style - -When in doubt, follow the -[PEP 8 style guide](https://www.python.org/dev/peps/pep-0008/) where possible. -In particular, use four rather than two spaces for indentation to follow the -Python convention. - -Since -[Starlark is not Python](/rules/language#differences-with-python), -some aspects of Python style do not apply. 
For example, PEP 8 advises that -comparisons to singletons be done with `is`, which is not an operator in -Starlark. - - -### Docstring - -Document files and functions using [docstrings](https://github.com/bazelbuild/buildtools/blob/master/WARNINGS.md#function-docstring). -Use a docstring at the top of each `.bzl` file, and a docstring for each public -function. - -### Document rules and aspects - -Rules and aspects, along with their attributes, as well as providers and their -fields, should be documented using the `doc` argument. - -### Naming convention - -* Variables and function names use lowercase with words separated by - underscores (`[a-z][a-z0-9_]*`), such as `cc_library`. -* Top-level private values start with one underscore. Bazel enforces that - private values cannot be used from other files. Local variables should not - use the underscore prefix. - -### Line length - -As in `BUILD` files, there is no strict line length limit as labels can be long. -When possible, try to use at most 79 characters per line (following Python's -style guide, [PEP 8](https://www.python.org/dev/peps/pep-0008/)). This guideline -should not be enforced strictly: editors should display more than 80 columns, -automated changes will frequently introduce longer lines, and humans shouldn't -spend time splitting lines that are already readable. - -### Keyword arguments - -In keyword arguments, spaces around the equal sign are preferred: - -```python -def fct(name, srcs): - filtered_srcs = my_filter(source = srcs) - native.cc_library( - name = name, - srcs = filtered_srcs, - testonly = True, - ) -``` - -### Boolean values - -Prefer values `True` and `False` (rather than of `1` and `0`) for boolean values -(such as when using a boolean attribute in a rule). - -### Use print only for debugging - -Do not use the `print()` function in production code; it is only intended for -debugging, and will spam all direct and indirect users of your `.bzl` file. The -only exception is that you may submit code that uses `print()` if it is disabled -by default and can only be enabled by editing the source -- for example, if all -uses of `print()` are guarded by `if DEBUG:` where `DEBUG` is hardcoded to -`False`. Be mindful of whether these statements are useful enough to justify -their impact on readability. - -## Macros - -A macro is a function which instantiates one or more rules during the loading -phase. In general, use rules whenever possible instead of macros. The build -graph seen by the user is not the same as the one used by Bazel during the -build - macros are expanded *before Bazel does any build graph analysis.* - -Because of this, when something goes wrong, the user will need to understand -your macro's implementation to troubleshoot build problems. Additionally, `bazel -query` results can be hard to interpret because targets shown in the results -come from macro expansion. Finally, aspects are not aware of macros, so tooling -depending on aspects (IDEs and others) might fail. - -A safe use for macros is for defining additional targets intended to be -referenced directly at the Bazel CLI or in BUILD files: In that case, only the -*end users* of those targets need to know about them, and any build problems -introduced by macros are never far from their usage. 
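-
-As an illustration, here is a hedged sketch of a macro that defines a main
-target plus one generated helper target, following the best practices listed
-next (the rule choices and the `_version` helper are made up for this example,
-not a prescribed pattern):
-
-```python
-def cc_binary_with_version(name, **kwargs):
-    """Defines a cc_binary plus a generated target holding a version file."""
-
-    # Generated helper target: name derived from `name`, private, and tagged
-    # `manual` so that wildcard patterns like `:all` do not expand to it.
-    native.genrule(
-        name = "%s_version" % name,
-        outs = ["%s_version.txt" % name],
-        cmd = "echo 1.0 > $@",
-        visibility = ["//visibility:private"],
-        tags = ["manual"],
-    )
-
-    # Main target: carries the macro's `name` and forwards common attributes
-    # such as `visibility` or `testonly` via **kwargs.
-    native.cc_binary(
-        name = name,
-        data = [":%s_version" % name],
-        **kwargs
-    )
-```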
-
-For macros that define generated targets (implementation details of the macro
-which are not supposed to be referred to at the CLI or depended on by targets
-not instantiated by that macro), follow these best practices:
-
-* A macro should take a `name` argument and define a target with that name.
-  That target becomes that macro's *main target*.
-* Generated targets, that is, all other targets defined by a macro, should:
-    * Have their names prefixed by `<name>` or `_<name>`. For example, using
-      `name = '%s_bar' % (name)`.
-    * Have restricted visibility (`//visibility:private`), and
-    * Have a `manual` tag to avoid expansion in wildcard targets (`:all`,
-      `...`, `:*`, etc.).
-* The `name` should only be used to derive names of targets defined by the
-  macro, and not for anything else. For example, don't use the name to derive
-  a dependency or input file that is not generated by the macro itself.
-* All the targets created in the macro should be coupled in some way to the
-  main target.
-* Conventionally, `name` should be the first argument when defining a macro.
-* Keep the parameter names in the macro consistent. If a parameter is passed
-  as an attribute value to the main target, keep its name the same. If a macro
-  parameter serves the same purpose as a common rule attribute, such as
-  `deps`, name it as you would the attribute (see below).
-* When calling a macro, use only keyword arguments. This is consistent with
-  rules, and greatly improves readability.
-
-Engineers often write macros when the Starlark API of relevant rules is
-insufficient for their specific use case, regardless of whether the rule is
-defined within Bazel in native code, or in Starlark. If you're facing this
-problem, ask the rule author if they can extend the API to accomplish your
-goals.
-
-As a rule of thumb, the more macros resemble the rules, the better.
-
-See also [macros](/extending/macros#conventions).
-
-## Rules
-
-* Rules, aspects, and their attributes should use lower_case names ("snake
-  case").
-* Rule names are nouns that describe the main kind of artifact produced by the
-  rule, from the point of view of its dependencies (or for leaf rules, the
-  user). This is not necessarily a file suffix. For instance, a rule that
-  produces C++ artifacts meant to be used as Python extensions might be called
-  `py_extension`. For most languages, typical rules include:
-    * `*_library` - a compilation unit or "module".
-    * `*_binary` - a target producing an executable or a deployment unit.
-    * `*_test` - a test target. This can include multiple tests. Expect all
-      tests in a `*_test` target to be variations on the same theme, for
-      example, testing a single library.
-    * `*_import` - a target encapsulating a pre-compiled artifact, such as a
-      `.jar`, or a `.dll` that is used during compilation.
-* Use consistent names and types for attributes. Some generally applicable
-  attributes include:
-    * `srcs`: `label_list`, allowing files: source files, typically
-      human-authored.
-    * `deps`: `label_list`, typically *not* allowing files: compilation
-      dependencies.
-    * `data`: `label_list`, allowing files: data files, such as test data etc.
-    * `runtime_deps`: `label_list`: runtime dependencies that are not needed
-      for compilation.
-* For any attributes with non-obvious behavior (for example, string templates
-  with special substitutions, or tools that are invoked with specific
-  requirements), provide documentation using the `doc` keyword argument to the
-  attribute's declaration (`attr.label_list()` or similar).
-* Rule implementation functions should almost always be private functions - (named with a leading underscore). A common style is to give the - implementation function for `myrule` the name `_myrule_impl`. -* Pass information between your rules using a well-defined - [provider](/extending/rules#providers) interface. Declare and document provider - fields. -* Design your rule with extensibility in mind. Consider that other rules might - want to interact with your rule, access your providers, and reuse the - actions you create. -* Follow [performance guidelines](/rules/performance) in your rules. diff --git a/8.2.1/rules/challenges.mdx b/8.2.1/rules/challenges.mdx deleted file mode 100644 index 10ff737..0000000 --- a/8.2.1/rules/challenges.mdx +++ /dev/null @@ -1,223 +0,0 @@ ---- -title: 'Challenges of Writing Rules' ---- - - - -This page gives a high-level overview of the specific issues and challenges -of writing efficient Bazel rules. - -## Summary Requirements - -* Assumption: Aim for Correctness, Throughput, Ease of Use & Latency -* Assumption: Large Scale Repositories -* Assumption: BUILD-like Description Language -* Historic: Hard Separation between Loading, Analysis, and Execution is - Outdated, but still affects the API -* Intrinsic: Remote Execution and Caching are Hard -* Intrinsic: Using Change Information for Correct and Fast Incremental Builds - requires Unusual Coding Patterns -* Intrinsic: Avoiding Quadratic Time and Memory Consumption is Hard - -## Assumptions - -Here are some assumptions made about the build system, such as need for -correctness, ease of use, throughput, and large scale repositories. The -following sections address these assumptions and offer guidelines to ensure -rules are written in an effective manner. - -### Aim for correctness, throughput, ease of use & latency - -We assume that the build system needs to be first and foremost correct with -respect to incremental builds. For a given source tree, the output of the -same build should always be the same, regardless of what the output tree looks -like. In the first approximation, this means Bazel needs to know every single -input that goes into a given build step, such that it can rerun that step if any -of the inputs change. There are limits to how correct Bazel can get, as it leaks -some information such as date / time of the build, and ignores certain types of -changes such as changes to file attributes. [Sandboxing](/docs/sandboxing) -helps ensure correctness by preventing reads to undeclared input files. Besides -the intrinsic limits of the system, there are a few known correctness issues, -most of which are related to Fileset or the C++ rules, which are both hard -problems. We have long-term efforts to fix these. - -The second goal of the build system is to have high throughput; we are -permanently pushing the boundaries of what can be done within the current -machine allocation for a remote execution service. If the remote execution -service gets overloaded, nobody can get work done. - -Ease of use comes next. Of multiple correct approaches with the same (or -similar) footprint of the remote execution service, we choose the one that is -easier to use. - -Latency denotes the time it takes from starting a build to getting the intended -result, whether that is a test log from a passing or failing test, or an error -message that a `BUILD` file has a typo. 
- -Note that these goals often overlap; latency is as much a function of throughput -of the remote execution service as is correctness relevant for ease of use. - -### Large scale repositories - -The build system needs to operate at the scale of large repositories where large -scale means that it does not fit on a single hard drive, so it is impossible to -do a full checkout on virtually all developer machines. A medium-sized build -will need to read and parse tens of thousands of `BUILD` files, and evaluate -hundreds of thousands of globs. While it is theoretically possible to read all -`BUILD` files on a single machine, we have not yet been able to do so within a -reasonable amount of time and memory. As such, it is critical that `BUILD` files -can be loaded and parsed independently. - -### BUILD-like description language - -In this context, we assume a configuration language that is -roughly similar to `BUILD` files in declaration of library and binary rules -and their interdependencies. `BUILD` files can be read and parsed independently, -and we avoid even looking at source files whenever we can (except for -existence). - -## Historic - -There are differences between Bazel versions that cause challenges and some -of these are outlined in the following sections. - -### Hard separation between loading, analysis, and execution is outdated but still affects the API - -Technically, it is sufficient for a rule to know the input and output files of -an action just before the action is sent to remote execution. However, the -original Bazel code base had a strict separation of loading packages, then -analyzing rules using a configuration (command-line flags, essentially), and -only then running any actions. This distinction is still part of the rules API -today, even though the core of Bazel no longer requires it (more details below). - -That means that the rules API requires a declarative description of the rule -interface (what attributes it has, types of attributes). There are some -exceptions where the API allows custom code to run during the loading phase to -compute implicit names of output files and implicit values of attributes. For -example, a java_library rule named 'foo' implicitly generates an output named -'libfoo.jar', which can be referenced from other rules in the build graph. - -Furthermore, the analysis of a rule cannot read any source files or inspect the -output of an action; instead, it needs to generate a partial directed bipartite -graph of build steps and output file names that is only determined from the rule -itself and its dependencies. - -## Intrinsic - -There are some intrinsic properties that make writing rules challenging and -some of the most common ones are described in the following sections. - -### Remote execution and caching are hard - -Remote execution and caching improve build times in large repositories by -roughly two orders of magnitude compared to running the build on a single -machine. However, the scale at which it needs to perform is staggering: Google's -remote execution service is designed to handle a huge number of requests per -second, and the protocol carefully avoids unnecessary roundtrips as well as -unnecessary work on the service side. - -At this time, the protocol requires that the build system knows all inputs to a -given action ahead of time; the build system then computes a unique action -fingerprint, and asks the scheduler for a cache hit. 
If a cache hit is found, -the scheduler replies with the digests of the output files; the files itself are -addressed by digest later on. However, this imposes restrictions on the Bazel -rules, which need to declare all input files ahead of time. - -### Using change information for correct and fast incremental builds requires unusual coding patterns - -Above, we argued that in order to be correct, Bazel needs to know all the input -files that go into a build step in order to detect whether that build step is -still up-to-date. The same is true for package loading and rule analysis, and we -have designed [Skyframe](/reference/skyframe) to handle this -in general. Skyframe is a graph library and evaluation framework that takes a -goal node (such as 'build //foo with these options'), and breaks it down into -its constituent parts, which are then evaluated and combined to yield this -result. As part of this process, Skyframe reads packages, analyzes rules, and -executes actions. - -At each node, Skyframe tracks exactly which nodes any given node used to compute -its own output, all the way from the goal node down to the input files (which -are also Skyframe nodes). Having this graph explicitly represented in memory -allows the build system to identify exactly which nodes are affected by a given -change to an input file (including creation or deletion of an input file), doing -the minimal amount of work to restore the output tree to its intended state. - -As part of this, each node performs a dependency discovery process. Each -node can declare dependencies, and then use the contents of those dependencies -to declare even further dependencies. In principle, this maps well to a -thread-per-node model. However, medium-sized builds contain hundreds of -thousands of Skyframe nodes, which isn't easily possible with current Java -technology (and for historical reasons, we're currently tied to using Java, so -no lightweight threads and no continuations). - -Instead, Bazel uses a fixed-size thread pool. However, that means that if a node -declares a dependency that isn't available yet, we may have to abort that -evaluation and restart it (possibly in another thread), when the dependency is -available. This, in turn, means that nodes should not do this excessively; a -node that declares N dependencies serially can potentially be restarted N times, -costing O(N^2) time. Instead, we aim for up-front bulk declaration of -dependencies, which sometimes requires reorganizing the code, or even splitting -a node into multiple nodes to limit the number of restarts. - -Note that this technology isn't currently available in the rules API; instead, -the rules API is still defined using the legacy concepts of loading, analysis, -and execution phases. However, a fundamental restriction is that all accesses to -other nodes have to go through the framework so that it can track the -corresponding dependencies. Regardless of the language in which the build system -is implemented or in which the rules are written (they don't have to be the -same), rule authors must not use standard libraries or patterns that bypass -Skyframe. For Java, that means avoiding java.io.File as well as any form of -reflection, and any library that does either. Libraries that support dependency -injection of these low-level interfaces still need to be setup correctly for -Skyframe. - -This strongly suggests to avoid exposing rule authors to a full language runtime -in the first place. 
The danger of accidental use of such APIs is just too big - -several Bazel bugs in the past were caused by rules using unsafe APIs, even -though the rules were written by the Bazel team or other domain experts. - -### Avoiding quadratic time and memory consumption is hard - -To make matters worse, apart from the requirements imposed by Skyframe, the -historical constraints of using Java, and the outdatedness of the rules API, -accidentally introducing quadratic time or memory consumption is a fundamental -problem in any build system based on library and binary rules. There are two -very common patterns that introduce quadratic memory consumption (and therefore -quadratic time consumption). - -1. Chains of Library Rules - -Consider the case of a chain of library rules A depends on B, depends on C, and -so on. Then, we want to compute some property over the transitive closure of -these rules, such as the Java runtime classpath, or the C++ linker command for -each library. Naively, we might take a standard list implementation; however, -this already introduces quadratic memory consumption: the first library -contains one entry on the classpath, the second two, the third three, and so -on, for a total of 1+2+3+...+N = O(N^2) entries. - -2. Binary Rules Depending on the Same Library Rules - -Consider the case where a set of binaries that depend on the same library -rules — such as if you have a number of test rules that test the same -library code. Let's say out of N rules, half the rules are binary rules, and -the other half library rules. Now consider that each binary makes a copy of -some property computed over the transitive closure of library rules, such as -the Java runtime classpath, or the C++ linker command line. For example, it -could expand the command line string representation of the C++ link action. N/2 -copies of N/2 elements is O(N^2) memory. - -#### Custom collections classes to avoid quadratic complexity - -Bazel is heavily affected by both of these scenarios, so we introduced a set of -custom collection classes that effectively compress the information in memory by -avoiding the copy at each step. Almost all of these data structures have set -semantics, so we called it -[depset](/rules/lib/depset) -(also known as `NestedSet` in the internal implementation). The majority of -changes to reduce Bazel's memory consumption over the past several years were -changes to use depsets instead of whatever was previously used. - -Unfortunately, usage of depsets does not automatically solve all the issues; -in particular, even just iterating over a depset in each rule re-introduces -quadratic time consumption. Internally, NestedSets also has some helper methods -to facilitate interoperability with normal collections classes; unfortunately, -accidentally passing a NestedSet to one of these methods leads to copying -behavior, and reintroduces quadratic memory consumption. diff --git a/8.2.1/rules/deploying.mdx b/8.2.1/rules/deploying.mdx deleted file mode 100644 index 3fe2c86..0000000 --- a/8.2.1/rules/deploying.mdx +++ /dev/null @@ -1,223 +0,0 @@ ---- -title: 'Deploying Rules' ---- - - - -This page is for rule writers who are planning to make their rules available -to others. - -We recommend you start a new ruleset from the template repository: -https://github.com/bazel-contrib/rules-template -That template follows the recommendations below, and includes API documentation generation -and sets up a CI/CD pipeline to make it trivial to distribute your ruleset. 
-
-## Hosting and naming rules
-
-New rules should go into their own GitHub repository under your organization.
-Start a thread on [GitHub](https://github.com/bazelbuild/bazel/discussions)
-if you feel like your rules belong in the [bazelbuild](https://github.com/bazelbuild)
-organization.
-
-Repository names for Bazel rules are standardized on the following format:
-`$ORGANIZATION/rules_$NAME`.
-See [examples on GitHub](https://github.com/search?q=rules+bazel&type=Repositories).
-For consistency, you should follow this same format when publishing your Bazel rules.
-
-Make sure to use a descriptive GitHub repository description and `README.md`
-title, for example:
-
-* Repository name: `bazelbuild/rules_go`
-* Repository description: *Go rules for Bazel*
-* Repository tags: `golang`, `bazel`
-* `README.md` header: *Go rules for [Bazel](https://bazel.build)*
-(note the link to https://bazel.build which will guide users who are unfamiliar
-with Bazel to the right place)
-
-Rules can be grouped either by language (such as Scala), runtime platform
-(such as Android), or framework (such as Spring).
-
-## Repository content
-
-Every rule repository should have a certain layout so that users can quickly
-understand new rules.
-
-For example, when writing new rules for the (make-believe)
-`mockascript` language, the rule repository would have the following structure:
-
-```
-/
-  LICENSE
-  README
-  MODULE.bazel
-  mockascript/
-    constraints/
-      BUILD
-    runfiles/
-      BUILD
-      runfiles.mocs
-    BUILD
-    defs.bzl
-  tests/
-    BUILD
-    some_test.sh
-    another_test.py
-  examples/
-    BUILD
-    bin.mocs
-    lib.mocs
-    test.mocs
-```
-
-### MODULE.bazel
-
-In the project's `MODULE.bazel`, you should define the name that users will use
-to reference your rules. If your rules belong to the
-[bazelbuild](https://github.com/bazelbuild) organization, you must use
-`rules_<lang>` (such as `rules_mockascript`). Otherwise, you should name your
-repository `<org>_rules_<lang>` (such as `build_stack_rules_proto`). Please
-start a thread on [GitHub](https://github.com/bazelbuild/bazel/discussions)
-if you feel like your rules should follow the convention for rules in the
-[bazelbuild](https://github.com/bazelbuild) organization.
-
-In the following sections, assume the repository belongs to the
-[bazelbuild](https://github.com/bazelbuild) organization.
-
-```
-module(name = "rules_mockascript")
-```
-
-### README
-
-At the top level, there should be a `README` that contains a brief description
-of your ruleset, and the API users should expect.
-
-### Rules
-
-Oftentimes there will be multiple rules provided by your repository. Create a
-directory named by the language and provide an entry point - a `defs.bzl` file
-exporting all rules (also include a `BUILD` file so the directory is a package).
-For `rules_mockascript` that means there will be a directory named
-`mockascript`, and a `BUILD` file and a `defs.bzl` file inside:
-
-```
-/
-  mockascript/
-    BUILD
-    defs.bzl
-```
-
-### Constraints
-
-If your rule defines
-[toolchain](/extending/toolchains) rules,
-it's possible that you'll need to define custom `constraint_setting`s and/or
-`constraint_value`s. Put these into a `//<lang>/constraints` package. Your
-directory structure will look like this:
-
-```
-/
-  mockascript/
-    constraints/
-      BUILD
-    BUILD
-    defs.bzl
-```
-
-Please read
-[github.com/bazelbuild/platforms](https://github.com/bazelbuild/platforms)
-for best practices, and to see what constraints are already present, and
-consider contributing your constraints there if they are language independent.
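-
-As a hedged sketch, `mockascript/constraints/BUILD` might declare a custom
-setting like this (the setting and value names are as make-believe as the
-language itself):
-
-```python
-constraint_setting(name = "mockascript_runtime")
-
-constraint_value(
-    name = "fast_runtime",
-    constraint_setting = ":mockascript_runtime",
-)
-
-constraint_value(
-    name = "debug_runtime",
-    constraint_setting = ":mockascript_runtime",
-)
-```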
-
-Be mindful when introducing custom constraints: all users of your rules will
-use them to perform platform specific logic in their `BUILD` files (for example,
-using [selects](/reference/be/functions#select)).
-With custom constraints, you define a language that the whole Bazel ecosystem
-will speak.
-
-### Runfiles library
-
-If your rule provides a standard library for accessing runfiles, it should be
-in the form of a library target located at `//<lang>/runfiles` (an abbreviation
-of `//<lang>/runfiles:runfiles`). User targets that need to access their data
-dependencies will typically add this target to their `deps` attribute.
-
-### Repository rules
-
-#### Dependencies
-
-Your rules might have external dependencies, which you'll need to specify in
-your MODULE.bazel file.
-
-#### Registering toolchains
-
-Your rules might also register toolchains, which you can also specify in the
-MODULE.bazel file.
-
-Note that in order to resolve toolchains in the analysis phase Bazel needs to
-analyze all `toolchain` targets that are registered. Bazel will not need to
-analyze all targets referenced by the `toolchain.toolchain` attribute. If in
-order to register toolchains you need to perform complex computation in the
-repository, consider splitting the repository with `toolchain` targets from the
-repository with `<lang>_toolchain` targets. The former will always be fetched,
-and the latter will only be fetched when the user actually needs to build
-`<lang>` code.
-
-
-#### Release snippet
-
-In your release announcement provide a snippet that your users can copy-paste
-into their `MODULE.bazel` file. This snippet in general will look as follows:
-
-```
-bazel_dep(name = "rules_<lang>", version = "<version>")
-```
-
-
-### Tests
-
-There should be tests that verify that the rules are working as expected. This
-can either be in the standard location for the language the rules are for or a
-`tests/` directory at the top level.
-
-### Examples (optional)
-
-It is useful to users to have an `examples/` directory that shows users a couple
-of basic ways that the rules can be used.
-
-## CI/CD
-
-Many rulesets use GitHub Actions. See the configuration used in the
-[rules-template](https://github.com/bazel-contrib/rules-template/tree/main/.github/workflows)
-repo, which is simplified using a "reusable workflow" hosted in the bazel-contrib
-org. `ci.yaml` runs tests on each PR and `main` commit, and `release.yaml` runs
-anytime you push a tag to the repository.
-See comments in the rules-template repo for more information.
-
-If your repository is under the [bazelbuild organization](https://github.com/bazelbuild),
-you can [ask to add](https://github.com/bazelbuild/continuous-integration/issues/new?template=adding-your-project-to-bazel-ci.md&title=Request+to+add+new+project+%5BPROJECT_NAME%5D&labels=new-project)
-it to [ci.bazel.build](http://ci.bazel.build).
-
-## Documentation
-
-See the [Stardoc documentation](https://github.com/bazelbuild/stardoc) for
-instructions on how to comment your rules so that documentation can be generated
-automatically.
-
-The [rules-template docs/ folder](https://github.com/bazel-contrib/rules-template/tree/main/docs)
-shows a simple way to ensure the Markdown content in the `docs/` folder is always up-to-date
-as Starlark files are updated.
-
-## FAQs
-
-### Why can't we add our rule to the main Bazel GitHub repository?
-
-We want to decouple rules from Bazel releases as much as possible. It's clearer
-who owns individual rules, reducing the load on Bazel developers.
For our users, -decoupling makes it easier to modify, upgrade, downgrade, and replace rules. -Contributing to rules can be lighter weight than contributing to Bazel - -depending on the rules -, including full submit access to the corresponding -GitHub repository. Getting submit access to Bazel itself is a much more involved -process. - -The downside is a more complicated one-time installation process for our users: -they have to add a dependency on your ruleset in their `MODULE.bazel` file. - -We used to have all of the rules in the Bazel repository (under -`//tools/build_rules` or `//tools/build_defs`). We still have a couple rules -there, but we are working on moving the remaining rules out. diff --git a/8.2.1/rules/errors/read-only-variable.mdx b/8.2.1/rules/errors/read-only-variable.mdx deleted file mode 100644 index 2bfde65..0000000 --- a/8.2.1/rules/errors/read-only-variable.mdx +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: 'Error: Variable x is read only' ---- - - - -A global variable cannot be reassigned. It will always point to the same object. -However, its content might change, if the value is mutable (for example, the -content of a list). Local variables don't have this restriction. - -```python -a = [1, 2] - -a[1] = 3 - -b = 3 - -b = 4 # forbidden -``` - -`ERROR: /path/ext.bzl:7:1: Variable b is read only` - -You will get a similar error if you try to redefine a function (function -overloading is not supported), for example: - -```python -def foo(x): return x + 1 - -def foo(x, y): return x + y # forbidden -``` diff --git a/8.2.1/rules/faq.mdx b/8.2.1/rules/faq.mdx deleted file mode 100644 index 5321f0b..0000000 --- a/8.2.1/rules/faq.mdx +++ /dev/null @@ -1,80 +0,0 @@ ---- -title: 'Frequently Asked Questions' ---- - - - -These are some common issues and questions with writing extensions. - -## Why is my file not produced / my action never executed? - -Bazel only executes the actions needed to produce the *requested* output files. - -* If the file you want has a label, you can request it directly: - `bazel build //pkg:myfile.txt` - -* If the file is in an output group of the target, you may need to specify that - output group on the command line: - `bazel build //pkg:mytarget --output_groups=foo` - -* If you want the file to be built automatically whenever your target is - mentioned on the command line, add it to your rule's default outputs by - returning a [`DefaultInfo`](lib/globals#DefaultInfo) provider. - -See the [Rules page](/extending/rules#requesting-output-files) for more information. - -## Why is my implementation function not executed? - -Bazel analyzes only the targets that are requested for the build. You should -either name the target on the command line, or something that depends on the -target. - -## A file is missing when my action or binary is executed - -Make sure that 1) the file has been registered as an input to the action or -binary, and 2) the script or tool being executed is accessing the file using the -correct path. - -For actions, you declare inputs by passing them to the `ctx.actions.*` function -that creates the action. The proper path for the file can be obtained using -[`File.path`](lib/File#path). - -For binaries (the executable outputs run by a `bazel run` or `bazel test` -command), you declare inputs by including them in the -[runfiles](/extending/rules#runfiles). Instead of using the `path` field, use -[`File.short_path`](lib/File#short_path), which is file's path relative to -the runfiles directory in which the binary executes. 
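-
-As a hedged sketch covering both cases (the rule and attribute names here are
-made up for illustration):
-
-```python
-def _my_tool_impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name + ".out")
-
-    # Case 1: action inputs. The file must be listed in `inputs`, and the
-    # command refers to it by File.path.
-    ctx.actions.run_shell(
-        inputs = ctx.files.data,
-        outputs = [out],
-        command = "cat {} > {}".format(ctx.files.data[0].path, out.path),
-    )
-
-    # Case 2: runfiles. A binary started by `bazel run` or `bazel test` finds
-    # the file by File.short_path, so it must be part of the runfiles.
-    return [DefaultInfo(
-        files = depset([out]),
-        runfiles = ctx.runfiles(files = ctx.files.data),
-    )]
-
-my_tool = rule(
-    implementation = _my_tool_impl,
-    attrs = {"data": attr.label_list(allow_files = True)},
-)
-```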
- -## How can I control which files are built by `bazel build //pkg:mytarget`? - -Use the [`DefaultInfo`](lib/globals#DefaultInfo) provider to -[set the default outputs](/extending/rules#requesting-output-files). - -## How can I run a program or do file I/O as part of my build? - -A tool can be declared as a target, just like any other part of your build, and -run during the execution phase to help build other targets. To create an action -that runs a tool, use [`ctx.actions.run`](lib/actions#run) and pass in the -tool as the `executable` parameter. - -During the loading and analysis phases, a tool *cannot* run, nor can you perform -file I/O. This means that tools and file contents (except the contents of BUILD -and .bzl files) cannot affect how the target and action graphs get created. - -## What if I need to access the same structured data both before and during the execution phase? - -You can format the structured data as a .bzl file. You can `load()` the file to -access it during the loading and analysis phases. You can pass it as an input or -runfile to actions and executables that need it during the execution phase. - -## How should I document Starlark code? - -For rules and rule attributes, you can pass a docstring literal (possibly -triple-quoted) to the `doc` parameter of `rule` or `attr.*()`. For helper -functions and macros, use a triple-quoted docstring literal following the format -given [here](https://github.com/bazelbuild/buildtools/blob/master/WARNINGS.md#function-docstring). -Rule implementation functions generally do not need their own docstring. - -Using string literals in the expected places makes it easier for automated -tooling to extract documentation. Feel free to use standard non-string comments -wherever it may help the reader of your code. diff --git a/8.2.1/rules/index.mdx b/8.2.1/rules/index.mdx deleted file mode 100644 index 2a6c3eb..0000000 --- a/8.2.1/rules/index.mdx +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: 'Rules' ---- - - - -The Bazel ecosystem has a growing and evolving set of rules to support popular -languages and packages. Much of Bazel's strength comes from the ability to -[define new rules](/extending/concepts) that can be used by others. - -This page describes the recommended, native, and non-native Bazel rules. - -## Recommended rules - -Here is a selection of recommended rules: - -* [Android](/docs/bazel-and-android) -* [C / C++](/docs/bazel-and-cpp) -* [Docker/OCI](https://github.com/bazel-contrib/rules_oci) -* [Go](https://github.com/bazelbuild/rules_go) -* [Haskell](https://github.com/tweag/rules_haskell) -* [Java](/docs/bazel-and-java) -* [JavaScript / NodeJS](https://github.com/bazelbuild/rules_nodejs) -* [Maven dependency management](https://github.com/bazelbuild/rules_jvm_external) -* [Objective-C](/docs/bazel-and-apple) -* [Package building](https://github.com/bazelbuild/rules_pkg) -* [Protocol Buffers](https://github.com/bazelbuild/rules_proto#protobuf-rules-for-bazel) -* [Python](https://github.com/bazelbuild/rules_python) -* [Rust](https://github.com/bazelbuild/rules_rust) -* [Scala](https://github.com/bazelbuild/rules_scala) -* [Shell](/reference/be/shell) -* [Webtesting](https://github.com/bazelbuild/rules_webtesting) (Webdriver) - -The repository [Skylib](https://github.com/bazelbuild/bazel-skylib) contains -additional functions that can be useful when writing new rules and new -macros. - -The rules above were reviewed and follow our -[requirements for recommended rules](/community/recommended-rules). 
-Contact the respective rule set's maintainers regarding issues and feature -requests. - -To find more Bazel rules, use a search engine, take a look on -[awesomebazel.com](https://awesomebazel.com/), or search on -[GitHub](https://github.com/search?o=desc&q=bazel+rules&s=stars&type=Repositories). - -## Native rules that do not apply to a specific programming language - -Native rules are shipped with the Bazel binary, they are always available in -BUILD files without a `load` statement. - -* Extra actions - - [`extra_action`](/reference/be/extra-actions#extra_action) - - [`action_listener`](/reference/be/extra-actions#action_listener) -* General - - [`filegroup`](/reference/be/general#filegroup) - - [`genquery`](/reference/be/general#genquery) - - [`test_suite`](/reference/be/general#test_suite) - - [`alias`](/reference/be/general#alias) - - [`config_setting`](/reference/be/general#config_setting) - - [`genrule`](/reference/be/general#genrule) -* Platform - - [`constraint_setting`](/reference/be/platforms-and-toolchains#constraint_setting) - - [`constraint_value`](/reference/be/platforms-and-toolchains#constraint_value) - - [`platform`](/reference/be/platforms-and-toolchains#platform) - - [`toolchain`](/reference/be/platforms-and-toolchains#toolchain) - - [`toolchain_type`](/reference/be/platforms-and-toolchains#toolchain_type) -* Workspace - - [`bind`](/reference/be/workspace#bind) - - [`local_repository`](/reference/be/workspace#local_repository) - - [`new_local_repository`](/reference/be/workspace#new_local_repository) - - [`xcode_config`](/reference/be/objective-c#xcode_config) - - [`xcode_version`](/reference/be/objective-c#xcode_version) - -## Embedded non-native rules - -Bazel also embeds additional rules written in [Starlark](/rules/language). Those can be loaded from -the `@bazel_tools` built-in external repository. - -* Repository rules - - [`git_repository`](/rules/lib/repo/git#git_repository) - - [`http_archive`](/rules/lib/repo/http#http_archive) - - [`http_file`](/rules/lib/repo/http#http_archive) - - [`http_jar`](/rules/lib/repo/http#http_jar) - - [Utility functions on patching](/rules/lib/repo/utils) diff --git a/8.2.1/rules/legacy-macro-tutorial.mdx b/8.2.1/rules/legacy-macro-tutorial.mdx deleted file mode 100644 index 44cdcfb..0000000 --- a/8.2.1/rules/legacy-macro-tutorial.mdx +++ /dev/null @@ -1,98 +0,0 @@ ---- -title: 'Creating a Legacy Macro' ---- - - - -IMPORTANT: This tutorial is for [*legacy macros*](/extending/legacy-macros). If -you only need to support Bazel 8 or newer, we recommend using [symbolic -macros](/extending/macros) instead; take a look at [Creating a Symbolic -Macro](../macro-tutorial). - -Imagine that you need to run a tool as part of your build. For example, you -may want to generate or preprocess a source file, or compress a binary. In this -tutorial, you are going to create a legacy macro that resizes an image. - -Macros are suitable for simple tasks. If you want to do anything more -complicated, for example add support for a new programming language, consider -creating a [rule](/extending/rules). Rules give you more control and flexibility. - -The easiest way to create a macro that resizes an image is to use a `genrule`: - -```starlark -genrule( - name = "logo_miniature", - srcs = ["logo.png"], - outs = ["small_logo.png"], - cmd = "convert $< -resize 100x100 $@", -) - -cc_binary( - name = "my_app", - srcs = ["my_app.cc"], - data = [":logo_miniature"], -) -``` - -If you need to resize more images, you may want to reuse the code. 
To do that, -define a function in a separate `.bzl` file, and call the file `miniature.bzl`: - -```starlark -def miniature(name, src, size = "100x100", **kwargs): - """Create a miniature of the src image. - - The generated file is prefixed with 'small_'. - """ - native.genrule( - name = name, - srcs = [src], - # Note that the line below will fail if `src` is not a filename string - outs = ["small_" + src], - cmd = "convert $< -resize " + size + " $@", - **kwargs - ) -``` - -A few remarks: - - * By convention, legacy macros have a `name` argument, just like rules. - - * To document the behavior of a legacy macro, use - [docstring](https://www.python.org/dev/peps/pep-0257/) like in Python. - - * To call a `genrule`, or any other native rule, prefix with `native.`. - - * Use `**kwargs` to forward the extra arguments to the underlying `genrule` - (it works just like in - [Python](https://docs.python.org/3/tutorial/controlflow.html#keyword-arguments)). - This is useful, so that a user can use standard attributes like - `visibility`, or `tags`. - -Now, use the macro from the `BUILD` file: - -```starlark -load("//path/to:miniature.bzl", "miniature") - -miniature( - name = "logo_miniature", - src = "image.png", -) - -cc_binary( - name = "my_app", - srcs = ["my_app.cc"], - data = [":logo_miniature"], -) -``` - -And finally, a **warning note**: the macro assumes that `src` is a filename -string (otherwise, `outs = ["small_" + src]` will fail). So `src = "image.png"` -works; but what happens if the `BUILD` file instead used `src = -"//other/package:image.png"`, or even `src = select(...)`? - -You should make sure to declare such assumptions in your macro's documentation. -Unfortunately, legacy macros, especially large ones, tend to be fragile because -it can be hard to notice and document all such assumptions in your code – and, -of course, some users of the macro won't read the documentation. We recommend, -if possible, instead using [symbolic macros](/extending/macros), which have -built\-in checks on attribute types. diff --git a/8.2.1/rules/macro-tutorial.mdx b/8.2.1/rules/macro-tutorial.mdx deleted file mode 100644 index 93825aa..0000000 --- a/8.2.1/rules/macro-tutorial.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: 'Creating a Symbolic Macro' ---- - - - -IMPORTANT: This tutorial is for [*symbolic macros*](/extending/macros) – the new -macro system introduced in Bazel 8. If you need to support older Bazel versions, -you will want to write a [legacy macro](/extending/legacy-macros) instead; take -a look at [Creating a Legacy Macro](../legacy-macro-tutorial). - -Imagine that you need to run a tool as part of your build. For example, you -may want to generate or preprocess a source file, or compress a binary. In this -tutorial, you are going to create a symbolic macro that resizes an image. - -Macros are suitable for simple tasks. If you want to do anything more -complicated, for example add support for a new programming language, consider -creating a [rule](/extending/rules). Rules give you more control and flexibility. - -The easiest way to create a macro that resizes an image is to use a `genrule`: - -```starlark -genrule( - name = "logo_miniature", - srcs = ["logo.png"], - outs = ["small_logo.png"], - cmd = "convert $< -resize 100x100 $@", -) - -cc_binary( - name = "my_app", - srcs = ["my_app.cc"], - data = [":logo_miniature"], -) -``` - -If you need to resize more images, you may want to reuse the code. 
To do that, -define an *implementation function* and a *macro declaration* in a separate -`.bzl` file, and call the file `miniature.bzl`: - -```starlark -# Implementation function -def _miniature_impl(name, visibility, src, size, **kwargs): - native.genrule( - name = name, - visibility = visibility, - srcs = [src], - outs = [name + "_small_" + src.name], - cmd = "convert $< -resize " + size + " $@", - **kwargs, - ) - -# Macro declaration -miniature = macro( - doc = """Create a miniature of the src image. - - The generated file name will be prefixed with `name + "_small_"`. - """, - implementation = _miniature_impl, - # Inherit most of genrule's attributes (such as tags and testonly) - inherit_attrs = native.genrule, - attrs = { - "src": attr.label( - doc = "Image file", - allow_single_file = True, - # Non-configurable because our genrule's output filename is - # suffixed with src's name. (We want to suffix the output file with - # srcs's name because some tools that operate on image files expect - # the files to have the right file extension.) - configurable = False, - ), - "size": attr.string( - doc = "Output size in WxH format", - default = "100x100", - ), - # Do not allow callers of miniature() to set srcs, cmd, or outs - - # _miniature_impl overrides their values when calling native.genrule() - "srcs": None, - "cmd": None, - "outs": None, - }, -) -``` - -A few remarks: - - * Symbolic macro implementation functions must have `name` and `visibility` - parameters. They should used for the macro's main target. - - * To document the behavior of a symbolic macro, use `doc` parameters for - `macro()` and its attributes. - - * To call a `genrule`, or any other native rule, use `native.`. - - * Use `**kwargs` to forward the extra inherited arguments to the underlying - `genrule` (it works just like in - [Python](https://docs.python.org/3/tutorial/controlflow.html#keyword-arguments)). - This is useful so that a user can set standard attributes like `tags` or - `testonly`. - -Now, use the macro from the `BUILD` file: - -```starlark -load("//path/to:miniature.bzl", "miniature") - -miniature( - name = "logo_miniature", - src = "image.png", -) - -cc_binary( - name = "my_app", - srcs = ["my_app.cc"], - data = [":logo_miniature"], -) -``` diff --git a/8.2.1/rules/performance.mdx b/8.2.1/rules/performance.mdx deleted file mode 100644 index 5870c0d..0000000 --- a/8.2.1/rules/performance.mdx +++ /dev/null @@ -1,302 +0,0 @@ ---- -title: 'Optimizing Performance' ---- - - - -When writing rules, the most common performance pitfall is to traverse or copy -data that is accumulated from dependencies. When aggregated over the whole -build, these operations can easily take O(N^2) time or space. To avoid this, it -is crucial to understand how to use depsets effectively. - -This can be hard to get right, so Bazel also provides a memory profiler that -assists you in finding spots where you might have made a mistake. Be warned: -The cost of writing an inefficient rule may not be evident until it is in -widespread use. - -## Use depsets - -Whenever you are rolling up information from rule dependencies you should use -[depsets](lib/depset). Only use plain lists or dicts to publish information -local to the current rule. - -A depset represents information as a nested graph which enables sharing. - -Consider the following graph: - -``` -C -> B -> A -D ---^ -``` - -Each node publishes a single string. 
With depsets the data looks like this: - -``` -a = depset(direct=['a']) -b = depset(direct=['b'], transitive=[a]) -c = depset(direct=['c'], transitive=[b]) -d = depset(direct=['d'], transitive=[b]) -``` - -Note that each item is only mentioned once. With lists you would get this: - -``` -a = ['a'] -b = ['b', 'a'] -c = ['c', 'b', 'a'] -d = ['d', 'b', 'a'] -``` - -Note that in this case `'a'` is mentioned four times! With larger graphs this -problem will only get worse. - -Here is an example of a rule implementation that uses depsets correctly to -publish transitive information. Note that it is OK to publish rule-local -information using lists if you want since this is not O(N^2). - -``` -MyProvider = provider() - -def _impl(ctx): - my_things = ctx.attr.things - all_things = depset( - direct=my_things, - transitive=[dep[MyProvider].all_things for dep in ctx.attr.deps] - ) - ... - return [MyProvider( - my_things=my_things, # OK, a flat list of rule-local things only - all_things=all_things, # OK, a depset containing dependencies - )] -``` - -See the [depset overview](/extending/depsets) page for more information. - -### Avoid calling `depset.to_list()` - -You can coerce a depset to a flat list using -[`to_list()`](lib/depset#to_list), but doing so usually results in O(N^2) -cost. If at all possible, avoid any flattening of depsets except for debugging -purposes. - -A common misconception is that you can freely flatten depsets if you only do it -at top-level targets, such as an `_binary` rule, since then the cost is not -accumulated over each level of the build graph. But this is *still* O(N^2) when -you build a set of targets with overlapping dependencies. This happens when -building your tests `//foo/tests/...`, or when importing an IDE project. - -### Reduce the number of calls to `depset` - -Calling `depset` inside a loop is often a mistake. It can lead to depsets with -very deep nesting, which perform poorly. For example: - -```python -x = depset() -for i in inputs: - # Do not do that. - x = depset(transitive = [x, i.deps]) -``` - -This code can be replaced easily. First, collect the transitive depsets and -merge them all at once: - -```python -transitive = [] - -for i in inputs: - transitive.append(i.deps) - -x = depset(transitive = transitive) -``` - -This can sometimes be reduced using a list comprehension: - -```python -x = depset(transitive = [i.deps for i in inputs]) -``` - -## Use ctx.actions.args() for command lines - -When building command lines you should use [ctx.actions.args()](lib/Args). -This defers expansion of any depsets to the execution phase. - -Apart from being strictly faster, this will reduce the memory consumption of -your rules -- sometimes by 90% or more. - -Here are some tricks: - -* Pass depsets and lists directly as arguments, instead of flattening them -yourself. They will get expanded by `ctx.actions.args()` for you. -If you need any transformations on the depset contents, look at -[ctx.actions.args#add](lib/Args#add) to see if anything fits the bill. - -* Are you passing `File#path` as arguments? No need. Any -[File](lib/File) is automatically turned into its -[path](lib/File#path), deferred to expansion time. - -* Avoid constructing strings by concatenating them together. -The best string argument is a constant as its memory will be shared between -all instances of your rule. 
-
-* If the args are too long for the command line, a `ctx.actions.args()` object
-can be conditionally or unconditionally written to a param file using
-[`ctx.actions.args#use_param_file`](lib/Args#use_param_file). This is
-done behind the scenes when the action is executed. If you need to explicitly
-control the params file you can write it manually using
-[`ctx.actions.write`](lib/actions#write).
-
-Example:
-
-```
-def _impl(ctx):
-  ...
-  args = ctx.actions.args()
-  file = ctx.actions.declare_file(...)
-  files = depset(...)
-
-  # Bad, constructs a full string "--foo=<file path>" for each rule instance
-  args.add("--foo=" + file.path)
-
-  # Good, shares "--foo" among all rule instances, and defers file.path to later
-  # It will however pass ["--foo", <file path>] to the action command line,
-  # instead of ["--foo=<file path>"]
-  args.add("--foo", file)
-
-  # Use format if you prefer ["--foo=<file path>"] to ["--foo", <file path>]
-  args.add(format="--foo=%s", value=file)
-
-  # Bad, makes a giant string of a whole depset
-  args.add(" ".join(["-I%s" % file.short_path for file in files]))
-
-  # Good, only stores a reference to the depset
-  args.add_all(files, format_each="-I%s", map_each=_to_short_path)
-
-# Function passed to map_each above
-def _to_short_path(f):
-  return f.short_path
-```
-
-## Transitive action inputs should be depsets
-
-When building an action using [ctx.actions.run](lib/actions#run), do not
-forget that the `inputs` field accepts a depset. Use this whenever inputs are
-collected from dependencies transitively.
-
-```
-inputs = depset(...)
-ctx.actions.run(
-  inputs = inputs,  # Do *not* turn inputs into a list
-  ...
-)
-```
-
-## Hanging
-
-If Bazel appears to be hung, you can hit Ctrl-\ or send
-Bazel a `SIGQUIT` signal (`kill -3 $(bazel info server_pid)`) to get a thread
-dump in the file `$(bazel info output_base)/server/jvm.out`.
-
-Since you may not be able to run `bazel info` if bazel is hung, the
-`output_base` directory is usually the parent of the `bazel-<workspace>`
-symlink in your workspace directory.
-
-## Performance profiling
-
-The [JSON trace profile](/advanced/performance/json-trace-profile) can be very
-useful to quickly understand what Bazel spent time on during the invocation.
-
-The [`--experimental_command_profile`](https://bazel.build/reference/command-line-reference#flag--experimental_command_profile)
-flag may be used to capture Java Flight Recorder profiles of various kinds
-(cpu time, wall time, memory allocations and lock contention).
-
-The [`--starlark_cpu_profile`](https://bazel.build/reference/command-line-reference#flag--starlark_cpu_profile)
-flag may be used to write a pprof profile of CPU usage by all Starlark threads.
-
-## Memory profiling
-
-Bazel comes with a built-in memory profiler that can help you check your rule’s
-memory use. If there is a problem you can dump the heap to find the
-exact line of code that is causing the problem.
-
-### Enabling memory tracking
-
-You must pass these two startup flags to *every* Bazel invocation:
-
-  ```
-  STARTUP_FLAGS=\
-  --host_jvm_args=-javaagent:<path to java-allocation-instrumenter-3.3.4.jar> \
-  --host_jvm_args=-DRULE_MEMORY_TRACKER=1
-  ```
-Note: You can download the allocation instrumenter jar file from [Maven Central
-Repository][allocation-instrumenter-link].
-
-[allocation-instrumenter-link]: https://repo1.maven.org/maven2/com/google/code/java-allocation-instrumenter/java-allocation-instrumenter/3.3.4
-
-These start the server in memory tracking mode. If you forget these for even
-one Bazel invocation the server will restart and you will have to start over.
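-
-For example, in a POSIX shell, assuming the jar was downloaded to `/tmp` (the
-path is illustrative):
-
-```posix-terminal
-STARTUP_FLAGS="--host_jvm_args=-javaagent:/tmp/java-allocation-instrumenter-3.3.4.jar \
---host_jvm_args=-DRULE_MEMORY_TRACKER=1"
-
-bazel $STARTUP_FLAGS version
-```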
- -### Using the Memory Tracker - -As an example, look at the target `foo` and see what it does. To only -run the analysis and not run the build execution phase, add the -`--nobuild` flag. - -``` -$ bazel $(STARTUP_FLAGS) build --nobuild //foo:foo -``` - -Next, see how much memory the whole Bazel instance consumes: - -``` -$ bazel $(STARTUP_FLAGS) info used-heap-size-after-gc -> 2594MB -``` - -Break it down by rule class by using `bazel dump --rules`: - -``` -$ bazel $(STARTUP_FLAGS) dump --rules -> - -RULE COUNT ACTIONS BYTES EACH -genrule 33,762 33,801 291,538,824 8,635 -config_setting 25,374 0 24,897,336 981 -filegroup 25,369 25,369 97,496,272 3,843 -cc_library 5,372 73,235 182,214,456 33,919 -proto_library 4,140 110,409 186,776,864 45,115 -android_library 2,621 36,921 218,504,848 83,366 -java_library 2,371 12,459 38,841,000 16,381 -_gen_source 719 2,157 9,195,312 12,789 -_check_proto_library_deps 719 668 1,835,288 2,552 -... (more output) -``` - -Look at where the memory is going by producing a `pprof` file -using `bazel dump --skylark_memory`: - -``` -$ bazel $(STARTUP_FLAGS) dump --skylark_memory=$HOME/prof.gz -> Dumping Starlark heap to: /usr/local/google/home/$USER/prof.gz -``` - -Use the `pprof` tool to investigate the heap. A good starting point is -getting a flame graph by using `pprof -flame $HOME/prof.gz`. - -Get `pprof` from [https://github.com/google/pprof](https://github.com/google/pprof). - -Get a text dump of the hottest call sites annotated with lines: - -``` -$ pprof -text -lines $HOME/prof.gz -> - flat flat% sum% cum cum% - 146.11MB 19.64% 19.64% 146.11MB 19.64% android_library :-1 - 113.02MB 15.19% 34.83% 113.02MB 15.19% genrule :-1 - 74.11MB 9.96% 44.80% 74.11MB 9.96% glob :-1 - 55.98MB 7.53% 52.32% 55.98MB 7.53% filegroup :-1 - 53.44MB 7.18% 59.51% 53.44MB 7.18% sh_test :-1 - 26.55MB 3.57% 63.07% 26.55MB 3.57% _generate_foo_files /foo/tc/tc.bzl:491 - 26.01MB 3.50% 66.57% 26.01MB 3.50% _build_foo_impl /foo/build_test.bzl:78 - 22.01MB 2.96% 69.53% 22.01MB 2.96% _build_foo_impl /foo/build_test.bzl:73 - ... (more output) -``` diff --git a/8.2.1/rules/rules-tutorial.mdx b/8.2.1/rules/rules-tutorial.mdx deleted file mode 100644 index 4c6698e..0000000 --- a/8.2.1/rules/rules-tutorial.mdx +++ /dev/null @@ -1,367 +0,0 @@ ---- -title: 'Rules Tutorial' ---- - - - - -[Starlark](https://github.com/bazelbuild/starlark) is a Python-like -configuration language originally developed for use in Bazel and since adopted -by other tools. Bazel's `BUILD` and `.bzl` files are written in a dialect of -Starlark properly known as the "Build Language", though it is often simply -referred to as "Starlark", especially when emphasizing that a feature is -expressed in the Build Language as opposed to being a built-in or "native" part -of Bazel. Bazel augments the core language with numerous build-related functions -such as `glob`, `genrule`, `java_binary`, and so on. - -See the -[Bazel](/start/) and [Starlark](/extending/concepts) documentation for -more details, and the -[Rules SIG template](https://github.com/bazel-contrib/rules-template) as a -starting point for new rulesets. - -## The empty rule - -To create your first rule, create the file `foo.bzl`: - -```python -def _foo_binary_impl(ctx): - pass - -foo_binary = rule( - implementation = _foo_binary_impl, -) -``` - -When you call the [`rule`](lib/globals#rule) function, you -must define a callback function. The logic will go there, but you -can leave the function empty for now. 
The [`ctx`](lib/ctx) argument provides information about the target.
-
-You can load the rule and use it from a `BUILD` file.
-
-Create a `BUILD` file in the same directory:
-
-```python
-load(":foo.bzl", "foo_binary")
-
-foo_binary(name = "bin")
-```
-
-Now, the target can be built:
-
-```
-$ bazel build bin
-INFO: Analyzed target //:bin (2 packages loaded, 17 targets configured).
-INFO: Found 1 target...
-Target //:bin up-to-date (nothing to build)
-```
-
-Even though the rule does nothing, it already behaves like other rules: it has a
-mandatory name, and it supports common attributes like `visibility`, `testonly`,
-and `tags`.
-
-## Evaluation model
-
-Before going further, it's important to understand how the code is evaluated.
-
-Update `foo.bzl` with some print statements:
-
-```python
-def _foo_binary_impl(ctx):
-    print("analyzing", ctx.label)
-
-foo_binary = rule(
-    implementation = _foo_binary_impl,
-)
-
-print("bzl file evaluation")
-```
-
-and BUILD:
-
-```python
-load(":foo.bzl", "foo_binary")
-
-print("BUILD file")
-foo_binary(name = "bin1")
-foo_binary(name = "bin2")
-```
-
-[`ctx.label`](lib/ctx#label)
-corresponds to the label of the target being analyzed. The `ctx` object has
-many useful fields and methods; you can find an exhaustive list in the
-[API reference](lib/ctx).
-
-Query the code:
-
-```
-$ bazel query :all
-DEBUG: /usr/home/bazel-codelab/foo.bzl:8:1: bzl file evaluation
-DEBUG: /usr/home/bazel-codelab/BUILD:2:1: BUILD file
-//:bin2
-//:bin1
-```
-
-Make a few observations:
-
-* "bzl file evaluation" is printed first. Before evaluating the `BUILD` file,
-  Bazel evaluates all the files it loads. If multiple `BUILD` files load
-  foo.bzl, you would see only one occurrence of "bzl file evaluation" because
-  Bazel caches the result of the evaluation.
-* The callback function `_foo_binary_impl` is not called. Bazel query loads
-  `BUILD` files, but doesn't analyze targets.
-
-To analyze the targets, use the [`cquery`](/query/cquery) ("configured
-query") or the `build` command:
-
-```
-$ bazel build :all
-DEBUG: /usr/home/bazel-codelab/foo.bzl:2:5: analyzing //:bin1
-DEBUG: /usr/home/bazel-codelab/foo.bzl:2:5: analyzing //:bin2
-INFO: Analyzed 2 targets (0 packages loaded, 0 targets configured).
-INFO: Found 2 targets...
-```
-
-As you can see, `_foo_binary_impl` is now called twice, once for each target.
-
-Notice that neither "bzl file evaluation" nor "BUILD file" are printed again,
-because the evaluation of `foo.bzl` is cached after the call to `bazel query`.
-Bazel only emits `print` statements when they are actually executed.
-
-## Creating a file
-
-To make your rule more useful, update it to generate a file. First, declare the
-file and give it a name. In this example, create a file with the same name as
-the target:
-
-```python
-ctx.actions.declare_file(ctx.label.name)
-```
-
-If you run `bazel build :all` now, you will get an error:
-
-```
-The following files have no generating action:
-bin2
-```
-
-Whenever you declare a file, you have to tell Bazel how to generate it by
-creating an action. Use [`ctx.actions.write`](lib/actions#write)
-to create a file with the given content.
-
-```python
-def _foo_binary_impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name)
-    ctx.actions.write(
-        output = out,
-        content = "Hello\n",
-    )
-```
-
-The code is valid, but it won't do anything:
-
-```
-$ bazel build bin1
-Target //:bin1 up-to-date (nothing to build)
-```
-
-The `ctx.actions.write` function registered an action, which taught Bazel
-how to generate the file. But Bazel won't create the file until it is
-actually requested. So the last thing to do is tell Bazel that the file
-is an output of the rule, and not a temporary file used within the rule
-implementation.
-
-```python
-def _foo_binary_impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name)
-    ctx.actions.write(
-        output = out,
-        content = "Hello!\n",
-    )
-    return [DefaultInfo(files = depset([out]))]
-```
-
-You'll take a closer look at the `DefaultInfo` and `depset` functions later. For
-now, assume that the last line is the way to choose the outputs of a rule.
-
-Now, run Bazel:
-
-```
-$ bazel build bin1
-INFO: Found 1 target...
-Target //:bin1 up-to-date:
-  bazel-bin/bin1
-
-$ cat bazel-bin/bin1
-Hello!
-```
-
-You have successfully generated a file!
-
-## Attributes
-
-To make the rule more useful, add new attributes using
-[the `attr` module](lib/attr) and update the rule definition.
-
-Add a string attribute called `username`:
-
-```python
-foo_binary = rule(
-    implementation = _foo_binary_impl,
-    attrs = {
-        "username": attr.string(),
-    },
-)
-```
-
-Next, set it in the `BUILD` file:
-
-```python
-foo_binary(
-    name = "bin",
-    username = "Alice",
-)
-```
-
-To access the value in the callback function, use `ctx.attr.username`. For
-example:
-
-```python
-def _foo_binary_impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name)
-    ctx.actions.write(
-        output = out,
-        content = "Hello {}!\n".format(ctx.attr.username),
-    )
-    return [DefaultInfo(files = depset([out]))]
-```
-
-Note that you can make the attribute mandatory or set a default value. Look at
-the documentation of [`attr.string`](lib/attr#string).
-You may also use other types of attributes, such as [boolean](lib/attr#bool)
-or [list of integers](lib/attr#int_list).
-
-## Dependencies
-
-Dependency attributes, such as [`attr.label`](lib/attr#label)
-and [`attr.label_list`](lib/attr#label_list),
-declare a dependency from the target that owns the attribute to the target whose
-label appears in the attribute's value. This kind of attribute forms the basis
-of the target graph.
-
-In the `BUILD` file, the target label appears as a string object, such as
-`//pkg:name`. In the implementation function, the target will be accessible as a
-[`Target`](lib/Target) object. For example, view the files returned
-by the target using [`Target.files`](lib/Target#modules.Target.files).
-
-### Multiple files
-
-By default, only targets created by rules may appear as dependencies (such as a
-`foo_library()` target). If you want the attribute to accept targets that are
-input files (such as source files in the repository), you can do it with
-`allow_files` and specify the list of accepted file extensions (or `True` to
-allow any file extension):
-
-```python
-"srcs": attr.label_list(allow_files = [".java"]),
-```
-
-The list of files can be accessed with `ctx.files.<attribute name>`.
-For example, the list of files in the `srcs` attribute can be accessed through
-
-```python
-ctx.files.srcs
-```
-
-### Single file
-
-If you need only one file, use `allow_single_file`:
-
-```python
-"src": attr.label(allow_single_file = [".java"])
-```
-
-This file is then accessible under `ctx.file.<attribute name>`:
-
-```python
-ctx.file.src
-```
-
-## Create a file with a template
-
-You can create a rule that generates a .cc file based on a template. You could
-use `ctx.actions.write` to output a string constructed in the rule
-implementation function, but this has two problems. First, as the template gets
-bigger, it becomes more memory efficient to put it in a separate file and avoid
-constructing large strings during the analysis phase. Second, using a separate
-file is more convenient for the user. Instead, use
-[`ctx.actions.expand_template`](lib/actions#expand_template),
-which performs substitutions on a template file.
-
-Create a `template` attribute to declare a dependency on the template
-file:
-
-```python
-def _hello_world_impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name + ".cc")
-    ctx.actions.expand_template(
-        output = out,
-        template = ctx.file.template,
-        substitutions = {"{NAME}": ctx.attr.username},
-    )
-    return [DefaultInfo(files = depset([out]))]
-
-hello_world = rule(
-    implementation = _hello_world_impl,
-    attrs = {
-        "username": attr.string(default = "unknown person"),
-        "template": attr.label(
-            allow_single_file = [".cc.tpl"],
-            mandatory = True,
-        ),
-    },
-)
-```
-
-Users can use the rule like this:
-
-```python
-hello_world(
-    name = "hello",
-    username = "Alice",
-    template = "file.cc.tpl",
-)
-
-cc_binary(
-    name = "hello_bin",
-    srcs = [":hello"],
-)
-```
-
-If you don't want to expose the template to the end-user and always use the
-same one, you can set a default value and make the attribute private:
-
-```python
-    "_template": attr.label(
-        allow_single_file = True,
-        default = "file.cc.tpl",
-    ),
-```
-
-Attributes that start with an underscore are private and cannot be set in a
-`BUILD` file. The template is now an _implicit dependency_: Every `hello_world`
-target has a dependency on this file. Don't forget to make this file visible
-to other packages by updating the `BUILD` file and using
-[`exports_files`](/reference/be/functions#exports_files):
-
-```python
-exports_files(["file.cc.tpl"])
-```
-
-## Going further
-
-* Take a look at the [reference documentation for rules](/extending/rules#contents).
-* Get familiar with [depsets](/extending/depsets).
-* Check out the [examples repository](https://github.com/bazelbuild/examples/tree/master/rules)
-  which includes additional examples of rules.
diff --git a/8.2.1/rules/testing.mdx b/8.2.1/rules/testing.mdx
deleted file mode 100644
index 2996e08..0000000
--- a/8.2.1/rules/testing.mdx
+++ /dev/null
@@ -1,474 +0,0 @@
----
-title: 'Testing'
----
-
-
-There are several different approaches to testing Starlark code in Bazel. This
-page gathers the current best practices and frameworks by use case.
-
-## Testing rules
-
-[Skylib](https://github.com/bazelbuild/bazel-skylib) has a test framework called
-[`unittest.bzl`](https://github.com/bazelbuild/bazel-skylib/blob/main/lib/unittest.bzl)
-for checking the analysis-time behavior of rules, such as their actions and
-providers. Such tests are called "analysis tests" and are currently the best
-option for testing the inner workings of rules.
-
-Some caveats:
-
-* Test assertions occur within the build, not a separate test runner process.
- Targets that are created by the test must be named such that they do not - collide with targets from other tests or from the build. An error that - occurs during the test is seen by Bazel as a build breakage rather than a - test failure. - -* It requires a fair amount of boilerplate to set up the rules under test and - the rules containing test assertions. This boilerplate may seem daunting at - first. It helps to [keep in mind](/extending/concepts#evaluation-model) that macros - are evaluated and targets generated during the loading phase, while rule - implementation functions don't run until later, during the analysis phase. - -* Analysis tests are intended to be fairly small and lightweight. Certain - features of the analysis testing framework are restricted to verifying - targets with a maximum number of transitive dependencies (currently 500). - This is due to performance implications of using these features with larger - tests. - -The basic principle is to define a testing rule that depends on the -rule-under-test. This gives the testing rule access to the rule-under-test's -providers. - -The testing rule's implementation function carries out assertions. If there are -any failures, these are not raised immediately by calling `fail()` (which would -trigger an analysis-time build error), but rather by storing the errors in a -generated script that fails at test execution time. - -See below for a minimal toy example, followed by an example that checks actions. - -### Minimal example - -`//mypkg/myrules.bzl`: - -```python -MyInfo = provider(fields = { - "val": "string value", - "out": "output File", -}) - -def _myrule_impl(ctx): - """Rule that just generates a file and returns a provider.""" - out = ctx.actions.declare_file(ctx.label.name + ".out") - ctx.actions.write(out, "abc") - return [MyInfo(val="some value", out=out)] - -myrule = rule( - implementation = _myrule_impl, -) -``` - -`//mypkg/myrules_test.bzl`: - - -```python -load("@bazel_skylib//lib:unittest.bzl", "asserts", "analysistest") -load(":myrules.bzl", "myrule", "MyInfo") - -# ==== Check the provider contents ==== - -def _provider_contents_test_impl(ctx): - env = analysistest.begin(ctx) - - target_under_test = analysistest.target_under_test(env) - # If preferred, could pass these values as "expected" and "actual" keyword - # arguments. - asserts.equals(env, "some value", target_under_test[MyInfo].val) - - # If you forget to return end(), you will get an error about an analysis - # test needing to return an instance of AnalysisTestResultInfo. - return analysistest.end(env) - -# Create the testing rule to wrap the test logic. This must be bound to a global -# variable, not called in a macro's body, since macros get evaluated at loading -# time but the rule gets evaluated later, at analysis time. Since this is a test -# rule, its name must end with "_test". -provider_contents_test = analysistest.make(_provider_contents_test_impl) - -# Macro to setup the test. -def _test_provider_contents(): - # Rule under test. Be sure to tag 'manual', as this target should not be - # built using `:all` except as a dependency of the test. - myrule(name = "provider_contents_subject", tags = ["manual"]) - # Testing rule. - provider_contents_test(name = "provider_contents_test", - target_under_test = ":provider_contents_subject") - # Note the target_under_test attribute is how the test rule depends on - # the real rule target. 
-
-# Entry point from the BUILD file; macro for running each test case's macro and
-# declaring a test suite that wraps them together.
-def myrules_test_suite(name):
-    # Call all test functions and wrap their targets in a suite.
-    _test_provider_contents()
-    # ...
-
-    native.test_suite(
-        name = name,
-        tests = [
-            ":provider_contents_test",
-            # ...
-        ],
-    )
-```
-
-`//mypkg/BUILD`:
-
-```python
-load(":myrules.bzl", "myrule")
-load(":myrules_test.bzl", "myrules_test_suite")
-
-# Production use of the rule.
-myrule(
-    name = "mytarget",
-)
-
-# Call a macro that defines targets that perform the tests at analysis time,
-# and that can be executed with "bazel test" to return the result.
-myrules_test_suite(name = "myrules_test")
-```
-
-The test can be run with `bazel test //mypkg:myrules_test`.
-
-Aside from the initial `load()` statements, there are two main parts to the
-file:
-
-* The tests themselves, each of which consists of 1) an analysis-time
-  implementation function for the testing rule, 2) a declaration of the
-  testing rule via `analysistest.make()`, and 3) a loading-time function
-  (macro) for declaring the rule-under-test (and its dependencies) and testing
-  rule. If the assertions do not change between test cases, 1) and 2) may be
-  shared by multiple test cases.
-
-* The test suite function, which calls the loading-time functions for each
-  test, and declares a `test_suite` target bundling all tests together.
-
-For consistency, follow the recommended naming convention: Let `foo` stand for
-the part of the test name that describes what the test is checking
-(`provider_contents` in the above example). For example, a JUnit test method
-would be named `testFoo`.
-
-Then:
-
-* the macro which generates the test and target under test should be
-  named `_test_foo` (`_test_provider_contents`)
-
-* its test rule type should be named `foo_test` (`provider_contents_test`)
-
-* the label of the target of this rule type should be `foo_test`
-  (`provider_contents_test`)
-
-* the implementation function for the testing rule should be named
-  `_foo_test_impl` (`_provider_contents_test_impl`)
-
-* the labels of the targets of the rules under test and their dependencies
-  should be prefixed with `foo_` (`provider_contents_`)
-
-Note that the labels of all targets can conflict with other labels in the same
-BUILD package, so it's helpful to use a unique name for the test.
-
-### Failure testing
-
-It may be useful to verify that a rule fails given certain inputs or in a
-certain state. This can be done using the analysis test framework:
-
-The test rule created with `analysistest.make` should specify `expect_failure`:
-
-```python
-failure_testing_test = analysistest.make(
-    _failure_testing_test_impl,
-    expect_failure = True,
-)
-```
-
-The test rule implementation should make assertions on the nature of the failure
-that took place (specifically, the failure message):
-
-```python
-def _failure_testing_test_impl(ctx):
-    env = analysistest.begin(ctx)
-    asserts.expect_failure(env, "This rule should never work")
-    return analysistest.end(env)
-```
-
-Also make sure that your target under test is specifically tagged 'manual'.
-Without this, building all targets in your package using `:all` will result in a
-build of the intentionally-failing target and will exhibit a build failure.
With -'manual', your target under test will build only if explicitly specified, or as -a dependency of a non-manual target (such as your test rule): - -```python -def _test_failure(): - myrule(name = "this_should_fail", tags = ["manual"]) - - failure_testing_test(name = "failure_testing_test", - target_under_test = ":this_should_fail") - -# Then call _test_failure() in the macro which generates the test suite and add -# ":failure_testing_test" to the suite's test targets. -``` - -### Verifying registered actions - -You may want to write tests which make assertions about the actions that your -rule registers, for example, using `ctx.actions.run()`. This can be done in your -analysis test rule implementation function. An example: - -```python -def _inspect_actions_test_impl(ctx): - env = analysistest.begin(ctx) - - target_under_test = analysistest.target_under_test(env) - actions = analysistest.target_actions(env) - asserts.equals(env, 1, len(actions)) - action_output = actions[0].outputs.to_list()[0] - asserts.equals( - env, target_under_test.label.name + ".out", action_output.basename) - return analysistest.end(env) -``` - -Note that `analysistest.target_actions(env)` returns a list of -[`Action`](lib/Action) objects which represent actions registered by the -target under test. - -### Verifying rule behavior under different flags - -You may want to verify your real rule behaves a certain way given certain build -flags. For example, your rule may behave differently if a user specifies: - -```shell -bazel build //mypkg:real_target -c opt -``` - -versus - -```shell -bazel build //mypkg:real_target -c dbg -``` - -At first glance, this could be done by testing the target under test using the -desired build flags: - -```shell -bazel test //mypkg:myrules_test -c opt -``` - -But then it becomes impossible for your test suite to simultaneously contain a -test which verifies the rule behavior under `-c opt` and another test which -verifies the rule behavior under `-c dbg`. Both tests would not be able to run -in the same build! - -This can be solved by specifying the desired build flags when defining the test -rule: - -```python -myrule_c_opt_test = analysistest.make( - _myrule_c_opt_test_impl, - config_settings = { - "//command_line_option:compilation_mode": "opt", - }, -) -``` - -Normally, a target under test is analyzed given the current build flags. -Specifying `config_settings` overrides the values of the specified command line -options. (Any unspecified options will retain their values from the actual -command line). - -In the specified `config_settings` dictionary, command line flags must be -prefixed with a special placeholder value `//command_line_option:`, as is shown -above. - - -## Validating artifacts - -The main ways to check that your generated files are correct are: - -* You can write a test script in shell, Python, or another language, and - create a target of the appropriate `*_test` rule type. - -* You can use a specialized rule for the kind of test you want to perform. - -### Using a test target - -The most straightforward way to validate an artifact is to write a script and -add a `*_test` target to your BUILD file. The specific artifacts you want to -check should be data dependencies of this target. If your validation logic is -reusable for multiple tests, it should be a script that takes command line -arguments that are controlled by the test target's `args` attribute. Here's an -example that validates that the output of `myrule` from above is `"abc"`. 
-
-`//mypkg/myrule_validator.sh`:
-
-```shell
-if [ "$(cat $1)" = "abc" ]; then
-  echo "Passed"
-  exit 0
-else
-  echo "Failed"
-  exit 1
-fi
-```
-
-`//mypkg/BUILD`:
-
-```python
-...
-
-myrule(
-    name = "mytarget",
-)
-
-...
-
-# Needed for each target whose artifacts are to be checked.
-sh_test(
-    name = "validate_mytarget",
-    srcs = [":myrule_validator.sh"],
-    args = ["$(location :mytarget.out)"],
-    data = [":mytarget.out"],
-)
-```
-
-### Using a custom rule
-
-A more complicated alternative is to write the shell script as a template that
-gets instantiated by a new rule. This involves more indirection and Starlark
-logic, but leads to cleaner BUILD files. As a side-benefit, any argument
-preprocessing can be done in Starlark instead of the script, and the script is
-slightly more self-documenting since it uses symbolic placeholders (for
-substitutions) instead of numeric ones (for arguments).
-
-`//mypkg/myrule_validator.sh.template`:
-
-```shell
-if [ "$(cat %TARGET%)" = "abc" ]; then
-  echo "Passed"
-  exit 0
-else
-  echo "Failed"
-  exit 1
-fi
-```
-
-`//mypkg/myrule_validation.bzl`:
-
-```python
-def _myrule_validation_test_impl(ctx):
-    """Rule for instantiating myrule_validator.sh.template for a given target."""
-    exe = ctx.outputs.executable
-    target = ctx.file.target
-    ctx.actions.expand_template(
-        output = exe,
-        template = ctx.file._script,
-        is_executable = True,
-        substitutions = {
-            "%TARGET%": target.short_path,
-        },
-    )
-    # This is needed to make sure the output file of myrule is visible to the
-    # resulting instantiated script.
-    return [DefaultInfo(runfiles = ctx.runfiles(files = [target]))]
-
-myrule_validation_test = rule(
-    implementation = _myrule_validation_test_impl,
-    attrs = {
-        "target": attr.label(allow_single_file = True),
-        # You need an implicit dependency in order to access the template.
-        # A target could potentially override this attribute to modify
-        # the test logic.
-        "_script": attr.label(
-            allow_single_file = True,
-            default = Label("//mypkg:myrule_validator"),
-        ),
-    },
-    test = True,
-)
-```
-
-`//mypkg/BUILD`:
-
-```python
-...
-
-myrule(
-    name = "mytarget",
-)
-
-...
-
-# Needed just once, to expose the template. Could have also used exports_files(),
-# and made the _script attribute set allow_files = True.
-filegroup(
-    name = "myrule_validator",
-    srcs = [":myrule_validator.sh.template"],
-)
-
-# Needed for each target whose artifacts are to be checked. Notice that you no
-# longer have to specify the output file name in a data attribute, or its
-# $(location) expansion in an args attribute, or the label for the script
-# (unless you want to override it).
-myrule_validation_test(
-    name = "validate_mytarget",
-    target = ":mytarget",
-)
-```
-
-Alternatively, instead of using a template expansion action, you could have
-inlined the template into the .bzl file as a string and expanded it during the
-analysis phase using the `str.format` method or `%`-formatting.
-
-## Testing Starlark utilities
-
-[Skylib](https://github.com/bazelbuild/bazel-skylib)'s
-[`unittest.bzl`](https://github.com/bazelbuild/bazel-skylib/blob/main/lib/unittest.bzl)
-framework can be used to test utility functions (that is, functions that are
-neither macros nor rule implementations). Instead of using `unittest.bzl`'s
-`analysistest` library, `unittest` may be used. For such test suites, the
-convenience function `unittest.suite()` can be used to reduce boilerplate.
- -`//mypkg/myhelpers.bzl`: - -```python -def myhelper(): - return "abc" -``` - -`//mypkg/myhelpers_test.bzl`: - - -```python -load("@bazel_skylib//lib:unittest.bzl", "asserts", "unittest") -load(":myhelpers.bzl", "myhelper") - -def _myhelper_test_impl(ctx): - env = unittest.begin(ctx) - asserts.equals(env, "abc", myhelper()) - return unittest.end(env) - -myhelper_test = unittest.make(_myhelper_test_impl) - -# No need for a test_myhelper() setup function. - -def myhelpers_test_suite(name): - # unittest.suite() takes care of instantiating the testing rules and creating - # a test_suite. - unittest.suite( - name, - myhelper_test, - # ... - ) -``` - -`//mypkg/BUILD`: - -```python -load(":myhelpers_test.bzl", "myhelpers_test_suite") - -myhelpers_test_suite(name = "myhelpers_tests") -``` - -For more examples, see Skylib's own [tests](https://github.com/bazelbuild/bazel-skylib/blob/main/tests/BUILD). diff --git a/8.2.1/rules/verbs-tutorial.mdx b/8.2.1/rules/verbs-tutorial.mdx deleted file mode 100644 index db7757e..0000000 --- a/8.2.1/rules/verbs-tutorial.mdx +++ /dev/null @@ -1,177 +0,0 @@ ---- -title: 'Using Macros to Create Custom Verbs' ---- - - - -Day-to-day interaction with Bazel happens primarily through a few commands: -`build`, `test`, and `run`. At times, though, these can feel limited: you may -want to push packages to a repository, publish documentation for end-users, or -deploy an application with Kubernetes. But Bazel doesn't have a `publish` or -`deploy` command – where do these actions fit in? - -## The bazel run command - -Bazel's focus on hermeticity, reproducibility, and incrementality means the -`build` and `test` commands aren't helpful for the above tasks. These actions -may run in a sandbox, with limited network access, and aren't guaranteed to be -re-run with every `bazel build`. - -Instead, rely on `bazel run`: the workhorse for tasks that you *want* to have -side effects. Bazel users are accustomed to rules that create executables, and -rule authors can follow a common set of patterns to extend this to -"custom verbs". - -### In the wild: rules_k8s -For example, consider [`rules_k8s`](https://github.com/bazelbuild/rules_k8s), -the Kubernetes rules for Bazel. Suppose you have the following target: - -```python -# BUILD file in //application/k8s -k8s_object( - name = "staging", - kind = "deployment", - cluster = "testing", - template = "deployment.yaml", -) -``` - -The [`k8s_object` rule](https://github.com/bazelbuild/rules_k8s#usage) builds a -standard Kubernetes YAML file when `bazel build` is used on the `staging` -target. However, the additional targets are also created by the `k8s_object` -macro with names like `staging.apply` and `:staging.delete`. These build -scripts to perform those actions, and when executed with `bazel run -staging.apply`, these behave like our own `bazel k8s-apply` or `bazel -k8s-delete` commands. - -### Another example: ts_api_guardian_test - -This pattern can also be seen in the Angular project. The -[`ts_api_guardian_test` macro](https://github.com/angular/angular/blob/16ac611a8410e6bcef8ffc779f488ca4fa102155/tools/ts-api-guardian/index.bzl#L22) -produces two targets. The first is a standard `nodejs_test` target which compares -some generated output against a "golden" file (that is, a file containing the -expected output). This can be built and run with a normal `bazel -test` invocation. 
In `angular-cli`, you can run [one such -target](https://github.com/angular/angular-cli/blob/e1269cb520871ee29b1a4eec6e6c0e4a94f0b5fc/etc/api/BUILD) -with `bazel test //etc/api:angular_devkit_core_api`. - -Over time, this golden file may need to be updated for legitimate reasons. -Updating this manually is tedious and error-prone, so this macro also provides -a `nodejs_binary` target that updates the golden file, instead of comparing -against it. Effectively, the same test script can be written to run in "verify" -or "accept" mode, based on how it's invoked. This follows the same pattern -you've learned already: there is no native `bazel test-accept` command, but the -same effect can be achieved with -`bazel run //etc/api:angular_devkit_core_api.accept`. - -This pattern can be quite powerful, and turns out to be quite common once you -learn to recognize it. - -## Adapting your own rules - -[Macros](/extending/macros) are the heart of this pattern. Macros are used like -rules, but they can create several targets. Typically, they will create a -target with the specified name which performs the primary build action: perhaps -it builds a normal binary, a Docker image, or an archive of source code. In -this pattern, additional targets are created to produce scripts performing side -effects based on the output of the primary target, like publishing the -resulting binary or updating the expected test output. - -To illustrate this, wrap an imaginary rule that generates a website with -[Sphinx](https://www.sphinx-doc.org) with a macro to create an additional -target that allows the user to publish it when ready. Consider the following -existing rule for generating a website with Sphinx: - -```python -_sphinx_site = rule( - implementation = _sphinx_impl, - attrs = {"srcs": attr.label_list(allow_files = [".rst"])}, -) -``` - -Next, consider a rule like the following, which builds a script that, when run, -publishes the generated pages: - -```python -_sphinx_publisher = rule( - implementation = _publish_impl, - attrs = { - "site": attr.label(), - "_publisher": attr.label( - default = "//internal/sphinx:publisher", - executable = True, - ), - }, - executable = True, -) -``` - -Finally, define the following symbolic macro (available in Bazel 8 or newer) to -create targets for both of the above rules together: - -```starlark -def _sphinx_site_impl(name, visibility, srcs, **kwargs): - # This creates the primary target, producing the Sphinx-generated HTML. We - # set `visibility = visibility` to make it visible to callers of the - # macro. - _sphinx_site(name = name, visibility = visibility, srcs = srcs, **kwargs) - # This creates the secondary target, which produces a script for publishing - # the site generated above. We don't want it to be visible to callers of - # our macro, so we omit visibility for it. - _sphinx_publisher(name = "%s.publish" % name, site = name, **kwargs) - -sphinx_site = macro( - implementation = _sphinx_site_impl, - attrs = {"srcs": attr.label_list(allow_files = [".rst"])}, - # Inherit common attributes like tags and testonly - inherit_attrs = "common", -) -``` - -Or, if you need to support Bazel releases older than Bazel 8, you would instead -define a legacy macro: - -```starlark -def sphinx_site(name, srcs = [], **kwargs): - # This creates the primary target, producing the Sphinx-generated HTML. - _sphinx_site(name = name, srcs = srcs, **kwargs) - # This creates the secondary target, which produces a script for publishing - # the site generated above. 
-    _sphinx_publisher(name = "%s.publish" % name, site = name, **kwargs)
-```
-
-In the `BUILD` files, use the macro as though it just creates the primary
-target:
-
-```python
-sphinx_site(
-    name = "docs",
-    srcs = ["index.md", "providers.md"],
-)
-```
-
-In this example, a "docs" target is created, just as though the macro were a
-standard, single Bazel rule. When built, the rule generates some configuration
-and runs Sphinx to produce an HTML site, ready for manual inspection. However,
-an additional "docs.publish" target is also created, which builds a script for
-publishing the site. Once you check the output of the primary target, you can
-use `bazel run :docs.publish` to publish it for public consumption, just like
-an imaginary `bazel publish` command.
-
-It's not immediately obvious what the implementation of the `_sphinx_publisher`
-rule might look like. Often, actions like this write a _launcher_ shell script.
-This method typically involves using
-[`ctx.actions.expand_template`](lib/actions#expand_template)
-to write a very simple shell script, in this case invoking the publisher binary
-with a path to the output of the primary target. This way, the publisher
-implementation can remain generic, the `_sphinx_site` rule can just produce
-HTML, and this small script is all that's necessary to combine the two
-together.
-
-In `rules_k8s`, this is indeed what `.apply` does:
-[`expand_template`](https://github.com/bazelbuild/rules_k8s/blob/f10e7025df7651f47a76abf1db5ade1ffeb0c6ac/k8s/object.bzl#L213-L241)
-writes a very simple Bash script, based on
-[`apply.sh.tpl`](https://github.com/bazelbuild/rules_k8s/blob/f10e7025df7651f47a76abf1db5ade1ffeb0c6ac/k8s/apply.sh.tpl),
-which runs `kubectl` with the output of the primary target. This script can
-then be built and run with `bazel run :staging.apply`, effectively providing a
-`k8s-apply` command for `k8s_object` targets.
diff --git a/8.2.1/run/bazelrc.mdx b/8.2.1/run/bazelrc.mdx
deleted file mode 100644
index 15f89c8..0000000
--- a/8.2.1/run/bazelrc.mdx
+++ /dev/null
@@ -1,260 +0,0 @@
----
-title: 'Write bazelrc configuration files'
----
-
-
-
-Bazel accepts many options. Some options are varied frequently (for example,
-`--subcommands`) while others stay the same across several builds (such as
-`--package_path`). To avoid specifying these unchanged options for every build
-(and other commands), you can specify options in a configuration file, called
-`.bazelrc`.
-
-### Where are the `.bazelrc` files?
-
-Bazel looks for optional configuration files in the following locations,
-in the order shown below. The options are interpreted in this order, so
-options in later files can override a value from an earlier file if a
-conflict arises. All options that control which of these files are loaded are
-startup options, which means they must occur after `bazel` and
-before the command (`build`, `test`, etc.).
-
-1. **The system RC file**, unless `--nosystem_rc` is present.
-
-   Path:
-
-   - On Linux/macOS/Unixes: `/etc/bazel.bazelrc`
-   - On Windows: `%ProgramData%\bazel.bazelrc`
-
-   It is not an error if this file does not exist.
-
-   If another system-specified location is required, you must build a custom
-   Bazel binary, overriding the `BAZEL_SYSTEM_BAZELRC_PATH` value in
-   [`//src/main/cpp:option_processor`](https://github.com/bazelbuild/bazel/blob/0.28.0/src/main/cpp/BUILD#L141).
-   The system-specified location may contain environment variable references,
-   such as `${VAR_NAME}` on Unix or `%VAR_NAME%` on Windows.
-
-2.
**The workspace RC file**, unless `--noworkspace_rc` is present. - - Path: `.bazelrc` in your workspace directory (next to the main - `MODULE.bazel` file). - - It is not an error if this file does not exist. - -3. **The home RC file**, unless `--nohome_rc` is present. - - Path: - - - On Linux/macOS/Unixes: `$HOME/.bazelrc` - - On Windows: `%USERPROFILE%\.bazelrc` if exists, or `%HOME%/.bazelrc` - - It is not an error if this file does not exist. - -4. **The user-specified RC file**, if specified with - --bazelrc=file - - This flag is optional but can also be specified multiple times. - - `/dev/null` indicates that all further `--bazelrc`s will be ignored, which - is useful to disable the search for a user rc file, such as in release - builds. - - For example: - - ``` - --bazelrc=x.rc --bazelrc=y.rc --bazelrc=/dev/null --bazelrc=z.rc - ``` - - - `x.rc` and `y.rc` are read. - - `z.rc` is ignored due to the prior `/dev/null`. - -In addition to this optional configuration file, Bazel looks for a global rc -file. For more details, see the [global bazelrc section](#global-bazelrc). - - -### `.bazelrc` syntax and semantics - -Like all UNIX "rc" files, the `.bazelrc` file is a text file with a line-based -grammar. Empty lines and lines starting with `#` (comments) are ignored. Each -line contains a sequence of words, which are tokenized according to the same -rules as the Bourne shell. - -#### Imports - -Lines that start with `import` or `try-import` are special: use these to load -other "rc" files. To specify a path that is relative to the workspace root, -write `import %workspace%/path/to/bazelrc`. - -The difference between `import` and `try-import` is that Bazel fails if the -`import`'ed file is missing (or can't be read), but not so for a `try-import`'ed -file. - -Import precedence: - -- Options in the imported file take precedence over options specified before - the import statement. -- Options specified after the import statement take precedence over the - options in the imported file. -- Options in files imported later take precedence over files imported earlier. - -#### Option defaults - -Most lines of a bazelrc define default option values. The first word on each -line specifies when these defaults are applied: - -- `startup`: startup options, which go before the command, and are described - in `bazel help startup_options`. -- `common`: options that should be applied to all Bazel commands that support - them. If a command does not support an option specified in this way, the - option is ignored so long as it is valid for *some* other Bazel command. - Note that this only applies to option names: If the current command accepts - an option with the specified name, but doesn't support the specified value, - it will fail. -- `always`: options that apply to all Bazel commands. If a command does not - support an option specified in this way, it will fail. -- _`command`_: Bazel command, such as `build` or `query` to which the options - apply. These options also apply to all commands that inherit from the - specified command. (For example, `test` inherits from `build`.) - -Each of these lines may be used more than once and the arguments that follow the -first word are combined as if they had appeared on a single line. (Users of CVS, -another tool with a "Swiss army knife" command-line interface, will find the -syntax similar to that of `.cvsrc`.) 
For example, the lines: - -```posix-terminal -build --test_tmpdir=/tmp/foo --verbose_failures - -build --test_tmpdir=/tmp/bar -``` - -are combined as: - -```posix-terminal -build --test_tmpdir=/tmp/foo --verbose_failures --test_tmpdir=/tmp/bar -``` - -so the effective flags are `--verbose_failures` and `--test_tmpdir=/tmp/bar`. - -Option precedence: - -- Options on the command line always take precedence over those in rc files. - For example, if a rc file says `build -c opt` but the command line flag is - `-c dbg`, the command line flag takes precedence. -- Within the rc file, precedence is governed by specificity: lines for a more - specific command take precedence over lines for a less specific command. - - Specificity is defined by inheritance. Some commands inherit options from - other commands, making the inheriting command more specific than the base - command. For example `test` inherits from the `build` command, so all `bazel - build` flags are valid for `bazel test`, and all `build` lines apply also to - `bazel test` unless there's a `test` line for the same option. If the rc - file says: - - ```posix-terminal - test -c dbg --test_env=PATH - - build -c opt --verbose_failures - ``` - - then `bazel build //foo` will use `-c opt --verbose_failures`, and `bazel - test //foo` will use `--verbose_failures -c dbg --test_env=PATH`. - - The inheritance (specificity) graph is: - - * Every command inherits from `common` - * The following commands inherit from (and are more specific than) - `build`: `test`, `run`, `clean`, `mobile-install`, `info`, - `print_action`, `config`, `cquery`, and `aquery` - * `coverage`, `fetch`, and `vendor` inherit from `test` - -- Two lines specifying options for the same command at equal specificity are - parsed in the order in which they appear within the file. - -- Because this precedence rule does not match the file order, it helps - readability if you follow the precedence order within rc files: start with - `common` options at the top, and end with the most-specific commands at the - bottom of the file. This way, the order in which the options are read is the - same as the order in which they are applied, which is more intuitive. - -The arguments specified on a line of an rc file may include arguments that are -not options, such as the names of build targets, and so on. These, like the -options specified in the same files, have lower precedence than their siblings -on the command line, and are always prepended to the explicit list of non- -option arguments. - -#### `--config` - -In addition to setting option defaults, the rc file can be used to group options -and provide a shorthand for common groupings. This is done by adding a `:name` -suffix to the command. These options are ignored by default, but will be -included when the option --config=name is present, -either on the command line or in a `.bazelrc` file, recursively, even inside of -another config definition. The options specified by `command:name` will only be -expanded for applicable commands, in the precedence order described above. - -Note: Configs can be defined in any `.bazelrc` file, and that all lines of -the form `command:name` (for applicable commands) will be expanded, across the -different rc files. In order to avoid name conflicts, we suggest that configs -defined in personal rc files start with an underscore (`_`) to avoid -unintentional name sharing. 
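-
-For example, a config groups several related flags under a single name. The
-sketch below is illustrative only; the config name `asan` and the flags chosen
-for it are assumptions, not options this page prescribes:
-
-```
-# Definition of --config=asan
-build:asan --copt=-fsanitize=address
-build:asan --linkopt=-fsanitize=address
-```
-
-With these lines in a `.bazelrc`, running `bazel build --config=asan //foo`
-behaves as if both `build:asan` options had been written out on the command
-line.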
- -`--config=foo` expands to the options defined in -[the rc files](#bazelrc-file-locations) "in-place" so that the options -specified for the config have the same precedence that the `--config=foo` option -had. - -This syntax does not extend to the use of `startup` to set -[startup options](#option-defaults). Setting -`startup:config-name --some_startup_option` in the .bazelrc will be ignored. - -#### `--enable_platform_specific_config` - -Platform specific configs in the `.bazelrc` can be automatically enabled using -`--enable_platform_specific_config`. For example, if the host OS is Linux and -the `build` command is run, the `build:linux` configuration will be -automatically enabled. Supported OS identifiers are `linux`, `macos`, `windows`, -`freebsd`, and `openbsd`. Enabling this flag is equivalent to using -`--config=linux` on Linux, `--config=windows` on Windows, and so on. - -See [--enable_platform_specific_config](/reference/command-line-reference#flag--enable_platform_specific_config). - -#### Example - -Here's an example `~/.bazelrc` file: - -``` -# Bob's Bazel option defaults - -startup --host_jvm_args=-XX:-UseParallelGC -import /home/bobs_project/bazelrc -build --show_timestamps --keep_going --jobs 600 -build --color=yes -query --keep_going - -# Definition of --config=memcheck -build:memcheck --strip=never --test_timeout=3600 -``` - -### Other files governing Bazel's behavior - -#### `.bazelignore` - -You can specify directories within the workspace -that you want Bazel to ignore, such as related projects -that use other build systems. Place a file called -`.bazelignore` at the root of the workspace -and add the directories you want Bazel to ignore, one per -line. Entries are relative to the workspace root. - -### The global bazelrc file - -Bazel reads optional bazelrc files in this order: - -1. System rc-file located at `etc/bazel.bazelrc`. -2. Workspace rc-file located at `$workspace/tools/bazel.rc`. -3. Home rc-file located at `$HOME/.bazelrc` - -Each bazelrc file listed here has a corresponding flag which can be used to -disable them (e.g. `--nosystem_rc`, `--noworkspace_rc`, `--nohome_rc`). You can -also make Bazel ignore all bazelrcs by passing the `--ignore_all_rc_files` -startup option. diff --git a/8.2.1/run/client-server.mdx b/8.2.1/run/client-server.mdx deleted file mode 100644 index 1868635..0000000 --- a/8.2.1/run/client-server.mdx +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: 'Client/server implementation' ---- - - - -The Bazel system is implemented as a long-lived server process. This allows it -to perform many optimizations not possible with a batch-oriented implementation, -such as caching of BUILD files, dependency graphs, and other metadata from one -build to the next. This improves the speed of incremental builds, and allows -different commands, such as `build` and `query` to share the same cache of -loaded packages, making queries very fast. Each server can handle at most one -invocation at a time; further concurrent invocations will either block or -fail-fast (see `--block_for_lock`). - -When you run `bazel`, you're running the client. The client finds the server -based on the [output base](/run/scripts#output-base-option), which by default is -determined by the path of the base workspace directory and your userid, so if -you build in multiple workspaces, you'll have multiple output bases and thus -multiple Bazel server processes. 
Multiple users on the same workstation can -build concurrently in the same workspace because their output bases will differ -(different userids). - -If the client cannot find a running server instance, it starts a new one. It -does this by checking if the output base already exists, implying the blaze -archive has already been unpacked. Otherwise if the output base doesn't exist, -the client unzips the archive's files and sets their `mtime`s to a date 9 years -in the future. Once installed, the client confirms that the `mtime`s of the -unzipped files are equal to the far off date to ensure no installation tampering -has occurred. - -The server process will stop after a period of inactivity (3 hours, by default, -which can be modified using the startup option `--max_idle_secs`). For the most -part, the fact that there is a server running is invisible to the user, but -sometimes it helps to bear this in mind. For example, if you're running scripts -that perform a lot of automated builds in different directories, it's important -to ensure that you don't accumulate a lot of idle servers; you can do this by -explicitly shutting them down when you're finished with them, or by specifying -a short timeout period. - -The name of a Bazel server process appears in the output of `ps x` or `ps -e f` -as bazel(dirname), where _dirname_ is the basename of the -directory enclosing the root of your workspace directory. For example: - -```posix-terminal -ps -e f -16143 ? Sl 3:00 bazel(src-johndoe2) -server -Djava.library.path=... -``` - -This makes it easier to find out which server process belongs to a given -workspace. (Beware that with certain other options to `ps`, Bazel server -processes may be named just `java`.) Bazel servers can be stopped using the -[shutdown](/docs/user-manual#shutdown) command. - -When running `bazel`, the client first checks that the server is the appropriate -version; if not, the server is stopped and a new one started. This ensures that -the use of a long-running server process doesn't interfere with proper -versioning. diff --git a/8.2.1/run/scripts.mdx b/8.2.1/run/scripts.mdx deleted file mode 100644 index f267c90..0000000 --- a/8.2.1/run/scripts.mdx +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: 'Calling Bazel from scripts' ---- - - - -You can call Bazel from scripts to perform a build, run tests, or query -the dependency graph. Bazel has been designed to enable effective scripting, but -this section lists some details to bear in mind to make your scripts more -robust. - -### Choosing the output base - -The `--output_base` option controls where the Bazel process should write the -outputs of a build to, as well as various working files used internally by -Bazel, one of which is a lock that guards against concurrent mutation of the -output base by multiple Bazel processes. - -Choosing the correct output base directory for your script depends on several -factors. If you need to put the build outputs in a specific location, this will -dictate the output base you need to use. If you are making a "read only" call to -Bazel (such as `bazel query`), the locking factors will be more important. In -particular, if you need to run multiple instances of your script concurrently, -you should be mindful that each Blaze server process can handle at most one -invocation [at a time](/run/client-server#clientserver-implementation). 
-Depending on your situation it may make sense for each instance of your script -to wait its turn, or it may make sense to use `--output_base` to run multiple -Blaze servers and use those. - -If you use the default output base value, you will be contending for the same -lock used by the user's interactive Bazel commands. If the user issues -long-running commands such as builds, your script will have to wait for those -commands to complete before it can continue. - -### Notes about server mode - -By default, Bazel uses a long-running [server process](/run/client-server) as an -optimization. When running Bazel in a script, don't forget to call `shutdown` -when you're finished with the server, or, specify `--max_idle_secs=5` so that -idle servers shut themselves down promptly. - -### What exit code will I get? - -Bazel attempts to differentiate failures due to the source code under -consideration from external errors that prevent Bazel from executing properly. -Bazel execution can result in following exit codes: - -**Exit Codes common to all commands:** - -- `0` - Success -- `2` - Command Line Problem, Bad or Illegal flags or command combination, or - Bad Environment Variables. Your command line must be modified. -- `8` - Build Interrupted but we terminated with an orderly shutdown. -- `9` - The server lock is held and `--noblock_for_lock` was passed. -- `32` - External Environment Failure not on this machine. - -- `33` - Bazel ran out of memory and crashed. You need to modify your command line. -- `34` - Reserved for Google-internal use. -- `35` - Reserved for Google-internal use. -- `36` - Local Environmental Issue, suspected permanent. -- `37` - Unhandled Exception / Internal Bazel Error. -- `38` - Transient error publishing results to the Build Event Service. -- `39` - Blobs required by Bazel are evicted from Remote Cache. -- `41-44` - Reserved for Google-internal use. -- `45` - Persistent error publishing results to the Build Event Service. -- `47` - Reserved for Google-internal use. -- `49` - Reserved for Google-internal use. - -**Return codes for commands `bazel build`, `bazel test`:** - -- `1` - Build failed. -- `3` - Build OK, but some tests failed or timed out. -- `4` - Build successful but no tests were found even though testing was - requested. - - -**For `bazel run`:** - -- `1` - Build failed. -- If the build succeeds but the executed subprocess returns a non-zero exit - code it will be the exit code of the command as well. - -**For `bazel query`:** - -- `3` - Partial success, but the query encountered 1 or more errors in the - input BUILD file set and therefore the results of the operation are not 100% - reliable. This is likely due to a `--keep_going` option on the command line. -- `7` - Command failure. - -Future Bazel versions may add additional exit codes, replacing generic failure -exit code `1` with a different non-zero value with a particular meaning. -However, all non-zero exit values will always constitute an error. - - -### Reading the .bazelrc file - -By default, Bazel reads the [`.bazelrc` file](/run/bazelrc) from the base -workspace directory or the user's home directory. Whether or not this is -desirable is a choice for your script; if your script needs to be perfectly -hermetic (such as when doing release builds), you should disable reading the -.bazelrc file by using the option `--bazelrc=/dev/null`. If you want to perform -a build using the user's preferred settings, the default behavior is better. 
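-
-Putting these pieces together, a hermetic wrapper script might look like the
-following sketch. The output base path and the `//foo/...` target pattern are
-placeholders for illustration; the exit-code handling follows the tables
-above:
-
-```shell
-#!/bin/bash
-# Ignore user and workspace .bazelrc files, pin a private output base, and
-# let the server shut itself down shortly after the script finishes.
-bazel --bazelrc=/dev/null \
-    --output_base="$HOME/.cache/my-script-output-base" \
-    --max_idle_secs=5 \
-    test //foo/...
-exit_code=$?
-
-case "$exit_code" in
-  0) echo "Build and tests succeeded" ;;
-  3) echo "Build OK, but some tests failed or timed out" ;;
-  4) echo "Build OK, but no tests were found" ;;
-  *) echo "Bazel failed with exit code $exit_code" ;;
-esac
-exit "$exit_code"
-```
-
-Because the script pins its own output base, it does not contend for the lock
-used by the user's interactive Bazel commands in the same workspace.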
- -### Command log - -The Bazel output is also available in a command log file which you can find with -the following command: - -```posix-terminal -bazel info command_log -``` - -The command log file contains the interleaved stdout and stderr streams of the -most recent Bazel command. Note that running `bazel info` will overwrite the -contents of this file, since it then becomes the most recent Bazel command. -However, the location of the command log file will not change unless you change -the setting of the `--output_base` or `--output_user_root` options. - -### Parsing output - -The Bazel output is quite easy to parse for many purposes. Two options that may -be helpful for your script are `--noshow_progress` which suppresses progress -messages, and --show_result n, which controls whether or -not "build up-to-date" messages are printed; these messages may be parsed to -discover which targets were successfully built, and the location of the output -files they created. Be sure to specify a very large value of _n_ if you rely on -these messages. - -## Troubleshooting performance by profiling - -See the [Performance Profiling](/rules/performance#performance-profiling) section. diff --git a/8.2.1/start/android-app.mdx b/8.2.1/start/android-app.mdx deleted file mode 100644 index b0e6f1b..0000000 --- a/8.2.1/start/android-app.mdx +++ /dev/null @@ -1,391 +0,0 @@ ---- -title: 'Bazel Tutorial: Build an Android App' ---- - - -**Note:** There are known limitations on using Bazel for building Android apps. -Visit the Github [team-Android hotlist](https://github.com/bazelbuild/bazel/issues?q=is%3Aissue+is%3Aopen+label%3Ateam-Android) to see the list of known issues. While the Bazel team and Open Source Software (OSS) contributors work actively to address known issues, users should be aware that Android Studio does not officially support Bazel projects. - -This tutorial covers how to build a simple Android app using Bazel. - -Bazel supports building Android apps using the -[Android rules](/reference/be/android). - -This tutorial is intended for Windows, macOS and Linux users and does not -require experience with Bazel or Android app development. You do not need to -write any Android code in this tutorial. - -## What you'll learn - -In this tutorial you learn how to: - -* Set up your environment by installing Bazel and Android Studio, and - downloading the sample project. -* Set up a Bazel workspace that contains the source code - for the app and a `MODULE.bazel` file that identifies the top level of the - workspace directory. -* Update the `MODULE.bazel` file to contain references to the required - external dependencies, like the Android SDK. -* Create a `BUILD` file. -* Build the app with Bazel. -* Deploy and run the app on an Android emulator or physical device. - -## Before you begin - -### Install Bazel - -Before you begin the tutorial, install the following software: - -* **Bazel.** To install, follow the [installation instructions](/install). -* **Android Studio.** To install, follow the steps to [download Android - Studio](https://developer.android.com/sdk/index.html). - Execute the setup wizard to download the SDK and configure your environment. -* (Optional) **Git.** Use `git` to download the Android app project. - -### Get the sample project - -For the sample project, use a basic Android app project in -[Bazel's examples repository](https://github.com/bazelbuild/examples). 
- -This app has a single button that prints a greeting when clicked: - -![Button greeting](/docs/images/android_tutorial_app.png "Tutorial app button greeting") - -**Figure 1.** Android app button greeting. - -Clone the repository with `git` (or [download the ZIP file -directly](https://github.com/bazelbuild/examples/archive/master.zip)): - -```posix-terminal -git clone https://github.com/bazelbuild/examples -``` - -The sample project for this tutorial is in `examples/android/tutorial`. For -the rest of the tutorial, you will be executing commands in this directory. - -### Review the source files - -Take a look at the source files for the app. - -``` -. -├── README.md -└── src - └── main - ├── AndroidManifest.xml - └── java - └── com - └── example - └── bazel - ├── AndroidManifest.xml - ├── Greeter.java - ├── MainActivity.java - └── res - ├── layout - │ └── activity_main.xml - └── values - ├── colors.xml - └── strings.xml -``` - -The key files and directories are: - -| Name | Location | -| ----------------------- | ---------------------------------------------------------------------------------------- | -| Android manifest files | `src/main/AndroidManifest.xml` and `src/main/java/com/example/bazel/AndroidManifest.xml` | -| Android source files | `src/main/java/com/example/bazel/MainActivity.java` and `Greeter.java` | -| Resource file directory | `src/main/java/com/example/bazel/res/` | - - -## Build with Bazel - -### Set up the workspace - -A [workspace](/concepts/build-ref#workspace) is a directory that contains the -source files for one or more software projects, and has a `MODULE.bazel` file at -its root. - -The `MODULE.bazel` file may be empty or may contain references to [external -dependencies](/external/overview) required to build your project. - -First, run the following command to create an empty `MODULE.bazel` file: - -| OS | Command | -| ------------------------ | ----------------------------------- | -| Linux, macOS | `touch MODULE.bazel` | -| Windows (Command Prompt) | `type nul > MODULE.bazel` | -| Windows (PowerShell) | `New-Item MODULE.bazel -ItemType file` | - -### Running Bazel - -You can now check if Bazel is running correctly with the command: - -```posix-terminal -bazel info workspace -``` - -If Bazel prints the path of the current directory, you're good to go! If the -`MODULE.bazel` file does not exist, you may see an error message like: - -``` -ERROR: The 'info' command is only supported from within a workspace. -``` - -### Integrate with the Android SDK - -Bazel needs to run the Android SDK -[build tools](https://developer.android.com/tools/revisions/build-tools.html) -to build the app. This means that you need to add some information to your -`MODULE.bazel` file so that Bazel knows where to find them. - -Add the following line to your `MODULE.bazel` file: - -```python -bazel_dep(name = "rules_android", version = "0.5.1") -``` - -This will use the Android SDK at the path referenced by the `ANDROID_HOME` -environment variable, and automatically detect the highest API level and the -latest version of build tools installed within that location. - -You can set the `ANDROID_HOME` variable to the location of the Android SDK. Find -the path to the installed SDK using Android Studio's [SDK -Manager](https://developer.android.com/studio/intro/update#sdk-manager). 
-Assuming the SDK is installed to default locations, you can use the following
-commands to set the `ANDROID_HOME` variable:
-
-| OS                       | Command                                              |
-| ------------------------ | --------------------------------------------------- |
-| Linux                    | `export ANDROID_HOME=$HOME/Android/Sdk/`            |
-| macOS                    | `export ANDROID_HOME=$HOME/Library/Android/sdk`     |
-| Windows (Command Prompt) | `set ANDROID_HOME=%LOCALAPPDATA%\Android\Sdk`       |
-| Windows (PowerShell)     | `$env:ANDROID_HOME="$env:LOCALAPPDATA\Android\Sdk"` |
-
-The above commands set the variable only for the current shell session. To make
-them permanent, run the following commands:
-
-| OS                       | Command                                              |
-| ------------------------ | --------------------------------------------------- |
-| Linux                    | `echo "export ANDROID_HOME=$HOME/Android/Sdk/" >> ~/.bashrc` |
-| macOS                    | `echo "export ANDROID_HOME=$HOME/Library/Android/sdk" >> ~/.bashrc` |
-| Windows (Command Prompt) | `setx ANDROID_HOME "%LOCALAPPDATA%\Android\Sdk"`    |
-| Windows (PowerShell)     | `[System.Environment]::SetEnvironmentVariable('ANDROID_HOME', "$env:LOCALAPPDATA\Android\Sdk", [System.EnvironmentVariableTarget]::User)` |
-
-
-**Optional:** If you want to compile native code into your Android app, you
-also need to download the [Android
-NDK](https://developer.android.com/ndk/downloads/index.html)
-and use `rules_android_ndk` by adding the following line to your `MODULE.bazel` file:
-
-```python
-bazel_dep(name = "rules_android_ndk", version = "0.1.2")
-```
-
-
-For more information, read [Using the Android Native Development Kit with
-Bazel](/docs/android-ndk).
-
-It's not necessary to set the API levels to the same value for the SDK and NDK.
-[This page](https://developer.android.com/ndk/guides/stable_apis.html)
-contains a map from Android releases to NDK-supported API levels.
-
-### Create a BUILD file
-
-A [`BUILD` file](/concepts/build-files) describes the relationship
-between a set of build outputs, like compiled Android resources from `aapt` or
-class files from `javac`, and their dependencies. These dependencies may be
-source files (Java, C++) in your workspace or other build outputs. `BUILD` files
-are written in a language called **Starlark**.
-
-`BUILD` files are part of a concept in Bazel known as the *package hierarchy*.
-The package hierarchy is a logical structure that overlays the directory
-structure in your workspace. Each [package](/concepts/build-ref#packages) is a
-directory (and its subdirectories) that contains a related set of source files
-and a `BUILD` file. The package also includes any subdirectories, excluding
-those that contain their own `BUILD` file. The *package name* is the path to the
-`BUILD` file relative to the `MODULE.bazel` file.
-
-Note that Bazel's package hierarchy is conceptually different from the Java
-package hierarchy of your Android app directory where the `BUILD` file is
-located, although the directories may be organized identically.
-
-For the simple Android app in this tutorial, the source files in `src/main/`
-comprise a single Bazel package. A more complex project may have many nested
-packages.
-
-#### Add an android_library rule
-
-A `BUILD` file contains several different types of declarations for Bazel. The
-most important type is the
-[build rule](/concepts/build-files#types-of-build-rules), which tells
-Bazel how to build an intermediate or final software output from a set of source
-files or other dependencies.
-Bazel provides two build rules,
-[`android_library`](/reference/be/android#android_library) and
-[`android_binary`](/reference/be/android#android_binary), that you can use to
-build an Android app.
-
-For this tutorial, you'll first use the
-`android_library` rule to tell Bazel to build an [Android library
-module](http://developer.android.com/tools/projects/index.html#LibraryProjects)
-from the app source code and resource files. You'll then use the
-`android_binary` rule to tell Bazel how to build the Android application package.
-
-Create a new `BUILD` file in the `src/main/java/com/example/bazel` directory,
-and declare a new `android_library` target:
-
-`src/main/java/com/example/bazel/BUILD`:
-
-```python
-package(
-    default_visibility = ["//src:__subpackages__"],
-)
-
-android_library(
-    name = "greeter_activity",
-    srcs = [
-        "Greeter.java",
-        "MainActivity.java",
-    ],
-    manifest = "AndroidManifest.xml",
-    resource_files = glob(["res/**"]),
-)
-```
-
-The `android_library` build rule contains a set of attributes that specify the
-information that Bazel needs to build a library module from the source files.
-Note also that the name of the rule is `greeter_activity`. You'll reference the
-rule using this name as a dependency in the `android_binary` rule.
-
-#### Add an android_binary rule
-
-The [`android_binary`](/reference/be/android#android_binary) rule builds
-the Android application package (`.apk` file) for your app.
-
-Create a new `BUILD` file in the `src/main/` directory,
-and declare a new `android_binary` target:
-
-`src/main/BUILD`:
-
-```python
-android_binary(
-    name = "app",
-    manifest = "AndroidManifest.xml",
-    deps = ["//src/main/java/com/example/bazel:greeter_activity"],
-)
-```
-
-Here, the `deps` attribute references the output of the `greeter_activity` rule
-you added to the `BUILD` file above. This means that when Bazel builds the
-output of this rule, it first checks to see if the output of the
-`greeter_activity` library rule has been built and is up-to-date. If not, Bazel
-builds it and then uses that output to build the application package file.
-
-Now, save and close the file.
-
-### Build the app
-
-Try building the app! Run the following command to build the
-`android_binary` target:
-
-```posix-terminal
-bazel build //src/main:app
-```
-
-The [`build`](/docs/user-manual#build) subcommand instructs Bazel to build the
-target that follows. The target is specified as the name of a build rule inside
-a `BUILD` file, along with the package path relative to your workspace
-directory. For this example, the target is `app` and the package path is
-`//src/main/`.
-
-Note that you can sometimes omit the package path or target name, depending on
-your current working directory at the command line and the name of the target.
-For more details about target labels and paths, see [Labels](/concepts/labels).
-
-Bazel will start to build the sample app. During the build process, its output
-will appear similar to the following:
-
-```bash
-INFO: Analysed target //src/main:app (0 packages loaded, 0 targets configured).
-INFO: Found 1 target...
-Target //src/main:app up-to-date:
-  bazel-bin/src/main/app_deploy.jar
-  bazel-bin/src/main/app_unsigned.apk
-  bazel-bin/src/main/app.apk
-```
-
-#### Locate the build outputs
-
-Bazel puts the outputs of both intermediate and final build operations in a set
-of per-user, per-workspace output directories.
-These directories are symlinked
-from the following locations at the top level of the project directory, where
-the `MODULE.bazel` file is:
-
-* `bazel-bin` stores binary executables and other runnable build outputs
-* `bazel-genfiles` stores intermediate source files that are generated by
-  Bazel rules
-* `bazel-out` stores other types of build outputs
-
-Bazel stores the Android `.apk` file generated using the `android_binary` rule
-in the `bazel-bin/src/main` directory, where the subdirectory name `src/main` is
-derived from the name of the Bazel package.
-
-At a command prompt, list the contents of this directory and find the `app.apk`
-file:
-
-| OS                       | Command                  |
-| ------------------------ | ------------------------ |
-| Linux, macOS             | `ls bazel-bin/src/main`  |
-| Windows (Command Prompt) | `dir bazel-bin\src\main` |
-| Windows (PowerShell)     | `ls bazel-bin\src\main`  |
-
-
-### Run the app
-
-You can now deploy the app to a connected Android device or emulator from the
-command line using the [`bazel
-mobile-install`](/docs/user-manual#mobile-install) command. This command uses
-the Android Debug Bridge (`adb`) to communicate with the device. You must set up
-your device to use `adb` following the instructions in [Android Debug
-Bridge](http://developer.android.com/tools/help/adb.html) before deployment. You
-can also choose to install the app on the Android emulator included in Android
-Studio. Make sure the emulator is running before executing the command below.
-
-Enter the following:
-
-```posix-terminal
-bazel mobile-install //src/main:app
-```
-
-Next, find and launch the "Bazel Tutorial App":
-
-![Bazel tutorial app](/docs/images/android_tutorial_before.png "Bazel tutorial app")
-
-**Figure 2.** Bazel tutorial app.
-
-**Congratulations! You have just installed your first Bazel-built Android app.**
-
-Note that the `mobile-install` subcommand also supports the
-[`--incremental`](/docs/user-manual#mobile-install) flag that can be used to
-deploy only those parts of the app that have changed since the last deployment.
-
-It also supports the `--start_app` flag to start the app immediately upon
-installing it.
-
-## Further reading
-
-For more details, see these pages:
-
-* Open issues on [GitHub](https://github.com/bazelbuild/bazel/issues)
-* More information on [mobile-install](/docs/mobile-install)
-* Integrate external dependencies like AppCompat, Guava and JUnit from Maven
-  repositories using [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external)
-* Run Robolectric tests with the [robolectric-bazel](https://github.com/robolectric/robolectric-bazel)
-  integration.
-* Testing your app with [Android instrumentation tests](/docs/android-instrumentation-test)
-* Integrating C and C++ code into your Android app with the [NDK](/docs/android-ndk)
-* See more Bazel example projects:
-  * [a Kotlin app](https://github.com/bazelbuild/rules_jvm_external/tree/master/examples/android_kotlin_app)
-  * [Robolectric testing](https://github.com/bazelbuild/rules_jvm_external/tree/master/examples/android_local_test)
-  * [Espresso testing](https://github.com/bazelbuild/rules_jvm_external/tree/master/examples/android_instrumentation_test)
-
-Happy building!
diff --git a/8.2.1/start/cpp.mdx b/8.2.1/start/cpp.mdx
deleted file mode 100644
index adb7c71..0000000
--- a/8.2.1/start/cpp.mdx
+++ /dev/null
@@ -1,411 +0,0 @@
----
-title: 'Bazel Tutorial: Build a C++ Project'
----
-
-
-
-## Introduction
-
-New to Bazel? You're in the right place.
-Follow this First Build tutorial for a
-simplified introduction to using Bazel. This tutorial defines key terms as they
-are used in Bazel's context and walks you through the basics of the Bazel
-workflow. Starting with the tools you need, you will build and run three
-projects with increasing complexity and learn how and why they get more complex.
-
-While Bazel is a [build system](https://bazel.build/basics/build-systems) that
-supports multi-language builds, this tutorial uses a C++ project as an example
-and provides the general guidelines and flow that apply to most languages.
-
-Estimated completion time: 30 minutes.
-
-### Prerequisites
-
-Start by [installing Bazel](https://bazel.build/install), if you haven't
-already. This tutorial uses Git for source control, so for best results [install
-Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) as well.
-
-Next, retrieve the sample project from Bazel's GitHub repository by running the
-following in your command-line tool of choice:
-
-```posix-terminal
-git clone https://github.com/bazelbuild/examples
-```
-
-The sample project for this tutorial is in the `examples/cpp-tutorial`
-directory.
-
-Take a look at how it's structured:
-
-```none
-examples
-└── cpp-tutorial
-    ├──stage1
-    │  ├── main
-    │  │   ├── BUILD
-    │  │   └── hello-world.cc
-    │  └── MODULE.bazel
-    ├──stage2
-    │  ├── main
-    │  │   ├── BUILD
-    │  │   ├── hello-world.cc
-    │  │   ├── hello-greet.cc
-    │  │   └── hello-greet.h
-    │  └── MODULE.bazel
-    └──stage3
-       ├── main
-       │   ├── BUILD
-       │   ├── hello-world.cc
-       │   ├── hello-greet.cc
-       │   └── hello-greet.h
-       ├── lib
-       │   ├── BUILD
-       │   ├── hello-time.cc
-       │   └── hello-time.h
-       └── MODULE.bazel
-```
-
-There are three sets of files, each set representing a stage in this tutorial.
-In the first stage, you will build a single
-[target](https://bazel.build/reference/glossary#target) residing in a single
-[package](https://bazel.build/reference/glossary#package). In the second stage, you will
-build both a binary and a library from a single package. In the third and final
-stage, you will build a project with multiple packages and build it with
-multiple targets.
-
-### Summary: Introduction
-
-By installing Bazel (and Git) and cloning the repository for this tutorial, you
-have laid the foundation for your first build with Bazel. Continue to the next
-section to define some terms and set up your
-[workspace](https://bazel.build/reference/glossary#workspace).
-
-## Getting started
-
-Before you can build a project, you need to set up its workspace. A workspace
-is a directory that holds your project's source files and Bazel's build outputs.
-It also contains these significant files:
-
-* The `MODULE.bazel` file, which identifies the directory and its contents as
-  a Bazel workspace and lives at the root of the project's directory
-  structure. It's also where you specify your external dependencies.
-* One or more [`BUILD`
-  files](https://bazel.build/reference/glossary#build-file), which tell Bazel
-  how to build different parts of the project. A directory within the
-  workspace that contains a `BUILD` file is a
-  [package](https://bazel.build/reference/glossary#package). (More on packages
-  later in this tutorial.)
-
-In future projects, to designate a directory as a Bazel workspace, create an
-empty file named `MODULE.bazel` in that directory. For the purposes of this
-tutorial, a `MODULE.bazel` file is already present in each stage.
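-
-As a quick sketch for your own future projects (you don't need to do this here,
-since each stage already ships with a `MODULE.bazel` file), bootstrapping a new
-workspace can be as simple as creating that empty file; the `my-project`
-directory name below is just a placeholder:
-
-```posix-terminal
-mkdir my-project && cd my-project
-touch MODULE.bazel
-```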
-
-### Understand the BUILD file
-
-A `BUILD` file contains several different types of instructions for Bazel. Each
-`BUILD` file requires at least one
-[rule](https://bazel.build/reference/glossary#rule) as a set of instructions,
-which tells Bazel how to build the outputs you want, such as executable binaries
-or libraries. Each instance of a build rule in the `BUILD` file is called a
-[target](https://bazel.build/reference/glossary#target) and points to a specific
-set of source files and
-[dependencies](https://bazel.build/reference/glossary#dependency). A target can
-also point to other targets.
-
-Take a look at the `BUILD` file in the `cpp-tutorial/stage1/main` directory:
-
-```bazel
-cc_binary(
-    name = "hello-world",
-    srcs = ["hello-world.cc"],
-)
-```
-
-In our example, the `hello-world` target instantiates Bazel's built-in
-[`cc_binary` rule](https://bazel.build/reference/be/c-cpp#cc_binary). The rule
-tells Bazel to build a self-contained executable binary from the
-`hello-world.cc` source file with no dependencies.
-
-### Summary: getting started
-
-Now you are familiar with some key terms, and what they mean in the context of
-this project and Bazel in general. In the next section, you will build and test
-Stage 1 of the project.
-
-## Stage 1: single target, single package
-
-It's time to build the first part of the project. For a visual reference, the
-structure of the Stage 1 section of the project is:
-
-```none
-examples
-└── cpp-tutorial
-    └──stage1
-       ├── main
-       │   ├── BUILD
-       │   └── hello-world.cc
-       └── MODULE.bazel
-```
-
-Run the following to move to the `cpp-tutorial/stage1` directory:
-
-```posix-terminal
-cd cpp-tutorial/stage1
-```
-
-Next, run:
-
-```posix-terminal
-bazel build //main:hello-world
-```
-
-In the target label, the `//main:` part is the location of the `BUILD` file
-relative to the root of the workspace, and `hello-world` is the target name in
-the `BUILD` file.
-
-Bazel produces something that looks like this:
-
-```none
-INFO: Found 1 target...
-Target //main:hello-world up-to-date:
-  bazel-bin/main/hello-world
-INFO: Elapsed time: 2.267s, Critical Path: 0.25s
-```
-
-You just built your first Bazel target. Bazel places build outputs in the
-`bazel-bin` directory at the root of the workspace.
-
-Now test your freshly built binary:
-
-```posix-terminal
-bazel-bin/main/hello-world
-```
-
-This results in a printed "`Hello world`" message.
-
-Here's the dependency graph of Stage 1:
-
-![Dependency graph for hello-world displays a single target with a single source
-file.](/docs/images/cpp-tutorial-stage1.png "Dependency graph for hello-world
-displays a single target with a single source file.")
-
-### Summary: stage 1
-
-Now that you have completed your first build, you have a basic idea of how a
-build is structured. In the next stage, you will add complexity by adding
-another target.
-
-## Stage 2: multiple build targets
-
-While a single target is sufficient for small projects, you may want to split
-larger projects into multiple targets and packages. This allows for fast
-incremental builds – that is, Bazel only rebuilds what's changed – and speeds up
-your builds by building multiple parts of a project at once. This stage of the
-tutorial adds a target, and the next adds a package.
-
-This is the directory you are working with for Stage 2:
-
-```none
-    ├──stage2
-    │  ├── main
-    │  │   ├── BUILD
-    │  │   ├── hello-world.cc
-    │  │   ├── hello-greet.cc
-    │  │   └── hello-greet.h
-    │  └── MODULE.bazel
-```
-
-Take a look at the `BUILD` file in the `cpp-tutorial/stage2/main` directory:
-
-```bazel
-cc_library(
-    name = "hello-greet",
-    srcs = ["hello-greet.cc"],
-    hdrs = ["hello-greet.h"],
-)
-
-cc_binary(
-    name = "hello-world",
-    srcs = ["hello-world.cc"],
-    deps = [
-        ":hello-greet",
-    ],
-)
-```
-
-With this `BUILD` file, Bazel first builds the `hello-greet` library (using
-Bazel's built-in [`cc_library`
-rule](https://bazel.build/reference/be/c-cpp#cc_library)), then the
-`hello-world` binary. The `deps` attribute in the `hello-world` target tells
-Bazel that the `hello-greet` library is required to build the `hello-world`
-binary.
-
-Before you can build this new version of the project, you need to change
-directories, switching to the `cpp-tutorial/stage2` directory by running:
-
-```posix-terminal
-cd ../stage2
-```
-
-Now you can build the new binary using the following familiar command:
-
-```posix-terminal
-bazel build //main:hello-world
-```
-
-Once again, Bazel produces something that looks like this:
-
-```none
-INFO: Found 1 target...
-Target //main:hello-world up-to-date:
-  bazel-bin/main/hello-world
-INFO: Elapsed time: 2.399s, Critical Path: 0.30s
-```
-
-Now you can test your freshly built binary, which returns another "`Hello
-world`":
-
-```posix-terminal
-bazel-bin/main/hello-world
-```
-
-If you now modify `hello-greet.cc` and rebuild the project, Bazel only
-recompiles that file.
-
-Looking at the dependency graph, you can see that `hello-world` depends on an
-extra input named `hello-greet`:
-
-![Dependency graph for `hello-world` displays dependency changes after
-modification to the file.](/docs/images/cpp-tutorial-stage2.png "Dependency
-graph for `hello-world` displays dependency changes after modification to the
-file.")
-
-### Summary: stage 2
-
-You've now built the project with two targets. The `hello-world` target builds
-one source file and depends on one other target (`//main:hello-greet`), which
-builds two additional source files. In the next section, take it a step further
-and add another package.
-
-## Stage 3: multiple packages
-
-This next stage adds another layer of complication and builds a project with
-multiple packages. Take a look at the structure and contents of the
-`cpp-tutorial/stage3` directory:
-
-```none
-└──stage3
-   ├── main
-   │   ├── BUILD
-   │   ├── hello-world.cc
-   │   ├── hello-greet.cc
-   │   └── hello-greet.h
-   ├── lib
-   │   ├── BUILD
-   │   ├── hello-time.cc
-   │   └── hello-time.h
-   └── MODULE.bazel
-```
-
-You can see that now there are two sub-directories, and each contains a `BUILD`
-file. Therefore, to Bazel, the workspace now contains two packages: `lib` and
-`main`.
-
-Take a look at the `lib/BUILD` file:
-
-```bazel
-cc_library(
-    name = "hello-time",
-    srcs = ["hello-time.cc"],
-    hdrs = ["hello-time.h"],
-    visibility = ["//main:__pkg__"],
-)
-```
-
-And at the `main/BUILD` file:
-
-```bazel
-cc_library(
-    name = "hello-greet",
-    srcs = ["hello-greet.cc"],
-    hdrs = ["hello-greet.h"],
-)
-
-cc_binary(
-    name = "hello-world",
-    srcs = ["hello-world.cc"],
-    deps = [
-        ":hello-greet",
-        "//lib:hello-time",
-    ],
-)
-```
-
-The `hello-world` target in the main package depends on the `hello-time` target
-in the `lib` package (hence the target label `//lib:hello-time`) - Bazel knows
-this through the `deps` attribute.
You can see this reflected in the dependency -graph: - -![Dependency graph for `hello-world` displays how the target in the main package -depends on the target in the `lib` -package.](/docs/images/cpp-tutorial-stage3.png "Dependency graph for -`hello-world` displays how the target in the main package depends on the target -in the `lib` package.") - -For the build to succeed, you make the `//lib:hello-time` target in `lib/BUILD` -explicitly visible to targets in `main/BUILD` using the visibility attribute. -This is because by default targets are only visible to other targets in the same -`BUILD` file. Bazel uses target visibility to prevent issues such as libraries -containing implementation details leaking into public APIs. - -Now build this final version of the project. Switch to the `cpp-tutorial/stage3` -directory by running: - -```posix-terminal -cd ../stage3 -``` - -Once again, run the following command: - -```posix-terminal -bazel build //main:hello-world -``` - -Bazel produces something that looks like this: - -```none -INFO: Found 1 target... -Target //main:hello-world up-to-date: - bazel-bin/main/hello-world -INFO: Elapsed time: 0.167s, Critical Path: 0.00s -``` - -Now test the last binary of this tutorial for a final `Hello world` message: - -```posix-terminal -bazel-bin/main/hello-world -``` - -### Summary: stage 3 - -You've now built the project as two packages with three targets and understand -the dependencies between them, which equips you to go forth and build future -projects with Bazel. In the next section, take a look at how to continue your -Bazel journey. - -## Next steps - -You've now completed your first basic build with Bazel, but this is just the -start. Here are some more resources to continue learning with Bazel: - -* To keep focusing on C++, read about common [C++ build use - cases](https://bazel.build/tutorials/cpp-use-cases). -* To get started with building other applications with Bazel, see the - tutorials for [Java](https://bazel.build/start/java), [Android - application](https://bazel.build/start/android-app), or [iOS - application](https://bazel.build/start/ios-app). -* To learn more about working with local and remote repositories, read about - [external dependencies](https://bazel.build/docs/external). -* To learn more about Bazel's other rules, see this [reference - guide](https://bazel.build/rules). - -Happy building! diff --git a/8.2.1/start/ios-app.mdx b/8.2.1/start/ios-app.mdx deleted file mode 100644 index 0b860ab..0000000 --- a/8.2.1/start/ios-app.mdx +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: 'Bazel Tutorial: Build an iOS App' ---- - - -This tutorial has been moved into the [bazelbuild/rules_apple](https://github.com/bazelbuild/rules_apple/blob/master/doc/tutorials/ios-app.md) repository. diff --git a/8.2.1/start/java.mdx b/8.2.1/start/java.mdx deleted file mode 100644 index b892917..0000000 --- a/8.2.1/start/java.mdx +++ /dev/null @@ -1,436 +0,0 @@ ---- -title: 'Bazel Tutorial: Build a Java Project' ---- - - - -This tutorial covers the basics of building Java applications with -Bazel. You will set up your workspace and build a simple Java project that -illustrates key Bazel concepts, such as targets and `BUILD` files. - -Estimated completion time: 30 minutes. 
- -## What you'll learn - -In this tutorial you learn how to: - -* Build a target -* Visualize the project's dependencies -* Split the project into multiple targets and packages -* Control target visibility across packages -* Reference targets through labels -* Deploy a target - -## Before you begin - -### Install Bazel - -To prepare for the tutorial, first [Install Bazel](/install) if -you don't have it installed already. - -### Install the JDK - -1. Install Java JDK (preferred version is 11, however versions between 8 and 15 are supported). - -2. Set the JAVA\_HOME environment variable to point to the JDK. - * On Linux/macOS: - - export JAVA_HOME="$(dirname $(dirname $(realpath $(which javac))))" - * On Windows: - 1. Open Control Panel. - 2. Go to "System and Security" > "System" > "Advanced System Settings" > "Advanced" tab > "Environment Variables..." . - 3. Under the "User variables" list (the one on the top), click "New...". - 4. In the "Variable name" field, enter `JAVA_HOME`. - 5. Click "Browse Directory...". - 6. Navigate to the JDK directory (for example `C:\Program Files\Java\jdk1.8.0_152`). - 7. Click "OK" on all dialog windows. - -### Get the sample project - -Retrieve the sample project from Bazel's GitHub repository: - -```posix-terminal -git clone https://github.com/bazelbuild/examples -``` - -The sample project for this tutorial is in the `examples/java-tutorial` -directory and is structured as follows: - -``` -java-tutorial -├── BUILD -├── src -│ └── main -│ └── java -│ └── com -│ └── example -│ ├── cmdline -│ │ ├── BUILD -│ │ └── Runner.java -│ ├── Greeting.java -│ └── ProjectRunner.java -└── MODULE.bazel -``` - -## Build with Bazel - -### Set up the workspace - -Before you can build a project, you need to set up its workspace. A workspace is -a directory that holds your project's source files and Bazel's build outputs. It -also contains files that Bazel recognizes as special: - -* The `MODULE.bazel` file, which identifies the directory and its contents as a - Bazel workspace and lives at the root of the project's directory structure, - -* One or more `BUILD` files, which tell Bazel how to build different parts of - the project. (A directory within the workspace that contains a `BUILD` file - is a *package*. You will learn about packages later in this tutorial.) - -To designate a directory as a Bazel workspace, create an empty file named -`MODULE.bazel` in that directory. - -When Bazel builds the project, all inputs and dependencies must be in the same -workspace. Files residing in different workspaces are independent of one -another unless linked, which is beyond the scope of this tutorial. - -### Understand the BUILD file - -A `BUILD` file contains several different types of instructions for Bazel. -The most important type is the *build rule*, which tells Bazel how to build the -desired outputs, such as executable binaries or libraries. Each instance -of a build rule in the `BUILD` file is called a *target* and points to a -specific set of source files and dependencies. A target can also point to other -targets. - -Take a look at the `java-tutorial/BUILD` file: - -```python -java_binary( - name = "ProjectRunner", - srcs = glob(["src/main/java/com/example/*.java"]), -) -``` - -In our example, the `ProjectRunner` target instantiates Bazel's built-in -[`java_binary` rule](/reference/be/java#java_binary). The rule tells Bazel to -build a `.jar` file and a wrapper shell script (both named after the target). 
-
-The attributes in the target explicitly state its dependencies and options.
-While the `name` attribute is mandatory, many are optional. For example, in the
-`ProjectRunner` rule target, `name` is the name of the target, `srcs` specifies
-the source files that Bazel uses to build the target, and `main_class` specifies
-the class that contains the main method. (You may have noticed that our example
-uses [glob](/reference/be/functions#glob) to pass a set of source files to Bazel
-instead of listing them one by one.)
-
-### Build the project
-
-To build your sample project, navigate to the `java-tutorial` directory
-and run:
-
-```posix-terminal
-bazel build //:ProjectRunner
-```
-In the target label, the `//` part is the location of the `BUILD` file
-relative to the root of the workspace (in this case, the root itself),
-and `ProjectRunner` is the target name in the `BUILD` file. (You will
-learn about target labels in more detail at the end of this tutorial.)
-
-Bazel produces output similar to the following:
-
-```bash
-INFO: Found 1 target...
-Target //:ProjectRunner up-to-date:
-  bazel-bin/ProjectRunner.jar
-  bazel-bin/ProjectRunner
-INFO: Elapsed time: 1.021s, Critical Path: 0.83s
-```
-
-Congratulations, you just built your first Bazel target! Bazel places build
-outputs in the `bazel-bin` directory at the root of the workspace. Browse
-through its contents to get an idea of Bazel's output structure.
-
-Now test your freshly built binary:
-
-```posix-terminal
-bazel-bin/ProjectRunner
-```
-
-### Review the dependency graph
-
-Bazel requires build dependencies to be explicitly declared in `BUILD` files.
-Bazel uses those statements to create the project's dependency graph, which
-enables accurate incremental builds.
-
-To visualize the sample project's dependencies, you can generate a text
-representation of the dependency graph by running this command at the
-workspace root:

-```posix-terminal
-bazel query --notool_deps --noimplicit_deps "deps(//:ProjectRunner)" --output graph
-```
-
-The above command tells Bazel to look for all dependencies for the target
-`//:ProjectRunner` (excluding host and implicit dependencies) and format the
-output as a graph.
-
-Then, paste the text into [GraphViz](http://www.webgraphviz.com/).
-
-As you can see, the project has a single target that builds two source files with
-no additional dependencies:
-
-![Dependency graph of the target 'ProjectRunner'](/docs/images/tutorial_java_01.svg)
-
-Now that you have set up your workspace, built your project, and examined its
-dependencies, you can add some complexity.
-
-## Refine your Bazel build
-
-While a single target is sufficient for small projects, you may want to split
-larger projects into multiple targets and packages to allow for fast incremental
-builds (that is, only rebuild what's changed) and to speed up your builds by
-building multiple parts of a project at once.
-
-### Specify multiple build targets
-
-You can split the sample project build into two targets. Replace the contents of
-the `java-tutorial/BUILD` file with the following:
-
-```python
-java_binary(
-    name = "ProjectRunner",
-    srcs = ["src/main/java/com/example/ProjectRunner.java"],
-    main_class = "com.example.ProjectRunner",
-    deps = [":greeter"],
-)
-
-java_library(
-    name = "greeter",
-    srcs = ["src/main/java/com/example/Greeting.java"],
-)
-```
-
-With this configuration, Bazel first builds the `greeter` library, then the
-`ProjectRunner` binary.
-The `deps` attribute in `java_binary` tells Bazel that
-the `greeter` library is required to build the `ProjectRunner` binary.
-
-To build this new version of the project, run the following command:
-
-```posix-terminal
-bazel build //:ProjectRunner
-```
-
-Bazel produces output similar to the following:
-
-```
-INFO: Found 1 target...
-Target //:ProjectRunner up-to-date:
-  bazel-bin/ProjectRunner.jar
-  bazel-bin/ProjectRunner
-INFO: Elapsed time: 2.454s, Critical Path: 1.58s
-```
-
-Now test your freshly built binary:
-
-```posix-terminal
-bazel-bin/ProjectRunner
-```
-
-If you now modify `ProjectRunner.java` and rebuild the project, Bazel only
-recompiles that file.
-
-Looking at the dependency graph, you can see that `ProjectRunner` depends on the
-same inputs as it did before, but the structure of the build is different:
-
-![Dependency graph of the target 'ProjectRunner' after adding a dependency](
-/docs/images/tutorial_java_02.svg)
-
-You've now built the project with two targets. The `ProjectRunner` target builds
-one source file and depends on one other target (`:greeter`), which builds
-one additional source file.
-
-### Use multiple packages
-
-Let’s now split the project into multiple packages. If you take a look at the
-`src/main/java/com/example/cmdline` directory, you can see that it also contains
-a `BUILD` file, plus some source files. Therefore, to Bazel, the workspace now
-contains two packages, `//src/main/java/com/example/cmdline` and `//` (since
-there is a `BUILD` file at the root of the workspace).
-
-Take a look at the `src/main/java/com/example/cmdline/BUILD` file:
-
-```python
-java_binary(
-    name = "runner",
-    srcs = ["Runner.java"],
-    main_class = "com.example.cmdline.Runner",
-    deps = ["//:greeter"],
-)
-```
-
-The `runner` target depends on the `greeter` target in the `//` package (hence
-the target label `//:greeter`) - Bazel knows this through the `deps` attribute.
-Take a look at the dependency graph:
-
-![Dependency graph of the target 'runner'](/docs/images/tutorial_java_03.svg)
-
-However, for the build to succeed, you must explicitly make the `greeter` target
-in `//BUILD` visible to the `runner` target in
-`//src/main/java/com/example/cmdline/BUILD` using the `visibility` attribute.
-This is because by default targets are only visible to other targets in the same
-`BUILD` file. (Bazel uses target visibility to prevent issues such as libraries
-containing implementation details leaking into public APIs.)
-
-To do this, add the `visibility` attribute to the `greeter` target in
-`java-tutorial/BUILD` as shown below:
-
-```python
-java_library(
-    name = "greeter",
-    srcs = ["src/main/java/com/example/Greeting.java"],
-    visibility = ["//src/main/java/com/example/cmdline:__pkg__"],
-)
-```
-
-Now you can build the new package by running the following command at the root
-of the workspace:
-
-```posix-terminal
-bazel build //src/main/java/com/example/cmdline:runner
-```
-
-Bazel produces output similar to the following:
-
-```
-INFO: Found 1 target...
-Target //src/main/java/com/example/cmdline:runner up-to-date:
-  bazel-bin/src/main/java/com/example/cmdline/runner.jar
-  bazel-bin/src/main/java/com/example/cmdline/runner
-INFO: Elapsed time: 1.576s, Critical Path: 0.81s
-```
-
-Now test your freshly built binary:
-
-```posix-terminal
-./bazel-bin/src/main/java/com/example/cmdline/runner
-```
-
-You've now modified the project to build as two packages, each containing one
-target, and understand the dependencies between them.
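-
-If you want to double-check the new cross-package dependency, one option is to
-reuse the dependency-graph query from earlier in this tutorial, pointed at the
-`runner` target this time (run from the workspace root):
-
-```posix-terminal
-bazel query --notool_deps --noimplicit_deps "deps(//src/main/java/com/example/cmdline:runner)" --output graph
-```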
- - -## Use labels to reference targets - -In `BUILD` files and at the command line, Bazel uses target labels to reference -targets - for example, `//:ProjectRunner` or -`//src/main/java/com/example/cmdline:runner`. Their syntax is as follows: - -``` -//path/to/package:target-name -``` - -If the target is a rule target, then `path/to/package` is the path to the -directory containing the `BUILD` file, and `target-name` is what you named the -target in the `BUILD` file (the `name` attribute). If the target is a file -target, then `path/to/package` is the path to the root of the package, and -`target-name` is the name of the target file, including its full path. - -When referencing targets at the repository root, the package path is empty, -just use `//:target-name`. When referencing targets within the same `BUILD` -file, you can even skip the `//` workspace root identifier and just use -`:target-name`. - -For example, for targets in the `java-tutorial/BUILD` file, you did not have to -specify a package path, since the workspace root is itself a package (`//`), and -your two target labels were simply `//:ProjectRunner` and `//:greeter`. - -However, for targets in the `//src/main/java/com/example/cmdline/BUILD` file you -had to specify the full package path of `//src/main/java/com/example/cmdline` -and your target label was `//src/main/java/com/example/cmdline:runner`. - -## Package a Java target for deployment - -Let’s now package a Java target for deployment by building the binary with all -of its runtime dependencies. This lets you run the binary outside of your -development environment. - -As you remember, the [java_binary](/reference/be/java#java_binary) build rule -produces a `.jar` and a wrapper shell script. Take a look at the contents of -`runner.jar` using this command: - -```posix-terminal -jar tf bazel-bin/src/main/java/com/example/cmdline/runner.jar -``` - -The contents are: - -``` -META-INF/ -META-INF/MANIFEST.MF -com/ -com/example/ -com/example/cmdline/ -com/example/cmdline/Runner.class -``` -As you can see, `runner.jar` contains `Runner.class`, but not its dependency, -`Greeting.class`. The `runner` script that Bazel generates adds `greeter.jar` -to the classpath, so if you leave it like this, it will run locally, but it -won't run standalone on another machine. Fortunately, the `java_binary` rule -allows you to build a self-contained, deployable binary. To build it, append -`_deploy.jar` to the target name: - -```posix-terminal -bazel build //src/main/java/com/example/cmdline:runner_deploy.jar -``` - -Bazel produces output similar to the following: - -``` -INFO: Found 1 target... -Target //src/main/java/com/example/cmdline:runner_deploy.jar up-to-date: - bazel-bin/src/main/java/com/example/cmdline/runner_deploy.jar -INFO: Elapsed time: 1.700s, Critical Path: 0.23s -``` -You have just built `runner_deploy.jar`, which you can run standalone away from -your development environment since it contains the required runtime -dependencies. 
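-
-For example, assuming a local `java` launcher is on your `PATH` (the deploy jar
-is expected to carry its main class in the manifest, so no extra classpath
-setup should be needed), you could run it directly:
-
-```posix-terminal
-java -jar bazel-bin/src/main/java/com/example/cmdline/runner_deploy.jar
-```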
-Take a look at the contents of this standalone JAR using the
-same command as before:
-
-```posix-terminal
-jar tf bazel-bin/src/main/java/com/example/cmdline/runner_deploy.jar
-```
-
-The contents include all of the necessary classes to run:
-
-```
-META-INF/
-META-INF/MANIFEST.MF
-build-data.properties
-com/
-com/example/
-com/example/cmdline/
-com/example/cmdline/Runner.class
-com/example/Greeting.class
-```
-
-## Further reading
-
-For more details, see:
-
-* [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external) for
-  rules to manage transitive Maven dependencies.
-
-* [External Dependencies](/docs/external) to learn more about working with
-  local and remote repositories.
-
-* The [other rules](/rules) to learn more about Bazel.
-
-* The [C++ build tutorial](/start/cpp) to get started with building
-  C++ projects with Bazel.
-
-* The [Android application tutorial](/start/android-app) and
-  [iOS application tutorial](/start/ios-app) to get started with
-  building mobile applications for Android and iOS with Bazel.
-
-Happy building!
diff --git a/8.2.1/tutorials/cpp-dependency.mdx b/8.2.1/tutorials/cpp-dependency.mdx
deleted file mode 100644
index 194cc73..0000000
--- a/8.2.1/tutorials/cpp-dependency.mdx
+++ /dev/null
@@ -1,50 +0,0 @@
----
-title: 'Review the dependency graph'
----
-
-
-
-A successful build has all of its dependencies explicitly stated in the `BUILD`
-file. Bazel uses those statements to create the project's dependency graph,
-which enables accurate incremental builds.
-
-To visualize the sample project's dependencies, you can generate a text
-representation of the dependency graph by running this command at the
-workspace root:
-
-```
-bazel query --notool_deps --noimplicit_deps "deps(//main:hello-world)" \
-  --output graph
-```
-
-The above command tells Bazel to look for all dependencies for the target
-`//main:hello-world` (excluding host and implicit dependencies) and format the
-output as a graph.
-
-Then, paste the text into [GraphViz](http://www.webgraphviz.com/).
-
-On Ubuntu, you can view the graph locally by installing GraphViz and the xdot
-Dot Viewer:
-
-```
-sudo apt update && sudo apt install graphviz xdot
-```
-
-Then you can generate and view the graph by piping the text output above
-straight to xdot:
-
-```
-xdot <(bazel query --notool_deps --noimplicit_deps "deps(//main:hello-world)" \
-  --output graph)
-```
-
-As you can see, the first stage of the sample project has a single target
-that builds a single source file with no additional dependencies:
-
-![Dependency graph for 'hello-world'](/docs/images/cpp-tutorial-stage1.png "Dependency graph")
-
-**Figure 1.** Dependency graph for `hello-world` displays a single target with a single
-source file.
-
-Now that you have set up your workspace, built your project, and examined its
-dependencies, you can add some complexity.
diff --git a/8.2.1/tutorials/cpp-labels.mdx b/8.2.1/tutorials/cpp-labels.mdx
deleted file mode 100644
index 78d0dbc..0000000
--- a/8.2.1/tutorials/cpp-labels.mdx
+++ /dev/null
@@ -1,27 +0,0 @@
----
-title: 'Use labels to reference targets'
----
-
-
-
-In `BUILD` files and at the command line, Bazel uses *labels* to reference
-targets - for example, `//main:hello-world` or `//lib:hello-time`.
-Their syntax is:
-
-```
-//path/to/package:target-name
-```
-
-If the target is a rule target, then `path/to/package` is the path from the
-workspace root (the directory containing the `MODULE.bazel` file) to the directory
-containing the `BUILD` file, and `target-name` is what you named the target
-in the `BUILD` file (the `name` attribute). If the target is a file target,
-then `path/to/package` is the path to the root of the package, and
-`target-name` is the name of the target file, including its full
-path relative to the root of the package (the directory containing the
-package's `BUILD` file).
-
-When referencing targets at the repository root, the package path is empty;
-just use `//:target-name`. When referencing targets within the same `BUILD`
-file, you can even skip the `//` workspace root identifier and just use
-`:target-name`.
diff --git a/8.2.1/tutorials/cpp-use-cases.mdx b/8.2.1/tutorials/cpp-use-cases.mdx
deleted file mode 100644
index 6695cce..0000000
--- a/8.2.1/tutorials/cpp-use-cases.mdx
+++ /dev/null
@@ -1,180 +0,0 @@
----
-title: 'Common C++ Build Use Cases'
----
-
-
-
-Here you will find some of the most common use cases for building C++ projects
-with Bazel. If you have not done so already, get started with building C++
-projects with Bazel by completing the tutorial
-[Introduction to Bazel: Build a C++ Project](/start/cpp).
-
-For information on `cc_library` and `hdrs` header files, see
-[`cc_library`](/reference/be/c-cpp#cc_library).
-
-## Including multiple files in a target
-
-You can include multiple files in a single target with
-[`glob`](/reference/be/functions#glob).
-For example:
-
-```python
-cc_library(
-    name = "build-all-the-files",
-    srcs = glob(["*.cc"]),
-    hdrs = glob(["*.h"]),
-)
-```
-
-With this target, Bazel will build all the `.cc` and `.h` files it finds in the
-same directory as the `BUILD` file that contains this target (excluding
-subdirectories).
-
-## Using transitive includes
-
-If a file includes a header, then any rule with that file as a source (that is,
-having that file in the `srcs`, `hdrs`, or `textual_hdrs` attribute) should
-depend on the included header's library rule. Conversely, only direct
-dependencies need to be specified as dependencies. For example, suppose
-`sandwich.h` includes `bread.h` and `bread.h` includes `flour.h`. `sandwich.h`
-doesn't include `flour.h` (who wants flour in their sandwich?), so the `BUILD`
-file would look like this:
-
-```python
-cc_library(
-    name = "sandwich",
-    srcs = ["sandwich.cc"],
-    hdrs = ["sandwich.h"],
-    deps = [":bread"],
-)
-
-cc_library(
-    name = "bread",
-    srcs = ["bread.cc"],
-    hdrs = ["bread.h"],
-    deps = [":flour"],
-)
-
-cc_library(
-    name = "flour",
-    srcs = ["flour.cc"],
-    hdrs = ["flour.h"],
-)
-```
-
-Here, the `sandwich` library depends on the `bread` library, which depends
-on the `flour` library.
-
-## Adding include paths
-
-Sometimes you cannot (or do not want to) root include paths at the workspace
-root. Existing libraries might already have an include directory that doesn't
-match their path in your workspace. For example, suppose you have the following
-directory structure:
-
-```
-└── my-project
-    ├── legacy
-    │   └── some_lib
-    │       ├── BUILD
-    │       ├── include
-    │       │   └── some_lib.h
-    │       └── some_lib.cc
-    └── MODULE.bazel
-```
-
-Bazel will expect `some_lib.h` to be included as
-`legacy/some_lib/include/some_lib.h`, but suppose `some_lib.cc` includes
-`"some_lib.h"`.
-To make that include path valid,
-`legacy/some_lib/BUILD` will need to specify that the `some_lib/include`
-directory is an include directory:
-
-```python
-cc_library(
-    name = "some_lib",
-    srcs = ["some_lib.cc"],
-    hdrs = ["include/some_lib.h"],
-    copts = ["-Ilegacy/some_lib/include"],
-)
-```
-
-This is especially useful for external dependencies, as their header files
-must otherwise be included with a `/` prefix.
-
-## Include external libraries
-
-Suppose you are using [Google Test](https://github.com/google/googletest).
-You can add a dependency on it in the `MODULE.bazel` file to
-download Google Test and make it available in your repository:
-
-```python
-bazel_dep(name = "googletest", version = "1.15.2")
-```
-
-## Writing and running C++ tests
-
-For example, you could create a test `./test/hello-test.cc`, such as:
-
-```cpp
-#include "gtest/gtest.h"
-#include "main/hello-greet.h"
-
-TEST(HelloTest, GetGreet) {
-  EXPECT_EQ(get_greet("Bazel"), "Hello Bazel");
-}
-```
-
-Then create the `./test/BUILD` file for your tests:
-
-```python
-cc_test(
-    name = "hello-test",
-    srcs = ["hello-test.cc"],
-    copts = [
-        "-Iexternal/gtest/googletest/include",
-        "-Iexternal/gtest/googletest",
-    ],
-    deps = [
-        "@googletest//:main",
-        "//main:hello-greet",
-    ],
-)
-```
-
-To make `hello-greet` visible to `hello-test`, you must add
-`"//test:__pkg__",` to the `visibility` attribute in `./main/BUILD`.
-
-Now you can use `bazel test` to run the test.
-
-```
-bazel test test:hello-test
-```
-
-This produces the following output:
-
-```
-INFO: Found 1 test target...
-Target //test:hello-test up-to-date:
-  bazel-bin/test/hello-test
-INFO: Elapsed time: 4.497s, Critical Path: 2.53s
-//test:hello-test PASSED in 0.3s
-
-Executed 1 out of 1 tests: 1 test passes.
-```
-
-
-## Adding dependencies on precompiled libraries
-
-If you want to use a library of which you only have a compiled version (for
-example, headers and a `.so` file), wrap it in a `cc_library` rule:
-
-```python
-cc_library(
-    name = "mylib",
-    srcs = ["mylib.so"],
-    hdrs = ["mylib.h"],
-)
-```
-
-This way, other C++ targets in your workspace can depend on this rule.
diff --git a/8.2.1/versions/index.mdx b/8.2.1/versions/index.mdx
deleted file mode 100644
index 4290e57..0000000
--- a/8.2.1/versions/index.mdx
+++ /dev/null
@@ -1,15 +0,0 @@
----
-title: 'Documentation Versions'
----
-
-
-
-The default documentation on this website represents the latest version at HEAD.
-Each major and minor supported release will have a snapshot of the narrative and
-reference documentation that follows the lifecycle of Bazel's version support.
-
-To see documentation for stable Bazel versions, use the "Versioned docs"
-drop-down.
-
-To see documentation for older Bazel versions prior to Feb 2022, go to
-[docs.bazel.build](https://docs.bazel.build/).
diff --git a/8.3.1/about/faq.mdx b/8.3.1/about/faq.mdx
deleted file mode 100644
index dd5be8a..0000000
--- a/8.3.1/about/faq.mdx
+++ /dev/null
@@ -1,209 +0,0 @@
----
-title: 'FAQ'
----
-
-
-
-If you have questions or need support, see [Getting Help](/help).
-
-## What is Bazel?
-
-Bazel is a tool that automates software builds and tests. Supported build tasks include running compilers and linkers to produce executable programs and libraries, and assembling deployable packages for Android, iOS and other target environments. Bazel is similar to other tools like Make, Ant, Gradle, Buck, Pants and Maven.
-
-## What is special about Bazel?
-
-Bazel was designed to fit the way software is developed at Google.
-It has the following features:
-
-* Multi-language support: Bazel supports [many languages](/reference/be/overview), and can be extended to support arbitrary programming languages.
-* High-level build language: Projects are described in the `BUILD` language, a concise text format that describes a project as sets of small interconnected libraries, binaries and tests. In contrast, with tools like Make, you have to describe individual files and compiler invocations.
-* Multi-platform support: The same tool and the same `BUILD` files can be used to build software for different architectures, and even different platforms. At Google, we use Bazel to build everything from server applications running on systems in our data centers to client apps running on mobile phones.
-* Reproducibility: In `BUILD` files, each library, test and binary must specify its direct dependencies completely. Bazel uses this dependency information to know what must be rebuilt when you make changes to a source file, and which tasks can run in parallel. This means that all builds are incremental and will always produce the same result.
-* Scalable: Bazel can handle large builds; at Google, it is common for a server binary to have 100k source files, and builds where no files were changed take about 200ms.
-
-## Why doesn’t Google use...?
-
-* Make, Ninja: These tools give very exact control over what commands get invoked to build files, but it’s up to the user to write rules that are correct.
-  * Users interact with Bazel on a higher level. For example, Bazel has built-in rules for “Java test”, “C++ binary”, and notions such as “target platform” and “host platform”. These rules have been battle tested to be foolproof.
-* Ant and Maven: Ant and Maven are primarily geared toward Java, while Bazel handles multiple languages. Bazel encourages subdividing codebases into smaller reusable units, and can rebuild only the ones that need rebuilding. This speeds up development when working with larger codebases.
-* Gradle: Bazel configuration files are much more structured than Gradle’s, letting Bazel understand exactly what each action does. This allows for more parallelism and better reproducibility.
-* Pants, Buck: Both tools were created and developed by ex-Googlers at Twitter and Foursquare, and Facebook respectively. They have been modeled after Bazel, but their feature sets are different, so they aren’t viable alternatives for us.
-
-## Where did Bazel come from?
-
-Bazel is a flavor of the tool that Google uses to build its server software internally. It has expanded to build other software as well, like mobile apps (iOS, Android) that connect to our servers.
-
-## Did you rewrite your internal tool as open-source? Is it a fork?
-
-Bazel shares most of its code with the internal tool and its rules are used for millions of builds every day.
-
-## Why did Google build Bazel?
-
-A long time ago, Google built its software using large, generated Makefiles. These led to slow and unreliable builds, which began to interfere with our developers’ productivity and the company’s agility. Bazel was a way to solve these problems.
-
-## Does Bazel require a build cluster?
-
-Bazel runs build operations locally by default. However, Bazel can also connect to a build cluster for even faster builds and tests. See our documentation on [remote execution and caching](/remote/rbe) and [remote caching](/remote/caching) for further details.
-
-## How does the Google development process work?
- -For our server code base, we use the following development workflow: - -* All our server code is in a single, gigantic version control system. -* Everybody builds their software with Bazel. -* Different teams own different parts of the source tree, and make their components available as `BUILD` targets. -* Branching is primarily used for managing releases, so everybody develops their software at the head revision. - -Bazel is a cornerstone of this philosophy: since Bazel requires all dependencies to be fully specified, we can predict which programs and tests are affected by a change, and vet them before submission. - -More background on the development process at Google can be found on the [eng tools blog](http://google-engtools.blogspot.com/). - -## Why did you open up Bazel? - -Building software should be fun and easy. Slow and unpredictable builds take the fun out of programming. - -## Why would I want to use Bazel? - -* Bazel may give you faster build times because it can recompile only the files that need to be recompiled. Similarly, it can skip re-running tests that it knows haven’t changed. -* Bazel produces deterministic results. This eliminates skew between incremental and clean builds, laptop and CI system, etc. -* Bazel can build different client and server apps with the same tool from the same workspace. For example, you can change a client/server protocol in a single commit, and test that the updated mobile app works with the updated server, building both with the same tool, reaping all the aforementioned benefits of Bazel. - -## Can I see examples? - -Yes; see a [simple example](https://github.com/bazelbuild/bazel/blob/master/examples/cpp/BUILD) -or read the [Bazel source code](https://github.com/bazelbuild/bazel/blob/master/src/BUILD) for a more complex example. - - -## What is Bazel best at? - -Bazel shines at building and testing projects with the following properties: - -* Projects with a large codebase -* Projects written in (multiple) compiled languages -* Projects that deploy on multiple platforms -* Projects that have extensive tests - -## Where can I run Bazel? - -Bazel runs on Linux, macOS (OS X), and Windows. - -Porting to other UNIX platforms should be relatively easy, as long as a JDK is available for the platform. - -## What should I not use Bazel for? - -* Bazel tries to be smart about caching. This means that it is not good for running build operations whose outputs should not be cached. For example, the following steps should not be run from Bazel: - * A compilation step that fetches data from the internet. - * A test step that connects to the QA instance of your site. - * A deployment step that changes your site’s cloud configuration. -* If your build consists of a few long, sequential steps, Bazel may not be able to help much. You’ll get more speed by breaking long steps into smaller, discrete targets that Bazel can run in parallel. - -## How stable is Bazel’s feature set? - -The core features (C++, Java, and shell rules) have extensive use inside Google, so they are thoroughly tested and have very little churn. Similarly, we test new versions of Bazel across hundreds of thousands of targets every day to find regressions, and we release new versions multiple times every month. - -In short, except for features marked as experimental, Bazel should Just Work. Changes to non-experimental rules will be backward compatible. A more detailed list of feature support statuses can be found in our [support document](/contribute/support). 
-
-## How stable is Bazel as a binary?
-
-Inside Google, we make sure that Bazel crashes are very rare. This should also hold for our open source codebase.
-
-## How can I start using Bazel?
-
-See [Getting Started](/start/).
-
-## Doesn’t Docker solve the reproducibility problems?
-
-With Docker you can easily create sandboxes with fixed OS releases, for example, Ubuntu 12.04, Fedora 21. This solves the problem of reproducibility for the system environment – that is, “which version of /usr/bin/c++ do I need?”
-
-Docker does not address reproducibility with regard to changes in the source code. Running Make with an imperfectly written Makefile inside a Docker container can still yield unpredictable results.
-
-Inside Google, we check tools into source control for reproducibility. In this way, we can vet changes to tools (“upgrade GCC to 4.6.1”) with the same mechanism as changes to base libraries (“fix bounds check in OpenSSL”).
-
-## Can I build binaries for deployment on Docker?
-
-With Bazel, you can build standalone, statically linked binaries in C/C++, and self-contained jar files for Java. These run with few dependencies on normal UNIX systems, and as such should be simple to install inside a Docker container.
-
-Bazel has conventions for structuring more complex programs, for example, a Java program that consumes a set of data files, or runs another program as a subprocess. It is possible to package up such environments as standalone archives, so they can be deployed on different systems, including Docker images.
-
-## Can I build Docker images with Bazel?
-
-Yes, you can use our [Docker rules](https://github.com/bazelbuild/rules_docker) to build reproducible Docker images.
-
-## Will Bazel make my builds reproducible automatically?
-
-For Java and C++ binaries, yes, assuming you do not change the toolchain. If you have build steps that involve custom recipes (for example, executing binaries through a shell script inside a rule), you will need to take some extra care:
-
-* Do not use dependencies that were not declared. Sandboxed execution (`--spawn_strategy=sandboxed`, only on Linux) can help find undeclared dependencies.
-* Avoid storing timestamps and user-IDs in generated files. ZIP files and other archives are especially prone to this.
-* Avoid connecting to the network. Sandboxed execution can help here too.
-* Avoid processes that use random numbers; in particular, dictionary traversal is randomized in many programming languages.
-
-## Do you have binary releases?
-
-Yes, you can find the latest [release binaries](https://github.com/bazelbuild/bazel/releases/latest) and review our [release policy](/release/).
-
-## I use Eclipse/IntelliJ/XCode. How does Bazel interoperate with IDEs?
-
-For IntelliJ, check out the [IntelliJ with Bazel plugin](https://ij.bazel.build/).
-
-For XCode, check out [Tulsi](http://tulsi.bazel.build/).
-
-For Eclipse, check out [E4B plugin](https://github.com/bazelbuild/e4b).
-
-For other IDEs, check out the [blog post](https://blog.bazel.build/2016/06/10/ide-support.html) on how these plugins work.
-
-## I use Jenkins/CircleCI/TravisCI. How does Bazel interoperate with CI systems?
-
-Bazel returns a non-zero exit code if the build or test invocation fails, and this should be enough for basic CI integration. Since Bazel does not need clean builds for correctness, the CI system should not be configured to clean before starting a build/test run.
-
-Further details on exit codes are in the [User Manual](/docs/user-manual).
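-
-As an illustration, a minimal CI step can rely on nothing more than that exit code; a hypothetical sketch (target patterns and flags vary by project):
-
-```posix-terminal
-# Hypothetical CI step: build and test every target in the workspace.
-# A non-zero exit code fails the pipeline; no prior clean is needed.
-bazel test //...
-```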
-
-## What future features can we expect in Bazel?
-
-See our [Roadmaps](/about/roadmap).
-
-## Can I use Bazel for my INSERT LANGUAGE HERE project?
-
-Bazel is extensible. Anyone can add support for new languages. Many languages are supported: see the [build encyclopedia](/reference/be/overview) for a list of recommendations and [awesomebazel.com](https://awesomebazel.com/) for a more comprehensive list.
-
-If you would like to develop extensions or learn how they work, see the documentation for [extending Bazel](/extending/concepts).
-
-## Can I contribute to the Bazel code base?
-
-See our [contribution guidelines](/contribute/).
-
-## Why isn’t all development done in the open?
-
-We still have to refactor the interfaces between the public code in Bazel and our internal extensions frequently. This makes it hard to do much development in the open.
-
-## Are you done open sourcing Bazel?
-
-Open sourcing Bazel is a work in progress. In particular, we’re still working on open sourcing:
-
-* Many of our unit and integration tests (which should make contributing patches easier).
-* Full IDE integration.
-
-Beyond code, we’d like to eventually have all code reviews, bug tracking, and design decisions happen publicly, with the Bazel community involved. We are not there yet, so some changes will simply appear in the Bazel repository without clear explanation. Despite this lack of transparency, we want to support external developers and collaborate. Thus, we are opening up the code, even though some of the development is still happening internal to Google. Please let us know if anything seems unclear or unjustified as we transition to an open model.
-
-## Are there parts of Bazel that will never be open sourced?
-
-Yes, some of the code base either integrates with Google-specific technology or is code we have been looking for an excuse to get rid of (or some combination of the two). These parts of the code base are not available on GitHub and probably never will be.
-
-## How do I contact the team?
-
-We are reachable at bazel-discuss@googlegroups.com.
-
-## Where do I report bugs?
-
-Open an issue [on GitHub](https://github.com/bazelbuild/bazel/issues).
-
-## What’s up with the word “Blaze” in the codebase?
-
-This is an internal name for the tool. Please refer to Blaze as Bazel.
-
-## Why do other Google projects (Android, Chrome) use other build tools?
-
-Until the first (Alpha) release, Bazel was not available externally, so open source projects such as Chromium and Android could not use it. In addition, the original lack of Windows support was a problem for building Windows applications, such as Chrome. Since the project has matured and become more stable, the [Android Open Source Project](https://source.android.com/) is in the process of migrating to Bazel.
-
-## How do you pronounce “Bazel”?
-
-The same way as “basil” (the herb) in US English: “BAY-zel”. It rhymes with “hazel”. IPA: /ˈbeɪzˌəl/
diff --git a/8.3.1/about/intro.mdx b/8.3.1/about/intro.mdx
deleted file mode 100644
index a531ac2..0000000
--- a/8.3.1/about/intro.mdx
+++ /dev/null
@@ -1,111 +0,0 @@
----
-title: 'Intro to Bazel'
----
-
-
-
-Bazel is an open-source build and test tool similar to Make, Maven, and Gradle.
-It uses a human-readable, high-level build language. Bazel supports projects in
-multiple languages and builds outputs for multiple platforms. Bazel supports
-large codebases across multiple repositories, and large numbers of users.
-
-## Benefits
-
-Bazel offers the following advantages:
-
-* **High-level build language.** Bazel uses an abstract, human-readable
-  language to describe the build properties of your project at a high
-  semantic level. Unlike other tools, Bazel operates on the *concepts*
-  of libraries, binaries, scripts, and data sets, shielding you from the
-  complexity of writing individual calls to tools such as compilers and
-  linkers.
-
-* **Bazel is fast and reliable.** Bazel caches all previously done work and
-  tracks changes to both file content and build commands. This way, Bazel
-  knows when something needs to be rebuilt, and rebuilds only that. To further
-  speed up your builds, you can set up your project to build in a highly
-  parallel and incremental fashion.
-
-* **Bazel is multi-platform.** Bazel runs on Linux, macOS, and Windows. Bazel
-  can build binaries and deployable packages for multiple platforms, including
-  desktop, server, and mobile, from the same project.
-
-* **Bazel scales.** Bazel maintains agility while handling builds with 100k+
-  source files. It works with multiple repositories and user bases in the tens
-  of thousands.
-
-* **Bazel is extensible.** Many [languages](/rules) are
-  supported, and you can extend Bazel to support any other language or
-  framework.
-
-## Using Bazel
-
-To build or test a project with Bazel, you typically do the following:
-
-1. **Set up Bazel.** Download and [install Bazel](/install).
-
-2. **Set up a project [workspace](/concepts/build-ref#workspaces)**, which is a
-   directory where Bazel looks for build inputs and `BUILD` files, and where it
-   stores build outputs.
-
-3. **Write a `BUILD` file**, which tells Bazel what to build and how to
-   build it.
-
-   You write your `BUILD` file by declaring build targets using
-   [Starlark](/rules/language), a domain-specific language. (See example
-   [here](https://github.com/bazelbuild/bazel/blob/master/examples/cpp/BUILD).)
-
-   A build target specifies a set of input artifacts that Bazel will build plus
-   their dependencies, the build rule Bazel will use to build it, and options
-   that configure the build rule.
-
-   A build rule specifies the build tools Bazel will use, such as compilers and
-   linkers, and their configurations. Bazel ships with a number of build rules
-   covering the most common artifact types in the supported languages on
-   supported platforms.
-
-4. **Run Bazel** from the [command line](/reference/command-line-reference). Bazel
-   places your outputs within the workspace.
-
-In addition to building, you can also use Bazel to run
-[tests](/reference/test-encyclopedia) and [query](/query/guide) the build
-to trace dependencies in your code.
-
-## Bazel build process
-
-When running a build or a test, Bazel does the following:
-
-1. **Loads** the `BUILD` files relevant to the target.
-
-2. **Analyzes** the inputs and their
-   [dependencies](/concepts/dependencies), applies the specified build
-   rules, and produces an [action](/extending/concepts#evaluation-model)
-   graph.
-
-3. **Executes** the build actions on the inputs until the final build outputs
-   are produced.
-
-Since all previous build work is cached, Bazel can identify and reuse cached
-artifacts and only rebuild or retest what's changed. To further enforce
-correctness, you can set up Bazel to run builds and tests
-[hermetically](/basics/hermeticity) through sandboxing, minimizing skew
-and maximizing [reproducibility](/run/build#correct-incremental-rebuilds).
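-
-To make this process concrete, here is a minimal sketch (the target and file
-names are hypothetical, not from an actual project):
-
-```python
-# BUILD file at the workspace root: declares one C++ binary target.
-cc_binary(
-    name = "hello",
-    srcs = ["hello.cc"],
-)
-```
-
-Running `bazel build //:hello` then walks through the three phases above: Bazel
-loads this `BUILD` file, analyzes the `cc_binary` target into compile and link
-actions, and executes those actions (or reuses cached results) to produce the
-`hello` binary in the output tree.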
-
-### Action graph
-
-The action graph represents the build artifacts, the relationships between them,
-and the build actions that Bazel will perform. Thanks to this graph, Bazel can
-[track](/run/build#build-consistency) changes to
-file content as well as changes to actions, such as build or test commands, and
-know what build work has previously been done. The graph also enables you to
-easily [trace dependencies](/query/guide) in your code.
-
-## Getting started tutorials
-
-To get started with Bazel, see [Getting Started](/start/) or jump
-directly to the Bazel tutorials:
-
-* [Tutorial: Build a C++ Project](/start/cpp)
-* [Tutorial: Build a Java Project](/start/java)
-* [Tutorial: Build an Android Application](/start/android-app)
-* [Tutorial: Build an iOS Application](/start/ios-app)
diff --git a/8.3.1/about/roadmap.mdx b/8.3.1/about/roadmap.mdx
deleted file mode 100644
index 2e18b78..0000000
--- a/8.3.1/about/roadmap.mdx
+++ /dev/null
@@ -1,147 +0,0 @@
----
-title: 'Bazel roadmap'
----
-
-
-
-## Overview
-
-As the Bazel project continues to evolve in response to your needs, we want to
-share our 2024 update.
-
-This roadmap describes current initiatives and predictions for the future of
-Bazel development, giving you visibility into current priorities and ongoing
-projects.
-
-## Bazel 8.0 Release
-
-We plan to bring Bazel 8.0 [long term support
-(LTS)](https://bazel.build/release/versioning) to you in late 2024.
-The following features are planned to be implemented.
-
-### Bzlmod: external dependency management system
-
-[Bzlmod](https://bazel.build/docs/bzlmod) automatically resolves transitive
-dependencies, allowing projects to scale while staying fast and
-resource-efficient.
-
-With Bazel 8, we will disable WORKSPACE support by default (it will still be
-possible to enable it using `--enable_workspace`); with Bazel 9 WORKSPACE
-support will be removed. Starting with Bazel 7.1, you can set
-`--noenable_workspace` to opt into the new behavior.
-
-Bazel 8.0 will contain a number of enhancements to
-[Bazel's external dependency management](https://docs.google.com/document/d/1moQfNcEIttsk6vYanNKIy3ZuK53hQUFq1b1r0rmsYVg/edit#heading=h.lgyp7ubwxmjc)
-functionality, including:
-
-* The new flag `--enable_workspace` can be set to `false` to completely
-  disable WORKSPACE functionality.
-* New directory watching API (see
-  [#21435](https://github.com/bazelbuild/bazel/pull/21435), shipped in Bazel
-  7.1).
-* Improved scheme for generating canonical repository names for better
-  cacheability of actions across dependency version updates
-  ([#21316](https://github.com/bazelbuild/bazel/pull/21316), shipped in Bazel
-  7.1).
-* An improved shared repository cache (see
-  [#12227](https://github.com/bazelbuild/bazel/issues/12227)).
-* Vendor and offline mode support — allows users to run builds with
-  pre-downloaded dependencies (see
-  [#19563](https://github.com/bazelbuild/bazel/issues/19563)).
-* Reduced merge conflicts in lock files
-  ([#20396](https://github.com/bazelbuild/bazel/issues/20369)).
-* Segmented MODULE.bazel
-  ([#17880](https://github.com/bazelbuild/bazel/issues/17880)).
-* Allow overriding module extension generated repository
-  ([#19301](https://github.com/bazelbuild/bazel/issues/19301)).
-* Improved documentation (e.g.
-  [#18030](https://github.com/bazelbuild/bazel/issues/18030),
-  [#15821](https://github.com/bazelbuild/bazel/issues/15821)), as well as a
-  migration guide and migration tooling.
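-
-As a minimal sketch of what this looks like in practice (the module name and
-versions below are hypothetical), a project declares only its direct
-dependencies in a `MODULE.bazel` file and Bzlmod resolves the rest:
-
-```python
-# MODULE.bazel: declare this module and its direct dependencies.
-module(name = "my_project", version = "1.0")
-
-# Bzlmod resolves the transitive dependencies of these automatically.
-bazel_dep(name = "rules_cc", version = "0.0.9")
-bazel_dep(name = "platforms", version = "0.0.10")
-```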
-
-### Remote execution improvements
-
-* Add support for asynchronous execution, speeding up remote execution by
-  increasing parallelism via the `--jobs` flag.
-* Make it easier to debug cache misses with a new compact execution log,
-  reducing its size by 100x and significantly lowering its runtime overhead (see
-  [#18643](https://github.com/bazelbuild/bazel/issues/18643)).
-* Implement garbage collection for the disk cache (see
-  [#5139](https://github.com/bazelbuild/bazel/issues/5139)).
-* Implement remote output service to allow lazy downloading of arbitrary build
-  outputs (see
-  [#20933](https://github.com/bazelbuild/bazel/discussions/20933)).
-
-### Migration of Android, C++, Java, Python, and Proto rules
-
-Complete migration of Android, C++, Java, and Python rulesets to dedicated
-repositories and decoupling them from the Bazel releases. This effort allows
-Bazel users and rule authors to
-
-* Update rules independently of Bazel.
-* Update and customize rules as needed.
-
-The new location of the rulesets is going to be `bazelbuild/rules_android`,
-`rules_cc`, `rules_java`, `rules_python` and `google/protobuf`. `rules_proto` is
-going to be deprecated.
-
-Bazel 8 will provide a temporary migration flag that will automatically use the
-rulesets that were previously part of the binary from their repositories. All
-the users of those rulesets are expected to eventually depend on their
-repositories and load them similarly to other rulesets that were never part of
-Bazel.
-
-Bazel 8 will also improve the existing rule extension and subrule APIs and
-mark them as non-experimental.
-
-### Starlark improvements
-
-* Symbolic Macros are a new way of writing macros that is friendlier to
-  `BUILD` users, macro authors, and tooling. Compared to legacy macros, which
-  Bazel has only limited insight into, symbolic macros help users avoid common
-  pitfalls and enforce best practices.
-* Package finalizers are a proposed feature for adding first-class support for
-  custom package validation logic. They are intended to help us deprecate
-  `native.existing_rules()`.
-
-### Configurability
-
-* Output path mapping continues to stabilize, promising better remote cache
-  performance and build speed for rule designers who use transitions.
-* Automatically set build flags suitable for a given `--platforms`.
-* Define project-supported flag combinations and automatically build targets
-  with default flags without having to set bazelrcs.
-* Don't redo build analysis every time build flags change.
-
-### Project Skyfocus - minimize retained data structures
-
-Bazel holds a lot of state in RAM for fast incremental builds. However,
-developers often change only a small subset of the source files (and almost
-never one of the external dependencies). With Skyfocus, Bazel will provide an
-experimental way to drop unnecessary incremental state and reduce Bazel's memory
-footprint, while still providing the same fast incremental build experience.
-
-The initial scope aims to improve the retained heap metric only. Peak heap
-reduction is a possibility, but not included in the initial scope.
-
-### Misc
-
-* Mobile install v3, a simpler and better maintained approach to incrementally
-  deploy Android applications.
-* Garbage collection for repository caches and Bazel's `install_base`.
-* Reduced sandboxing overhead.
-
-### Bazel-JetBrains* IntelliJ IDEA support
-
-Incremental IntelliJ plugin updates to support the latest JetBrains plugin
-release.
-
-*This roadmap is a snapshot of current targets and should not be taken as a guarantee.
-Priorities are subject to change in response to developer and customer -feedback, or new market opportunities.* - -*To be notified of new features — including updates to this roadmap — join the -[Google Group](https://groups.google.com/g/bazel-discuss) community.* - -*Copyright © 2022 JetBrains s.r.o. JetBrains and IntelliJ are registered trademarks of JetBrains s.r.o diff --git a/8.3.1/about/vision.mdx b/8.3.1/about/vision.mdx deleted file mode 100644 index da0ed02..0000000 --- a/8.3.1/about/vision.mdx +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: 'Bazel Vision' ---- - - - -Any software developer can efficiently build, test, and package -any project, of any size or complexity, with tooling that's easy to adopt and -extend. - -* **Engineers can take build fundamentals for granted.** Software developers - focus on the creative process of authoring code because the mechanical - process of build and test is solved. When customizing the build system to - support new languages or unique organizational needs, users focus on the - aspects of extensibility that are unique to their use case, without having - to reinvent the basic plumbing. - -* **Engineers can easily contribute to any project.** A developer who wants to - start working on a new project can simply clone the project and run the - build. There's no need for local configuration - it just works. With - cross-platform remote execution, they can work on any machine anywhere and - fully test their changes against all platforms the project targets. - Engineers can quickly configure the build for a new project or incrementally - migrate an existing build. - -* **Projects can scale to any size codebase, any size team.** Fast, - incremental testing allows teams to fully validate every change before it is - committed. This remains true even as repos grow, projects span multiple - repos, and multiple languages are introduced. Infrastructure does not force - developers to trade test coverage for build speed. - -**We believe Bazel has the potential to fulfill this vision.** - -Bazel was built from the ground up to enable builds that are reproducible (a -given set of inputs will always produce the same outputs) and portable (a build -can be run on any machine without affecting the output). - -These characteristics support safe incrementality (rebuilding only changed -inputs doesn't introduce the risk of corruption) and distributability (build -actions are isolated and can be offloaded). By minimizing the work needed to do -a correct build and parallelizing that work across multiple cores and remote -systems, Bazel can make any build fast. - -Bazel's abstraction layer — instructions specific to languages, platforms, and -toolchains implemented in a simple extensibility language — allows it to be -easily applied to any context. - -## Bazel core competencies - -1. Bazel supports **multi-language, multi-platform** builds and tests. You can - run a single command to build and test your entire source tree, no matter - which combination of languages and platforms you target. -1. Bazel builds are **fast and correct**. Every build and test run is - incremental, on your developers' machines and on CI. -1. Bazel provides a **uniform, extensible language** to define builds for any - language or platform. -1. Bazel allows your builds **to scale** by connecting to remote execution and - caching services. -1. Bazel works across **all major development platforms** (Linux, MacOS, and - Windows). -1. 
We accept that adopting Bazel requires effort, but **gradual adoption** is
-   possible. Bazel interfaces with de-facto standard tools for a given
-   language/platform.
-
-## Serving language communities
-
-Software engineering evolves in the context of language communities — typically,
-self-organizing groups of people who use common tools and practices.
-
-To be of use to members of a language community, high-quality Bazel rules must be
-available that integrate with the workflows and conventions of that community.
-
-Bazel is committed to being extensible and open, and to supporting good rulesets
-for any language.
-
-### Requirements of a good ruleset
-
-1. The rules need to support efficient **building and testing** for the
-   language, including code coverage.
-1. The rules need to **interface with a widely-used "package manager"** for the
-   language (such as Maven for Java), and support incremental migration paths
-   from other widely-used build systems.
-1. The rules need to be **extensible and interoperable**, following
-   ["Bazel sandwich"](https://github.com/bazelbuild/bazel-website/blob/master/designs/_posts/2016-08-04-extensibility-for-native-rules.md)
-   principles.
-1. The rules need to be **remote-execution ready**. In practice, this means
-   **configurable using the [toolchains](/extending/toolchains) mechanism**.
-1. The rules (and Bazel) need to interface with a **widely-used IDE** for the
-   language, if there is one.
-1. The rules need to have **thorough, usable documentation,** with introductory
-   material for new users and comprehensive docs for expert users.
-
-Each of these items is essential, and only together do they deliver on Bazel's
-competencies for their particular ecosystem.
-
-They are also, by and large, sufficient - once all are fulfilled, Bazel fully
-delivers its value to members of that language community.
diff --git a/8.3.1/about/why.mdx b/8.3.1/about/why.mdx
deleted file mode 100644
index 97cfa36..0000000
--- a/8.3.1/about/why.mdx
+++ /dev/null
@@ -1,85 +0,0 @@
----
-title: 'Why Bazel?'
----
-
-
-
-Bazel is a [fast](#fast), [correct](#correct), and [extensible](#extensible)
-build tool with [integrated testing](#integrated-testing) that supports multiple
-[languages](#multi-language), [repositories](#multi-repository), and
-[platforms](#multi-platform) in an industry-leading [ecosystem](#ecosystem).
-
-## Bazel is fast
-
-Bazel knows exactly what input files each build command needs, avoiding
-unnecessary work by re-running only when the set of input files has
-changed between builds.
-It runs build commands with as much parallelism as possible, either within the
-same computer or on [remote build nodes](/remote/rbe). If the structure of the
-build allows for it, it can run thousands of build or test commands at the same time.
-
-This is supported by multiple caching layers, in memory, on disk and on the
-remote build farm, if available. At Google, we routinely achieve cache hit rates
-north of 99%.
-
-## Bazel is correct
-
-Bazel ensures that your binaries are built *only* from your own
-source code. Bazel actions run in individual sandboxes and Bazel tracks
-every input file of the build, only and always re-running build
-commands when it needs to. This keeps your binaries up-to-date so that the
-[same source code always results in the same binary](/basics/hermeticity), bit
-by bit.
-
-Say goodbye to endless `make clean` invocations and to chasing phantom bugs
-that were in fact resolved in source code that never got built.
-
-## Bazel is extensible
-
-Harness the full power of Bazel by writing your own rules and macros to
-customize Bazel for your specific needs across a wide range of projects.
-
-Bazel rules are written in [Starlark](/rules/language), our
-in-house programming language that's a subset of Python. Starlark makes
-rule-writing accessible to most developers, while also creating rules that can
-be used across the ecosystem.
-
-## Integrated testing
-
-Bazel's [integrated test runner](/docs/user-manual#running-tests)
-knows and runs only those tests that need to be re-run, using remote execution
-(if available) to run them in parallel. Detect flakes early by using remote
-execution to quickly run a test thousands of times.
-
-Bazel [provides facilities](/remote/bep) to upload test results to a central
-location, thereby facilitating efficient communication of test outcomes, be it
-on CI or by individual developers.
-
-## Multi-language support
-
-Bazel supports many common programming languages including C++, Java,
-Kotlin, Python, Go, and Rust. You can build multiple binaries (for example,
-backend, web UI and mobile app) in the same Bazel invocation without being
-constrained to one language's idiomatic build tool.
-
-## Multi-repository support
-
-Bazel can [gather source code from multiple locations](/external/overview): you
-don't need to vendor your dependencies (but you can!), you can instead point
-Bazel to the location of your source code or prebuilt artifacts (e.g. a git
-repository or Maven Central), and it takes care of the rest.
-
-## Multi-platform support
-
-Bazel can simultaneously build projects for multiple platforms including Linux,
-macOS, Windows, and Android. It also provides powerful
-[cross-compilation capabilities](/extending/platforms) to build code for one
-platform while running the build on another.
-
-## Wide ecosystem
-
-[Industry leaders](/community/users) love Bazel, building a large
-community of developers who use and contribute to Bazel. Find tools, services,
-and documentation, including [consulting and SaaS offerings](/community/experts),
-that you can use with Bazel. Explore extensions like support for programming
-languages in our [open source software repositories](/rules).
diff --git a/8.3.1/advanced/performance/build-performance-breakdown.mdx b/8.3.1/advanced/performance/build-performance-breakdown.mdx
deleted file mode 100644
index 477e757..0000000
--- a/8.3.1/advanced/performance/build-performance-breakdown.mdx
+++ /dev/null
@@ -1,235 +0,0 @@
----
-title: 'Breaking down build performance'
----
-
-
-
-Bazel is complex and does a lot of different things over the course of a build,
-some of which can have an impact on build performance. This page attempts to map
-some of these Bazel concepts to their implications on build performance. While
-not exhaustive, we have included some examples of how to detect build performance
-issues through [extracting metrics](/configure/build-performance-metrics)
-and what you can do to fix them. With this, we hope you can apply these concepts
-when investigating build performance regressions.
-
-### Clean vs Incremental builds
-
-A clean build is one that builds everything from scratch, while an incremental
-build reuses some already completed work.
-
-We suggest looking at clean and incremental builds separately, especially when
-you are collecting / aggregating metrics that are dependent on the state of
-Bazel’s caches (for example
-[build request size metrics](#deterministic-build-metrics-as-a-proxy-for-build-performance)
-).
They also represent two different user experiences. Compared to a clean build
-started from scratch (which takes longer due to a cold cache), incremental
-builds happen far more frequently as developers iterate on code, and are
-typically faster since the cache is usually already warm.
-
-You can use the `CumulativeMetrics.num_analyses` field in the BEP to classify
-builds. If `num_analyses <= 1`, it is a clean build; otherwise, we can broadly
-categorize it as likely being an incremental build - the user could have switched
-to different flags or different targets, causing an effectively clean build. Any
-more rigorous definition of incrementality will likely have to come in the form
-of a heuristic, for example looking at the number of packages loaded
-(`PackageMetrics.packages_loaded`).
-
-### Deterministic build metrics as a proxy for build performance
-
-Measuring build performance can be difficult due to the non-deterministic nature
-of certain metrics (for example Bazel’s CPU time or queue times on a remote
-cluster). As such, it can be useful to use deterministic metrics as a proxy for
-the amount of work done by Bazel, which in turn affects its performance.
-
-The size of a build request can have significant implications for build
-performance. A larger build could represent more work in analyzing and
-constructing the build graphs. Builds naturally grow over the course of
-development as more dependencies are added or created, and thus become more
-complex and more expensive to build.
-
-We can slice this problem into the various build phases, and use the following
-metrics as proxy metrics for work done at each phase:
-
-1. `PackageMetrics.packages_loaded`: the number of packages successfully loaded.
-   A regression here represents more work that needs to be done to read and parse
-   each additional BUILD file in the loading phase.
-   - This is often due to the addition of dependencies and having to load their
-     transitive closure.
-   - Use [query](/query/quickstart) / [cquery](/query/cquery) to find
-     where new dependencies might have been added.
-
-2. `TargetMetrics.targets_configured`: representing the number of targets and
-   aspects configured in the build. A regression represents more work in
-   constructing and traversing the configured target graph.
-   - This is often due to the addition of dependencies and having to construct
-     the graph of their transitive closure.
-   - Use [cquery](/query/cquery) to find where new
-     dependencies might have been added.
-
-3. `ActionSummary.actions_created`: represents the actions created in the build,
-   and a regression represents more work in constructing the action graph. Note
-   that this also includes unused actions that might not have been executed.
-   - Use [aquery](/query/aquery) for debugging regressions;
-     we suggest starting with
-     [`--output=summary`](/reference/command-line-reference#flag--output)
-     before further drilling down with
-     [`--skyframe_state`](/reference/command-line-reference#flag--skyframe_state).
-
-4. `ActionSummary.actions_executed`: the number of actions executed, a
-   regression directly represents more work in executing these actions.
-   - The [BEP](/remote/bep) writes out the action statistics
-     `ActionData` that shows the most executed action types.
By default, it
-     collects the top 20 action types, but you can pass the
-     [`--experimental_record_metrics_for_all_mnemonics`](/reference/command-line-reference#flag--experimental_record_metrics_for_all_mnemonics)
-     flag to collect this data for all action types that were executed.
-   - This should help you figure out what kinds of (additional) actions were
-     executed.
-
-5. `BuildGraphSummary.outputArtifactCount`: the number of artifacts created by
-   the executed actions.
-   - If the number of actions executed did not increase, then it is likely that
-     a rule implementation was changed.
-
-
-These metrics are all affected by the state of the local cache, so you will
-want to ensure that the builds you extract these metrics from are
-**clean builds**.
-
-We have noted that a regression in any of these metrics can be accompanied by
-regressions in wall time, CPU time, and memory usage.
-
-### Usage of local resources
-
-Bazel consumes a variety of resources on your local machine (both for analyzing
-the build graph and driving the execution, and for running local actions). This
-can affect the performance and availability of your machine, both while
-performing the build and for other tasks.
-
-#### Time spent
-
-Perhaps the metric most susceptible to noise (and one that can vary greatly from
-build to build) is time; in particular, wall time, CPU time, and system time. You
-can use [bazel-bench](https://github.com/bazelbuild/bazel-bench) to get
-a benchmark for these metrics, and with a sufficient number of `--runs`, you can
-increase the statistical significance of your measurement.
-
-- **Wall time** is the real world time elapsed.
-    - If _only_ wall time regresses, we suggest collecting a
-      [JSON trace profile](/advanced/performance/json-trace-profile) and looking
-      for differences. Otherwise, it would likely be more efficient to
-      investigate other regressed metrics as they could have affected the wall
-      time.
-
-- **CPU time** is the time spent by the CPU executing user code.
-    - If the CPU time regresses across two project commits, we suggest collecting
-      a Starlark CPU profile. You should probably also use `--nobuild` to
-      restrict the build to the analysis phase since that is where most of the
-      CPU-heavy work is done.
-
-- **System time** is the time spent by the CPU in the kernel.
-    - If system time regresses, it is mostly correlated with I/O when Bazel reads
-      files from your file system.
-
-#### System-wide load profiling
-
-Using the
-[`--experimental_collect_load_average_in_profiler`](https://github.com/bazelbuild/bazel/blob/6.0.0/src/main/java/com/google/devtools/build/lib/runtime/CommonCommandOptions.java#L306-L312)
-flag introduced in Bazel 6.0, the
-[JSON trace profiler](/advanced/performance/json-trace-profile) collects the
-system load average during the invocation.
-
-![Profile that includes system load average](/docs/images/json-trace-profile-system-load-average.png "Profile that includes system load average")
-
-**Figure 1.** Profile that includes system load average.
-
-A high load during a Bazel invocation can be an indication that Bazel schedules
-too many local actions in parallel for your machine. You might want to look into
-adjusting
-[`--local_cpu_resources`](/reference/command-line-reference#flag--local_cpu_resources)
-and [`--local_ram_resources`](/reference/command-line-reference#flag--local_ram_resources),
-especially in container environments (at least until
-[#16512](https://github.com/bazelbuild/bazel/pull/16512) is merged).
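-
-For example, a minimal sketch of such tuning in a `.bazelrc` (the values below
-are hypothetical and should be matched to your machine or container limits):
-
-```
-# .bazelrc: cap Bazel's local action scheduling at the resources the
-# container actually has, rather than what the host kernel advertises.
-build --local_cpu_resources=4
-build --local_ram_resources=8192
-```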
-
-
-#### Monitoring Bazel memory usage
-
-There are two main sources for Bazel’s memory usage: Bazel `info` and the
-[BEP](/remote/bep).
-
-- `bazel info used-heap-size-after-gc`: The amount of used memory in bytes after
-  a call to `System.gc()`.
-  - [Bazel bench](https://github.com/bazelbuild/bazel-bench)
-    provides benchmarks for this metric as well.
-  - Additionally, there are `peak-heap-size`, `max-heap-size`, `used-heap-size`
-    and `committed-heap-size` (see
-    [documentation](/docs/user-manual#configuration-independent-data)), but these
-    are less relevant.
-
-- [BEP](/remote/bep)’s
-  `MemoryMetrics.peak_post_gc_heap_size`: The peak JVM heap size in
-  bytes post GC (requires setting
-  [`--memory_profile`](/reference/command-line-reference#flag--memory_profile),
-  which attempts to force a full GC).
-
-A regression in memory usage is usually a result of a regression in
-[build request size metrics](#deterministic-build-metrics-as-a-proxy-for-build-performance),
-which are often due to the addition of dependencies or a change in the rule
-implementation.
-
-To analyze Bazel’s memory footprint on a more granular level, we recommend using
-the [built-in memory profiler](/rules/performance#memory-profiling)
-for rules.
-
-#### Memory profiling of persistent workers
-
-While [persistent workers](/remote/persistent) can help to speed up builds
-significantly (especially for interpreted languages), their memory footprint can
-be problematic. Bazel collects metrics on its workers; in particular, the
-`WorkerMetrics.WorkerStats.worker_memory_in_kb` field tells you how much memory
-workers use (by mnemonic).
-
-The [JSON trace profiler](/advanced/performance/json-trace-profile) also
-collects persistent worker memory usage during the invocation by passing in the
-[`--experimental_collect_worker_data_in_profiler`](https://github.com/bazelbuild/bazel/blob/6.0.0/src/main/java/com/google/devtools/build/lib/runtime/CommonCommandOptions.java#L314-L320)
-flag (new in Bazel 6.0).
-
-![Profile that includes workers memory usage](/docs/images/json-trace-profile-workers-memory-usage.png "Profile that includes workers memory usage")
-
-**Figure 2.** Profile that includes workers memory usage.
-
-Lowering the value of
-[`--worker_max_instances`](/reference/command-line-reference#flag--worker_max_instances)
-(default 4) might help to reduce
-the amount of memory used by persistent workers. We are actively working on
-making Bazel’s resource manager and scheduler smarter so that such fine-tuning
-will be required less often in the future.
-
-### Monitoring network traffic for remote builds
-
-In remote execution, Bazel downloads artifacts that were built as a result of
-executing actions. As such, your network bandwidth can affect the performance
-of your build.
-
-If you are using remote execution for your builds, you might want to consider
-monitoring the network traffic during the invocation using the
-`NetworkMetrics.SystemNetworkStats` proto from the [BEP](/remote/bep)
-(requires passing `--experimental_collect_system_network_usage`).
-
-Furthermore, [JSON trace profiles](/advanced/performance/json-trace-profile)
-allow you to view system-wide network usage throughout the course of the build
-by passing the `--experimental_collect_system_network_usage` flag (new in Bazel
-6.0).
-
-![Profile that includes system-wide network usage](/docs/images/json-trace-profile-network-usage.png "Profile that includes system-wide network usage")
-
-**Figure 3.** Profile that includes system-wide network usage.
-
-A high but rather flat network usage when using remote execution might indicate
-that the network is the bottleneck in your build; if you are not using it already,
-consider turning on Build without the Bytes by passing
-[`--remote_download_minimal`](/reference/command-line-reference#flag--remote_download_minimal).
-This will speed up your builds by avoiding the download of unnecessary intermediate artifacts.
-
-Another option is to configure a local
-[disk cache](/reference/command-line-reference#flag--disk_cache) to save on
-download bandwidth.
diff --git a/8.3.1/advanced/performance/build-performance-metrics.mdx b/8.3.1/advanced/performance/build-performance-metrics.mdx
deleted file mode 100644
index 8391ea8..0000000
--- a/8.3.1/advanced/performance/build-performance-metrics.mdx
+++ /dev/null
@@ -1,97 +0,0 @@
----
-title: 'Extracting build performance metrics'
----
-
-
-
-Probably every Bazel user has experienced builds that were slow or slower than
-anticipated. Improving the performance of individual builds has particular value
-for targets with significant impact, such as:
-
-1. Core developer targets that are frequently iterated on and (re)built.
-
-2. Common libraries widely depended upon by other targets.
-
-3. A representative target from a class of targets (e.g. custom rules);
-   diagnosing and fixing issues in one build might help to resolve issues at a
-   larger scale.
-
-An important step in improving the performance of builds is to understand where
-resources are spent. This page lists different metrics you can collect.
-[Breaking down build performance](/configure/build-performance-breakdown) showcases
-how you can use these metrics to detect and fix build performance issues.
-
-There are a few main ways to extract metrics from your Bazel builds, namely:
-
-## Build Event Protocol (BEP)
-
-Bazel outputs a variety of protocol buffers
-[`build_event_stream.proto`](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto)
-through the [Build Event Protocol (BEP)](/remote/bep), which
-can be aggregated by a backend of your choosing. Depending on your use cases,
-you might decide to aggregate the metrics in various ways, but here we go over
-some generally useful concepts and proto fields.
-
-## Bazel’s query / cquery / aquery commands
-
-Bazel provides three different query modes ([query](/query/quickstart),
-[cquery](/query/cquery) and [aquery](/query/aquery)) that allow users
-to query the target graph, the configured target graph, and the action graph,
-respectively. The query language provides a
-[suite of functions](/query/language#functions) usable across the different
-query modes, allowing you to customize your queries according to your needs.
-
-## JSON Trace Profiles
-
-For every build-like Bazel invocation, Bazel writes a trace profile in JSON
-format. The [JSON trace profile](/advanced/performance/json-trace-profile) can
-be very useful to quickly understand what Bazel spent time on during the
-invocation.
-
-## Execution Log
-
-The [execution log](/remote/cache-remote) can help you to troubleshoot and fix
-missing remote cache hits due to machine and environment differences or
-non-deterministic actions.
If you pass the flag
-[`--experimental_execution_log_spawn_metrics`](/reference/command-line-reference#flag--experimental_execution_log_spawn_metrics)
-(available from Bazel 5.2), it will also contain detailed spawn metrics, both for
-locally and remotely executed actions. You can use these metrics, for example, to
-compare local and remote machine performance, or to find out
-which part of the spawn execution is consistently slower than expected (for
-example, due to queuing).
-
-## Execution Graph Log
-
-While the JSON trace profile contains the critical path information, sometimes
-you need additional information on the dependency graph of the executed actions.
-Starting with Bazel 6.0, you can pass the flags
-`--experimental_execution_graph_log` and
-`--experimental_execution_graph_log_dep_type=all` to write out a log about the
-executed actions and their inter-dependencies.
-
-This information can be used to understand the drag that is added by a node on
-the critical path. The drag is the amount of time that can potentially be saved
-by removing a particular node from the execution graph.
-
-The data helps you predict the impact of changes to the build and action graph
-before you actually make them.
-
-## Benchmarking with bazel-bench
-
-[Bazel bench](https://github.com/bazelbuild/bazel-bench) is a
-benchmarking tool for Git projects to benchmark build performance in the
-following cases:
-
-* **Project benchmark:** Benchmarking two git commits against each other at a
-  single Bazel version. Used to detect regressions in your build (often through
-  the addition of dependencies).
-
-* **Bazel benchmark:** Benchmarking two versions of Bazel against each other at
-  a single git commit. Used to detect regressions within Bazel itself (if you
-  happen to maintain / fork Bazel).
-
-Benchmarks monitor wall time, CPU time, system time, and Bazel’s retained
-heap size.
-
-It is also recommended to run Bazel bench on dedicated, physical machines that
-are not running other processes so as to reduce sources of variability.
diff --git a/8.3.1/advanced/performance/iteration-speed.mdx b/8.3.1/advanced/performance/iteration-speed.mdx
deleted file mode 100644
index 2bbf839..0000000
--- a/8.3.1/advanced/performance/iteration-speed.mdx
+++ /dev/null
@@ -1,93 +0,0 @@
----
-title: 'Optimize Iteration Speed'
----
-
-
-
-This page describes how to optimize Bazel's build performance when running Bazel
-repeatedly.
-
-## Bazel's Runtime State
-
-A Bazel invocation involves several interacting parts.
-
-* The `bazel` command line interface (CLI) is the user-facing front-end tool
-  and receives commands from the user.
-
-* The CLI tool starts a [*Bazel server*](https://bazel.build/run/client-server)
-  for each distinct [output base](https://bazel.build/remote/output-directories).
-  The Bazel server is generally persistent, but will shut down after some idle
-  time so as to not waste resources.
-
-* The Bazel server performs the loading and analysis steps for a given command
-  (`build`, `run`, `cquery`, etc.), in which it constructs the necessary parts
-  of the build graph in memory. The resulting data structures are retained in
-  the Bazel server as part of the *analysis cache*.
-
-* The Bazel server can also perform the action execution, or it can send
-  actions off for remote execution if it is set up to do so. The results of
-  action executions are also cached, namely in the *action cache* (or
-  *execution cache*, which may be either local or remote, and it may be shared
-  among Bazel servers).
-
-* The result of the Bazel invocation is made available in the output tree.
-
-## Running Bazel Iteratively
-
-In a typical developer workflow, it is common to build (or run) a piece of code
-repeatedly, often at a very high frequency (e.g. to resolve some compilation
-error or investigate a failing test). In this situation, it is important that
-repeated invocations of `bazel` have as little overhead as possible relative to
-the underlying, repeated action (e.g. invoking a compiler, or executing a test).
-
-With this in mind, we take another look at Bazel's runtime state:
-
-The analysis cache is a critical piece of data. A significant amount of time can
-be spent just on the loading and analysis phases of a cold run (i.e. a run just
-after the Bazel server was started or when the analysis cache was discarded).
-For a single, successful cold build (e.g. for a production release) this cost is
-bearable, but for repeatedly building the same target it is important that this
-cost be amortized and not repeated on each invocation.
-
-The analysis cache is rather volatile. First off, it is part of the in-process
-state of the Bazel server, so losing the server loses the cache. But the cache
-is also *invalidated* very easily: for example, many `bazel` command line flags
-cause the cache to be discarded. This is because many flags affect the build
-graph (e.g. because of
-[configurable attributes](https://bazel.build/configure/attributes)). Some flag
-changes can also cause the Bazel server to be restarted (e.g. changing
-[startup options](https://bazel.build/docs/user-manual#startup-options)).
-
-A good execution cache is also valuable for build performance. An execution
-cache can be kept locally
-[on disk](https://bazel.build/remote/caching#disk-cache), or
-[remotely](https://bazel.build/remote/caching). The cache can be shared among
-Bazel servers, and indeed among developers.
-
-## Avoid discarding the analysis cache
-
-Bazel will print a warning if either the analysis cache was discarded or the
-server was restarted. Either of these should be avoided during iterative use:
-
-* Be mindful of changing `bazel` flags in the middle of an iterative
-  workflow. For example, mixing a `bazel build -c opt` with a `bazel cquery`
-  causes each command to discard the analysis cache of the other. In general,
-  try to use a fixed set of flags for the duration of a particular workflow.
-
-* Losing the Bazel server loses the analysis cache. The Bazel server has a
-  [configurable](https://bazel.build/docs/user-manual#max-idle-secs) idle
-  time, after which it shuts down. You can configure this time via your
-  bazelrc file to suit your needs. The server is also restarted when startup
-  flags change, so, again, avoid changing those flags if possible.
-
-* Beware that the Bazel server is killed if you press
-  Ctrl-C repeatedly while Bazel is running. It is tempting to try to save time
-  by interrupting a running build that is no longer needed, but only press
-  Ctrl-C once to request a graceful end of the current invocation.
-
-* If you want to use multiple sets of flags from the same workspace, you can
-  use multiple, distinct output bases, switched with the `--output_base`
-  flag. Each output base gets its own Bazel server; see the sketch below.
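-
-A minimal sketch of that last point (the paths and flag sets here are
-hypothetical):
-
-```sh
-# Each output base gets its own server and its own analysis cache,
-# so the two flag sets no longer invalidate each other.
-bazel --output_base="$HOME/.cache/bazel-opt" build -c opt //pkg:target
-bazel --output_base="$HOME/.cache/bazel-dbg" build -c dbg //pkg:target
-```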
-
-To make this condition an error rather than a warning, you can use the
-`--noallow_analysis_cache_discard` flag (introduced in Bazel 6.4.0).
diff --git a/8.3.1/advanced/performance/json-trace-profile.mdx b/8.3.1/advanced/performance/json-trace-profile.mdx
deleted file mode 100644
index 56e278c..0000000
--- a/8.3.1/advanced/performance/json-trace-profile.mdx
+++ /dev/null
@@ -1,200 +0,0 @@
----
-title: 'JSON Trace Profile'
----
-
-
-
-The JSON trace profile can be very useful to quickly understand what Bazel spent
-time on during the invocation.
-
-By default, for all build-like commands and query, Bazel writes a profile into
-the output base named `command-$INVOCATION_ID.profile.gz`, where
-`$INVOCATION_ID` is the invocation identifier of the command. Bazel also creates
-a symlink called `command.profile.gz` in the output base that points to the
-profile of the latest command. You can configure whether a profile is written with the
-[`--generate_json_trace_profile`](/reference/command-line-reference#flag--generate_json_trace_profile)
-flag, and the location it is written to with the
-[`--profile`](/docs/user-manual#profile) flag. Locations ending with `.gz` are
-compressed with GZIP. Bazel keeps the last 5 profiles, configurable by
-[`--profiles_to_retain`](/reference/command-line-reference#flag--profiles_to_retain),
-in the output base by default for post-build analysis. Explicitly passing a
-profile path with `--profile` disables automatic garbage collection.
-
-## Tools
-
-You can load this profile into `chrome://tracing` or analyze and
-post-process it with other tools.
-
-### `chrome://tracing`
-
-To visualize the profile, open `chrome://tracing` in a Chrome browser tab,
-click "Load" and pick the (potentially compressed) profile file. For more
-detailed results, click the boxes in the lower left corner.
-
-Example profile:
-
-![Example profile](/docs/images/json-trace-profile.png "Example profile")
-
-**Figure 1.** Example profile.
-
-You can use these keyboard controls to navigate:
-
-* Press `1` for "select" mode. In this mode, you can select
-  particular boxes to inspect the event details (see lower left corner).
-  Select multiple events to get a summary and aggregated statistics.
-* Press `2` for "pan" mode. Then drag the mouse to move the view. You
-  can also use `a`/`d` to move left/right.
-* Press `3` for "zoom" mode. Then drag the mouse to zoom. You can
-  also use `w`/`s` to zoom in/out.
-* Press `4` for "timing" mode where you can measure the distance
-  between two events.
-* Press `?` to learn about all controls.
-
-### `bazel analyze-profile`
-
-The Bazel subcommand [`analyze-profile`](/docs/user-manual#analyze-profile)
-consumes a profile in this format and prints cumulative statistics for
-different task types for each build phase and an analysis of the critical path.
-
-For example, the commands
-
-```
-$ bazel build --profile=/tmp/profile.gz //path/to:target
-...
-$ bazel analyze-profile /tmp/profile.gz
-```
-
-may yield output of this form:
-
-```
-INFO: Profile created on Tue Jun 16 08:59:40 CEST 2020, build ID: 0589419c-738b-4676-a374-18f7bbc7ac23, output base: /home/johndoe/.cache/bazel/_bazel_johndoe/d8eb7a85967b22409442664d380222c0
-
-=== PHASE SUMMARY INFORMATION ===
-
-Total launch phase time         1.070 s   12.95%
-Total init phase time           0.299 s    3.62%
-Total loading phase time        0.878 s   10.64%
-Total analysis phase time       1.319 s   15.98%
-Total preparation phase time    0.047 s    0.57%
-Total execution phase time      4.629 s   56.05%
-Total finish phase time         0.014 s    0.18%
-------------------------------------------------
-Total run time                  8.260 s  100.00%
-
-Critical path (4.245 s):
-       Time Percentage   Description
-    8.85 ms    0.21%   _Ccompiler_Udeps for @local_config_cc// compiler_deps
-    3.839 s   90.44%   action 'Compiling external/com_google_protobuf/src/google/protobuf/compiler/php/php_generator.cc [for host]'
-     270 ms    6.36%   action 'Linking external/com_google_protobuf/protoc [for host]'
-    0.25 ms    0.01%   runfiles for @com_google_protobuf// protoc
-     126 ms    2.97%   action 'ProtoCompile external/com_google_protobuf/python/google/protobuf/compiler/plugin_pb2.py'
-    0.96 ms    0.02%   runfiles for //tools/aquery_differ aquery_differ
-```
-
-### Bazel Invocation Analyzer
-
-The open-source
-[Bazel Invocation Analyzer](https://github.com/EngFlow/bazel_invocation_analyzer)
-consumes a profile in this format and prints suggestions on how to improve
-the build’s performance. This analysis can be performed using its CLI or on
-[https://analyzer.engflow.com](https://analyzer.engflow.com).
-
-### `jq`
-
-`jq` is like `sed` for JSON data. An example usage of `jq` to extract all
-durations of the sandbox creation step in local action execution:
-
-```
-$ zcat $(../bazel-6.0.0rc1-linux-x86_64 info output_base)/command.profile.gz | jq '.traceEvents | .[] | select(.name == "sandbox.createFileSystem") | .dur'
-6378
-7247
-11850
-13756
-6555
-7445
-8487
-15520
-[...]
-```
-
-## Profile information
-
-The profile contains multiple rows. Usually the bulk of rows represent Bazel
-threads and their corresponding events, but some special rows are also included.
-
-The special rows included depend on the version of Bazel invoked when the
-profile was created, and may be customized by different flags.
-
-Figure 1 shows a profile created with Bazel v5.3.1 and includes these rows:
-
-* `action count`: Displays how many concurrent actions were in flight. Click
-  on it to see the actual value. Should go up to the value of
-  [`--jobs`](/reference/command-line-reference#flag--jobs) in clean
-  builds.
-* `CPU usage (Bazel)`: For each second of the build, displays the amount of
-  CPU that was used by Bazel (a value of 1 equals one core being 100% busy).
-* `Critical Path`: Displays one block for each action on the critical path.
-* `Main Thread`: Bazel’s main thread. Useful to get a high-level picture of
-  what Bazel is doing, for example "Launch Blaze", "evaluateTargetPatterns",
-  and "runAnalysisPhase".
-* `Garbage Collector`: Displays minor and major Garbage Collection (GC)
-  pauses.
-
-## Common performance issues
-
-When analyzing performance profiles, look for:
-
-* Slower than expected analysis phase (`runAnalysisPhase`), especially on
-  incremental builds. This can be a sign of a poor rule implementation, for
-  example one that flattens depsets. Package loading can be slow due to an
-  excessive number of targets, complex macros, or recursive globs.
-* Individual slow actions, especially those on the critical path.
It might be
-  possible to split large actions into multiple smaller actions or reduce the
-  set of (transitive) dependencies to speed them up. Also check for an unusually
-  high non-`PROCESS_TIME` (such as `REMOTE_SETUP` or `FETCH`).
-* Bottlenecks, that is, a small number of threads are busy while all others are
-  idle, waiting for the result (see around 22s and 29s in Figure 1).
-  Optimizing this will most likely require touching the rule implementations
-  or Bazel itself to introduce more parallelism. This can also happen when
-  there is an unusually large amount of GC.
-
-## Profile file format
-
-The top-level object contains metadata (`otherData`) and the actual tracing data
-(`traceEvents`). The metadata contains extra info, for example the invocation ID
-and date of the Bazel invocation.
-
-Example:
-
-```json
-{
-  "otherData": {
-    "build_id": "101bff9a-7243-4c1a-8503-9dc6ae4c3b05",
-    "date": "Wed Oct 26 08:22:35 CEST 2022",
-    "profile_finish_ts": "1677666095162000",
-    "output_base": "/usr/local/google/_bazel_johndoe/573d4be77eaa72b91a3dfaa497bf8cd0"
-  },
-  "traceEvents": [
-    {"name":"thread_name","ph":"M","pid":1,"tid":0,"args":{"name":"Critical Path"}},
-    ...
-    {"cat":"build phase marker","name":"Launch Blaze","ph":"X","ts":-1306000,"dur":1306000,"pid":1,"tid":21},
-    ...
-    {"cat":"package creation","name":"foo","ph":"X","ts":2685358,"dur":784,"pid":1,"tid":246},
-    ...
-    {"name":"thread_name","ph":"M","pid":1,"tid":11,"args":{"name":"Garbage Collector"}},
-    {"cat":"gc notification","name":"minor GC","ph":"X","ts":825986,"dur":11000,"pid":1,"tid":11},
-    ...
-    {"cat":"action processing","name":"Compiling foo/bar.c","ph":"X","ts":54413389,"dur":357594,"pid":1,"args":{"mnemonic":"CppCompile"},"tid":341},
-  ]
-}
-```
-
-Timestamps (`ts`) and durations (`dur`) in the trace events are given in
-microseconds. The category (`cat`) is one of the enum values of `ProfilerTask`.
-Note that some events are merged together if they are very short and close to
-each other; pass
-[`--noslim_profile`](/reference/command-line-reference#flag--slim_profile)
-if you would like to prevent event merging.
-
-See also the
-[Chrome Trace Event Format Specification](https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview).
diff --git a/8.3.1/advanced/performance/memory.mdx b/8.3.1/advanced/performance/memory.mdx
deleted file mode 100644
index 844e691..0000000
--- a/8.3.1/advanced/performance/memory.mdx
+++ /dev/null
@@ -1,97 +0,0 @@
----
-title: 'Optimize Memory'
----
-
-
-
-This page describes how to limit and reduce the memory Bazel uses.
-
-## Running Bazel with Limited RAM
-
-In certain situations, you may want Bazel to use minimal memory. You can set the
-maximum heap via the startup flag
-[`--host_jvm_args`](/docs/user-manual#host-jvm-args),
-like `--host_jvm_args=-Xmx2g`.
-
-### Trade incremental build speeds for memory
-
-If your builds are too big, Bazel may throw an `OutOfMemoryError` (OOM) when
-it doesn't have enough memory. You can make Bazel use less memory, at the cost
-of slower incremental builds, by passing the following command flags:
-[`--discard_analysis_cache`](/docs/user-manual#discard-analysis-cache),
-[`--nokeep_state_after_build`](/reference/command-line-reference#flag--keep_state_after_build),
-and
-[`--notrack_incremental_state`](/reference/command-line-reference#flag--track_incremental_state).
-
-These flags will minimize the memory that Bazel uses in a build, at the cost of
-making future builds slower than a standard incremental build would be.
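-
-For example, a minimal low-memory invocation combining all three flags (the
-target name is hypothetical):
-
-```sh
-# Trades incremental build speed for a smaller Bazel heap.
-bazel build //pkg:target \
-  --discard_analysis_cache \
-  --nokeep_state_after_build \
-  --notrack_incremental_state
-```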
-
-You can also pass any one of these flags individually:
-
- * `--discard_analysis_cache` will reduce the memory used during execution (not
-analysis). Incremental builds will not have to redo package loading, but will
-have to redo analysis and execution (although the on-disk action cache can
-prevent most re-execution).
- * `--notrack_incremental_state` will not store any edges in Bazel's internal
- dependency graph, so that it is unusable for incremental builds. The next build
- will discard that data, but it is preserved until then, for internal debugging,
- unless `--nokeep_state_after_build` is specified.
- * `--nokeep_state_after_build` will discard all data after the build, so that
- incremental builds have to build from scratch (except for the on-disk action
- cache). Alone, it does not affect the high-water mark of the current build.
-
-### Trade build flexibility for memory with Skyfocus (Experimental)
-
-If you want to make Bazel use less memory *and* retain incremental build speeds,
-you can tell Bazel the working set of files that you will be modifying, and
-Bazel will only keep state needed to correctly incrementally rebuild changes to
-those files. This feature is called **Skyfocus**.
-
-To use Skyfocus, pass the `--experimental_enable_skyfocus` flag:
-
-```sh
-bazel build //pkg:target --experimental_enable_skyfocus
-```
-
-By default, the working set will be the set of files next to the target being
-built. In the example, all files in `//pkg` will be kept in the working set, and
-changes to files outside of the working set will be disallowed, until you issue
-`bazel clean` or restart the Bazel server.
-
-If you want to specify an exact set of files or directories, use the
-`--experimental_working_set` flag, like so:
-
-```sh
-bazel build //pkg:target --experimental_enable_skyfocus \
-  --experimental_working_set=path/to/another/dir,path/to/tests/dir
-```
-
-You can also pass `--experimental_skyfocus_dump_post_gc_stats` to show the
-amount of memory reduction.
-
-Putting it all together, you should see something like this:
-
-```none
-$ bazel test //pkg:target //tests/... --experimental_enable_skyfocus --experimental_working_set dir1,dir2,dir3/subdir --experimental_skyfocus_dump_post_gc_stats
-INFO: --experimental_enable_skyfocus is enabled. Blaze will reclaim memory not needed to build the working set. Run 'blaze dump --skyframe=working_set' to show the working set, after this command.
-WARNING: Changes outside of the working set will cause a build error.
-INFO: Analyzed 149 targets (4533 packages loaded, 169438 targets configured).
-INFO: Found 25 targets and 124 test targets...
-INFO: Updated working set successfully.
-INFO: Focusing on 334 roots, 3 leafs... (use --experimental_skyfocus_dump_keys to show them)
-INFO: Heap: 1237MB -> 676MB (-45.31%)
-INFO: Elapsed time: 192.670s ...
-INFO: Build completed successfully, 62303 total actions
-```
-
-For this example, using Skyfocus allowed Bazel to drop 561MB (45%) of memory,
-and incremental builds handling changes to files under `dir1`, `dir2`, and
-`dir3/subdir` will retain their speed, with the tradeoff that Bazel cannot
-rebuild changed files outside of these directories.
-
-## Memory Profiling
-
-Bazel comes with a built-in memory profiler that can help you check your rule’s
-memory use. Read more about this process on the
-[Memory Profiling section](/rules/performance#memory-profiling) of our
-documentation on how to improve the performance of custom rules.
diff --git a/8.3.1/basics/artifact-based-builds.mdx b/8.3.1/basics/artifact-based-builds.mdx
deleted file mode 100644
index 79f3514..0000000
--- a/8.3.1/basics/artifact-based-builds.mdx
+++ /dev/null
@@ -1,279 +0,0 @@
---
title: 'Artifact-Based Build Systems'
---

This page covers artifact-based build systems and the philosophy behind their
creation. Bazel is an artifact-based build system. While task-based build
systems are a good step above build scripts, they give too much power to
individual engineers by letting them define their own tasks.

Artifact-based build systems have a small number of tasks defined by the system
that engineers can configure in a limited way. Engineers still tell the system
**what** to build, but the build system determines **how** to build it. As with
task-based build systems, artifact-based build systems, such as Bazel, still
have buildfiles, but the contents of those buildfiles are very different. Rather
than being an imperative set of commands in a Turing-complete scripting language
describing how to produce an output, buildfiles in Bazel are a declarative
manifest describing a set of artifacts to build, their dependencies, and a
limited set of options that affect how they’re built. When engineers run `bazel`
on the command line, they specify a set of targets to build (the **what**), and
Bazel is responsible for configuring, running, and scheduling the compilation
steps (the **how**). Because the build system now has full control over what
tools to run when, it can make much stronger guarantees that allow it to be far
more efficient while still guaranteeing correctness.

## A functional perspective

It’s easy to make an analogy between artifact-based build systems and functional
programming. Traditional imperative programming languages (such as Java, C, and
Python) specify lists of statements to be executed one after another, in the
same way that task-based build systems let programmers define a series of steps
to execute. Functional programming languages (such as Haskell and ML), in
contrast, are structured more like a series of mathematical equations. In
functional languages, the programmer describes a computation to perform, but
leaves the details of when and exactly how that computation is executed to the
compiler.

This maps to the idea of declaring a manifest in an artifact-based build system
and letting the system figure out how to execute the build. Many problems can't
be easily expressed using functional programming, but the ones that can benefit
greatly from it: the language is often able to trivially parallelize such
programs and make strong guarantees about their correctness that would be
impossible in an imperative language. The easiest problems to express using
functional programming are the ones that simply involve transforming one piece
of data into another using a series of rules or functions. And that’s exactly
what a build system is: the whole system is effectively a mathematical function
that takes source files (and tools like the compiler) as inputs and produces
binaries as outputs. So, it’s not surprising that it works well to base a build
system around the tenets of functional programming.

## Understanding artifact-based build systems

Google's build system, Blaze, was the first artifact-based build system. Bazel
is the open-sourced version of Blaze.

Here’s what a buildfile (normally named `BUILD`) looks like in Bazel:

```python
java_binary(
    name = "MyBinary",
    srcs = ["MyBinary.java"],
    deps = [
        ":mylib",
    ],
)
java_library(
    name = "mylib",
    srcs = ["MyLibrary.java", "MyHelper.java"],
    visibility = ["//java/com/example/myproduct:__subpackages__"],
    deps = [
        "//java/com/example/common",
        "//java/com/example/myproduct/otherlib",
    ],
)
```

In Bazel, `BUILD` files define targets—the two types of targets here are
`java_binary` and `java_library`. Every target corresponds to an artifact that
can be created by the system: binary targets produce binaries that can be
executed directly, and library targets produce libraries that can be used by
binaries or other libraries. Every target has:

* `name`: how the target is referenced on the command line and by other
  targets
* `srcs`: the source files to be compiled to create the artifact for the target
* `deps`: other targets that must be built before this target and linked into
  it

Dependencies can either be within the same package (such as `MyBinary`’s
dependency on `:mylib`) or on a different package in the same source hierarchy
(such as `mylib`’s dependency on `//java/com/example/common`).

As with task-based build systems, you perform builds using Bazel’s command-line
tool. To build the `MyBinary` target, you run `bazel build :MyBinary`. After
entering that command for the first time in a clean repository, Bazel:

1. Parses every `BUILD` file in the workspace to create a graph of dependencies
   among artifacts.
1. Uses the graph to determine the transitive dependencies of `MyBinary`; that
   is, every target that `MyBinary` depends on and every target that those
   targets depend on, recursively.
1. Builds each of those dependencies, in order. Bazel starts by building each
   target that has no other dependencies and keeps track of which dependencies
   still need to be built for each target. As soon as all of a target’s
   dependencies are built, Bazel starts building that target. This process
   continues until every one of `MyBinary`’s transitive dependencies has been
   built.
1. Builds `MyBinary` to produce a final executable binary that links in all of
   the dependencies that were built in step 3.

Fundamentally, it might not seem like what’s happening here is that much
different from what happened when using a task-based build system. Indeed, the
end result is the same binary, and the process for producing it involved
analyzing a bunch of steps to find dependencies among them, and then running
those steps in order. But there are critical differences. The first one appears
in step 3: because Bazel knows that each target only produces a Java library, it
knows that all it has to do is run the Java compiler rather than an arbitrary
user-defined script, so it knows that it’s safe to run these steps in parallel.
This can produce an order of magnitude performance improvement over building
targets one at a time on a multicore machine, and is only possible because the
artifact-based approach leaves the build system in charge of its own execution
strategy so that it can make stronger guarantees about parallelism.

The benefits extend beyond parallelism, though. The next thing that this
approach gives us becomes apparent when the developer types `bazel
build :MyBinary` a second time without making any changes: Bazel exits in less
than a second with a message saying that the target is up to date.
This is
possible due to the functional programming paradigm we talked about
earlier—Bazel knows that each target is the result only of running a Java
compiler, and it knows that the output from the Java compiler depends only on
its inputs, so as long as the inputs haven’t changed, the output can be reused.
And this analysis works at every level; if `MyBinary.java` changes, Bazel knows
to rebuild `MyBinary` but reuse `mylib`. If a source file for
`//java/com/example/common` changes, Bazel knows to rebuild that library,
`mylib`, and `MyBinary`, but reuse `//java/com/example/myproduct/otherlib`.
Because Bazel knows about the properties of the tools it runs at every step,
it’s able to rebuild only the minimum set of artifacts each time while
guaranteeing that it won’t produce stale builds.

Reframing the build process in terms of artifacts rather than tasks is subtle
but powerful. By reducing the flexibility exposed to the programmer, the build
system can know more about what is being done at every step of the build. It can
use this knowledge to make the build far more efficient by parallelizing build
processes and reusing their outputs. But this is really just the first step, and
these building blocks of parallelism and reuse form the basis for a distributed
and highly scalable build system.

## Other nifty Bazel tricks

Artifact-based build systems fundamentally solve the problems with parallelism
and reuse that are inherent in task-based build systems. But there are still a
few problems that came up earlier that we haven’t addressed. Bazel has clever
ways of solving each of these, and we should discuss them before moving on.

### Tools as dependencies

One problem we ran into earlier was that builds depended on the tools installed
on our machine, and reproducing builds across systems could be difficult due to
different tool versions or locations. The problem becomes even more difficult
when your project uses languages that require different tools based on which
platform they’re being built on or compiled for (such as Windows versus Linux),
and each of those platforms requires a slightly different set of tools to do the
same job.

Bazel solves the first part of this problem by treating tools as dependencies of
each target. Every `java_library` in the workspace implicitly depends on a Java
compiler, which defaults to a well-known compiler. Whenever Bazel builds a
`java_library`, it checks to make sure that the specified compiler is available
at a known location. Just like any other dependency, if the Java compiler
changes, every artifact that depends on it is rebuilt.

Bazel solves the second part of the problem, platform independence, by setting
[build configurations](/run/build#build-config-cross-compilation). Rather than
targets depending directly on their tools, they depend on types of configurations:

* **Host configuration**: building tools that run during the build
* **Target configuration**: building the binary you ultimately requested

### Extending the build system

Bazel comes with targets for several popular programming languages out of the
box, but engineers will always want to do more—part of the benefit of task-based
systems is their flexibility in supporting any kind of build process, and it
would be better not to give that up in an artifact-based build system.
Fortunately, Bazel allows its supported target types to be extended by
[adding custom rules](/extending/rules).
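
To give a concrete flavor before walking through the anatomy, here is a minimal
sketch of a custom rule in Starlark. The rule name, attribute, file name, and
output are purely illustrative, not taken from any real ruleset:

```python
# hello_file.bzl -- a minimal, hypothetical custom rule (illustrative names).
def _hello_file_impl(ctx):
    # Declare the fixed output that this rule produces.
    out = ctx.actions.declare_file(ctx.label.name + ".txt")

    # Register an action with its declared inputs (none here) and outputs.
    ctx.actions.write(output = out, content = ctx.attr.message + "\n")

    # Tell Bazel which files this target provides to its dependents.
    return [DefaultInfo(files = depset([out]))]

hello_file = rule(
    implementation = _hello_file_impl,
    attrs = {"message": attr.string(default = "Hello, Bazel!")},
)
```

A `BUILD` file would `load()` this rule and instantiate it like any built-in
target; the next paragraph describes the general shape that all such rules
share.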

To define a rule in Bazel, the rule author declares the inputs that the rule
requires (in the form of attributes passed in the `BUILD` file) and the fixed
set of outputs that the rule produces. The author also defines the actions that
will be generated by that rule. Each action declares its inputs and outputs,
runs a particular executable or writes a particular string to a file, and can be
connected to other actions via its inputs and outputs. This means that actions
are the lowest-level composable unit in the build system—an action can do
whatever it wants so long as it uses only its declared inputs and outputs, and
Bazel takes care of scheduling actions and caching their results as appropriate.

The system isn’t foolproof given that there’s no way to stop an action developer
from doing something like introducing a nondeterministic process as part of
their action. But this doesn’t happen very often in practice, and pushing the
possibilities for abuse all the way down to the action level greatly decreases
opportunities for errors. Rules supporting many common languages and tools are
widely available online, and most projects will never need to define their own
rules. Even for those that do, rules only need to be defined in one central
place in the repository, meaning most engineers will be able to use those rules
without ever having to worry about their implementation.

### Isolating the environment

Actions sound like they might run into the same problems as tasks in other
systems—isn’t it still possible to write actions that both write to the same
file and end up conflicting with one another? Actually, Bazel makes these
conflicts impossible by using _[sandboxing](/docs/sandboxing)_. On supported
systems, every action is isolated from every other action via a filesystem
sandbox. Effectively, each action can see only a restricted view of the
filesystem that includes the inputs it has declared and any outputs it has
produced. This is enforced by systems such as LXC on Linux, the same technology
behind Docker. This means that it’s impossible for actions to conflict with one
another because they are unable to read any files they don’t declare, and any
files that they write but don’t declare will be thrown away when the action
finishes. Bazel also uses sandboxes to restrict actions from communicating via
the network.

### Making external dependencies deterministic

There’s still one problem remaining: build systems often need to download
dependencies (whether tools or libraries) from external sources rather than
directly building them. This can be seen in the example via the
`@com_google_common_guava_guava//jar` dependency, which downloads a `JAR` file
from Maven.

Depending on files outside of the current workspace is risky. Those files could
change at any time, potentially requiring the build system to constantly check
whether they’re fresh. If a remote file changes without a corresponding change
in the workspace source code, it can also lead to unreproducible builds—a build
might work one day and fail the next for no obvious reason due to an unnoticed
dependency change. Finally, an external dependency can introduce a huge security
risk when it is owned by a third party: if an attacker is able to infiltrate
that third-party server, they can replace the dependency file with something of
their own design, potentially giving them full control over your build
environment and its output.
- -The fundamental problem is that we want the build system to be aware of these -files without having to check them into source control. Updating a dependency -should be a conscious choice, but that choice should be made once in a central -place rather than managed by individual engineers or automatically by the -system. This is because even with a “Live at Head” model, we still want builds -to be deterministic, which implies that if you check out a commit from last -week, you should see your dependencies as they were then rather than as they are -now. - -Bazel and some other build systems address this problem by requiring a -workspacewide manifest file that lists a _cryptographic hash_ for every external -dependency in the workspace. The hash is a concise way to uniquely represent the -file without checking the entire file into source control. Whenever a new -external dependency is referenced from a workspace, that dependency’s hash is -added to the manifest, either manually or automatically. When Bazel runs a -build, it checks the actual hash of its cached dependency against the expected -hash defined in the manifest and redownloads the file only if the hash differs. - -If the artifact we download has a different hash than the one declared in the -manifest, the build will fail unless the hash in the manifest is updated. This -can be done automatically, but that change must be approved and checked into -source control before the build will accept the new dependency. This means that -there’s always a record of when a dependency was updated, and an external -dependency can’t change without a corresponding change in the workspace source. -It also means that, when checking out an older version of the source code, the -build is guaranteed to use the same dependencies that it was using at the point -when that version was checked in (or else it will fail if those dependencies are -no longer available). - -Of course, it can still be a problem if a remote server becomes unavailable or -starts serving corrupt data—this can cause all of your builds to begin failing -if you don’t have another copy of that dependency available. To avoid this -problem, we recommend that, for any nontrivial project, you mirror all of its -dependencies onto servers or services that you trust and control. Otherwise you -will always be at the mercy of a third party for your build system’s -availability, even if the checked-in hashes guarantee its security. diff --git a/8.3.1/basics/build-systems.mdx b/8.3.1/basics/build-systems.mdx deleted file mode 100644 index b3c6338..0000000 --- a/8.3.1/basics/build-systems.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: 'Why a Build System?' ---- - - - -This page discusses what build systems are, what they do, why you should use a -build system, and why compilers and build scripts aren't the best choice as your -organization starts to scale. It's intended for developers who don't have much -experience with a build system. - -## What is a build system? - -Fundamentally, all build systems have a straightforward purpose: they transform -the source code written by engineers into executable binaries that can be read -by machines. Build systems aren't just for human-authored code; they also allow -machines to create builds automatically, whether for testing or for releases to -production. In an organization with thousands of engineers, it's common that -most builds are triggered automatically rather than directly by engineers. - -### Can't I just use a compiler? 
- -The need for a build system might not be immediately obvious. Most engineers -don't use a build system while learning to code: most start by invoking tools -like `gcc` or `javac` directly from the command line, or the equivalent in an -integrated development environment (IDE). As long as all the source code is in -the same directory, a command like this works fine: - -```posix-terminal -javac *.java -``` - -This instructs the Java compiler to take every Java source file in the current -directory and turn it into a binary class file. In the simplest case, this is -all you need. - -However, as soon as code expands, the complications begin. `javac` is smart -enough to look in subdirectories of the current directory to find code to -import. But it has no way of finding code stored in _other parts_ of the -filesystem (perhaps a library shared by several projects). It also only knows -how to build Java code. Large systems often involve different pieces written in -a variety of programming languages with webs of dependencies among those pieces, -meaning no compiler for a single language can possibly build the entire system. - -Once you're dealing with code from multiple languages or multiple compilation -units, building code is no longer a one-step process. Now you must evaluate what -your code depends on and build those pieces in the proper order, possibly using -a different set of tools for each piece. If any dependencies change, you must -repeat this process to avoid depending on stale binaries. For a codebase of even -moderate size, this process quickly becomes tedious and error-prone. - -The compiler also doesn’t know anything about how to handle external -dependencies, such as third-party `JAR` files in Java. Without a build system, -you could manage this by downloading the dependency from the internet, sticking -it in a `lib` folder on the hard drive, and configuring the compiler to read -libraries from that directory. Over time, it's difficult to maintain the -updates, versions, and source of these external dependencies. - -### What about shell scripts? - -Suppose that your hobby project starts out simple enough that you can build it -using just a compiler, but you begin running into some of the problems described -previously. Maybe you still don’t think you need a build system and can automate -away the tedious parts using some simple shell scripts that take care of -building things in the correct order. This helps out for a while, but pretty -soon you start running into even more problems: - -* It becomes tedious. As your system grows more complex, you begin spending - almost as much time working on your build scripts as on real code. Debugging - shell scripts is painful, with more and more hacks being layered on top of - one another. - -* It’s slow. To make sure you weren’t accidentally relying on stale libraries, - you have your build script build every dependency in order every time you - run it. You think about adding some logic to detect which parts need to be - rebuilt, but that sounds awfully complex and error prone for a script. Or - you think about specifying which parts need to be rebuilt each time, but - then you’re back to square one. - -* Good news: it’s time for a release! Better go figure out all the arguments - you need to pass to the jar command to make your final build. And remember - how to upload it and push it out to the central repository. And build and - push the documentation updates, and send out a notification to users. 
Hmm, - maybe this calls for another script... - -* Disaster! Your hard drive crashes, and now you need to recreate your entire - system. You were smart enough to keep all of your source files in version - control, but what about those libraries you downloaded? Can you find them - all again and make sure they were the same version as when you first - downloaded them? Your scripts probably depended on particular tools being - installed in particular places—can you restore that same environment so that - the scripts work again? What about all those environment variables you set a - long time ago to get the compiler working just right and then forgot about? - -* Despite the problems, your project is successful enough that you’re able to - begin hiring more engineers. Now you realize that it doesn’t take a disaster - for the previous problems to arise—you need to go through the same painful - bootstrapping process every time a new developer joins your team. And - despite your best efforts, there are still small differences in each - person’s system. Frequently, what works on one person’s machine doesn’t work - on another’s, and each time it takes a few hours of debugging tool paths or - library versions to figure out where the difference is. - -* You decide that you need to automate your build system. In theory, this is - as simple as getting a new computer and setting it up to run your build - script every night using cron. You still need to go through the painful - setup process, but now you don’t have the benefit of a human brain being - able to detect and resolve minor problems. Now, every morning when you get - in, you see that last night’s build failed because yesterday a developer - made a change that worked on their system but didn’t work on the automated - build system. Each time it’s a simple fix, but it happens so often that you - end up spending a lot of time each day discovering and applying these simple - fixes. - -* Builds become slower and slower as the project grows. One day, while waiting - for a build to complete, you gaze mournfully at the idle desktop of your - coworker, who is on vacation, and wish there were a way to take advantage of - all that wasted computational power. - -You’ve run into a classic problem of scale. For a single developer working on at -most a couple hundred lines of code for at most a week or two (which might have -been the entire experience thus far of a junior developer who just graduated -university), a compiler is all you need. Scripts can maybe take you a little bit -farther. But as soon as you need to coordinate across multiple developers and -their machines, even a perfect build script isn’t enough because it becomes very -difficult to account for the minor differences in those machines. At this point, -this simple approach breaks down and it’s time to invest in a real build system. diff --git a/8.3.1/basics/dependencies.mdx b/8.3.1/basics/dependencies.mdx deleted file mode 100644 index 1d3bf8f..0000000 --- a/8.3.1/basics/dependencies.mdx +++ /dev/null @@ -1,301 +0,0 @@ ---- -title: 'Dependency Management' ---- - - - -In looking through the previous pages, one theme repeats over and over: managing -your own code is fairly straightforward, but managing its dependencies is much -more difficult. 
There are all sorts of dependencies: sometimes there’s a -dependency on a task (such as “push the documentation before I mark a release as -complete”), and sometimes there’s a dependency on an artifact (such as “I need -to have the latest version of the computer vision library to build my code”). -Sometimes, you have internal dependencies on another part of your codebase, and -sometimes you have external dependencies on code or data owned by another team -(either in your organization or a third party). But in any case, the idea of “I -need that before I can have this” is something that recurs repeatedly in the -design of build systems, and managing dependencies is perhaps the most -fundamental job of a build system. - -## Dealing with Modules and Dependencies - -Projects that use artifact-based build systems like Bazel are broken into a set -of modules, with modules expressing dependencies on one another via `BUILD` -files. Proper organization of these modules and dependencies can have a huge -effect on both the performance of the build system and how much work it takes to -maintain. - -## Using Fine-Grained Modules and the 1:1:1 Rule - -The first question that comes up when structuring an artifact-based build is -deciding how much functionality an individual module should encompass. In Bazel, -a _module_ is represented by a target specifying a buildable unit like a -`java_library` or a `go_binary`. At one extreme, the entire project could be -contained in a single module by putting one `BUILD` file at the root and -recursively globbing together all of that project’s source files. At the other -extreme, nearly every source file could be made into its own module, effectively -requiring each file to list in a `BUILD` file every other file it depends on. - -Most projects fall somewhere between these extremes, and the choice involves a -trade-off between performance and maintainability. Using a single module for the -entire project might mean that you never need to touch the `BUILD` file except -when adding an external dependency, but it means that the build system must -always build the entire project all at once. This means that it won’t be able to -parallelize or distribute parts of the build, nor will it be able to cache parts -that it’s already built. One-module-per-file is the opposite: the build system -has the maximum flexibility in caching and scheduling steps of the build, but -engineers need to expend more effort maintaining lists of dependencies whenever -they change which files reference which. - -Though the exact granularity varies by language (and often even within -language), Google tends to favor significantly smaller modules than one might -typically write in a task-based build system. A typical production binary at -Google often depends on tens of thousands of targets, and even a moderate-sized -team can own several hundred targets within its codebase. For languages like -Java that have a strong built-in notion of packaging, each directory usually -contains a single package, target, and `BUILD` file (Pants, another build system -based on Bazel, calls this the 1:1:1 rule). Languages with weaker packaging -conventions frequently define multiple targets per `BUILD` file. - -The benefits of smaller build targets really begin to show at scale because they -lead to faster distributed builds and a less frequent need to rebuild targets. 
-The advantages become even more compelling after testing enters the picture, as -finer-grained targets mean that the build system can be much smarter about -running only a limited subset of tests that could be affected by any given -change. Because Google believes in the systemic benefits of using smaller -targets, we’ve made some strides in mitigating the downside by investing in -tooling to automatically manage `BUILD` files to avoid burdening developers. - -Some of these tools, such as `buildifier` and `buildozer`, are available with -Bazel in the [`buildtools` -directory](https://github.com/bazelbuild/buildtools). - -## Minimizing Module Visibility - -Bazel and other build systems allow each target to specify a visibility — a -property that determines which other targets may depend on it. A private target -can only be referenced within its own `BUILD` file. A target may grant broader -visibility to the targets of an explicitly defined list of `BUILD` files, or, in -the case of public visibility, to every target in the workspace. - -As with most programming languages, it is usually best to minimize visibility as -much as possible. Generally, teams at Google will make targets public only if -those targets represent widely used libraries available to any team at Google. -Teams that require others to coordinate with them before using their code will -maintain an allowlist of customer targets as their target’s visibility. Each -team’s internal implementation targets will be restricted to only directories -owned by the team, and most `BUILD` files will have only one target that isn’t -private. - -## Managing Dependencies - -Modules need to be able to refer to one another. The downside of breaking a -codebase into fine-grained modules is that you need to manage the dependencies -among those modules (though tools can help automate this). Expressing these -dependencies usually ends up being the bulk of the content in a `BUILD` file. - -### Internal dependencies - -In a large project broken into fine-grained modules, most dependencies are -likely to be internal; that is, on another target defined and built in the same -source repository. Internal dependencies differ from external dependencies in -that they are built from source rather than downloaded as a prebuilt artifact -while running the build. This also means that there’s no notion of “version” for -internal dependencies—a target and all of its internal dependencies are always -built at the same commit/revision in the repository. One issue that should be -handled carefully with regard to internal dependencies is how to treat -transitive dependencies (Figure 1). Suppose target A depends on target B, which -depends on a common library target C. Should target A be able to use classes -defined in target C? - -[![Transitive -dependencies](/images/transitive-dependencies.png)](/images/transitive-dependencies.png) - -**Figure 1**. Transitive dependencies - -As far as the underlying tools are concerned, there’s no problem with this; both -B and C will be linked into target A when it is built, so any symbols defined in -C are known to A. Bazel allowed this for many years, but as Google grew, we -began to see problems. Suppose that B was refactored such that it no longer -needed to depend on C. If B’s dependency on C was then removed, A and any other -target that used C via a dependency on B would break. Effectively, a target’s -dependencies became part of its public contract and could never be safely -changed. 
This meant that dependencies accumulated over time, and builds at Google
started to slow down.

Google eventually solved this issue by introducing a “strict transitive
dependency mode” in Bazel. In this mode, Bazel detects whether a target tries to
reference a symbol without depending on it directly and, if so, fails with an
error and a shell command that can be used to automatically insert the
dependency. Rolling this change out across Google’s entire codebase and
refactoring every one of our millions of build targets to explicitly list their
dependencies was a multiyear effort, but it was well worth it. Our builds are
now much faster given that targets have fewer unnecessary dependencies, and
engineers are empowered to remove dependencies they don’t need without worrying
about breaking targets that depend on them.

As usual, enforcing strict transitive dependencies involved a trade-off. It made
build files more verbose, as frequently used libraries now need to be listed
explicitly in many places rather than pulled in incidentally, and engineers
needed to spend more effort adding dependencies to `BUILD` files. We’ve since
developed tools that reduce this toil by automatically detecting many missing
dependencies and adding them to `BUILD` files without any developer
intervention. But even without such tools, we’ve found the trade-off to be well
worth it as the codebase scales: explicitly adding a dependency to a `BUILD`
file is a one-time cost, but dealing with implicit transitive dependencies can
cause ongoing problems as long as the build target exists. Bazel [enforces
strict transitive
dependencies](https://blog.bazel.build/2017/06/28/sjd-unused_deps.html)
on Java code by default.

### External dependencies

If a dependency isn’t internal, it must be external. External dependencies are
those on artifacts that are built and stored outside of the build system. The
dependency is imported directly from an artifact repository (typically accessed
over the internet) and used as-is rather than being built from source. One of
the biggest differences between external and internal dependencies is that
external dependencies have versions, and those versions exist independently of
the project’s source code.

### Automatic versus manual dependency management

Build systems can allow the versions of external dependencies to be managed
either manually or automatically. When managed manually, the buildfile
explicitly lists the version it wants to download from the artifact repository,
often using a [semantic version string](https://semver.org/) such
as `1.1.4`. When managed automatically, the source file specifies a range of
acceptable versions, and the build system always downloads the latest one. For
example, Gradle allows a dependency version to be declared as “1.+” to specify
that any minor or patch version of a dependency is acceptable so long as the
major version is 1.

Automatically managed dependencies can be convenient for small projects, but
they’re usually a recipe for disaster on projects of nontrivial size or that are
being worked on by more than one engineer. The problem with automatically
managed dependencies is that you have no control over when the version is
updated. There’s no way to guarantee that external parties won’t make breaking
updates (even when they claim to use semantic versioning), so a build that
worked one day might be broken the next with no easy way to detect what changed
or to roll it back to a working state.
Even if the build doesn’t break, there
can be subtle behavior or performance changes that are impossible to track down.

In contrast, because manually managed dependencies require a change in source
control, they can be easily discovered and rolled back, and it’s possible to
check out an older version of the repository to build with older dependencies.
Bazel requires that versions of all dependencies be specified manually. At even
moderate scales, the overhead of manual version management is well worth it for
the stability it provides.

### The One-Version Rule

Different versions of a library are usually represented by different artifacts,
so in theory there’s no reason that different versions of the same external
dependency couldn’t both be declared in the build system under different names.
That way, each target could choose which version of the dependency it wanted to
use. This causes a lot of problems in practice, so Google enforces a strict
[One-Version
Rule](https://opensource.google/docs/thirdparty/oneversion/) for
all third-party dependencies in our codebase.

The biggest problem with allowing multiple versions is the diamond dependency
issue. Suppose that target A depends on target B and on v1 of an external
library. If target B is later refactored to add a dependency on v2 of the same
external library, target A will break because it now depends implicitly on two
different versions of the same library. Effectively, it’s never safe to add a
new dependency from a target to any third-party library with multiple versions,
because any of that target’s users could already be depending on a different
version. Following the One-Version Rule makes this conflict impossible—if a
target adds a dependency on a third-party library, any existing dependencies
will already be on that same version, so they can happily coexist.

### Transitive external dependencies

Dealing with the transitive dependencies of an external dependency can be
particularly difficult. Many artifact repositories, such as Maven Central, allow
artifacts to specify dependencies on particular versions of other artifacts in
the repository. Build tools like Maven or Gradle often recursively download each
transitive dependency by default, meaning that adding a single dependency in
your project could potentially cause dozens of artifacts to be downloaded in
total.

This is very convenient: when adding a dependency on a new library, it would be
a big pain to have to track down each of that library’s transitive dependencies
and add them all manually. But there’s also a huge downside: because different
libraries can depend on different versions of the same third-party library, this
strategy necessarily violates the One-Version Rule and leads to the diamond
dependency problem. If your target depends on two external libraries that use
different versions of the same dependency, there’s no telling which one you’ll
get. This also means that updating an external dependency could cause seemingly
unrelated failures throughout the codebase if the new version begins pulling in
conflicting versions of some of its dependencies.

Bazel did not originally download transitive dependencies automatically.
Instead, it used a `WORKSPACE` file that required all transitive dependencies to
be listed, which led to a lot of pain when managing external dependencies. Bazel
has since added support for automatic transitive external dependency management
in the form of the `MODULE.bazel` file.
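
As a rough sketch (the module name and version strings below are purely
illustrative, not recommendations), a `MODULE.bazel` file declares only the
project’s direct external dependencies, and Bazel resolves the transitive
closure:

```python
# MODULE.bazel -- illustrative sketch; names and versions are examples only.
module(
    name = "my_project",
    version = "1.0.0",
)

# Direct external dependencies; their own dependencies are resolved
# automatically from the module registry.
bazel_dep(name = "rules_java", version = "7.6.1")
bazel_dep(name = "protobuf", version = "27.0")
```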
See [external dependency -overview](/external/overview) for more details. - -Yet again, the choice here is one between convenience and scalability. Small -projects might prefer not having to worry about managing transitive dependencies -themselves and might be able to get away with using automatic transitive -dependencies. This strategy becomes less and less appealing as the organization -and codebase grows, and conflicts and unexpected results become more and more -frequent. At larger scales, the cost of manually managing dependencies is much -less than the cost of dealing with issues caused by automatic dependency -management. - -### Caching build results using external dependencies - -External dependencies are most often provided by third parties that release -stable versions of libraries, perhaps without providing source code. Some -organizations might also choose to make some of their own code available as -artifacts, allowing other pieces of code to depend on them as third-party rather -than internal dependencies. This can theoretically speed up builds if artifacts -are slow to build but quick to download. - -However, this also introduces a lot of overhead and complexity: someone needs to -be responsible for building each of those artifacts and uploading them to the -artifact repository, and clients need to ensure that they stay up to date with -the latest version. Debugging also becomes much more difficult because different -parts of the system will have been built from different points in the -repository, and there is no longer a consistent view of the source tree. - -A better way to solve the problem of artifacts taking a long time to build is to -use a build system that supports remote caching, as described earlier. Such a -build system saves the resulting artifacts from every build to a location that -is shared across engineers, so if a developer depends on an artifact that was -recently built by someone else, the build system automatically downloads it -instead of building it. This provides all of the performance benefits of -depending directly on artifacts while still ensuring that builds are as -consistent as if they were always built from the same source. This is the -strategy used internally by Google, and Bazel can be configured to use a remote -cache. - -### Security and reliability of external dependencies - -Depending on artifacts from third-party sources is inherently risky. There’s an -availability risk if the third-party source (such as an artifact repository) -goes down, because your entire build might grind to a halt if it’s unable to -download an external dependency. There’s also a security risk: if the -third-party system is compromised by an attacker, the attacker could replace the -referenced artifact with one of their own design, allowing them to inject -arbitrary code into your build. Both problems can be mitigated by mirroring any -artifacts you depend on onto servers you control and blocking your build system -from accessing third-party artifact repositories like Maven Central. The -trade-off is that these mirrors take effort and resources to maintain, so the -choice of whether to use them often depends on the scale of the project. The -security issue can also be completely prevented with little overhead by -requiring the hash of each third-party artifact to be specified in the source -repository, causing the build to fail if the artifact is tampered with. Another -alternative that completely sidesteps the issue is to vendor your project’s -dependencies. 
When a project vendors its dependencies, it checks them into -source control alongside the project’s source code, either as source or as -binaries. This effectively means that all of the project’s external dependencies -are converted to internal dependencies. Google uses this approach internally, -checking every third-party library referenced throughout Google into a -`third_party` directory at the root of Google’s source tree. However, this works -at Google only because Google’s source control system is custom built to handle -an extremely large monorepo, so vendoring might not be an option for all -organizations. diff --git a/8.3.1/basics/distributed-builds.mdx b/8.3.1/basics/distributed-builds.mdx deleted file mode 100644 index c32f44f..0000000 --- a/8.3.1/basics/distributed-builds.mdx +++ /dev/null @@ -1,137 +0,0 @@ ---- -title: 'Distributed Builds' ---- - - - -When you have a large codebase, chains of dependencies can become very deep. -Even simple binaries can often depend on tens of thousands of build targets. At -this scale, it’s simply impossible to complete a build in a reasonable amount -of time on a single machine: no build system can get around the fundamental -laws of physics imposed on a machine’s hardware. The only way to make this work -is with a build system that supports distributed builds wherein the units of -work being done by the system are spread across an arbitrary and scalable -number of machines. Assuming we’ve broken the system’s work into small enough -units (more on this later), this would allow us to complete any build of any -size as quickly as we’re willing to pay for. This scalability is the holy grail -we’ve been working toward by defining an artifact-based build system. - -## Remote caching - -The simplest type of distributed build is one that only leverages _remote -caching_, which is shown in Figure 1. - -[![Distributed build with remote caching](/images/distributed-build-remote-cache.png)](/images/distributed-build-remote-cache.png) - -**Figure 1**. A distributed build showing remote caching - -Every system that performs builds, including both developer workstations and -continuous integration systems, shares a reference to a common remote cache -service. This service might be a fast and local short-term storage system like -Redis or a cloud service like Google Cloud Storage. Whenever a user needs to -build an artifact, whether directly or as a dependency, the system first checks -with the remote cache to see if that artifact already exists there. If so, it -can download the artifact instead of building it. If not, the system builds the -artifact itself and uploads the result back to the cache. This means that -low-level dependencies that don’t change very often can be built once and shared -across users rather than having to be rebuilt by each user. At Google, many -artifacts are served from a cache rather than built from scratch, vastly -reducing the cost of running our build system. - -For a remote caching system to work, the build system must guarantee that builds -are completely reproducible. That is, for any build target, it must be possible -to determine the set of inputs to that target such that the same set of inputs -will produce exactly the same output on any machine. This is the only way to -ensure that the results of downloading an artifact are the same as the results -of building it oneself. 
Note that this requires that each artifact in the cache -be keyed on both its target and a hash of its inputs—that way, different -engineers could make different modifications to the same target at the same -time, and the remote cache would store all of the resulting artifacts and serve -them appropriately without conflict. - -Of course, for there to be any benefit from a remote cache, downloading an -artifact needs to be faster than building it. This is not always the case, -especially if the cache server is far from the machine doing the build. Google’s -network and build system is carefully tuned to be able to quickly share build -results. - -## Remote execution - -Remote caching isn’t a true distributed build. If the cache is lost or if you -make a low-level change that requires everything to be rebuilt, you still need -to perform the entire build locally on your machine. The true goal is to support -remote execution, in which the actual work of doing the build can be spread -across any number of workers. Figure 2 depicts a remote execution system. - -[![Remote execution system](/images/remote-execution-system.png)](/images/remote-execution-system.png) - -**Figure 2**. A remote execution system - -The build tool running on each user’s machine (where users are either human -engineers or automated build systems) sends requests to a central build master. -The build master breaks the requests into their component actions and schedules -the execution of those actions over a scalable pool of workers. Each worker -performs the actions asked of it with the inputs specified by the user and -writes out the resulting artifacts. These artifacts are shared across the other -machines executing actions that require them until the final output can be -produced and sent to the user. - -The trickiest part of implementing such a system is managing the communication -between the workers, the master, and the user’s local machine. Workers might -depend on intermediate artifacts produced by other workers, and the final output -needs to be sent back to the user’s local machine. To do this, we can build on -top of the distributed cache described previously by having each worker write -its results to and read its dependencies from the cache. The master blocks -workers from proceeding until everything they depend on has finished, in which -case they’ll be able to read their inputs from the cache. The final product is -also cached, allowing the local machine to download it. Note that we also need a -separate means of exporting the local changes in the user’s source tree so that -workers can apply those changes before building. - -For this to work, all of the parts of the artifact-based build systems described -earlier need to come together. Build environments must be completely -self-describing so that we can spin up workers without human intervention. Build -processes themselves must be completely self-contained because each step might -be executed on a different machine. Outputs must be completely deterministic so -that each worker can trust the results it receives from other workers. Such -guarantees are extremely difficult for a task-based system to provide, which -makes it nigh-impossible to build a reliable remote execution system on top of -one. - -## Distributed builds at Google - -Since 2008, Google has been using a distributed build system that employs both -remote caching and remote execution, which is illustrated in Figure 3. 
- -[![High-level build system](/images/high-level-build-system.png)](/images/high-level-build-system.png) - -**Figure 3**. Google’s distributed build system - -Google’s remote cache is called ObjFS. It consists of a backend that stores -build outputs in Bigtables distributed throughout our fleet of production -machines and a frontend FUSE daemon named objfsd that runs on each developer’s -machine. The FUSE daemon allows engineers to browse build outputs as if they -were normal files stored on the workstation, but with the file content -downloaded on-demand only for the few files that are directly requested by the -user. Serving file contents on-demand greatly reduces both network and disk -usage, and the system is able to build twice as fast compared to when we stored -all build output on the developer’s local disk. - -Google’s remote execution system is called Forge. A Forge client in Blaze -(Bazel's internal equivalent) called -the Distributor sends requests for each action to a job running in our -datacenters called the Scheduler. The Scheduler maintains a cache of action -results, allowing it to return a response immediately if the action has already -been created by any other user of the system. If not, it places the action into -a queue. A large pool of Executor jobs continually read actions from this queue, -execute them, and store the results directly in the ObjFS Bigtables. These -results are available to the executors for future actions, or to be downloaded -by the end user via objfsd. - -The end result is a system that scales to efficiently support all builds -performed at Google. And the scale of Google’s builds is truly massive: Google -runs millions of builds executing millions of test cases and producing petabytes -of build outputs from billions of lines of source code every day. Not only does -such a system let our engineers build complex codebases quickly, it also allows -us to implement a huge number of automated tools and systems that rely on our -build. diff --git a/8.3.1/basics/hermeticity.mdx b/8.3.1/basics/hermeticity.mdx deleted file mode 100644 index 282aad8..0000000 --- a/8.3.1/basics/hermeticity.mdx +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: 'Hermeticity' ---- - - - -This page covers hermeticity, the benefits of using hermetic builds, and -strategies for identifying non-hermetic behavior in your builds. - -## Overview - -When given the same input source code and product configuration, a hermetic -build system always returns the same output by isolating the build from changes -to the host system. - -In order to isolate the build, hermetic builds are insensitive to libraries and -other software installed on the local or remote host machine. They depend on -specific versions of build tools, such as compilers, and dependencies, such as -libraries. This makes the build process self-contained as it doesn't rely on -services external to the build environment. - -The two important aspects of hermeticity are: - -* **Isolation**: Hermetic build systems treat tools as source code. They - download copies of tools and manage their storage and use inside managed file - trees. This creates isolation between the host machine and local user, - including installed versions of languages. -* **Source identity**: Hermetic build systems try to ensure the sameness of - inputs. Code repositories, such as Git, identify sets of code mutations with a - unique hash code. Hermetic build systems use this hash to identify changes to - the build's input. 
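
To make the *source identity* idea concrete, here is a toy sketch (in no way
Bazel's actual implementation or cache-key format) of keying an action's result
by a digest of its tool and input contents, so that any change to either yields
a new key:

```python
# Toy sketch of input-keyed identity; not Bazel's real cache-key format.
import hashlib

def action_key(tool_fingerprint: str, input_paths: list[str]) -> str:
    h = hashlib.sha256()
    h.update(tool_fingerprint.encode("utf-8"))
    # Sort so the key does not depend on traversal order.
    for path in sorted(input_paths):
        with open(path, "rb") as f:
            h.update(hashlib.sha256(f.read()).digest())
    return h.hexdigest()
```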

## Benefits

The major benefits of hermetic builds are:

* **Speed**: The output of an action can be cached, and the action need not be
  run again unless inputs change.
* **Parallel execution**: For given input and output, the build system can
  construct a graph of all actions to calculate efficient and parallel
  execution. The build system loads the rules and calculates an action graph
  and hash inputs to look up in the cache.
* **Multiple builds**: You can build multiple hermetic builds on the same
  machine, each build using different tools and versions.
* **Reproducibility**: Hermetic builds are good for troubleshooting because you
  know the exact conditions that produced the build.

## Identifying non-hermeticity

If you are preparing to switch to Bazel, migration is easier if you improve
your existing builds' hermeticity in advance. Some common sources of
non-hermeticity in builds are:

* Arbitrary processing in `.mk` files
* Actions or tooling that create files non-deterministically, usually involving
  build IDs or timestamps
* System binaries that differ across hosts (such as `/usr/bin` binaries, absolute
  paths, system C++ compilers for native C++ rules autoconfiguration)
* Writing to the source tree during the build. This prevents the same source
  tree from being used for another target. The first build writes to the source
  tree, fixing the source tree for target A. Then trying to build target B may
  fail.

## Troubleshooting non-hermetic builds

Starting with local execution, issues that affect local cache hits reveal
non-hermetic actions.

* Ensure null sequential builds: If you run `make` and get a successful build,
  running the build again should not rebuild any targets. If you run each build
  step twice or on different systems and a hash of the file contents differs,
  the build is not reproducible (a small probe for this check is sketched at
  the end of this section).
* Run steps to
  [debug local cache hits](/remote/cache-remote#troubleshooting-cache-hits)
  from a variety of potential client machines to ensure that you catch any
  cases of client environment leaking into the actions.
* Execute a build within a Docker container that contains nothing but the
  checked-out source tree and an explicit list of host tools. Build breakages
  and error messages will catch implicit system dependencies.
* Discover and fix hermeticity problems using
  [remote execution rules](/remote/rules#overview).
* Enable strict [sandboxing](/docs/sandboxing)
  at the per-action level, since actions in a build can be stateful and affect
  the build or the output.
* [Workspace rules](/remote/workspace)
  allow developers to add dependencies to external workspaces, but they are
  rich enough to allow arbitrary processing to happen in the process. You can
  get a log of some potentially non-hermetic actions in Bazel workspace rules by
  adding the flag
  `--experimental_workspace_rules_log_file=PATH` to
  your Bazel command.

Note: Make your build fully hermetic when mixing remote and local execution,
using Bazel’s “dynamic strategy” functionality. Running Bazel inside the remote
Docker container will enable the build to execute the same in both environments.
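
Here is the reproducibility probe mentioned in the list above, as a small
hypothetical script; the target label and output path are placeholders for your
own build:

```python
# repro_check.py -- hypothetical probe; TARGET and OUTPUT are placeholders.
import hashlib
import pathlib
import subprocess

TARGET = "//pkg:target"                        # placeholder label
OUTPUT = pathlib.Path("bazel-bin/pkg/target")  # placeholder output path

def build_and_digest() -> str:
    # Build from a clean state so cached state cannot mask non-hermeticity.
    subprocess.run(["bazel", "clean"], check=True)
    subprocess.run(["bazel", "build", TARGET], check=True)
    return hashlib.sha256(OUTPUT.read_bytes()).hexdigest()

first = build_and_digest()
second = build_and_digest()
print("reproducible" if first == second
      else "non-hermetic output: %s != %s" % (first, second))
```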
- -## Hermeticity with Bazel - -For more information about how other projects have had success using hermetic -builds with Bazel, see these BazelCon talks: - -* [Building Real-time Systems with Bazel](https://www.youtube.com/watch?v=t_3bckhV_YI) (SpaceX) -* [Bazel Remote Execution and Remote Caching](https://www.youtube.com/watch?v=_bPyEbAyC0s) (Uber and TwoSigma) -* [Faster Builds With Remote Execution and Caching](https://www.youtube.com/watch?v=MyuJRUwT5LI) -* [Fusing Bazel: Faster Incremental Builds](https://www.youtube.com/watch?v=rQd9Zd1ONOw) -* [Remote Execution vs Local Execution](https://www.youtube.com/watch?v=C8wHmIln--g) -* [Improving the Usability of Remote Caching](https://www.youtube.com/watch?v=u5m7V3ZRHLA) (IBM) -* [Building Self Driving Cars with Bazel](https://www.youtube.com/watch?v=Gh4SJuYUoQI&list=PLxNYxgaZ8Rsf-7g43Z8LyXct9ax6egdSj&index=4&t=0s) (BMW) -* [Building Self Driving Cars with Bazel + Q&A](https://www.youtube.com/watch?v=fjfFe98LTm8&list=PLxNYxgaZ8Rsf-7g43Z8LyXct9ax6egdSj&index=29) (GM Cruise) diff --git a/8.3.1/basics/index.mdx b/8.3.1/basics/index.mdx deleted file mode 100644 index f3c833f..0000000 --- a/8.3.1/basics/index.mdx +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: 'Build Basics' ---- - - - -A build system is one of the most important parts of an engineering organization -because each developer interacts with it potentially dozens or hundreds of times -per day. A fully featured build system is necessary to enable developer -productivity as an organization scales. For individual developers, it's -straightforward to just compile your code and so a build system might seem -excessive. But at a larger scale, having a build system helps with managing -shared dependencies, such as relying on another part of the code base, or an -external resource, such as a library. Build systems help to make sure that you -have everything you need to build your code before it starts building. Build -systems also increase velocity when they're set up to help engineers share -resources and results. - -This section covers some history and basics of building and build systems, -including design decisions that went into making Bazel. If you're -familiar with artifact-based build systems, such as Bazel, Buck, and Pants, you -can skip this section, but it's a helpful overview to understand why -artifact-based build systems are excellent at enabling scale. - -Note: Much of this section's content comes from the _Build Systems and -Build Philosophy_ chapter of the -[_Software Engineering at Google_ book](https://abseil.io/resources/swe-book/html/ch18.html). -Thank you to the original author, Erik Kuefler, for allowing its reuse and -modification here! - -* **[Why a Build System?](/basics/build-systems)** - - If you haven't used a build system before, start here. This page covers why - you should use a build system, and why compilers and build scripts aren't - the best choice once your organization starts to scale beyond a few - developers. - -* **[Task-Based Build Systems](/basics/task-based-builds)** - - This page discusses task-based build systems (such as Make, Maven, and - Gradle) and some of their challenges. - -* **[Artifact-Based Build Systems](/basics/artifact-based-builds)** - - This page discusses artifact-based build systems in response to the pain - points of task-based build systems. - -* **[Distributed Builds](/basics/distributed-builds)** - - This page covers distributed builds, or builds that are executed outside of - your local machine. 
This requires more robust infrastructure to share - resources and build results (and is where the true wizardry happens!) - -* **[Dependency Management](/basics/dependencies)** - - This page covers some complications of dependencies at a large scale and - strategies to counteract those complications. diff --git a/8.3.1/basics/task-based-builds.mdx b/8.3.1/basics/task-based-builds.mdx deleted file mode 100644 index 9dd3f8c..0000000 --- a/8.3.1/basics/task-based-builds.mdx +++ /dev/null @@ -1,216 +0,0 @@ ---- -title: 'Task-Based Build Systems' ---- - - - -This page covers task-based build systems, how they work, and some of the -complications that can occur with task-based systems. After shell scripts, -task-based build systems are the next logical evolution of building. - - -## Understanding task-based build systems - -In a task-based build system, the fundamental unit of work is the task. Each -task is a script that can execute any sort of logic, and tasks specify other -tasks as dependencies that must run before them. Most major build systems in use -today, such as Ant, Maven, Gradle, Grunt, and Rake, are task based. Instead of -shell scripts, most modern build systems require engineers to create build files -that describe how to perform the build. - -Take this example from the -[Ant manual](https://ant.apache.org/manual/using.html): - -```xml -<project name="MyProject" default="dist" basedir="."> - <description> - simple example build file - </description> - <!-- set global properties for this build --> - <property name="src" location="src"/> - <property name="build" location="build"/> - <property name="dist" location="dist"/> - - <target name="init"> - <!-- Create the time stamp --> - <tstamp/> - <!-- Create the build directory structure used by compile --> - <mkdir dir="${build}"/> - </target> - - <target name="compile" depends="init" description="compile the source"> - <!-- Compile the Java code from ${src} into ${build} --> - <javac srcdir="${src}" destdir="${build}"/> - </target> - - <target name="dist" depends="compile" description="generate the distribution"> - <!-- Create the distribution directory --> - <mkdir dir="${dist}/lib"/> - <!-- Put everything in ${build} into the MyProject-${DSTAMP}.jar file --> - <jar jarfile="${dist}/lib/MyProject-${DSTAMP}.jar" basedir="${build}"/> - </target> - - <target name="clean" description="clean up"> - <!-- Delete the ${build} and ${dist} directory trees --> - <delete dir="${build}"/> - <delete dir="${dist}"/> - </target> -</project> -``` - -The buildfile is written in XML and defines some simple metadata about the build -along with a list of tasks (the `<target>` tags in the XML). (Ant uses the word -_target_ to represent a _task_, and it uses the word _task_ to refer to -_commands_.) Each task executes a list of possible commands defined by Ant, -which here include creating and deleting directories, running `javac`, and -creating a JAR file. This set of commands can be extended by user-provided -plug-ins to cover any sort of logic. Each task can also define the tasks it -depends on via the `depends` attribute. These dependencies form an acyclic graph, -as seen in Figure 1. - -[![Acyclic graph showing dependencies](/images/task-dependencies.png)](/images/task-dependencies.png) - -Figure 1. An acyclic graph showing dependencies - -Users perform builds by providing tasks to Ant’s command-line tool. For example, -when a user types `ant dist`, Ant takes the following steps: - -1. Loads a file named `build.xml` in the current directory and parses it to - create the graph structure shown in Figure 1. -1. Looks for the task named `dist` that was provided on the command line and - discovers that it has a dependency on the task named `compile`. -1. Looks for the task named `compile` and discovers that it has a dependency on - the task named `init`. -1. Looks for the task named `init` and discovers that it has no dependencies. -1. Executes the commands defined in the `init` task. -1. Executes the commands defined in the `compile` task given that all of that - task’s dependencies have been run. -1. Executes the commands defined in the `dist` task given that all of that - task’s dependencies have been run. - -In the end, the code executed by Ant when running the `dist` task is equivalent -to the following shell script: - -```posix-terminal -./createTimestamp.sh - -mkdir build/ - -javac src/* -d build/ - -mkdir -p dist/lib/ - -jar cf dist/lib/MyProject-$(date --iso-8601).jar build/* ``` - -When the syntax is stripped away, the buildfile and the build script actually -aren’t too different.
But we’ve already gained a lot by doing this. We can -create new buildfiles in other directories and link them together. We can easily -add new tasks that depend on existing tasks in arbitrary and complex ways. We -need only pass the name of a single task to the `ant` command-line tool, and it -determines everything that needs to be run. - -Ant is an old piece of software, originally released in 2000. Other tools like -Maven and Gradle have improved on Ant in the intervening years and essentially -replaced it by adding features like automatic management of external -dependencies and a cleaner syntax without any XML. But the nature of these newer -systems remains the same: they allow engineers to write build scripts in a -principled and modular way as tasks and provide tools for executing those tasks -and managing dependencies among them. - -## The dark side of task-based build systems - -Because these tools essentially let engineers define any script as a task, they -are extremely powerful, allowing you to do pretty much anything you can imagine -with them. But that power comes with drawbacks, and task-based build systems can -become difficult to work with as their build scripts grow more complex. The -problem with such systems is that they actually end up giving _too much power to -engineers and not enough power to the system_. Because the system has no idea -what the scripts are doing, performance suffers, as it must be very conservative -in how it schedules and executes build steps. And there’s no way for the system -to confirm that each script is doing what it should, so scripts tend to grow in -complexity and end up being another thing that needs debugging. - -### Difficulty of parallelizing build steps - -Modern development workstations are quite powerful, with multiple cores that are -capable of executing several build steps in parallel. But task-based systems are -often unable to parallelize task execution even when it seems like they should -be able to. Suppose that task A depends on tasks B and C. Because tasks B and C -have no dependency on each other, is it safe to run them at the same time so -that the system can more quickly get to task A? Maybe, if they don’t touch any -of the same resources. But maybe not—perhaps both use the same file to track -their statuses and running them at the same time causes a conflict. There’s no -way in general for the system to know, so either it has to risk these conflicts -(leading to rare but very difficult-to-debug build problems), or it has to -restrict the entire build to running on a single thread in a single process. -This can be a huge waste of a powerful developer machine, and it completely -rules out the possibility of distributing the build across multiple machines. - -### Difficulty performing incremental builds - -A good build system allows engineers to perform reliable incremental builds such -that a small change doesn’t require the entire codebase to be rebuilt from -scratch. This is especially important if the build system is slow and unable to -parallelize build steps for the aforementioned reasons. But unfortunately, -task-based build systems struggle here, too. Because tasks can do anything, -there’s no way in general to check whether they’ve already been done. Many tasks -simply take a set of source files and run a compiler to create a set of -binaries; thus, they don’t need to be rerun if the underlying source files -haven’t changed. 
But without additional information, the system can’t say this -for sure—maybe the task downloads a file that could have changed, or maybe it -writes a timestamp that could be different on each run. To guarantee -correctness, the system typically must rerun every task during each build. Some -build systems try to enable incremental builds by letting engineers specify the -conditions under which a task needs to be rerun. Sometimes this is feasible, but -often it’s a much trickier problem than it appears. For example, in languages -like C++ that allow files to be included directly by other files, it’s -impossible to determine the entire set of files that must be watched for changes -without parsing the input sources. Engineers often end up taking shortcuts, and -these shortcuts can lead to rare and frustrating problems where a task result is -reused even when it shouldn’t be. When this happens frequently, engineers get -into the habit of running clean before every build to get a fresh state, -completely defeating the purpose of having an incremental build in the first -place. Figuring out when a task needs to be rerun is surprisingly subtle, and is -a job better handled by machines than humans. - -### Difficulty maintaining and debugging scripts - -Finally, the build scripts imposed by task-based build systems are often just -difficult to work with. Though they often receive less scrutiny, build scripts -are code just like the system being built, and are easy places for bugs to hide. -Here are some examples of bugs that are very common when working with a -task-based build system: - -* Task A depends on task B to produce a particular file as output. The owner - of task B doesn’t realize that other tasks rely on it, so they change it to - produce output in a different location. This can’t be detected until someone - tries to run task A and finds that it fails. -* Task A depends on task B, which depends on task C, which is producing a - particular file as output that’s needed by task A. The owner of task B - decides that it doesn’t need to depend on task C any more, which causes task - A to fail even though task B doesn’t care about task C at all! -* The developer of a new task accidentally makes an assumption about the - machine running the task, such as the location of a tool or the value of - particular environment variables. The task works on their machine, but fails - whenever another developer tries it. -* A task contains a nondeterministic component, such as downloading a file - from the internet or adding a timestamp to a build. Now, people get - potentially different results each time they run the build, meaning that - engineers won’t always be able to reproduce and fix one another’s failures - or failures that occur on an automated build system. -* Tasks with multiple dependencies can create race conditions. If task A - depends on both task B and task C, and task B and C both modify the same - file, task A gets a different result depending on which one of tasks B and C - finishes first. - -There’s no general-purpose way to solve these performance, correctness, or -maintainability problems within the task-based framework laid out here. So long -as engineers can write arbitrary code that runs during the build, the system -can’t have enough information to always be able to run builds quickly and -correctly. 
To solve the problem, we need to take some power out of the hands of -engineers and put it back in the hands of the system and reconceptualize the -role of the system not as running tasks, but as producing artifacts. - -This approach led to the creation of artifact-based build systems, like Blaze -and Bazel. diff --git a/8.3.1/brand/index.mdx b/8.3.1/brand/index.mdx deleted file mode 100644 index 2a21cd4..0000000 --- a/8.3.1/brand/index.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: 'Bazel Brand Guidelines' ---- - - - -The Bazel trademark and logo ("Bazel Trademarks") are trademarks of Google, and -are treated separately from the copyright or patent license grants contained in -the Apache-licensed Bazel repositories on GitHub. Any use of the Bazel -Trademarks other than those permitted in these guidelines must be approved in -advance. - -## Purpose of the Brand Guidelines - -These guidelines exist to ensure that the Bazel project can share its technology -under open source licenses while making sure that the "Bazel" brand is protected -as a meaningful source identifier in a way that's consistent with trademark law. -By adhering to these guidelines, you help to promote the freedom to use and -develop high-quality Bazel technology. - -## Acceptable Uses - -Given the open nature of Bazel, you may use the Bazel trademark to refer to the -project without prior written permission. Examples of these approved references -include the following: - -* To refer to the Bazel Project itself; -* To link to bazel.build; -* To refer to unmodified source code or other files shared by the Bazel - repositories on GitHub; -* In blog posts, news articles, or educational materials about Bazel; -* To accurately identify that your design or implementation is based on, is - for use with, or is compatible with Bazel technology. - -Examples: - -* \[Your Product\] for Bazel -* \[Your Product\] is compatible with Bazel -* \[XYZ\] Conference for Bazel Users - -## General Guidelines - -* The Bazel name may never be used or registered in a manner that would cause - confusion as to Google's sponsorship, affiliation, or endorsement. -* Don't use the Bazel name as part of your company name, product name, domain - name, or social media profile. -* Other than as permitted by these guidelines, the Bazel name should not be - combined with other trademarks, terms, or source identifiers. -* Don't remove, distort or alter any element of the Bazel Trademarks. That - includes modifying the Bazel Trademark, for example, through hyphenation, - combination or abbreviation. Do not shorten, abbreviate, or create acronyms - out of the Bazel Trademarks. -* Don't display the word Bazel using any different stylization, color, or font - from the surrounding text. -* Don't use the term Bazel as a verb or use it in possessive form. -* Don't use the Bazel logo on any website, product UI, or promotional - materials without prior written permission from - [product@bazel.build](mailto:product@bazel.build). - -## Usage for Events and Community Groups - -The Bazel word mark may be used referentially in events, community groups, or -other gatherings related to the Bazel build system, but it may not be used in a -manner that implies official status or endorsement. - -Examples of appropriate naming conventions are: - -* \[XYZ\] Bazel User Group -* Bazel Community Day at \[XYZ\] -* \[XYZ\] Conference for Bazel Users - -where \[XYZ\] represents the location and optionally other wordings. 
- -Any naming convention that may imply official status or endorsement requires -review for approval from [product@bazel.build](mailto:product@bazel.build). - -Examples of naming conventions that require prior written permission: - -* BazelCon -* Bazel Conference - -## Contact Us - -Please do not hesitate to contact us at -[product@bazel.build](mailto:product@bazel.build) if you are unsure whether your -intended use of the Bazel Trademarks is in compliance with these guidelines, or -to ask for permission to use the Bazel Trademarks, clearly describing the -intended usage and duration. diff --git a/8.3.1/build/share-variables.mdx b/8.3.1/build/share-variables.mdx deleted file mode 100644 index b248034..0000000 --- a/8.3.1/build/share-variables.mdx +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: 'Sharing Variables' ---- - - - -`BUILD` files are intended to be simple and declarative. They will typically -consist of a series of target declarations. As your code base and your `BUILD` -files get larger, you will probably notice some duplication, such as: - -``` python -cc_library( - name = "foo", - copts = ["-DVERSION=5"], - srcs = ["foo.cc"], -) - -cc_library( - name = "bar", - copts = ["-DVERSION=5"], - srcs = ["bar.cc"], - deps = [":foo"], -) -``` - -Code duplication in `BUILD` files is usually fine. This can make the file more -readable: each declaration can be read and understood without any context. This -is important, not only for humans, but also for external tools. For example, a -tool might be able to read and update `BUILD` files to add missing dependencies. -Code refactoring and code reuse might prevent this kind of automated -modification. - -If it is useful to share values (for example, if values must be kept in sync), -you can introduce a variable: - -``` python -COPTS = ["-DVERSION=5"] - -cc_library( - name = "foo", - copts = COPTS, - srcs = ["foo.cc"], -) - -cc_library( - name = "bar", - copts = COPTS, - srcs = ["bar.cc"], - deps = [":foo"], -) -``` - -Multiple declarations now use the value `COPTS`. By convention, use uppercase -letters to name global constants. - -## Sharing variables across multiple BUILD files - -If you need to share a value across multiple `BUILD` files, you have to put it -in a `.bzl` file. `.bzl` files contain definitions (variables and functions) -that can be used in `BUILD` files. - -In `path/to/variables.bzl`, write: - -``` python -COPTS = ["-DVERSION=5"] -``` - -Then, you can update your `BUILD` files to access the variable: - -``` python -load("//path/to:variables.bzl", "COPTS") - -cc_library( - name = "foo", - copts = COPTS, - srcs = ["foo.cc"], -) - -cc_library( - name = "bar", - copts = COPTS, - srcs = ["bar.cc"], - deps = [":foo"], -) -``` diff --git a/8.3.1/build/style-guide.mdx b/8.3.1/build/style-guide.mdx deleted file mode 100644 index 19a5216..0000000 --- a/8.3.1/build/style-guide.mdx +++ /dev/null @@ -1,232 +0,0 @@ ---- -title: 'BUILD Style Guide' ---- - - - -`BUILD` file formatting follows the same approach as Go, where a standardized -tool takes care of most formatting issues. -[Buildifier](https://github.com/bazelbuild/buildifier) is a tool that parses and -emits the source code in a standard style. Every `BUILD` file is therefore -formatted in the same automated way, which makes formatting a non-issue during -code reviews. It also makes it easier for tools to understand, edit, and -generate `BUILD` files. - -`BUILD` file formatting must match the output of `buildifier`. 
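In practice, that means running Buildifier over the repository, either rewriting files in place or verifying them in CI. A minimal sketch of both modes (see the [Buildifier](https://github.com/bazelbuild/buildifier) README for the authoritative flag list):

```posix-terminal
# Rewrite all BUILD, WORKSPACE, and .bzl files under the current directory.
buildifier -r .

# Check-only mode, useful in CI: exits non-zero if any file is misformatted.
buildifier -r -mode=check .
```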
- -## Formatting example - -```python -# Test code implementing the Foo controller. -package(default_testonly = True) - -py_test( - name = "foo_test", - srcs = glob(["*.py"]), - data = [ - "//data/production/foo:startfoo", - "//foo", - "//third_party/java/jdk:jdk-k8", - ], - flaky = True, - deps = [ - ":check_bar_lib", - ":foo_data_check", - ":pick_foo_port", - "//pyglib", - "//testing/pybase", - ], -) -``` - -## File structure - -**Recommendation**: Use the following order (every element is optional): - -* Package description (a comment) - -* All `load()` statements - -* The `package()` function - -* Calls to rules and macros - -Buildifier makes a distinction between a standalone comment and a comment -attached to an element. If a comment is not attached to a specific element, use -an empty line after it. The distinction is important when doing automated -changes (for example, to keep or remove a comment when deleting a rule). - -```python -# Standalone comment (such as to make a section in a file) - -# Comment for the cc_library below -cc_library(name = "cc") -``` - -## References to targets in the current package - -Files should be referred to by their paths relative to the package directory -(without ever using up-references, such as `..`). Generated files should be -prefixed with "`:`" to indicate that they are not sources. Source files -should not be prefixed with `:`. Rules should be prefixed with `:`. For -example, assuming `x.cc` is a source file: - -```python -cc_library( - name = "lib", - srcs = ["x.cc"], - hdrs = [":gen_header"], -) - -genrule( - name = "gen_header", - srcs = [], - outs = ["x.h"], - cmd = "echo 'int x();' > $@", -) -``` - -## Target naming - -Target names should be descriptive. If a target contains one source file, -the target should generally have a name derived from that source (for example, a -`cc_library` for `chat.cc` could be named `chat`, or a `java_library` for -`DirectMessage.java` could be named `direct_message`). - -The eponymous target for a package (the target with the same name as the -containing directory) should provide the functionality described by the -directory name. If there is no such target, do not create an eponymous -target. - -Prefer using the short name when referring to an eponymous target (`//x` -instead of `//x:x`). If you are in the same package, prefer the local -reference (`:x` instead of `//x`). - -Avoid using "reserved" target names which have special meaning. This includes -`all`, `__pkg__`, and `__subpackages__`; these names have special -semantics and can cause confusion and unexpected behaviors when they are used. - -In the absence of a prevailing team convention, these are some non-binding -recommendations that are broadly used at Google: - -* In general, use ["snake_case"](https://en.wikipedia.org/wiki/Snake_case) - * For a `java_library` with one `src` this means using a name that is not - the same as the filename without the extension - * For Java `*_binary` and `*_test` rules, use - ["Upper CamelCase"](https://en.wikipedia.org/wiki/Camel_case). - This allows for the target name to match one of the `src`s. For - `java_test`, this makes it possible for the `test_class` attribute to be - inferred from the name of the target. -* If there are multiple variants of a particular target then add a suffix to - disambiguate (such as
`:foo_dev`, `:foo_prod` or `:bar_x86`, `:bar_x64`) -* Suffix `_test` targets with `_test`, `_unittest`, `Test`, or `Tests` -* Avoid meaningless suffixes like `_lib` or `_library` (unless necessary to - avoid conflicts between a `_library` target and its corresponding `_binary`) -* For proto-related targets: - * `proto_library` targets should have names ending in `_proto` - * Language-specific `*_proto_library` rules should match the underlying - proto but replace `_proto` with a language-specific suffix such as: - * **`cc_proto_library`**: `_cc_proto` - * **`java_proto_library`**: `_java_proto` - * **`java_lite_proto_library`**: `_java_proto_lite` - -## Visibility - -Visibility should be scoped as tightly as possible, while still allowing access -by tests and reverse dependencies. Use `__pkg__` and `__subpackages__` as -appropriate. - -Avoid setting package `default_visibility` to `//visibility:public`. -`//visibility:public` should be individually set only for targets in the -project's public API. These could be libraries that are designed to be depended -on by external projects or binaries that could be used by an external project's -build process. - -## Dependencies - -Dependencies should be restricted to direct dependencies (dependencies -needed by the sources listed in the rule). Do not list transitive dependencies. - -Package-local dependencies should be listed first and referred to in a way -compatible with the -[References to targets in the current package](#targets-current-package) -section above (not by their absolute package name). - -Prefer to list dependencies directly, as a single list. Putting the "common" -dependencies of several targets into a variable reduces maintainability, makes -it impossible for tools to change the dependencies of a target, and can lead to -unused dependencies. - -## Globs - -Indicate "no targets" with `[]`. Do not use a glob that matches nothing: it -is more error-prone and less obvious than an empty list. - -### Recursive - -Do not use recursive globs to match source files (for example, -`glob(["**/*.java"])`). - -Recursive globs make `BUILD` files difficult to reason about because they skip -subdirectories containing `BUILD` files. - -Recursive globs are generally less efficient than having a `BUILD` file per -directory with a dependency graph defined between them, as this enables better -remote caching and parallelism. - -It is good practice to author a `BUILD` file in each directory and define a -dependency graph between them. - -### Non-recursive - -Non-recursive globs are generally acceptable. - -## Other conventions - - * Use uppercase and underscores to declare constants (such as `GLOBAL_CONSTANT`), - use lowercase and underscores to declare variables (such as `my_variable`). - - * Labels should never be split, even if they are longer than 79 characters. - Labels should be string literals whenever possible. *Rationale*: It makes - find and replace easy. It also improves readability. - - * The value of the name attribute should be a literal constant string (except - in macros). *Rationale*: External tools use the name attribute to refer to a - rule. They need to find rules without having to interpret code. - - * When setting boolean-type attributes, use boolean values, not integer values. - For legacy reasons, rules still convert integers to booleans as needed, - but this is discouraged. *Rationale*: `flaky = 1` could be misread as saying - "deflake this target by rerunning it once". `flaky = True` unambiguously says - "this test is flaky".
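A short sketch pulling several of these conventions together; the targets, paths, and constant are hypothetical:

```python
# Constant: uppercase with underscores.
DEFAULT_COPTS = ["-Wall"]

cc_test(
    name = "chat_test",  # literal string, snake_case, derived from the source name
    srcs = ["chat_test.cc"],
    copts = DEFAULT_COPTS,
    flaky = True,  # boolean value, not the legacy integer 1
    deps = [
        # A long label stays on one line, never split.
        "//some/very/long/package/path:chat_lib",
    ],
)
```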
- -## Differences with Python style guide - -Although compatibility with the -[Python style guide](https://www.python.org/dev/peps/pep-0008/) -is a goal, there are a few differences: - - * No strict line length limit. Long comments and long strings are often split - to 79 columns, but it is not required. It should not be enforced in code - reviews or presubmit scripts. *Rationale*: Labels can be long and exceed this - limit. It is common for `BUILD` files to be generated or edited by tools, - which does not go well with a line length limit. - - * Implicit string concatenation is not supported. Use the `+` operator. - *Rationale*: `BUILD` files contain many string lists. It is easy to forget a - comma, which leads to a completely different result. This has created many bugs - in the past. [See also this discussion.](https://lwn.net/Articles/551438/) - - * Use spaces around the `=` sign for keyword arguments in rules. *Rationale*: - Named arguments are much more frequent than in Python and are always on a - separate line. Spaces improve readability. This convention has been around - for a long time, and it is not worth modifying all existing `BUILD` files. - - * By default, use double quotation marks for strings. *Rationale*: This is not - specified in the Python style guide, but it recommends consistency. So we - decided to use only double-quoted strings. Many languages use double-quotes - for string literals. - - * Use a single blank line between two top-level definitions. *Rationale*: The - structure of a `BUILD` file is not like a typical Python file. It has only - top-level statements. Using a single blank line makes `BUILD` files shorter. diff --git a/8.3.1/community/recommended-rules.mdx b/8.3.1/community/recommended-rules.mdx deleted file mode 100644 index 86daa05..0000000 --- a/8.3.1/community/recommended-rules.mdx +++ /dev/null @@ -1,54 +0,0 @@ ---- -title: 'Recommended Rules' ---- - - - -In the documentation, we provide a list of -[recommended rules](/rules). - -This is a set of high-quality rules, which will provide a good experience to our -users. We make a distinction between the supported rules and the hundreds of -rules you can find on the Internet. - -## Nomination - -If a ruleset meets the requirements below, a rule maintainer can nominate it -to be part of the _recommended rules_ by filing a -[GitHub issue](https://github.com/bazelbuild/bazel/). - -After a review by the [Bazel core team](/contribute/policy), it -will be recommended on the Bazel website. - -## Requirements for the rule maintainers - -* The ruleset provides an important feature, useful to a large number of Bazel - users (for example, support for a widely popular language). -* The ruleset is well maintained. There must be at least two active maintainers. -* The ruleset is well documented, with examples, and easy to use. -* The ruleset follows the best practices and is performant (see - [the performance guide](/rules/performance)). -* The ruleset has sufficient test coverage. -* The ruleset is tested on - [BuildKite](https://github.com/bazelbuild/continuous-integration/blob/master/buildkite/README.md) - with the latest version of Bazel. Tests should always pass (when used as a - presubmit check). -* The ruleset is also tested with the upcoming incompatible changes. Breakages - should be fixed within two weeks. Migration issues should be reported to the - Bazel team quickly. - -## Requirements for Bazel developers - -* Recommended rules are frequently tested with Bazel at head (at least once a - day).
-* No change in Bazel may break a recommended rule (with the default set of - flags). If it happens, the change should be fixed or rolled back. - -## Demotion - -If there is a concern that a particular ruleset is no longer meeting the -requirements, a [GitHub issue](https://github.com/bazelbuild/bazel/) should be -filed. - -Rule maintainers will be contacted and need to respond within 2 weeks. Based on the -outcome, the Bazel core team might decide to demote the ruleset. diff --git a/8.3.1/community/remote-execution-services.mdx b/8.3.1/community/remote-execution-services.mdx deleted file mode 100644 index bede2b8..0000000 --- a/8.3.1/community/remote-execution-services.mdx +++ /dev/null @@ -1,28 +0,0 @@ ---- -title: 'Remote Execution Services' ---- - - - -Use the following services to run Bazel with remote execution: - -* Manual - - * Use the [gRPC protocol](https://github.com/bazelbuild/remote-apis) - directly to create your own remote execution service. - -* Self-service - - * [Buildbarn](https://github.com/buildbarn) - * [Buildfarm](https://github.com/bazelbuild/bazel-buildfarm) - * [BuildGrid](https://gitlab.com/BuildGrid/buildgrid) - * [NativeLink](https://github.com/TraceMachina/nativelink) - -* Commercial - - * [Aspect Build](https://www.aspect.build/) – Self-hosted remote cache and remote execution services. - * [Bitrise](https://bitrise.io/why/features/mobile-build-caching-for-better-build-test-performance) - Providing the world's leading mobile-first CI/CD and remote build caching platform. - * [BuildBuddy](https://www.buildbuddy.io) - Remote build execution, - caching, and results UI. - * [EngFlow Remote Execution](https://www.engflow.com) - Remote execution - and remote caching service with Build and Test UI. Can be self-hosted or hosted. diff --git a/8.3.1/community/roadmaps-starlark.mdx b/8.3.1/community/roadmaps-starlark.mdx deleted file mode 100644 index 5ce476d..0000000 --- a/8.3.1/community/roadmaps-starlark.mdx +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: 'Starlark Roadmap' ---- - - - -*Last verified: 2020-04-21* -([update history](https://github.com/bazelbuild/bazel-website/commits/master/roadmaps/starlark.md)) - -*Point of contact:* [laurentlb](https://github.com/laurentlb) - -## Goal - -Our goal is to make Bazel more extensible. Users should be able to easily -implement their own rules, and support new languages and tools. We want to -improve the experience of writing and maintaining those rules. - -We focus on two areas: - -* Make the language and API simple, yet powerful. -* Provide better tooling for reading, writing, updating, debugging, and testing the code. - - -## Q2 2020 - -Build health and Best practices: - -* P0. Discourage macros that don't have a name, and ensure the name is a unique - string literal. This work is focused on the Google codebase, but may impact - tooling available publicly. -* P0. Make Buildozer commands reliable with regard to selects and variables. -* P1. Make Buildifier remove duplicates in lists that we don’t sort because of - comments. -* P1. Update Buildifier linter to recommend inlining trivial expressions. -* P2. Study use cases for native.existing_rule[s]() and propose alternatives. -* P2. Study use cases for the prelude file and propose alternatives. - -Performance: - -* P1. Optimize the Starlark interpreter using flat environments and bytecode - compilation. - -Technical debt reduction: - -* P0. Add ability to port native symbols to Starlark underneath @bazel_tools. -* P1.
Delete obsolete flags (some of them are still used at Google, so we need to - clean the codebase first): `incompatible_always_check_depset_elements`, - `incompatible_disable_deprecated_attr_params`, - `incompatible_no_support_tools_in_action_inputs`, `incompatible_new_actions_api`. -* P1. Ensure the following flags can be flipped in Bazel 4.0: - `incompatible_disable_depset_items`, `incompatible_no_implicit_file_export`, - `incompatible_run_shell_command_string`, - `incompatible_restrict_string_escapes`. -* P1. Finish lib.syntax work (API cleanup, separation from Bazel). -* P2. Reduce by 50% the build+test latency of a trivial edit to Bazel’s Java packages. - -Community: - -* `rules_python` is active and well-maintained by the community. -* Continuous support for rules_jvm_external (no outstanding pull requests, issue - triage, making releases). -* Maintain Bazel documentation infrastructure: centralize and canonicalize CSS - styles across bazel-website, bazel-blog, docs -* Bazel docs: add CI tests for e2e doc site build to prevent regressions. - -## Q1 2020 - -Build health and Best practices: - -* Allow targets to track their macro call stack, for exporting via `bazel query` -* Implement `--incompatible_no_implicit_file_export` -* Remove the deprecated depset APIs (#5817, #10313, #9017). -* Add a cross-file analyzer in Buildifier, implement a check for deprecated - functions. - -Performance: - -* Make Bazel’s own Java-based tests 2x faster. -* Implement a Starlark CPU profiler. - -Technical debt reduction: - -* Remove 8 incompatible flags (after flipping them). -* Finish lib.syntax cleanup work (break dependencies). -* Starlark optimization: flat environment, bytecode compilation -* Delete all serialization from analysis phase, if possible -* Make a plan for simplifying/optimizing lib.packages - -Community: - -* Publish a Glossary containing definitions for all the Bazel-specific terms diff --git a/8.3.1/community/sig.mdx b/8.3.1/community/sig.mdx deleted file mode 100644 index ae5f918..0000000 --- a/8.3.1/community/sig.mdx +++ /dev/null @@ -1,158 +0,0 @@ ---- -title: 'Bazel Special Interest Groups' ---- - - - -Bazel hosts Special Interest Groups (SIGs) to focus collaboration on particular -areas and to support communication and coordination between [Bazel owners, -maintainers, and contributors](/contribute/policy). This policy -applies to [`bazelbuild`](http://github.com/bazelbuild). - -SIGs do their work in public. The ideal scope for a SIG covers a well-defined -domain, where the majority of participation is from the community. SIGs may -focus on community-maintained repositories in `bazelbuild` (such as language -rules) or focus on areas of code in the Bazel repository (such as Remote -Execution). - -While not all SIGs will have the same level of energy, breadth of scope, or -governance models, there should be sufficient evidence that there are community -members willing to engage and contribute should the interest group be -established. Before joining, review the group's work, and then get in touch -with the SIG leader. Membership policies vary on a per-SIG basis. - -See the complete list of -[Bazel SIGs](https://github.com/bazelbuild/community/tree/main/sigs). - -### Non-goals: What a SIG is not - -SIGs are intended to facilitate collaboration on shared work.
A SIG is -therefore: - -- *Not a support forum:* a mailing list and a SIG are not the same thing -- *Not immediately required:* early on in a project's life, you may not know - if you have shared work or collaborators -- *Not free labor:* energy is required to grow and coordinate the work - collaboratively - -Bazel Owners take a conservative approach to SIG creation—thanks to the ease of -starting projects on GitHub, there are many avenues where collaboration can -happen without the need for a SIG. - -## SIG lifecycle - -This section covers how to create a SIG. - -### Research and consultation - -To propose a new SIG group, first gather evidence for approval, as specified -below. Some possible avenues to consider are: - -- A well-defined problem or set of problems the group would solve -- Consultation with community members who would benefit, assessing both the - benefit and their willingness to commit -- For existing projects, evidence from issues and PRs that contributors care - about the topic -- Potential goals for the group to achieve -- Resource requirements of running the group - -Even if the need for a SIG seems self-evident, the research and consultation are -still important to the success of the group. - -### Create the new group - -The new group should follow the process below for chartering. In particular, it -must demonstrate: - -- A clear purpose and benefit to Bazel (either around a sub-project or - application area) -- Two or more contributors willing to act as group leads, existence of other - contributors, and evidence of demand for the group -- Each group needs to use at least one publicly accessible mailing list. A SIG - may reuse one of the public lists, such as - [bazel-discuss](https://groups.google.com/g/bazel-discuss), ask for a list - for @bazel.build, or create their own list -- Resources the SIG initially requires (usually, a mailing list and a regular - video call) -- SIGs can serve documents and files from their directory in - [`bazelbuild/community`](https://github.com/bazelbuild/community) - or from their own repository in the - [`bazelbuild`](https://github.com/bazelbuild) GitHub - organization. SIGs may link to external resources if they choose to organize - their work outside of the `bazelbuild` GitHub organization -- Bazel Owners approve or reject SIG applications and consult other - stakeholders as necessary - -Before entering the formal parts of the process, you should consult with -the Bazel product team, at product@bazel.build. Most SIGs require conversation -and iteration before approval. - -The formal request for the new group is done by submitting a charter as a PR to -[`bazelbuild/community`](https://github.com/bazelbuild/community), -and including the request in the comments on the PR following the template -below. On approval, the PR for the group is merged and the required resources -created. - -### Template Request for New SIG - -To request a new SIG, use the template in the community repo: -[SIG-request-template.md](https://github.com/bazelbuild/community/blob/main/governance/SIG-request-template.md). - -### Chartering - -To establish a group, you need a charter and must follow the Bazel -[code of conduct](https://github.com/bazelbuild/bazel/blob/HEAD/CODE_OF_CONDUCT.md). -Archives of the group will be public. Membership may either be open to all -without approval, or available on request, pending approval of the group -administrator. - -The charter must nominate an administrator.
As well as an administrator, the -group must include at least one person as lead (these may be the same person), -who serves as point of contact for coordination as required with the Bazel -product team. - -Group creators must post their charter to the group mailing list. The community -repository in the Bazel GitHub organization archives such documents and -policies. As groups evolve their practices and conventions, they should update -their charters within the relevant part of the community repository. - -### Collaboration and inclusion - -While not mandated, the group should use scheduled conference calls or chat -channels to conduct meetings. Any such -meetings should be advertised on the mailing list, and notes posted to the -mailing list afterwards. Regular meetings help drive accountability and progress -in a SIG. - -Bazel product team members may proactively monitor the group and encourage -discussion and action as appropriate. - -### Launch a SIG - -Required activities: - -- Notify Bazel general discussion groups - ([bazel-discuss](https://groups.google.com/g/bazel-discuss), - [bazel-dev](https://groups.google.com/g/bazel-dev)). - -Optional activities: - -- Create a blog post for the Bazel blog - -### Health and termination of SIGs - -The Bazel owners make a best effort to ensure the health of SIGs. Bazel owners -occasionally request the SIG lead to report on the SIG's work, to inform the -broader Bazel community of the group's activity. - -If a SIG no longer has a useful purpose or interested community, it may be -archived and cease operation. The Bazel product team reserves the right to -archive such inactive SIGs to maintain the overall health of the project, -though it is a less preferable outcome. A SIG may also opt to disband if -it recognizes it has reached the end of its useful life. - -## Note - -*This content has been adapted from TensorFlow’s -[SIG playbook](https://www.tensorflow.org/community/sig_playbook) -with modifications.* diff --git a/8.3.1/community/update.mdx b/8.3.1/community/update.mdx deleted file mode 100644 index be0e07d..0000000 --- a/8.3.1/community/update.mdx +++ /dev/null @@ -1,18 +0,0 @@ ---- -title: 'Community updates' ---- - - - -Join Bazel developer relations engineers for the monthly community update -livestream, or catch up on past ones. - -Title | Date | Description | Speakers --------- | -------- | -------- | -------- -[Roadmap Introduction](https://www.youtube.com/watch?v=gYrZDl7K9JM) | 5/19/2022 | The inaugural Bazel Community Update, introducing the community to some of Google's Bazel leadership to talk about the general state of the project and its upcoming roadmap | Sven Tiffe, Tony Aiuto, Radhika Advani -[Hands-On with Bzlmod](https://www.youtube.com/watch?v=MuW5XNcFukE) | 6/23/2022 | This month, we're joined by Google engineers Yun Peng and Xudong Yang to talk about Bzlmod, the new dependency system that is expected to go GA later this year. We'll cover the motivation behind the change, the new capabilities it brings to the table, and walk through some examples of it in action. | Yun Peng, Xudong Yang -[Extending Gazelle to generate BUILD files](https://www.youtube.com/watch?v=E1-U7EAfhXw) | 7/21/2022 | This month we're joined by Son Luong Ngoc who will be showing the Gazelle language extension system. We'll briefly touch on how it works under the covers, existing extensions, and how to go about writing your own extensions to ease the migration to Bazel.
| Son Luong Ngoc -[Using Bazel for JavaScript Projects](https://www.youtube.com/watch?v=RIfYqX0JJYk) | 8/18/2022 | In this update, Alex Eagle joins us to talk about running JavaScript build tooling under Bazel. We'll look at a couple of examples: a Vue.js frontend and Nest backend. We'll cover the migration to newer rules_js provided by Aspect, and study how the tooling allows for fetching third-party dependencies and resolving them in the Node.js runtime. | Alex Eagle -[Like Peanut Butter & Jelly: Integrating Bazel with JetBrains IntelliJ](https://www.youtube.com/watch?v=wMrua-W-LC4) | 9/15/2022 | Bazel is awesome. IntelliJ is awesome. Naturally, they are more awesome together. Bazel IntelliJ plugin gurus Mai Hussien from Google and Justin Kaeser from JetBrains join us this month to give a live demo and walkthrough of the plugin's capabilities. Both new and experienced plugin users are welcome to come with questions. | Mai Hussien, Justin Kaeser -[Bazel at scale for surgical robots](https://www.youtube.com/watch?v=kCs1xa45yjM) | 10/27/2022 | What do you do when CMake CI runs for four hours? Join Guillaume Maudoux of Tweag to learn about how they migrated large, embedded robotic applications to Bazel. Topics include configuring toolchains for cross compilation, improving CI performance, managing third-party dependencies, and creating a positive developer experience — everything needed to ensure that Bazel lives up to “{Fast, Correct} — Choose Two”. | Guillaume Maudoux -[The Ghosts of Bazel Past, Present, and Future](https://www.youtube.com/watch?v=uRjSghJQlsw) | 12/22/2022 | For our special holiday Community Update and last of 2022, I'll be joined by Google's Sven Tiffe and Radhika Advani where we'll be visited by the ghosts of Bazel Past (2022 year in review), Present (Bazel 6.0 release), and Future (what to expect in 2023). | Sven Tiffe, Radhika Advani diff --git a/8.3.1/concepts/build-ref.mdx b/8.3.1/concepts/build-ref.mdx deleted file mode 100644 index e8839d4..0000000 --- a/8.3.1/concepts/build-ref.mdx +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: 'Repositories, workspaces, packages, and targets' ---- - - - -Bazel builds software from source code organized in directory trees called -repositories. A defined set of repositories comprises the workspace. Source -files in repositories are organized in a nested hierarchy of packages, where -each package is a directory that contains a set of related source files and one -`BUILD` file. The `BUILD` file specifies what software outputs can be built from -the source. - -### Repositories - -Source files used in a Bazel build are organized in _repositories_ (often -shortened to _repos_). A repo is a directory tree with a boundary marker file at -its root; such a boundary marker file could be `MODULE.bazel`, `REPO.bazel`, or -in legacy contexts, `WORKSPACE` or `WORKSPACE.bazel`. - -The repo in which the current Bazel command is being run is called the _main -repo_. Other, (external) repos are defined by _repo rules_; see [external -dependencies overview](/external/overview) for more information. - -## Workspace - -A _workspace_ is the environment shared by all Bazel commands run from the same -main repo. It encompasses the main repo and the set of all defined external -repos. - -Note that historically the concepts of "repository" and "workspace" have been -conflated; the term "workspace" has often been used to refer to the main -repository, and sometimes even used as a synonym of "repository". 
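Concretely, a repo boundary is just a marker file at the root of a directory tree. As a minimal sketch, a `MODULE.bazel` like the following is enough to mark the tree it sits in as a repo (the module name is illustrative):

```python
# MODULE.bazel at the repo root; its presence marks this tree as a repo.
module(
    name = "my_project",  # hypothetical module name
    version = "1.0.0",
)
```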
- -## Packages - -The primary unit of code organization in a repository is the _package_. A -package is a collection of related files and a specification of how they can be -used to produce output artifacts. - -A package is defined as a directory containing a -[`BUILD` file](/concepts/build-files) named either `BUILD` or `BUILD.bazel`. A -package includes all files in its directory, plus all subdirectories beneath it, -except those which themselves contain a `BUILD` file. From this definition, no -file or directory may be a part of two different packages. - -For example, in the following directory tree there are two packages, `my/app`, -and the subpackage `my/app/tests`. Note that `my/app/data` is not a package, but -a directory belonging to package `my/app`. - -``` -src/my/app/BUILD -src/my/app/app.cc -src/my/app/data/input.txt -src/my/app/tests/BUILD -src/my/app/tests/test.cc -``` - -## Targets - -A package is a container of _targets_, which are defined in the package's -`BUILD` file. Most targets are one of two principal kinds, _files_ and _rules_. - -Files are further divided into two kinds. _Source files_ are usually written by -the efforts of people, and checked in to the repository. _Generated files_, -sometimes called derived files or output files, are not checked in, but are -generated from source files. - -The second kind of target is declared with a _rule_. Each rule instance -specifies the relationship between a set of input and a set of output files. The -inputs to a rule may be source files, but they also may be the outputs of other -rules. - -Whether the input to a rule is a source file or a generated file is in most -cases immaterial; what matters is only the contents of that file. This fact -makes it easy to replace a complex source file with a generated file produced by -a rule, such as happens when the burden of manually maintaining a highly -structured file becomes too tiresome, and someone writes a program to derive it. -No change is required to the consumers of that file. Conversely, a generated -file may easily be replaced by a source file with only local changes. - -The inputs to a rule may also include _other rules_. The precise meaning of such -relationships is often quite complex and language- or rule-dependent, but -intuitively it is simple: a C++ library rule A might have another C++ library -rule B for an input. The effect of this dependency is that B's header files are -available to A during compilation, B's symbols are available to A during -linking, and B's runtime data is available to A during execution. - -An invariant of all rules is that the files generated by a rule always belong to -the same package as the rule itself; it is not possible to generate files into -another package. It is not uncommon for a rule's inputs to come from another -package, though. - -Package groups are sets of packages whose purpose is to limit accessibility of -certain rules. Package groups are defined by the `package_group` function. They -have three properties: the list of packages they contain, their name, and other -package groups they include. The only allowed ways to refer to them are from the -`visibility` attribute of rules or from the `default_visibility` attribute of -the `package` function; they do not generate or consume files. For more -information, refer to the [`package_group` -documentation](/reference/be/functions#package_group). 
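As a minimal sketch of those three properties, with hypothetical package paths:

```python
package_group(
    name = "app_internal",
    # The list of packages the group contains.
    packages = ["//my/app/..."],
    # Other package groups whose packages are folded in.
    includes = [":app_tests"],
)

package_group(
    name = "app_tests",
    packages = ["//my/app/tests/..."],
)

# Package groups are consumed only from visibility attributes:
cc_library(
    name = "impl",
    srcs = ["impl.cc"],
    visibility = [":app_internal"],
)
```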
diff --git a/8.3.1/concepts/platforms.mdx b/8.3.1/concepts/platforms.mdx deleted file mode 100644 index e560ea4..0000000 --- a/8.3.1/concepts/platforms.mdx +++ /dev/null @@ -1,429 +0,0 @@ ---- -title: 'Migrating to Platforms' ---- - - - -Bazel has sophisticated [support](#background) for modeling -[platforms][Platforms] and [toolchains][Toolchains] for multi-architecture and -cross-compiled builds. - -This page summarizes the state of this support. - -Key Point: Bazel's platform and toolchain APIs are available today. Not all -languages support them. Use these APIs with your project if you can. Bazel is -migrating all major languages so eventually all builds will be platform-based. - -See also: - -* [Platforms][Platforms] -* [Toolchains][Toolchains] -* [Background][Background] - -## Status - -### C++ - -C++ rules use platforms to select toolchains when -`--incompatible_enable_cc_toolchain_resolution` is set. - -This means you can configure a C++ project with: - -```posix-terminal -bazel build //:my_cpp_project --platforms=//:myplatform -``` - -instead of the legacy: - -```posix-terminal -bazel build //:my_cpp_project --cpu=... --crosstool_top=... --compiler=... -``` - -This will be enabled by default in Bazel 7.0 ([#7260](https://github.com/bazelbuild/bazel/issues/7260)). - -To test your C++ project with platforms, see -[Migrating Your Project](#migrating-your-project) and -[Configuring C++ toolchains]. - -### Java - -Java rules use platforms to select toolchains. - -This replaces legacy flags `--java_toolchain`, `--host_java_toolchain`, -`--javabase`, and `--host_javabase`. - -See [Java and Bazel](/docs/bazel-and-java) for details. - -### Android - -Android rules use platforms to select toolchains when -`--incompatible_enable_android_toolchain_resolution` is set. - -This means you can configure an Android project with: - -```posix-terminal -bazel build //:my_android_project --android_platforms=//:my_android_platform -``` - -instead of with legacy flags like `--android_crosstool_top`, `--android_cpu`, -and `--fat_apk_cpu`. - -This will be enabled by default in Bazel 7.0 ([#16285](https://github.com/bazelbuild/bazel/issues/16285)). - -To test your Android project with platforms, see -[Migrating Your Project](#migrating-your-project). - -### Apple - -[Apple rules] do not support platforms and are not yet scheduled -for support. - -You can still use platform APIs with Apple builds (for example, when building -with a mixture of Apple rules and pure C++) with [platform -mappings](#platform-mappings). - -### Other languages - -* [Go rules] fully support platforms. -* [Rust rules] fully support platforms. - -If you own a language rule set, see [Migrating your rule set] for adding -support. - -## Background - -*Platforms* and *toolchains* were introduced to standardize how software -projects target different architectures and cross-compile. - -This was -[inspired][Inspiration] -by the observation that language maintainers were already doing this in ad -hoc, incompatible ways. For example, C++ rules used `--cpu` and - `--crosstool_top` to declare a target CPU and toolchain. Neither of these -correctly models a "platform". This produced awkward and incorrect builds. - -Java, Android, and other languages evolved their own flags for similar purposes, -none of which interoperated with each other. This made cross-language builds -confusing and complicated. - -Bazel is intended for large, multi-language, multi-platform projects.
This -demands more principled support for these concepts, including a clear -standard API. - -### Need for migration - -Upgrading to the new API requires two efforts: releasing the API and upgrading -rule logic to use it. - -The first is done but the second is ongoing. This consists of ensuring -language-specific platforms and toolchains are defined, language logic reads -toolchains through the new API instead of old flags like `--crosstool_top`, and -`config_setting`s select on the new API instead of old flags. - -This work is straightforward but requires a distinct effort for each language, -plus fair warning for project owners to test against upcoming changes. - -This is why this is an ongoing migration. - -### Goal - -This migration is complete when all projects build with the form: - -```posix-terminal -bazel build //:myproject --platforms=//:myplatform -``` - -This implies: - -1. Your project's rules choose the right toolchains for `//:myplatform`. -1. Your project's dependencies choose the right toolchains for `//:myplatform`. -1. `//:myplatform` references -[common declarations][Common Platform Declarations] -of `CPU`, `OS`, and other generic, language-independent properties -1. All relevant [`select()`s][select()] properly match `//:myplatform`. -1. `//:myplatform` is defined in a clear, accessible place: in your project's -repo if the platform is unique to your project, or some common place all -consuming projects can find it - -Old flags like `--cpu`, `--crosstool_top`, and `--fat_apk_cpu` will be -deprecated and removed as soon as it's safe to do so. - -Ultimately, this will be the *sole* way to configure architectures. - - -## Migrating your project - -If you build with languages that support platforms, your build should already -work with an invocation like: - -```posix-terminal -bazel build //:myproject --platforms=//:myplatform -``` - -See [Status](#status) and your language's documentation for precise details. - -If a language requires a flag to enable platform support, you also need to set -that flag. See [Status](#status) for details. - -For your project to build, you need to check the following: - -1. `//:myplatform` must exist. It's generally the project owner's responsibility - to define platforms because different projects target different machines. - See [Default platforms](#default-platforms). - -1. The toolchains you want to use must exist. If using stock toolchains, the - language owners should include instructions for how to register them. If - writing your own custom toolchains, you need to [register](https://bazel.build/extending/toolchains#registering-building-toolchains) them in your - `MODULE.bazel` file or with [`--extra_toolchains`](https://bazel.build/reference/command-line-reference#flag--extra_toolchains). - -1. `select()`s and [configuration transitions][Starlark transitions] must - resolve properly. See [select()](#select) and [Transitions](#transitions). - -1. If your build mixes languages that do and don't support platforms, you may - need platform mappings to help the legacy languages work with the new API. - See [Platform mappings](#platform-mappings) for details. - -If you still have problems, [reach out](#questions) for support. - -### Default platforms - -Project owners should define explicit -[platforms][Defining Constraints and Platforms] to describe the architectures -they want to build for. These are then triggered with `--platforms`. - -When `--platforms` isn't set, Bazel defaults to a `platform` representing the -local build machine. 
This is auto-generated at `@platforms//host` (aliased as -`@bazel_tools//tools:host_platform`) -so there's no need to explicitly define it. It maps the local machine's `OS` -and `CPU` with `constraint_value`s declared in -[`@platforms`](https://github.com/bazelbuild/platforms). - -### `select()` - -Projects can [`select()`][select()] on -[`constraint_value` targets][constraint_value Rule] but not complete -platforms. This is intentional so `select()` supports as wide a variety of -machines as possible. A library with `ARM`-specific sources should support *all* -`ARM`-powered machines unless there's reason to be more specific. - -To select on one or more `constraint_value`s, use: - -```python -config_setting( - name = "is_arm", - constraint_values = [ - "@platforms//cpu:arm", - ], -) -``` - -This is equivalent to traditionally selecting on `--cpu`: - -```python -config_setting( - name = "is_arm", - values = { - "cpu": "arm", - }, -) -``` - -More details [here][select() Platforms]. - -`select`s on `--cpu`, `--crosstool_top`, etc. don't understand `--platforms`. -When migrating your project to platforms, you must either convert them to -`constraint_values` or use [platform mappings](#platform-mappings) to support -both styles during migration. - -### Transitions - -[Starlark transitions][Starlark transitions] change -flags down parts of your build graph. If your project uses a transition that -sets `--cpu`, `--crosstool_top`, or other legacy flags, rules that read -`--platforms` won't see these changes. - -When migrating your project to platforms, you must either convert changes like -`return { "//command_line_option:cpu": "arm" }` to `return { -"//command_line_option:platforms": "//:my_arm_platform" }` or use [platform -mappings](#platform-mappings) to support both styles during migration. - -## Migrating your rule set - -If you own a rule set and want to support platforms, you need to: - -1. Have rule logic resolve toolchains with the toolchain API. See - [toolchain API][Toolchains] (`ctx.toolchains`). - -1. Optional: define an `--incompatible_enable_platforms_for_my_language` flag so - rule logic alternately resolves toolchains through the new API or old flags - like `--crosstool_top` during migration testing. - -1. Define the relevant properties that make up platform components. See - [Common platform properties](#common-platform-properties) - -1. Define standard toolchains and make them accessible to users through your - rule's registration instructions ([details](https://bazel.build/extending/toolchains#registering-building-toolchains)) - -1. Ensure [`select()`s](#select) and - [configuration transitions](#transitions) support platforms. This is the - biggest challenge. It's particularly challenging for multi-language projects - (which may fail if *all* languages can't read `--platforms`). - -If you need to mix with rules that don't support platforms, you may need -[platform mappings](#platform-mappings) to bridge the gap. - -### Common platform properties - -Common, cross-language platform properties like `OS` and `CPU` should be -declared in [`@platforms`](https://github.com/bazelbuild/platforms). -This encourages sharing, standardization, and cross-language compatibility. - -Properties unique to your rules should be declared in your rule's repo. This -lets you maintain clear ownership over the specific concepts your rules are -responsible for. - -If your rules use custom-purpose OSes or CPUs, these should be declared in your -rule's repo rather than in
-[`@platforms`](https://github.com/bazelbuild/platforms).
-
-## Platform mappings
-
-*Platform mappings* is a temporary API that lets platform-aware logic mix with
-legacy logic in the same build. This is a blunt tool that's only intended to
-smooth incompatibilities with different migration timeframes.
-
-Caution: Only use this if necessary, and expect to eventually eliminate it.
-
-A platform mapping maps either a `platform()` to a corresponding set of legacy
-flags, or a set of legacy flags to a `platform()`. For example:
-
-```python
-platforms:
-  # Maps "--platforms=//platforms:ios" to "--ios_multi_cpus=x86_64 --apple_platform_type=ios".
-  //platforms:ios
-    --ios_multi_cpus=x86_64
-    --apple_platform_type=ios
-
-flags:
-  # Maps "--ios_multi_cpus=x86_64 --apple_platform_type=ios" to "--platforms=//platforms:ios".
-  --ios_multi_cpus=x86_64
-  --apple_platform_type=ios
-    //platforms:ios
-
-  # Maps "--cpu=darwin_x86_64 --apple_platform_type=macos" to "--platforms=//platforms:macos".
-  --cpu=darwin_x86_64
-  --apple_platform_type=macos
-    //platforms:macos
-```
-
-Bazel uses this to guarantee all settings, both platform-based and
-legacy, are consistently applied throughout the build, including through
-[transitions](#transitions).
-
-By default Bazel reads mappings from the `platform_mappings` file in your
-workspace root. You can also set
-`--platform_mappings=//:my_custom_mapping`.
-
-See the [platform mappings design] for details.
-
-## API review
-
-A [`platform`][platform Rule] is a collection of
-[`constraint_value` targets][constraint_value Rule]:
-
-```python
-platform(
-    name = "myplatform",
-    constraint_values = [
-        "@platforms//os:linux",
-        "@platforms//cpu:arm",
-    ],
-)
-```
-
-A [`constraint_value`][constraint_value Rule] is a machine
-property. Values of the same "kind" are grouped under a common
-[`constraint_setting`][constraint_setting Rule]:
-
-```python
-constraint_setting(name = "os")
-constraint_value(
-    name = "linux",
-    constraint_setting = ":os",
-)
-constraint_value(
-    name = "mac",
-    constraint_setting = ":os",
-)
-```
-
-A [`toolchain`][Toolchains] is a [Starlark rule][Starlark rule]. Its
-attributes declare a language's tools (like `compiler =
-"//mytoolchain:custom_gcc"`). Its [providers][Starlark provider] pass
-this information to rules that need to build with these tools.
-
-Toolchains declare the `constraint_value`s of machines they can
-[target][target_compatible_with Attribute]
-(`target_compatible_with = ["@platforms//os:linux"]`) and machines their tools can
-[run on][exec_compatible_with Attribute]
-(`exec_compatible_with = ["@platforms//os:mac"]`).
-
-When building `$ bazel build //:myproject --platforms=//:myplatform`, Bazel
-automatically selects a toolchain that can run on the build machine and
-build binaries for `//:myplatform`. This is known as *toolchain resolution*.
-
-The set of available toolchains can be registered in the `MODULE.bazel` file
-with [`register_toolchains`][register_toolchains Function] or at the
-command line with [`--extra_toolchains`][extra_toolchains Flag].
-
-For more information see [here][Toolchains].
-
-## Questions
-
-For general support and questions about the migration timeline, contact
-[bazel-discuss] or the owners of the appropriate rules.
-
-For discussions on the design and evolution of the platform/toolchain APIs,
-contact [bazel-dev].
- -## See also - -* [Configurable Builds - Part 1] -* [Platforms] -* [Toolchains] -* [Bazel Platforms Cookbook] -* [Platforms examples] -* [Example C++ toolchain] - -[Android Rules]: /docs/bazel-and-android -[Apple Rules]: https://github.com/bazelbuild/rules_apple -[Background]: #background -[Bazel platforms Cookbook]: https://docs.google.com/document/d/1UZaVcL08wePB41ATZHcxQV4Pu1YfA1RvvWm8FbZHuW8/ -[bazel-dev]: https://groups.google.com/forum/#!forum/bazel-dev -[bazel-discuss]: https://groups.google.com/forum/#!forum/bazel-discuss -[Common Platform Declarations]: https://github.com/bazelbuild/platforms -[constraint_setting Rule]: /reference/be/platforms-and-toolchains#constraint_setting -[constraint_value Rule]: /reference/be/platforms-and-toolchains#constraint_value -[Configurable Builds - Part 1]: https://blog.bazel.build/2019/02/11/configurable-builds-part-1.html -[Configuring C++ toolchains]: /tutorials/ccp-toolchain-config -[Defining Constraints and Platforms]: /extending/platforms#constraints-platforms -[Example C++ toolchain]: https://github.com/gregestren/snippets/tree/master/custom_cc_toolchain_with_platforms -[exec_compatible_with Attribute]: /reference/be/platforms-and-toolchains#toolchain.exec_compatible_with -[extra_toolchains Flag]: /reference/command-line-reference#flag--extra_toolchains -[Go Rules]: https://github.com/bazelbuild/rules_go -[Inspiration]: https://blog.bazel.build/2019/02/11/configurable-builds-part-1.html -[Migrating your rule set]: #migrating-your-rule-set -[Platforms]: /extending/platforms -[Platforms examples]: https://github.com/hlopko/bazel_platforms_examples -[platform mappings design]: https://docs.google.com/document/d/1Vg_tPgiZbSrvXcJ403vZVAGlsWhH9BUDrAxMOYnO0Ls/edit -[platform Rule]: /reference/be/platforms-and-toolchains#platform -[register_toolchains Function]: /rules/lib/globals/module#register_toolchains -[Rust rules]: https://github.com/bazelbuild/rules_rust -[select()]: /docs/configurable-attributes -[select() Platforms]: /docs/configurable-attributes#platforms -[Starlark provider]: /extending/rules#providers -[Starlark rule]: /extending/rules -[Starlark transitions]: /extending/config#user-defined-transitions -[target_compatible_with Attribute]: /reference/be/platforms-and-toolchains#toolchain.target_compatible_with -[Toolchains]: /extending/toolchains diff --git a/8.3.1/concepts/visibility.mdx b/8.3.1/concepts/visibility.mdx deleted file mode 100644 index cb7441d..0000000 --- a/8.3.1/concepts/visibility.mdx +++ /dev/null @@ -1,610 +0,0 @@ ---- -title: 'Visibility' ---- - - - -This page covers Bazel's two visibility systems: -[target visibility](#target-visibility) and [load visibility](#load-visibility). - -Both types of visibility help other developers distinguish between your -library's public API and its implementation details, and help enforce structure -as your workspace grows. You can also use visibility when deprecating a public -API to allow current users while denying new ones. - -## Target visibility - -**Target visibility** controls who may depend on your target — that is, who may -use your target's label inside an attribute such as `deps`. A target will fail -to build during the [analysis](/reference/glossary#analysis-phase) phase if it -violates the visibility of one of its dependencies. - -Generally, a target `A` is visible to a target `B` if they are in the same -location, or if `A` grants visibility to `B`'s location. 
In the absence of
-[symbolic macros](/extending/macros), the term "location" can be simplified
-to just "package"; see [below](#symbolic-macros) for more on symbolic macros.
-
-Visibility is specified by listing allowed packages. Allowing a package does not
-necessarily mean that its subpackages are also allowed. For more details on
-packages and subpackages, see [Concepts and terminology](/concepts/build-ref).
-
-For prototyping, you can disable target visibility enforcement by setting the
-flag `--check_visibility=false`. This shouldn't be done for production usage in
-submitted code.
-
-The primary way to control visibility is with a rule's
-[`visibility`](/reference/be/common-definitions#common.visibility) attribute.
-The following subsections describe the attribute's format, how to apply it to
-various kinds of targets, and the interaction between the visibility system and
-symbolic macros.
-
-### Visibility specifications
-
-All rule targets have a `visibility` attribute that takes a list of labels. Each
-label has one of the following forms. With the exception of the last form, these
-are just syntactic placeholders that don't correspond to any actual target.
-
-* `"//visibility:public"`: Grants access to all packages.
-
-* `"//visibility:private"`: Does not grant any additional access; only targets
-  in this location's package can use this target.
-
-* `"//foo/bar:__pkg__"`: Grants access to `//foo/bar` (but not its
-  subpackages).
-
-* `"//foo/bar:__subpackages__"`: Grants access to `//foo/bar` and all of its
-  direct and indirect subpackages.
-
-* `"//some_pkg:my_package_group"`: Grants access to all of the packages that
-  are part of the given [`package_group`](/reference/be/functions#package_group).
-
-  * Package groups use a
-    [different syntax](/reference/be/functions#package_group.packages) for
-    specifying packages. Within a package group, the forms
-    `"//foo/bar:__pkg__"` and `"//foo/bar:__subpackages__"` are respectively
-    replaced by `"//foo/bar"` and `"//foo/bar/..."`. Likewise,
-    `"//visibility:public"` and `"//visibility:private"` are just `"public"`
-    and `"private"`.
-
-For example, if `//some/package:mytarget` has its `visibility` set to
-`[":__subpackages__", "//tests:__pkg__"]`, then it could be used by any target
-that is part of the `//some/package/...` source tree, as well as targets
-declared in `//tests/BUILD`, but not by targets defined in
-`//tests/integration/BUILD`.
-
-**Best practice:** To make several targets visible to the same set
-of packages, use a `package_group` instead of repeating the list in each
-target's `visibility` attribute. This increases readability and prevents the
-lists from getting out of sync.
-
-**Best practice:** When granting visibility to another team's project, prefer
-`__subpackages__` over `__pkg__` to avoid needless visibility churn as that
-project evolves and adds new subpackages.
-
-Note: The `visibility` attribute may not specify non-`package_group` targets.
-Doing so triggers a "Label does not refer to a package group" or "Cycle in
-dependency graph" error.
-
-### Rule target visibility
-
-A rule target's visibility is determined by taking its `visibility` attribute
--- or a suitable default if not given -- and appending the location where the
-target was declared.
For targets not declared in a symbolic macro, if the
-package specifies a [`default_visibility`](/reference/be/functions#package.default_visibility),
-this default is used; for all other packages and for targets declared in a
-symbolic macro, the default is just `["//visibility:private"]`.
-
-```starlark
-# //mypkg/BUILD
-
-package(default_visibility = ["//friend:__pkg__"])
-
-cc_library(
-    name = "t1",
-    ...
-    # No visibility explicitly specified.
-    # Effective visibility is ["//friend:__pkg__", "//mypkg:__pkg__"].
-    # If no default_visibility were given in package(...), the visibility would
-    # instead default to ["//visibility:private"], and the effective visibility
-    # would be ["//mypkg:__pkg__"].
-)
-
-cc_library(
-    name = "t2",
-    ...
-    visibility = [":clients"],
-    # Effective visibility is ["//mypkg:clients", "//mypkg:__pkg__"], which will
-    # expand to ["//another_friend:__subpackages__", "//mypkg:__pkg__"].
-)
-
-cc_library(
-    name = "t3",
-    ...
-    visibility = ["//visibility:private"],
-    # Effective visibility is ["//mypkg:__pkg__"]
-)
-
-package_group(
-    name = "clients",
-    packages = ["//another_friend/..."],
-)
-```
-
-**Best practice:** Avoid setting `default_visibility` to public. It may be
-convenient for prototyping or in small codebases, but the risk of inadvertently
-creating public targets increases as the codebase grows. It's better to be
-explicit about which targets are part of a package's public interface.
-
-### Generated file target visibility
-
-A generated file target has the same visibility as the rule target that
-generates it.
-
-```starlark
-# //mypkg/BUILD
-
-java_binary(
-    name = "foo",
-    ...
-    visibility = ["//friend:__pkg__"],
-)
-```
-
-```starlark
-# //friend/BUILD
-
-some_rule(
-    name = "bar",
-    deps = [
-        # Allowed directly by visibility of foo.
-        "//mypkg:foo",
-        # Also allowed. The java_binary's "_deploy.jar" implicit output file
-        # target has the same visibility as the rule target itself.
-        "//mypkg:foo_deploy.jar",
-    ],
-    ...
-)
-```
-
-### Source file target visibility
-
-Source file targets can either be explicitly declared using
-[`exports_files`](/reference/be/functions#exports_files), or implicitly created
-by referring to their filename in a label attribute of a rule (outside of a
-symbolic macro). As with rule targets, the location of the call to
-`exports_files`, or the BUILD file that referred to the input file, is always
-automatically appended to the file's visibility.
-
-Files declared by `exports_files` can have their visibility set by the
-`visibility` parameter to that function. If this parameter is not given, the
-visibility is public.
-
-Note: `exports_files` may not be used to override the visibility of a generated
-file.
-
-For files that do not appear in a call to `exports_files`, the visibility
-depends on the value of the flag
-[`--incompatible_no_implicit_file_export`](https://github.com/bazelbuild/bazel/issues/10225):
-
-* If the flag is true, the visibility is private.
-
-* Else, the legacy behavior applies: The visibility is the same as the
-  `BUILD` file's `default_visibility`, or private if a default visibility is
-  not specified.
-
-Avoid relying on the legacy behavior. Always write an `exports_files`
-declaration whenever a source file target needs non-private visibility.
-
-**Best practice:** When possible, prefer to expose a rule target rather than a
-source file. For example, instead of calling `exports_files` on a `.java` file,
-wrap the file in a non-private `java_library` target.
Generally, rule targets
-should only directly reference source files that live in the same package.
-
-#### Example
-
-File `//frobber/data/BUILD`:
-
-```starlark
-exports_files(["readme.txt"])
-```
-
-File `//frobber/bin/BUILD`:
-
-```starlark
-cc_binary(
-    name = "my-program",
-    data = ["//frobber/data:readme.txt"],
-)
-```
-
-### Config setting visibility
-
-Historically, Bazel has not enforced visibility for
-[`config_setting`](/reference/be/general#config_setting) targets that are
-referenced in the keys of a [`select()`](/reference/be/functions#select). There
-are two flags to remove this legacy behavior:
-
-* [`--incompatible_enforce_config_setting_visibility`](https://github.com/bazelbuild/bazel/issues/12932)
-  enables visibility checking for these targets. To assist with migration, it
-  also causes any `config_setting` that does not specify a `visibility` to be
-  considered public (regardless of package-level `default_visibility`).
-
-* [`--incompatible_config_setting_private_default_visibility`](https://github.com/bazelbuild/bazel/issues/12933)
-  causes `config_setting`s that do not specify a `visibility` to respect the
-  package's `default_visibility` and to fall back on private visibility, just
-  like any other rule target. It is a no-op if
-  `--incompatible_enforce_config_setting_visibility` is not set.
-
-Avoid relying on the legacy behavior. Any `config_setting` that is intended to
-be used outside the current package should have an explicit `visibility`, if the
-package does not already specify a suitable `default_visibility`.
-
-### Package group target visibility
-
-`package_group` targets do not have a `visibility` attribute. They are always
-publicly visible.
-
-### Visibility of implicit dependencies
-
-Some rules have [implicit dependencies](/extending/rules#private_attributes_and_implicit_dependencies) —
-dependencies that are not spelled out in a `BUILD` file but are inherent to
-every instance of that rule. For example, a `cc_library` rule might create an
-implicit dependency from each of its rule targets to an executable target
-representing a C++ compiler.
-
-The visibility of such an implicit dependency is checked with respect to the
-package containing the `.bzl` file in which the rule (or aspect) is defined. In
-our example, the C++ compiler could be private so long as it lives in the same
-package as the definition of the `cc_library` rule. As a fallback, if the
-implicit dependency is not visible from the definition, it is checked with
-respect to the `cc_library` target.
-
-If you want to restrict the usage of a rule to certain packages, use
-[load visibility](#load-visibility) instead.
-
-### Visibility and symbolic macros
-
-This section describes how the visibility system interacts with
-[symbolic macros](/extending/macros).
-
-#### Locations within symbolic macros
-
-A key detail of the visibility system is how we determine the location of a
-declaration. For targets that are not declared in a symbolic macro, the location
-is just the package where the target lives -- the package of the `BUILD` file.
-But for targets created in a symbolic macro, the location is the package
-containing the `.bzl` file where the macro's definition (the
-`my_macro = macro(...)` statement) appears. When a target is created inside
-multiple nested symbolic macros, it is always the innermost macro's definition
-that is used.
-
-The same system is used to determine what location to check against a given
-dependency's visibility.
If the consuming target was created inside a macro, we
-look at the innermost macro's definition rather than the package the consuming
-target lives in.
-
-This means that all macros whose code is defined in the same package are
-automatically "friends" with one another. Any target directly created by a macro
-defined in `//lib:defs.bzl` can be seen from any other macro defined in `//lib`,
-regardless of what packages the macros are actually instantiated in. Likewise,
-they can see, and can be seen by, targets declared directly in `//lib/BUILD` and
-its legacy macros. Conversely, targets that live in the same package cannot
-necessarily see one another if at least one of them is created by a symbolic
-macro.
-
-Within a symbolic macro's implementation function, the `visibility` parameter
-has the effective value of the macro's `visibility` attribute after appending
-the location where the macro was called. The standard way for a macro to export
-one of its targets to its caller is to forward this value along to the target's
-declaration, as in `some_rule(..., visibility = visibility)`. Targets that omit
-this attribute won't be visible to the caller of the macro unless the caller
-happens to be in the same package as the macro definition. This behavior
-composes, in the sense that a chain of nested calls to submacros may each pass
-`visibility = visibility`, re-exporting the inner macro's exported targets to
-the caller at each level, without exposing any of the macros' implementation
-details.
-
-#### Delegating privileges to a submacro
-
-The visibility model has a special feature to allow a macro to delegate its
-permissions to a submacro. This is important for factoring and composing macros.
-
-Suppose you have a macro `my_macro` that creates a dependency edge using a rule
-`some_library` from another package:
-
-```starlark
-# //macro/defs.bzl
-load("//lib:defs.bzl", "some_library")
-
-def _impl(name, visibility, ...):
-    ...
-    native.genrule(
-        name = name + "_dependency",
-        ...
-    )
-    some_library(
-        name = name + "_consumer",
-        deps = [name + "_dependency"],
-        ...
-    )
-
-my_macro = macro(implementation = _impl, ...)
-```
-
-```starlark
-# //pkg/BUILD
-
-load("//macro:defs.bzl", "my_macro")
-
-my_macro(name = "foo", ...)
-```
-
-The `//pkg:foo_dependency` target has no `visibility` specified, so it is only
-visible within `//macro`, which works fine for the consuming target. Now, what
-happens if the author of `//lib` refactors `some_library` to instead be
-implemented using a macro?
-
-```starlark
-# //lib:defs.bzl
-
-def _impl(name, visibility, deps, ...):
-    some_rule(
-        # Main target, exported.
-        name = name,
-        visibility = visibility,
-        deps = deps,
-        ...)
-
-some_library = macro(implementation = _impl, ...)
-```
-
-With this change, `//pkg:foo_consumer`'s location is now `//lib` rather than
-`//macro`, so its usage of `//pkg:foo_dependency` violates the dependency's
-visibility. The author of `my_macro` can't be expected to pass
-`visibility = ["//lib"]` to the declaration of the dependency just to work
-around this implementation detail.
-
-For this reason, when a dependency of a target is also an attribute value of the
-macro that declared the target, we check the dependency's visibility against the
-location of the macro instead of the location of the consuming target.
- -In this example, to validate whether `//pkg:foo_consumer` can see -`//pkg:foo_dependency`, we see that `//pkg:foo_dependency` was also passed as an -input to the call to `some_library` inside of `my_macro`, and instead check the -dependency's visibility against the location of this call, `//macro`. - -This process can repeat recursively, as long as a target or macro declaration is -inside of another symbolic macro taking the dependency's label in one of its -label-typed attributes. - -Note: Visibility delegation does not work for labels that were not passed into -the macro, such as labels derived by string manipulation. - -#### Finalizers - -Targets declared in a rule finalizer (a symbolic macro with `finalizer = True`), -in addition to seeing targets following the usual symbolic macro visibility -rules, can *also* see all targets which are visible to the finalizer target's -package. - -In other words, if you migrate a `native.existing_rules()`-based legacy macro to -a finalizer, the targets declared by the finalizer will still be able to see -their old dependencies. - -It is possible to define targets that a finalizer can introspect using -`native.existing_rules()`, but which it cannot use as dependencies under the -visibility system. For example, if a macro-defined target is not visible to its -own package or to the finalizer macro's definition, and is not delegated to the -finalizer, the finalizer cannot see such a target. Note, however, that a -`native.existing_rules()`-based legacy macro will also be unable to see such a -target. - -## Load visibility - -**Load visibility** controls whether a `.bzl` file may be loaded from other -`BUILD` or `.bzl` files outside the current package. - -In the same way that target visibility protects source code that is encapsulated -by targets, load visibility protects build logic that is encapsulated by `.bzl` -files. For instance, a `BUILD` file author might wish to factor some repetitive -target declarations into a macro in a `.bzl` file. Without the protection of -load visibility, they might find their macro reused by other collaborators in -the same workspace, so that modifying the macro breaks other teams' builds. - -Note that a `.bzl` file may or may not have a corresponding source file target. -If it does, there is no guarantee that the load visibility and the target -visibility coincide. That is, the same `BUILD` file might be able to load the -`.bzl` file but not list it in the `srcs` of a [`filegroup`](/reference/be/general#filegroup), -or vice versa. This can sometimes cause problems for rules that wish to consume -`.bzl` files as source code, such as for documentation generation or testing. - -For prototyping, you may disable load visibility enforcement by setting -`--check_bzl_visibility=false`. As with `--check_visibility=false`, this should -not be done for submitted code. - -Load visibility is available as of Bazel 6.0. - -### Declaring load visibility - -To set the load visibility of a `.bzl` file, call the -[`visibility()`](/rules/lib/globals/bzl#visibility) function from within the file. -The argument to `visibility()` is a list of package specifications, just like -the [`packages`](/reference/be/functions#package_group.packages) attribute of -`package_group`. However, `visibility()` does not accept negative package -specifications. - -The call to `visibility()` must only occur once per file, at the top level (not -inside a function), and ideally immediately following the `load()` statements. 
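-
-As a minimal sketch of that placement (the file, package list, and function
-names here are hypothetical):
-
-```starlark
-# //mylib/some_defs.bzl
-
-load(":helpers.bzl", "helper")
-
-# Once, at the top level, immediately after the load()s.
-visibility("private")
-
-def my_def():
-    # visibility("public")  # Not allowed: visibility() can't be called inside
-    # a function, and may only appear once per file.
-    ...
-```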
-
-Unlike target visibility, the default load visibility is always public. Files
-that do not call `visibility()` are always loadable from anywhere in the
-workspace. It is a good idea to add `visibility("private")` to the top of any
-new `.bzl` file that is not specifically intended for use outside the package.
-
-### Example
-
-```starlark
-# //mylib/internal_defs.bzl
-
-# Available to subpackages and to mylib's tests.
-visibility(["//mylib/...", "//tests/mylib/..."])
-
-def helper(...):
-    ...
-```
-
-```starlark
-# //mylib/rules.bzl
-
-load(":internal_defs.bzl", "helper")
-# Set visibility explicitly, even though public is the default.
-# Note the [] can be omitted when there's only one entry.
-visibility("public")
-
-myrule = rule(
-    ...
-)
-```
-
-```starlark
-# //someclient/BUILD
-
-load("//mylib:rules.bzl", "myrule")          # ok
-load("//mylib:internal_defs.bzl", "helper")  # error
-
-...
-```
-
-### Load visibility practices
-
-This section describes tips for managing load visibility declarations.
-
-#### Factoring visibilities
-
-When multiple `.bzl` files should have the same visibility, it can be helpful to
-factor their package specifications into a common list. For example:
-
-```starlark
-# //mylib/internal_defs.bzl
-
-visibility("private")
-
-clients = [
-    "//foo",
-    "//bar/baz/...",
-    ...
-]
-```
-
-```starlark
-# //mylib/feature_A.bzl
-
-load(":internal_defs.bzl", "clients")
-visibility(clients)
-
-...
-```
-
-```starlark
-# //mylib/feature_B.bzl
-
-load(":internal_defs.bzl", "clients")
-visibility(clients)
-
-...
-```
-
-This helps prevent accidental skew between the various `.bzl` files'
-visibilities. It is also more readable when the `clients` list is large.
-
-#### Composing visibilities
-
-Sometimes a `.bzl` file might need to be visible to an allowlist that is
-composed of multiple smaller allowlists. This is analogous to how a
-`package_group` can incorporate other `package_group`s via its
-[`includes`](/reference/be/functions#package_group.includes) attribute.
-
-Suppose you are deprecating a widely used macro. You want it to be visible only
-to existing users and to the packages owned by your own team. You might write:
-
-```starlark
-# //mylib/macros.bzl
-
-load(":internal_defs.bzl", "our_packages")
-load("//some_big_client:defs.bzl", "their_remaining_uses")
-
-# List concatenation. Duplicates are fine.
-visibility(our_packages + their_remaining_uses)
-```
-
-#### Deduplicating with package groups
-
-Unlike target visibility, you cannot define a load visibility in terms of a
-`package_group`. If you want to reuse the same allowlist for both target
-visibility and load visibility, it's best to move the list of package
-specifications into a `.bzl` file, where both kinds of declarations may refer to
-it. Building off the example in [Factoring visibilities](#factoring-visibilities)
-above, you might write:
-
-```starlark
-# //mylib/BUILD
-
-load(":internal_defs.bzl", "clients")
-
-package_group(
-    name = "my_pkg_grp",
-    packages = clients,
-)
-```
-
-This only works if the list does not contain any negative package
-specifications.
-
-#### Protecting individual symbols
-
-Any Starlark symbol whose name begins with an underscore cannot be loaded from
-another file. This makes it easy to create private symbols, but does not allow
-you to share these symbols with a limited set of trusted files. On the other
-hand, load visibility gives you control over what other packages may see your
-`.bzl` file, but does not allow you to prevent any non-underscored symbol from
-being loaded.
-
-Luckily, you can combine these two features to get fine-grained control.
-
-```starlark
-# //mylib/internal_defs.bzl
-
-# Can't be public, because internal_helper shouldn't be exposed to the world.
-visibility("private")
-
-# Can't be underscore-prefixed, because this is
-# needed by other .bzl files in mylib.
-def internal_helper(...):
-    ...
-
-def public_util(...):
-    ...
-```
-
-```starlark
-# //mylib/defs.bzl
-
-load(":internal_defs.bzl", "internal_helper", _public_util="public_util")
-visibility("public")
-
-# internal_helper, as a loaded symbol, is available for use in this file but
-# can't be imported by clients who load this file.
-...
-
-# Re-export public_util from this file by assigning it to a global variable.
-# We needed to import it under a different name ("_public_util") in order for
-# this assignment to be legal.
-public_util = _public_util
-```
-
-#### bzl-visibility Buildifier lint
-
-There is a [Buildifier lint](https://github.com/bazelbuild/buildtools/blob/master/WARNINGS.md#bzl-visibility)
-that provides a warning if users load a file from a directory named `internal`
-or `private`, when the user's file is not itself underneath the parent of that
-directory. This lint predates the load visibility feature and is unnecessary in
-workspaces where `.bzl` files declare visibilities.
diff --git a/8.3.1/configure/attributes.mdx b/8.3.1/configure/attributes.mdx
deleted file mode 100644
index 7bc3f41..0000000
--- a/8.3.1/configure/attributes.mdx
+++ /dev/null
@@ -1,1097 +0,0 @@
----
-title: 'Configurable Build Attributes'
----
-
-
-**_Configurable attributes_**, commonly known as
-[`select()`](/reference/be/functions#select), is a Bazel feature that lets users
-toggle the values of build rule attributes at the command line.
-
-This can be used, for example, for a multiplatform library that automatically
-chooses the appropriate implementation for the architecture, or for a
-feature-configurable binary that can be customized at build time.
-
-## Example
-
-```python
-# myapp/BUILD
-
-cc_binary(
-    name = "mybinary",
-    srcs = ["main.cc"],
-    deps = select({
-        ":arm_build": [":arm_lib"],
-        ":x86_debug_build": [":x86_dev_lib"],
-        "//conditions:default": [":generic_lib"],
-    }),
-)
-
-config_setting(
-    name = "arm_build",
-    values = {"cpu": "arm"},
-)
-
-config_setting(
-    name = "x86_debug_build",
-    values = {
-        "cpu": "x86",
-        "compilation_mode": "dbg",
-    },
-)
-```
-
-This declares a `cc_binary` that "chooses" its deps based on the flags at the
-command line. Specifically, `deps` becomes:
-
-| Command                                         | `deps =`           |
-| ----------------------------------------------- | ------------------ |
-| `bazel build //myapp:mybinary --cpu=arm`        | `[":arm_lib"]`     |
-| `bazel build //myapp:mybinary -c dbg --cpu=x86` | `[":x86_dev_lib"]` |
-| `bazel build //myapp:mybinary --cpu=ppc`        | `[":generic_lib"]` |
-| `bazel build //myapp:mybinary -c dbg --cpu=ppc` | `[":generic_lib"]` |
-
-`select()` serves as a placeholder for a value that will be chosen based on
-*configuration conditions*, which are labels referencing [`config_setting`](/reference/be/general#config_setting)
-targets. By using `select()` in a configurable attribute, the attribute
-effectively adopts different values when different conditions hold.
-
-Matches must be unambiguous: if multiple conditions match then either
-* They all resolve to the same value. For example, when running on Linux x86,
-  `{"@platforms//os:linux": "Hello", "@platforms//cpu:x86_64": "Hello"}` is
-  unambiguous because both branches resolve to "Hello".
-* One's `values` is a strict superset of all others'. For example, `values = {"cpu": "x86", "compilation_mode": "dbg"}`
-  is an unambiguous specialization of `values = {"cpu": "x86"}`.
-
-The built-in condition [`//conditions:default`](#default-condition) automatically matches when
-nothing else does.
-
-While this example uses `deps`, `select()` works just as well on `srcs`,
-`resources`, `cmd`, and most other attributes. Only a small number of attributes
-are *non-configurable*, and these are clearly annotated. For example,
-`config_setting`'s own
-[`values`](/reference/be/general#config_setting.values) attribute is non-configurable.
-
-## `select()` and dependencies
-
-Certain attributes change the build parameters for all transitive dependencies
-under a target. For example, `genrule`'s `tools` changes `--cpu` to the CPU of
-the machine running Bazel (which, thanks to cross-compilation, may be different
-than the CPU the target is built for). This is known as a
-[configuration transition](/reference/glossary#transition).
-
-Given
-
-```python
-#myapp/BUILD
-
-config_setting(
-    name = "arm_cpu",
-    values = {"cpu": "arm"},
-)
-
-config_setting(
-    name = "x86_cpu",
-    values = {"cpu": "x86"},
-)
-
-genrule(
-    name = "my_genrule",
-    srcs = select({
-        ":arm_cpu": ["g_arm.src"],
-        ":x86_cpu": ["g_x86.src"],
-    }),
-    tools = select({
-        ":arm_cpu": [":tool1"],
-        ":x86_cpu": [":tool2"],
-    }),
-)
-
-cc_binary(
-    name = "tool1",
-    srcs = select({
-        ":arm_cpu": ["armtool.cc"],
-        ":x86_cpu": ["x86tool.cc"],
-    }),
-)
-```
-
-running
-
-```sh
-$ bazel build //myapp:my_genrule --cpu=arm
-```
-
-on an `x86` developer machine binds the build to `g_arm.src`, `tool1`, and
-`x86tool.cc`. Both of the `select`s attached to `my_genrule` use `my_genrule`'s
-build parameters, which include `--cpu=arm`. The `tools` attribute changes
-`--cpu` to `x86` for `tool1` and its transitive dependencies. The `select` on
-`tool1` uses `tool1`'s build parameters, which include `--cpu=x86`.
-
-## Configuration conditions
-
-Each key in a configurable attribute is a label reference to a
-[`config_setting`](/reference/be/general#config_setting) or
-[`constraint_value`](/reference/be/platforms-and-toolchains#constraint_value).
-
-`config_setting` is just a collection of
-expected command line flag settings. By encapsulating these in a target, it's
-easy to maintain "standard" conditions users can reference from multiple places.
-
-`constraint_value` provides support for [multi-platform behavior](#platforms).
-
-### Built-in flags
-
-Flags like `--cpu` are built into Bazel: the build tool natively understands
-them for all builds in all projects.
These are specified with -[`config_setting`](/reference/be/general#config_setting)'s -[`values`](/reference/be/general#config_setting.values) attribute: - -```python -config_setting( - name = "meaningful_condition_name", - values = { - "flag1": "value1", - "flag2": "value2", - ... - }, -) -``` - -`flagN` is a flag name (without `--`, so `"cpu"` instead of `"--cpu"`). `valueN` -is the expected value for that flag. `:meaningful_condition_name` matches if -*every* entry in `values` matches. Order is irrelevant. - -`valueN` is parsed as if it was set on the command line. This means: - -* `values = { "compilation_mode": "opt" }` matches `bazel build -c opt` -* `values = { "force_pic": "true" }` matches `bazel build --force_pic=1` -* `values = { "force_pic": "0" }` matches `bazel build --noforce_pic` - -`config_setting` only supports flags that affect target behavior. For example, -[`--show_progress`](/docs/user-manual#show-progress) isn't allowed because -it only affects how Bazel reports progress to the user. Targets can't use that -flag to construct their results. The exact set of supported flags isn't -documented. In practice, most flags that "make sense" work. - -### Custom flags - -You can model your own project-specific flags with -[Starlark build settings][BuildSettings]. Unlike built-in flags, these are -defined as build targets, so Bazel references them with target labels. - -These are triggered with [`config_setting`](/reference/be/general#config_setting)'s -[`flag_values`](/reference/be/general#config_setting.flag_values) -attribute: - -```python -config_setting( - name = "meaningful_condition_name", - flag_values = { - "//myflags:flag1": "value1", - "//myflags:flag2": "value2", - ... - }, -) -``` - -Behavior is the same as for [built-in flags](#built-in-flags). See [here](https://github.com/bazelbuild/examples/tree/HEAD/configurations/select_on_build_setting) -for a working example. - -[`--define`](/reference/command-line-reference#flag--define) -is an alternative legacy syntax for custom flags (for example -`--define foo=bar`). This can be expressed either in the -[values](/reference/be/general#config_setting.values) attribute -(`values = {"define": "foo=bar"}`) or the -[define_values](/reference/be/general#config_setting.define_values) attribute -(`define_values = {"foo": "bar"}`). `--define` is only supported for backwards -compatibility. Prefer Starlark build settings whenever possible. - -`values`, `flag_values`, and `define_values` evaluate independently. The -`config_setting` matches if all values across all of them match. - -## The default condition - -The built-in condition `//conditions:default` matches when no other condition -matches. - -Because of the "exactly one match" rule, a configurable attribute with no match -and no default condition emits a `"no matching conditions"` error. This can -protect against silent failures from unexpected settings: - -```python -# myapp/BUILD - -config_setting( - name = "x86_cpu", - values = {"cpu": "x86"}, -) - -cc_library( - name = "x86_only_lib", - srcs = select({ - ":x86_cpu": ["lib.cc"], - }), -) -``` - -```sh -$ bazel build //myapp:x86_only_lib --cpu=arm -ERROR: Configurable attribute "srcs" doesn't match this configuration (would -a default condition help?). -Conditions checked: - //myapp:x86_cpu -``` - -For even clearer errors, you can set custom messages with `select()`'s -[`no_match_error`](#custom-error-messages) attribute. 
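-
-Conversely, when the unmatched case is legitimate and the target should still
-build, adding a default branch resolves the error. A minimal sketch against the
-example above (the empty fallback list is just one option):
-
-```python
-cc_library(
-    name = "x86_only_lib",
-    srcs = select({
-        ":x86_cpu": ["lib.cc"],
-        # Hypothetical fallback: build with no x86-specific sources.
-        "//conditions:default": [],
-    }),
-)
-```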
-
-## Platforms
-
-While the ability to specify multiple flags on the command line provides
-flexibility, it can also be burdensome to individually set each one every time
-you want to build a target. [Platforms](/extending/platforms) let you
-consolidate these into simple bundles.
-
-```python
-# myapp/BUILD
-
-sh_binary(
-    name = "my_rocks",
-    srcs = select({
-        ":basalt": ["pyroxene.sh"],
-        ":marble": ["calcite.sh"],
-        "//conditions:default": ["feldspar.sh"],
-    }),
-)
-
-config_setting(
-    name = "basalt",
-    constraint_values = [
-        ":black",
-        ":igneous",
-    ],
-)
-
-config_setting(
-    name = "marble",
-    constraint_values = [
-        ":white",
-        ":metamorphic",
-    ],
-)
-
-# constraint_setting acts as an enum type, and constraint_value as an enum value.
-constraint_setting(name = "color")
-constraint_value(name = "black", constraint_setting = "color")
-constraint_value(name = "white", constraint_setting = "color")
-constraint_setting(name = "texture")
-constraint_value(name = "smooth", constraint_setting = "texture")
-constraint_setting(name = "type")
-constraint_value(name = "igneous", constraint_setting = "type")
-constraint_value(name = "metamorphic", constraint_setting = "type")
-
-platform(
-    name = "basalt_platform",
-    constraint_values = [
-        ":black",
-        ":igneous",
-    ],
-)
-
-platform(
-    name = "marble_platform",
-    constraint_values = [
-        ":white",
-        ":smooth",
-        ":metamorphic",
-    ],
-)
-```
-
-The platform can be specified on the command line. It activates the
-`config_setting`s that contain a subset of the platform's `constraint_values`,
-allowing those `config_setting`s to match in `select()` expressions.
-
-For example, in order to set the `srcs` attribute of `my_rocks` to `calcite.sh`,
-you can simply run
-
-```sh
-bazel build //myapp:my_rocks --platforms=//myapp:marble_platform
-```
-
-Without platforms, this might look something like
-
-```sh
-bazel build //myapp:my_rocks --define color=white --define texture=smooth --define type=metamorphic
-```
-
-`select()` can also directly read `constraint_value`s:
-
-```python
-constraint_setting(name = "type")
-constraint_value(name = "igneous", constraint_setting = "type")
-constraint_value(name = "metamorphic", constraint_setting = "type")
-sh_binary(
-    name = "my_rocks",
-    srcs = select({
-        ":igneous": ["igneous.sh"],
-        ":metamorphic": ["metamorphic.sh"],
-    }),
-)
-```
-
-This saves the need for boilerplate `config_setting`s when you only need to
-check against single values.
-
-Platforms are still under development. See the
-[documentation](/concepts/platforms) for details.
-
-## Combining `select()`s
-
-`select` can appear multiple times in the same attribute:
-
-```python
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"] +
-           select({
-               ":armeabi_mode": ["armeabi_src.sh"],
-               ":x86_mode": ["x86_src.sh"],
-           }) +
-           select({
-               ":opt_mode": ["opt_extras.sh"],
-               ":dbg_mode": ["dbg_extras.sh"],
-           }),
-)
-```
-
-Note: Some restrictions apply on what can be combined in the values of
-`select`s:
- - Duplicate labels can appear in different paths of the same `select`.
- - Duplicate labels can *not* appear within the same path of a `select`.
- - Duplicate labels can *not* appear across multiple combined `select`s (no matter what path)
-
-`select` cannot appear inside another `select`. If you need to nest `select`s
-and your attribute takes other targets as values, use an intermediate target:
-
-```python
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"],
-    deps = select({
-        ":armeabi_mode": [":armeabi_lib"],
-        ...
- }), -) - -sh_library( - name = "armeabi_lib", - srcs = select({ - ":opt_mode": ["armeabi_with_opt.sh"], - ... - }), -) -``` - -If you need a `select` to match when multiple conditions match, consider [AND -chaining](#and-chaining). - -## OR chaining - -Consider the following: - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1": [":standard_lib"], - ":config2": [":standard_lib"], - ":config3": [":standard_lib"], - ":config4": [":special_lib"], - }), -) -``` - -Most conditions evaluate to the same dep. But this syntax is hard to read and -maintain. It would be nice to not have to repeat `[":standard_lib"]` multiple -times. - -One option is to predefine the value as a BUILD variable: - -```python -STANDARD_DEP = [":standard_lib"] - -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1": STANDARD_DEP, - ":config2": STANDARD_DEP, - ":config3": STANDARD_DEP, - ":config4": [":special_lib"], - }), -) -``` - -This makes it easier to manage the dependency. But it still causes unnecessary -duplication. - -For more direct support, use one of the following: - -### `selects.with_or` - -The -[with_or](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectswith_or) -macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md) -module supports `OR`ing conditions directly inside a `select`: - -```python -load("@bazel_skylib//lib:selects.bzl", "selects") -``` - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = selects.with_or({ - (":config1", ":config2", ":config3"): [":standard_lib"], - ":config4": [":special_lib"], - }), -) -``` - -### `selects.config_setting_group` - - -The -[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group) -macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md) -module supports `OR`ing multiple `config_setting`s: - -```python -load("@bazel_skylib//lib:selects.bzl", "selects") -``` - - -```python -config_setting( - name = "config1", - values = {"cpu": "arm"}, -) -config_setting( - name = "config2", - values = {"compilation_mode": "dbg"}, -) -selects.config_setting_group( - name = "config1_or_2", - match_any = [":config1", ":config2"], -) -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1_or_2": [":standard_lib"], - "//conditions:default": [":other_lib"], - }), -) -``` - -Unlike `selects.with_or`, different targets can share `:config1_or_2` across -different attributes. - -It's an error for multiple conditions to match unless one is an unambiguous -"specialization" of the others or they all resolve to the same value. See [here](#configurable-build-example) for details. 
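-
-As a hedged illustration of such a "specialization" (target and file names here
-are hypothetical):
-
-```python
-config_setting(
-    name = "x86_any",
-    values = {"cpu": "x86"},
-)
-
-config_setting(
-    name = "x86_dbg",
-    values = {
-        "cpu": "x86",
-        "compilation_mode": "dbg",
-    },
-)
-
-cc_library(
-    name = "lib",
-    srcs = select({
-        # With `bazel build -c dbg --cpu=x86` both conditions match, but this
-        # is legal: ":x86_dbg"'s values are a strict superset of ":x86_any"'s,
-        # so the more specialized branch is chosen.
-        ":x86_any": ["lib_x86.cc"],
-        ":x86_dbg": ["lib_x86_dbg.cc"],
-    }),
-)
-```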
- -## AND chaining - -If you need a `select` branch to match when multiple conditions match, use the -[Skylib](https://github.com/bazelbuild/bazel-skylib) macro -[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group): - -```python -config_setting( - name = "config1", - values = {"cpu": "arm"}, -) -config_setting( - name = "config2", - values = {"compilation_mode": "dbg"}, -) -selects.config_setting_group( - name = "config1_and_2", - match_all = [":config1", ":config2"], -) -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1_and_2": [":standard_lib"], - "//conditions:default": [":other_lib"], - }), -) -``` - -Unlike OR chaining, existing `config_setting`s can't be directly `AND`ed -inside a `select`. You have to explicitly wrap them in a `config_setting_group`. - -## Custom error messages - -By default, when no condition matches, the target the `select()` is attached to -fails with the error: - -```sh -ERROR: Configurable attribute "deps" doesn't match this configuration (would -a default condition help?). -Conditions checked: - //tools/cc_target_os:darwin - //tools/cc_target_os:android -``` - -This can be customized with the [`no_match_error`](/reference/be/functions#select) -attribute: - -```python -cc_library( - name = "my_lib", - deps = select( - { - "//tools/cc_target_os:android": [":android_deps"], - "//tools/cc_target_os:windows": [":windows_deps"], - }, - no_match_error = "Please build with an Android or Windows toolchain", - ), -) -``` - -```sh -$ bazel build //myapp:my_lib -ERROR: Configurable attribute "deps" doesn't match this configuration: Please -build with an Android or Windows toolchain -``` - -## Rules compatibility - -Rule implementations receive the *resolved values* of configurable -attributes. For example, given: - -```python -# myapp/BUILD - -some_rule( - name = "my_target", - some_attr = select({ - ":foo_mode": [":foo"], - ":bar_mode": [":bar"], - }), -) -``` - -```sh -$ bazel build //myapp/my_target --define mode=foo -``` - -Rule implementation code sees `ctx.attr.some_attr` as `[":foo"]`. - -Macros can accept `select()` clauses and pass them through to native -rules. But *they cannot directly manipulate them*. For example, there's no way -for a macro to convert - -```python -select({"foo": "val"}, ...) -``` - -to - -```python -select({"foo": "val_with_suffix"}, ...) -``` - -This is for two reasons. - -First, macros that need to know which path a `select` will choose *cannot work* -because macros are evaluated in Bazel's [loading phase](/run/build#loading), -which occurs before flag values are known. -This is a core Bazel design restriction that's unlikely to change any time soon. - -Second, macros that just need to iterate over *all* `select` paths, while -technically feasible, lack a coherent UI. Further design is necessary to change -this. - -## Bazel query and cquery - -Bazel [`query`](/query/guide) operates over Bazel's -[loading phase](/reference/glossary#loading-phase). -This means it doesn't know what command line flags a target uses since those -flags aren't evaluated until later in the build (in the -[analysis phase](/reference/glossary#analysis-phase)). -So it can't determine which `select()` branches are chosen. - -Bazel [`cquery`](/query/cquery) operates after Bazel's analysis phase, so it has -all this information and can accurately resolve `select()`s. 
-
-Consider:
-
-```python
-load("@bazel_skylib//rules:common_settings.bzl", "string_flag")
-```
-```python
-# myapp/BUILD
-
-string_flag(
-    name = "dog_type",
-    build_setting_default = "cat",
-)
-
-cc_library(
-    name = "my_lib",
-    deps = select({
-        ":long": [":foo_dep"],
-        ":short": [":bar_dep"],
-    }),
-)
-
-config_setting(
-    name = "long",
-    flag_values = {":dog_type": "dachshund"},
-)
-
-config_setting(
-    name = "short",
-    flag_values = {":dog_type": "pug"},
-)
-```
-
-`query` overapproximates `:my_lib`'s dependencies:
-
-```sh
-$ bazel query 'deps(//myapp:my_lib)'
-//myapp:my_lib
-//myapp:foo_dep
-//myapp:bar_dep
-```
-
-while `cquery` shows its exact dependencies:
-
-```sh
-$ bazel cquery 'deps(//myapp:my_lib)' --//myapp:dog_type=pug
-//myapp:my_lib
-//myapp:bar_dep
-```
-
-## FAQ
-
-### Why doesn't select() work in macros?
-
-select() *does* work in rules! See [Rules compatibility](#rules-compatibility) for
-details.
-
-What this question usually means is that select() doesn't work in
-*macros*. These are different from *rules*. See the
-documentation on [rules](/extending/rules) and [macros](/extending/macros)
-to understand the difference.
-Here's an end-to-end example:
-
-Define a rule and macro:
-
-```python
-# myapp/defs.bzl
-
-# Rule implementation: when an attribute is read, all select()s have already
-# been resolved. So it looks like a plain old attribute just like any other.
-def _impl(ctx):
-    name = ctx.attr.name
-    allcaps = ctx.attr.my_config_string.upper()  # This works fine on all values.
-    print("My name is " + name + " with custom message: " + allcaps)
-
-# Rule declaration:
-my_custom_bazel_rule = rule(
-    implementation = _impl,
-    attrs = {"my_config_string": attr.string()},
-)
-
-# Macro declaration:
-def my_custom_bazel_macro(name, my_config_string):
-    allcaps = my_config_string.upper()  # This line won't work with select(s).
-    print("My name is " + name + " with custom message: " + allcaps)
-```
-
-Instantiate the rule and macro:
-
-```python
-# myapp/BUILD
-
-load("//myapp:defs.bzl", "my_custom_bazel_rule")
-load("//myapp:defs.bzl", "my_custom_bazel_macro")
-
-my_custom_bazel_rule(
-    name = "happy_rule",
-    my_config_string = select({
-        "//third_party/bazel_platforms/cpu:x86_32": "first string",
-        "//third_party/bazel_platforms/cpu:ppc": "second string",
-    }),
-)
-
-my_custom_bazel_macro(
-    name = "happy_macro",
-    my_config_string = "fixed string",
-)
-
-my_custom_bazel_macro(
-    name = "sad_macro",
-    my_config_string = select({
-        "//third_party/bazel_platforms/cpu:x86_32": "first string",
-        "//third_party/bazel_platforms/cpu:ppc": "other string",
-    }),
-)
-```
-
-Building fails because `sad_macro` can't process the `select()`:
-
-```sh
-$ bazel build //myapp:all
-ERROR: /myworkspace/myapp/BUILD:17:1: Traceback
-  (most recent call last):
-File "/myworkspace/myapp/BUILD", line 17
-my_custom_bazel_macro(name = "sad_macro", my_config_stri..."}))
-File "/myworkspace/myapp/defs.bzl", line 4, in
-  my_custom_bazel_macro
-my_config_string.upper()
-type 'select' has no method upper().
-ERROR: error loading package 'myapp': Package 'myapp' contains errors.
-```
-
-Building succeeds when you comment out `sad_macro`:
-
-```sh
-# Comment out sad_macro so it doesn't mess up the build.
-$ bazel build //myapp:all
-DEBUG: /myworkspace/myapp/defs.bzl:5:3: My name is happy_macro with custom message: FIXED STRING.
-DEBUG: /myworkspace/myapp/defs.bzl:15:3: My name is happy_rule with custom message: FIRST STRING.
-```
-
-This is impossible to change because *by definition* macros are evaluated before
-Bazel reads the build's command line flags. That means there isn't enough
-information to evaluate select()s.
-
-Macros can, however, pass `select()`s as opaque blobs to rules:
-
-```python
-# myapp/defs.bzl
-
-def my_custom_bazel_macro(name, my_config_string):
-    print("Invoking macro " + name)
-    my_custom_bazel_rule(
-        name = name + "_as_target",
-        my_config_string = my_config_string,
-    )
-```
-
-```sh
-$ bazel build //myapp:sad_macro_less_sad
-DEBUG: /myworkspace/myapp/defs.bzl:23:3: Invoking macro sad_macro_less_sad.
-DEBUG: /myworkspace/myapp/defs.bzl:15:3: My name is sad_macro_less_sad with custom message: FIRST STRING.
-```
-
-### Why does select() always return true?
-
-Because *macros* (but not rules) by definition
-[can't evaluate `select()`s](#faq-select-macro), any attempt to do so
-usually produces an error:
-
-```sh
-ERROR: /myworkspace/myapp/BUILD:17:1: Traceback
- (most recent call last):
-File "/myworkspace/myapp/BUILD", line 17
-my_custom_bazel_macro(name = "sad_macro", my_config_stri..."}))
-File "/myworkspace/myapp/defs.bzl", line 4, in
- my_custom_bazel_macro
-my_config_string.upper()
-type 'select' has no method upper().
-```
-
-Booleans are a special case that fail silently, so you should be particularly
-vigilant with them:
-
-```sh
-$ cat myapp/defs.bzl
-def my_boolean_macro(boolval):
-  print("TRUE" if boolval else "FALSE")
-
-$ cat myapp/BUILD
-load("//myapp:defs.bzl", "my_boolean_macro")
-my_boolean_macro(
-    boolval = select({
-        "//third_party/bazel_platforms/cpu:x86_32": True,
-        "//third_party/bazel_platforms/cpu:ppc": False,
-    }),
-)
-
-$ bazel build //myapp:all --cpu=x86
-DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE.
-$ bazel build //myapp:all --cpu=ppc
-DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE.
-```
-
-This happens because macros don't understand the contents of `select()`.
-So what they're really evaluating is the `select()` object itself. According to
-[Pythonic](https://docs.python.org/release/2.5.2/lib/truth.html) design
-standards, all objects aside from a very small number of exceptions
-automatically return true.
-
-### Can I read select() like a dict?
-
-Macros [can't](#faq-select-macro) evaluate select(s) because macros evaluate before
-Bazel knows what the build's command line parameters are. Can they at least read
-the `select()`'s dictionary to, for example, add a suffix to each value?
-
-Conceptually this is possible, but it isn't yet a Bazel feature.
-What you *can* do today is prepare a straight dictionary, then feed it into a
-`select()`:
-
-```sh
-$ cat myapp/defs.bzl
-def selecty_genrule(name, select_cmd):
-  for key in select_cmd.keys():
-    select_cmd[key] += " WITH SUFFIX"
-  native.genrule(
-      name = name,
-      outs = [name + ".out"],
-      srcs = [],
-      cmd = "echo " + select(select_cmd + {"//conditions:default": "default"})
-          + " > $@"
-  )
-
-$ cat myapp/BUILD
-selecty_genrule(
-    name = "selecty",
-    select_cmd = {
-        "//third_party/bazel_platforms/cpu:x86_32": "x86 mode",
-    },
-)
-
-$ bazel build //myapp:selecty --cpu=x86 && cat bazel-genfiles/myapp/selecty.out
-x86 mode WITH SUFFIX
-```
-
-If you'd like to support both `select()` and native types, you can do this:
-
-```sh
-$ cat myapp/defs.bzl
-def selecty_genrule(name, select_cmd):
-    cmd_suffix = ""
-    if type(select_cmd) == "string":
-        cmd_suffix = select_cmd + " WITH SUFFIX"
-    elif type(select_cmd) == "dict":
-        for key in select_cmd.keys():
-            select_cmd[key] += " WITH SUFFIX"
-        cmd_suffix = select(select_cmd + {"//conditions:default": "default"})
-
-    native.genrule(
-        name = name,
-        outs = [name + ".out"],
-        srcs = [],
-        cmd = "echo " + cmd_suffix + " > $@",
-    )
-```
-
-### Why doesn't select() work with bind()?
-
-First of all, do not use `bind()`. It is deprecated in favor of `alias()`.
-
-The technical answer is that [`bind()`](/reference/be/workspace#bind) is a repo
-rule, not a BUILD rule.
-
-Repo rules do not have a specific configuration, and aren't evaluated in
-the same way as BUILD rules. Therefore, a `select()` in a `bind()` can't
-actually evaluate to any specific branch.
-
-Instead, you should use [`alias()`](/reference/be/general#alias), with a `select()` in
-the `actual` attribute, to perform this type of run-time determination. This
-works correctly, since `alias()` is a BUILD rule, and is evaluated with a
-specific configuration.
-
-```sh
-$ cat WORKSPACE
-workspace(name = "myapp")
-bind(name = "openssl", actual = "//:ssl")
-http_archive(name = "alternative", ...)
-http_archive(name = "boringssl", ...)
-
-$ cat BUILD
-config_setting(
-    name = "alt_ssl",
-    define_values = {
-        "ssl_library": "alternative",
-    },
-)
-
-alias(
-    name = "ssl",
-    actual = select({
-        "//:alt_ssl": "@alternative//:ssl",
-        "//conditions:default": "@boringssl//:ssl",
-    }),
-)
-```
-
-With this setup, you can pass `--define ssl_library=alternative`, and any target
-that depends on either `//:ssl` or `//external:ssl` will see the alternative
-located at `@alternative//:ssl`.
-
-But really, stop using `bind()`.
-
-### Why doesn't my select() choose what I expect?
-
-If `//myapp:foo` has a `select()` that doesn't choose the condition you expect,
-use [cquery](/query/cquery) and `bazel config` to debug:
-
-If `//myapp:foo` is the top-level target you're building, run:
-
-```sh
-$ bazel cquery //myapp:foo
-//myapp:foo (12e23b9a2b534a)
-```
-
-If you're building some other target `//bar` that depends on
-`//myapp:foo` somewhere in its subgraph, run:
-
-```sh
-$ bazel cquery 'somepath(//bar, //myapp:foo)'
-//bar:bar (3ag3193fee94a2)
-//bar:intermediate_dep (12e23b9a2b534a)
-//myapp:foo (12e23b9a2b534a)
-```
-
-The `(12e23b9a2b534a)` next to `//myapp:foo` is a *hash* of the
-configuration that resolves `//myapp:foo`'s `select()`. You can inspect its
-values with `bazel config`:
-
-```sh
-$ bazel config 12e23b9a2b534a
-BuildConfigurationValue 12e23b9a2b534a
-Fragment com.google.devtools.build.lib.analysis.config.CoreOptions {
-  cpu: darwin
-  compilation_mode: fastbuild
-  ...
-}
-Fragment com.google.devtools.build.lib.rules.cpp.CppOptions {
-  linkopt: [-Dfoo=bar]
-  ...
-}
-...
-```
-
-Then compare this output against the settings expected by each `config_setting`.
-
-`//myapp:foo` may exist in different configurations in the same build. See the
-[cquery docs](/query/cquery) for guidance on using `somepath` to get the right
-one.
-
-Caution: To prevent restarting the Bazel server, invoke `bazel config` with the
-same command line flags as the `bazel cquery`. The `config` command relies on
-the configuration nodes from the still-running server of the previous command.
-
-### Why doesn't `select()` work with platforms?
-
-Bazel doesn't support configurable attributes checking whether a given platform
-is the target platform because the semantics are unclear.
-
-For example:
-
-```py
-platform(
-    name = "x86_linux_platform",
-    constraint_values = [
-        "@platforms//cpu:x86",
-        "@platforms//os:linux",
-    ],
-)
-
-cc_library(
-    name = "lib",
-    srcs = [...],
-    linkopts = select({
-        ":x86_linux_platform": ["--enable_x86_optimizations"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-In this `BUILD` file, which `select()` should be used if the target platform has both the
-`@platforms//cpu:x86` and `@platforms//os:linux` constraints, but is **not** the
-`:x86_linux_platform` defined here? The author of the `BUILD` file and the user
-who defined the separate platform may have different ideas.
-
-#### What should I do instead?
-
-Instead, define a `config_setting` that matches **any** platform with
-these constraints:
-
-```py
-config_setting(
-    name = "is_x86_linux",
-    constraint_values = [
-        "@platforms//cpu:x86",
-        "@platforms//os:linux",
-    ],
-)
-
-cc_library(
-    name = "lib",
-    srcs = [...],
-    linkopts = select({
-        ":is_x86_linux": ["--enable_x86_optimizations"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-This process defines specific semantics, making it clearer to users what
-platforms meet the desired conditions.
-
-#### What if I really, really want to `select` on the platform?
-
-If your build requirements specifically require checking the platform, you
-can check the value of the `--platforms` flag in a `config_setting`:
-
-```py
-config_setting(
-    name = "is_specific_x86_linux_platform",
-    values = {
-        "platforms": "//package:x86_linux_platform",
-    },
-)
-
-cc_library(
-    name = "lib",
-    srcs = [...],
-    linkopts = select({
-        ":is_specific_x86_linux_platform": ["--enable_x86_optimizations"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-The Bazel team doesn't endorse doing this; it overly constrains your build and
-confuses users when the expected condition does not match.
-
-[BuildSettings]: /extending/config#user-defined-build-settings
diff --git a/8.3.1/configure/best-practices.mdx b/8.3.1/configure/best-practices.mdx
deleted file mode 100644
index abef72e..0000000
--- a/8.3.1/configure/best-practices.mdx
+++ /dev/null
@@ -1,88 +0,0 @@
----
-title: 'Best Practices'
----
-
-
-This page assumes you are familiar with Bazel and provides guidelines and
-advice on structuring your projects to take full advantage of Bazel's features.
-
-The overall goals are:
-
-- To use fine-grained dependencies to allow parallelism and incrementality.
-- To keep dependencies well-encapsulated.
-- To make code well-structured and testable.
-- To create a build configuration that is easy to understand and maintain.
-
-These guidelines are not requirements: few projects will be able to adhere to
-all of them.
-As the man page for lint says, "A special reward will be presented
-to the first person to produce a real program that produces no errors with
-strict checking." However, incorporating as many of these principles as possible
-should make a project more readable, less error-prone, and faster to build.
-
-This page uses the requirement levels described in
-[this RFC](https://www.ietf.org/rfc/rfc2119.txt).
-
-## Running builds and tests
-
-A project should always be able to run `bazel build //...` and
-`bazel test //...` successfully on its stable branch. Targets that are necessary
-but do not build under certain circumstances (for example, targets that require
-specific build flags, don't build on a certain platform, or require license
-agreements) should be tagged as specifically as possible (for example,
-"`requires-osx`"). This tagging allows targets to be filtered at a more
-fine-grained level than the "manual" tag and allows someone inspecting the
-`BUILD` file to understand what a target's restrictions are.
-
-## Third-party dependencies
-
-You may declare third-party dependencies in one of two ways:
-
-* Declare them as remote repositories in the `MODULE.bazel` file.
-* Put them in a directory called `third_party/` under your workspace directory.
-
-## Depending on binaries
-
-Everything should be built from source whenever possible. Generally this means
-that, instead of depending on a library `some-library.so`, you'd create a
-`BUILD` file and build `some-library.so` from its sources, then depend on that
-target.
-
-Always building from source ensures that a build is not using a library that
-was built with incompatible flags or a different architecture. There are also
-some features like coverage, static analysis, or dynamic analysis that only
-work on the source.
-
-## Versioning
-
-Prefer building all code from head whenever possible. When versions must be
-used, avoid including the version in the target name (for example, `//guava`,
-not `//guava-20.0`). This naming makes the library easier to update (only one
-target needs to be updated). It's also more resilient to diamond dependency
-issues: if one library depends on `guava-19.0` and one depends on `guava-20.0`,
-you could end up with a library that tries to depend on two different versions.
-If you created a misleading alias to point both targets to one `guava` library,
-then the `BUILD` files are misleading.
-
-## Using the `.bazelrc` file
-
-For project-specific options, use the configuration file `workspace/.bazelrc`
-(see [bazelrc format](/run/bazelrc)).
-
-If you want to support per-user options for your project that you **do not**
-want to check into source control, include the line:
-
-```
-try-import %workspace%/user.bazelrc
-```
-(or any other file name) in your `workspace/.bazelrc`
-and add `user.bazelrc` to your `.gitignore`.
-
-## Packages
-
-Every directory that contains buildable files should be a package. If a `BUILD`
-file refers to files in subdirectories (such as `srcs = ["a/b/C.java"]`), it's
-a sign that a `BUILD` file should be added to that subdirectory. The longer
-this structure exists, the more likely circular dependencies will be
-inadvertently created, a target's scope will creep, and an increasing number
-of reverse dependencies will have to be updated.
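-
-For example, consider a hypothetical layout (all names here are made up) where
-a `BUILD` file reaches into a subdirectory, and the preferred alternative:
-
-```py
-# Discouraged: myproject/BUILD reaches into the a/b subdirectory.
-java_library(
-    name = "mylib",
-    srcs = ["Main.java"] + glob(["a/b/*.java"]),
-)
-
-# Preferred: give a/b its own package by adding myproject/a/b/BUILD...
-java_library(
-    name = "b",
-    srcs = glob(["*.java"]),
-    visibility = ["//myproject:__pkg__"],
-)
-
-# ...and depend on it from myproject/BUILD instead:
-java_library(
-    name = "mylib",
-    srcs = ["Main.java"],
-    deps = ["//myproject/a/b"],
-)
-```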
diff --git a/8.3.1/configure/coverage.mdx b/8.3.1/configure/coverage.mdx
deleted file mode 100644
index 9a50db0..0000000
--- a/8.3.1/configure/coverage.mdx
+++ /dev/null
@@ -1,130 +0,0 @@
----
-title: 'Code coverage with Bazel'
----
-
-
-Bazel features a `coverage` sub-command to produce code coverage
-reports on repositories that can be tested with `bazel coverage`. Due
-to the idiosyncrasies of the various language ecosystems, it is not
-always trivial to make this work for a given project.
-
-This page documents the general process for creating and viewing
-coverage reports, and also features some language-specific notes for
-languages whose configuration is well-known. It is best read by first
-reading [the general section](#creating-a-coverage-report), and then
-reading about the requirements for a specific language. Note also the
-[remote execution section](#remote-execution), which requires some
-additional considerations.
-
-While a lot of customization is possible, this document focuses on
-producing and consuming [`lcov`][lcov] reports, which is currently the
-most well-supported route.
-
-## Creating a coverage report
-
-### Preparation
-
-The basic workflow for creating coverage reports requires the
-following:
-
-- A basic repository with test targets
-- A toolchain with the language-specific code coverage tools installed
-- A correct "instrumentation" configuration
-
-The first two are language-specific and mostly straightforward;
-however, the last can be more difficult for complex projects.
-
-"Instrumentation" in this case refers to the coverage tools that are
-used for a specific target. Bazel allows turning this on for a
-specific subset of files using the
-[`--instrumentation_filter`](/reference/command-line-reference#flag--instrumentation_filter)
-flag, which specifies a filter for targets that are tested with the
-instrumentation enabled. To enable instrumentation for tests, the
-[`--instrument_test_targets`](/reference/command-line-reference#flag--instrument_test_targets)
-flag is required.
-
-By default, Bazel tries to match the target package(s), and prints the
-relevant filter as an `INFO` message.
-
-### Running coverage
-
-To produce a coverage report, use
-[`bazel coverage --combined_report=lcov [target]`](/reference/command-line-reference#coverage).
-This runs the tests for the target, generating coverage reports in the
-lcov format for each file.
-
-Once finished, Bazel runs an action that collects all the produced
-coverage files and merges them into one, which is then created under
-`$(bazel info output_path)/_coverage/_coverage_report.dat`.
-
-Coverage reports are also produced if tests fail; note, however, that
-coverage from the failed tests themselves is not included: only
-passing tests are reported.
-
-### Viewing coverage
-
-The coverage report is only output in the non-human-readable `lcov`
-format. From this, we can use the `genhtml` utility (part of [the lcov
-project][lcov]) to produce a report that can be viewed in a web
-browser:
-
-```console
-genhtml --branch-coverage --output genhtml "$(bazel info output_path)/_coverage/_coverage_report.dat"
-```
-
-Note that `genhtml` reads the source code as well, to annotate missing
-coverage in these files. For this to work, it is expected that
-`genhtml` is executed in the root of the Bazel project.
-
-To view the result, open the `index.html` file produced in the
-`genhtml` directory in any web browser.
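-
-Putting the pieces together, a typical end-to-end invocation might look like
-the following sketch (the package name is a placeholder; the flags are the
-ones discussed above):
-
-```console
-# Instrument the targets under //mypkg, run their tests, merge the results
-# into a single lcov report, then render it as HTML.
-bazel coverage \
-    --combined_report=lcov \
-    --instrumentation_filter="//mypkg[/:]" \
-    //mypkg/...
-genhtml --branch-coverage --output genhtml \
-    "$(bazel info output_path)/_coverage/_coverage_report.dat"
-```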
-
-For further help and information around the `genhtml` tool, or the
-`lcov` coverage format, see [the lcov project][lcov].
-
-## Remote execution
-
-Running with remote test execution currently has a few caveats:
-
-- The report combination action cannot yet run remotely. This is
-  because Bazel does not consider the coverage output files as part of
-  its graph (see [this issue][remote_report_issue]), and can therefore
-  not correctly treat them as inputs to the combination action. To
-  work around this, use `--strategy=CoverageReport=local`.
-  - Note: It may be necessary to specify something like
-    `--strategy=CoverageReport=local,remote` instead, if Bazel is set
-    up to try `local,remote`, due to how Bazel resolves strategies.
-- `--remote_download_minimal` and similar flags cannot be used either,
-  as a consequence of the former.
-- Bazel will currently fail to create coverage information if tests
-  have been cached previously. To work around this,
-  `--nocache_test_results` can be set specifically for coverage runs,
-  although this of course incurs a heavy cost in terms of test times.
-- `--experimental_split_coverage_postprocessing` and
-  `--experimental_fetch_all_coverage_outputs`
-  - Coverage is usually run as part of the test action, so by default
-    we don't get all coverage back as outputs of remote execution.
-    These flags override the default and obtain the coverage data. See
-    [this issue][split_coverage_issue] for more details.
-
-## Language-specific configuration
-
-### Java
-
-Java should work out-of-the-box with the default configuration. The
-[bazel toolchains][bazel_toolchains] contain everything necessary for
-remote execution, as well, including JUnit.
-
-### Python
-
-See the [`rules_python` coverage docs](https://github.com/bazelbuild/rules_python/blob/main/docs/sphinx/coverage.md)
-for additional steps needed to enable coverage support in Python.
-
-[lcov]: https://github.com/linux-test-project/lcov
-[bazel_toolchains]: https://github.com/bazelbuild/bazel-toolchains
-[remote_report_issue]: https://github.com/bazelbuild/bazel/issues/4685
-[split_coverage_issue]: https://github.com/bazelbuild/bazel/issues/4685
diff --git a/8.3.1/contribute/breaking-changes.mdx b/8.3.1/contribute/breaking-changes.mdx
deleted file mode 100644
index 5dda1b9..0000000
--- a/8.3.1/contribute/breaking-changes.mdx
+++ /dev/null
@@ -1,147 +0,0 @@
----
-title: 'Guide for rolling out breaking changes'
----
-
-
-It is inevitable that we will make breaking changes to Bazel. We will have to
-change our designs and fix the things that do not quite work. However, we need
-to make sure that the community and the Bazel ecosystem can follow along. To
-that end, the Bazel project has adopted a
-[backward compatibility policy](/release/backward-compatibility).
-This document describes the process Bazel contributors should follow when
-making a breaking change in Bazel, in order to adhere to this policy.
-
-1. Follow the [design document policy](/contribute/design-documents).
-
-1. [File a GitHub issue.](#github-issue)
-
-1. [Implement the change.](#implementation)
-
-1. [Update labels.](#labels)
-
-1. [Update repositories.](#update-repos)
-
-1. [Flip the incompatible flag.](#flip-flag)
-
-## GitHub issue
-
-[File a GitHub issue](https://github.com/bazelbuild/bazel/issues)
-in the Bazel repository.
-[See example.](https://github.com/bazelbuild/bazel/issues/6611)
-
-We recommend that:
-
-* The title starts with the name of the flag (the flag name will start with
-  `incompatible_`).
-
-* You add the label
-  [`incompatible-change`](https://github.com/bazelbuild/bazel/labels/incompatible-change).
-
-* The description contains a description of the change and a link to relevant
-  design documents.
-
-* The description contains a migration recipe, to explain to users how they
-  should update their code. Ideally, when the change is mechanical, include a
-  link to a migration tool.
-
-* The description includes an example of the error message users will get if
-  they don't migrate. This will make the GitHub issue more discoverable from
-  search engines. Make sure that the error message is helpful and actionable.
-  When possible, the error message should include the name of the incompatible
-  flag.
-
-For the migration tool, consider contributing to
-[Buildifier](https://github.com/bazelbuild/buildtools/blob/master/buildifier/README.md).
-It is able to apply automated fixes to `BUILD`, `WORKSPACE`, and `.bzl` files.
-It may also report warnings.
-
-## Implementation
-
-Create a new flag in Bazel. The default value must be false. The help text
-should contain the URL of the GitHub issue. As the flag name starts with
-`incompatible_`, it needs metadata tags:
-
-```java
-  metadataTags = {
-      OptionMetadataTag.INCOMPATIBLE_CHANGE,
-  },
-```
-
-In the commit description, add a brief summary of the flag.
-Also add [`RELNOTES:`](release-notes.md) in the following form:
-`RELNOTES: --incompatible_name_of_flag has been added. See #xyz for details`
-
-The commit should also update the relevant documentation, so that there is no
-window of commits in which the code is inconsistent with the docs. Since our
-documentation is versioned, changes to the docs will not be inadvertently
-released prematurely.
-
-## Labels
-
-Once the commit is merged and the incompatible change is ready to be adopted,
-add the label
-[`migration-ready`](https://github.com/bazelbuild/bazel/labels/migration-ready)
-to the GitHub issue.
-
-If a problem is found with the flag and users are not expected to migrate yet,
-remove the label `migration-ready`.
-
-If you plan to flip the flag in the next major release, add the label
-`breaking-change-X.0` to the issue.
-
-## Updating repositories
-
-Bazel CI tests a list of important projects at
-[Bazel@HEAD + Downstream](https://buildkite.com/bazel/bazel-at-head-plus-downstream).
-Many of them are dependencies of other Bazel projects, so it's important to
-migrate them to unblock the migration for the broader community. To monitor the
-migration status of those projects, you can use the
-[`bazelisk-plus-incompatible-flags` pipeline](https://buildkite.com/bazel/bazelisk-plus-incompatible-flags).
-Check how this pipeline works
-[here](https://github.com/bazelbuild/continuous-integration/tree/master/buildkite#checking-incompatible-changes-status-for-downstream-projects).
-
-Our dev support team monitors the
-[`migration-ready`](https://github.com/bazelbuild/bazel/labels/migration-ready)
-label. Once you add this label to the GitHub issue, they will handle the
-following:
-
-1. Create a comment in the GitHub issue to track the list of failures and
-   downstream projects that need to be migrated
-   ([see example](https://github.com/bazelbuild/bazel/issues/17032#issuecomment-1353077469))
-
-1. File GitHub issues to notify the owners of every downstream project broken
-   by your incompatible change
-   ([see example](https://github.com/bazelbuild/intellij/issues/4208))
-
-1. Follow up to make sure all issues are addressed before the target release
-   date
-
-Migrating projects in the downstream pipeline is NOT entirely the
-responsibility of the incompatible change author, but you can do the following
-to accelerate the migration and make life easier for both Bazel users and the
-Bazel Green Team.
-
-1. Send PRs to fix downstream projects.
-
-1. Reach out to the Bazel community for help on migration (e.g.
-   [Bazel Rules Authors SIG](https://bazel-contrib.github.io/SIG-rules-authors/)).
-
-## Flipping the flag
-
-Before flipping the default value of the flag to true, please make sure that:
-
-* Core repositories in the ecosystem are migrated.
-
-  On the [`bazelisk-plus-incompatible-flags` pipeline](https://buildkite.com/bazel/bazelisk-plus-incompatible-flags),
-  the flag should appear under `The following flags didn't break any passing Bazel team owned/co-owned projects`.
-
-* All issues in the checklist are marked as fixed/closed.
-
-* User concerns and questions have been resolved.
-
-When the flag is ready to flip in Bazel, but blocked on internal migration at
-Google, please consider setting the flag value to false in the internal
-`blazerc` file to unblock the flag flip. By doing this, we can ensure Bazel
-users depend on the new behavior by default as early as possible.
-
-When changing the flag default to true, please:
-
-* Use `RELNOTES[INC]` in the commit description, with the
-  following format:
-  `RELNOTES[INC]: --incompatible_name_of_flag is flipped to true. See #xyz for
-  details`
-  You can include additional information in the rest of the commit description.
-* Use `Fixes #xyz` in the description, so that the GitHub issue gets closed
-  when the commit is merged.
-* Review and update documentation if needed.
-* File a new issue `#abc` to track the removal of the flag.
-
-## Removing the flag
-
-After the flag is flipped at HEAD, it should be removed from Bazel eventually.
-When you plan to remove the incompatible flag:
-
-* Consider leaving more time for users to migrate if it's a major incompatible
-  change. Ideally, the flag should be available in at least one major release.
-* For the commit that removes the flag, use `Fixes #abc` in the description
-  so that the GitHub issue gets closed when the commit is merged.
diff --git a/8.3.1/contribute/codebase.mdx b/8.3.1/contribute/codebase.mdx
deleted file mode 100644
index 8a13611..0000000
--- a/8.3.1/contribute/codebase.mdx
+++ /dev/null
@@ -1,1670 +0,0 @@
----
-title: 'The Bazel codebase'
----
-
-
-This document is a description of the codebase and how Bazel is structured. It
-is intended for people who want to contribute to Bazel, not for end-users.
-
-## Introduction
-
-The codebase of Bazel is large (~350 KLOC production code and ~260 KLOC test
-code) and no one is familiar with the whole landscape: everyone knows their
-particular valley very well, but few know what lies over the hills in every
-direction.
-
-In order for people midway upon the journey not to find themselves within a
-forest dark with the straightforward pathway being lost, this document tries to
-give an overview of the codebase so that it's easier to get started with
-working on it.
-
-The public version of the source code of Bazel lives on GitHub at
-[github.com/bazelbuild/bazel](http://github.com/bazelbuild/bazel). This is not
-the "source of truth"; it's derived from a Google-internal source tree that
-contains additional functionality that is not useful outside Google. The
-long-term goal is to make GitHub the source of truth.
-
-Contributions are accepted through the regular GitHub pull request mechanism,
-and manually imported by a Googler into the internal source tree, then
-re-exported back out to GitHub.
-
-## Client/server architecture
-
-The bulk of Bazel resides in a server process that stays in RAM between builds.
-This allows Bazel to maintain state between builds.
-
-This is why the Bazel command line has two kinds of options: startup and
-command. In a command line like this:
-
-```
-    bazel --host_jvm_args=-Xmx8G build -c opt //foo:bar
-```
-
-Some options (`--host_jvm_args=`) are before the name of the command to be run
-and some are after (`-c opt`); the former kind is called a "startup option" and
-affects the server process as a whole, whereas the latter kind, the "command
-option", only affects a single command.
-
-Each server instance has a single associated workspace (collection of source
-trees known as "repositories") and each workspace usually has a single active
-server instance. This can be circumvented by specifying a custom output base
-(see the "Directory layout" section for more information).
-
-Bazel is distributed as a single ELF executable that is also a valid .zip file.
-When you type `bazel`, the above ELF executable implemented in C++ (the
-"client") gets control. It sets up an appropriate server process using the
-following steps:
-
-1. Checks whether it has already extracted itself. If not, it does that. This
-   is where the implementation of the server comes from.
-2. Checks whether there is an active server instance that works: it is running,
-   it has the right startup options and uses the right workspace directory. It
-   finds the running server by looking at the directory `$OUTPUT_BASE/server`
-   where there is a lock file with the port the server is listening on.
-3. If needed, kills the old server process.
-4. If needed, starts up a new server process.
-
-After a suitable server process is ready, the command that needs to be run is
-communicated to it over a gRPC interface, then the output of Bazel is piped back
-to the terminal. Only one command can be running at the same time. This is
-implemented using an elaborate locking mechanism with parts in C++ and parts in
-Java. There is some infrastructure for running multiple commands in parallel,
-since the inability to run `bazel version` in parallel with another command
-is somewhat embarrassing. The main blocker is the life cycle of `BlazeModule`s
-and some state in `BlazeRuntime`.
-
-At the end of a command, the Bazel server transmits the exit code the client
-should return. An interesting wrinkle is the implementation of `bazel run`: the
-job of this command is to run something Bazel just built, but it can't do that
-from the server process because it doesn't have a terminal. So instead it tells
-the client what binary it should `exec()` and with what arguments.
-
-When one presses Ctrl-C, the client translates it to a Cancel call on the gRPC
-connection, which tries to terminate the command as soon as possible. After the
-third Ctrl-C, the client sends a SIGKILL to the server instead.
-
-The source code of the client is under `src/main/cpp` and the protocol used to
-communicate with the server is in `src/main/protobuf/command_server.proto`.
-
-The main entry point of the server is `BlazeRuntime.main()` and the gRPC calls
-from the client are handled by `GrpcServerImpl.run()`.
-
-## Directory layout
-
-Bazel creates a somewhat complicated set of directories during a build.
-A full description is available in
-[Output directory layout](/remote/output-directories).
-
-The "main repo" is the source tree Bazel is run in. It usually corresponds to
-something you checked out from source control. The root of this directory is
-known as the "workspace root".
-
-Bazel puts all of its data under the "output user root". This is usually
-`$HOME/.cache/bazel/_bazel_${USER}`, but can be overridden using the
-`--output_user_root` startup option.
-
-The "install base" is where Bazel is extracted to. This is done automatically
-and each Bazel version gets a subdirectory based on its checksum under the
-install base. It's at `$OUTPUT_USER_ROOT/install` by default and can be changed
-using the `--install_base` command line option.
-
-The "output base" is the place where the Bazel instance attached to a specific
-workspace writes to. Each output base has at most one Bazel server instance
-running at any time. It's usually at `$OUTPUT_USER_ROOT/` followed by a hash of
-the workspace path. It can be changed using the `--output_base` startup option,
-which is, among other things, useful for getting around the limitation that only
-one Bazel instance can be running in any workspace at any given time.
-
-The output directory contains, among other things:
-
-* The fetched external repositories at `$OUTPUT_BASE/external`.
-* The exec root, a directory that contains symlinks to all the source
-  code for the current build. It's located at `$OUTPUT_BASE/execroot`. During
-  the build, the working directory is `$EXECROOT/` followed by the name of the
-  main repository. We are planning to change this to `$EXECROOT`, although it's
-  a long term plan because it's a very incompatible change.
-* Files built during the build.
-
-## The process of executing a command
-
-Once the Bazel server gets control and is informed about a command it needs to
-execute, the following sequence of events happens:
-
-1. `BlazeCommandDispatcher` is informed about the new request. It decides
-   whether the command needs a workspace to run in (almost every command except
-   for ones that don't have anything to do with source code, such as version or
-   help) and whether another command is running.
-
-2. The right command is found. Each command must implement the interface
-   `BlazeCommand` and must have the `@Command` annotation (this is a bit of an
-   antipattern; it would be nice if all the metadata a command needs were
-   described by methods on `BlazeCommand`).
-
-3. The command line options are parsed. Each command has different command line
-   options, which are described in the `@Command` annotation.
-
-4. An event bus is created. The event bus is a stream for events that happen
-   during the build. Some of these are exported to outside of Bazel under the
-   aegis of the Build Event Protocol in order to tell the world how the build
-   goes.
-
-5. The command gets control. The most interesting commands are those that run a
-   build: build, test, run, coverage and so on: this functionality is
-   implemented by `BuildTool`.
-
-6. The set of target patterns on the command line is parsed and wildcards like
-   `//pkg:all` and `//pkg/...` are resolved. This is implemented in
-   `AnalysisPhaseRunner.evaluateTargetPatterns()` and reified in Skyframe as
-   `TargetPatternPhaseValue`.
-
-7. The loading/analysis phase is run to produce the action graph (a directed
-   acyclic graph of commands that need to be executed for the build).
-
-8. The execution phase is run. This means that every action required to build
-   the requested top-level targets is run.
-
-## Command line options
-
-The command line options for a Bazel invocation are described in an
-`OptionsParsingResult` object, which in turn contains a map from "option
-classes" to the values of the options. An "option class" is a subclass of
-`OptionsBase` and groups command line options together that are related to each
-other. For example:
-
-1. Options related to a programming language (`CppOptions` or `JavaOptions`).
-   These should be a subclass of `FragmentOptions` and are eventually wrapped
-   into a `BuildOptions` object.
-2. Options related to the way Bazel executes actions (`ExecutionOptions`)
-
-These options are designed to be consumed in the analysis phase (either
-through `RuleContext.getFragment()` in Java or `ctx.fragments` in Starlark).
-Some of them (for example, whether to do C++ include scanning or not) are read
-in the execution phase, but that always requires explicit plumbing since
-`BuildConfiguration` is not available then. For more information, see the
-section "Configurations".
-
-**WARNING:** We like to pretend that `OptionsBase` instances are immutable and
-use them that way (such as a part of `SkyKeys`). This is not the case and
-modifying them is a really good way to break Bazel in subtle ways that are hard
-to debug. Unfortunately, making them actually immutable is a large endeavor.
-(Modifying a `FragmentOptions` immediately after construction before anyone else
-gets a chance to keep a reference to it and before `equals()` or `hashCode()` is
-called on it is okay.)
-
-Bazel learns about option classes in the following ways:
-
-1. Some are hard-wired into Bazel (`CommonCommandOptions`)
-2. From the `@Command` annotation on each Bazel command
-3. From `ConfiguredRuleClassProvider` (these are command line options related
-   to individual programming languages)
-4. Starlark rules can also define their own options (see
-   [here](/extending/config))
-
-Each option (excluding Starlark-defined options) is a member variable of a
-`FragmentOptions` subclass that has the `@Option` annotation, which specifies
-the name and the type of the command line option along with some help text.
-
-The Java type of the value of a command line option is usually something simple
-(a string, an integer, a Boolean, a label, etc.). However, we also support
-options of more complicated types; in this case, the job of converting from the
-command line string to the data type falls to an implementation of
-`com.google.devtools.common.options.Converter`.
-
-## The source tree, as seen by Bazel
-
-Bazel is in the business of building software, which happens by reading and
-interpreting the source code. The totality of the source code Bazel operates on
-is called "the workspace" and it is structured into repositories, packages and
-rules.
-
-### Repositories
-
-A "repository" is a source tree on which a developer works; it usually
-represents a single project. Bazel's ancestor, Blaze, operated on a monorepo,
-that is, a single source tree that contains all source code used to run the
-build. Bazel, in contrast, supports projects whose source code spans multiple
-repositories. The repository from which Bazel is invoked is called the "main
-repository", the others are called "external repositories".
-
-A repository is marked by a repo boundary file (`MODULE.bazel`, `REPO.bazel`, or
-in legacy contexts, `WORKSPACE` or `WORKSPACE.bazel`) in its root directory. The
-main repo is the source tree where you're invoking Bazel from.
-External repos are defined in various ways; see
-[external dependencies overview](/external/overview) for more information.
-
-Code of external repositories is symlinked or downloaded under
-`$OUTPUT_BASE/external`.
-
-When running the build, the whole source tree needs to be pieced together; this
-is done by `SymlinkForest`, which symlinks every package in the main repository
-to `$EXECROOT` and every external repository to either `$EXECROOT/external` or
-`$EXECROOT/..`.
-
-### Packages
-
-Every repository is composed of packages: collections of related files and
-a specification of their dependencies. These are specified by a file called
-`BUILD` or `BUILD.bazel`. If both exist, Bazel prefers `BUILD.bazel`; the reason
-why `BUILD` files are still accepted is that Bazel's ancestor, Blaze, used this
-file name. However, `BUILD` turned out to be a commonly used path segment,
-especially on Windows, where file names are case-insensitive.
-
-Packages are independent of each other: changes to the `BUILD` file of a package
-cannot cause other packages to change. The addition or removal of `BUILD` files
-_can_ change other packages, since recursive globs stop at package boundaries
-and thus the presence of a `BUILD` file stops the recursion.
-
-The evaluation of a `BUILD` file is called "package loading". It's implemented
-in the class `PackageFactory`, works by calling the Starlark interpreter and
-requires knowledge of the set of available rule classes. The result of package
-loading is a `Package` object. It's mostly a map from a string (the name of a
-target) to the target itself.
-
-A large chunk of complexity during package loading is globbing: Bazel does not
-require every source file to be explicitly listed and instead can run globs
-(such as `glob(["**/*.java"])`). Unlike the shell, it supports recursive globs
-that descend into subdirectories (but not into subpackages). This requires
-access to the file system and since that can be slow, we implement all sorts of
-tricks to make it run in parallel and as efficiently as possible.
-
-Globbing is implemented in the following classes:
-
-* `LegacyGlobber`, a fast and blissfully Skyframe-unaware globber
-* `SkyframeHybridGlobber`, a version that uses Skyframe and falls back to
-  the legacy globber in order to avoid "Skyframe restarts" (described below)
-
-The `Package` class itself contains some members that are exclusively used to
-parse the "external" package (related to external dependencies) and which do not
-make sense for real packages. This is
-a design flaw because objects describing regular packages should not contain
-fields that describe something else. These include:
-
-* The repository mappings
-* The registered toolchains
-* The registered execution platforms
-
-Ideally, there would be more separation between parsing the "external" package
-and parsing regular packages so that `Package` does not need to cater to the
-needs of both. This is unfortunately difficult to do because the two are
-intertwined quite deeply.
-
-### Labels, Targets, and Rules
-
-Packages are composed of targets, which have the following types:
-
-1. **Files:** things that are either the input or the output of the build. In
-   Bazel parlance, we call them _artifacts_ (discussed elsewhere). Not all
-   files created during the build are targets; it's common for an output of
-   Bazel not to have an associated label.
-2. **Rules:** these describe steps to derive their outputs from their inputs.
-   They are generally associated with a programming language (such as
-   `cc_library`, `java_library` or `py_library`), but there are some
-   language-agnostic ones (such as `genrule` or `filegroup`).
-3. **Package groups:** discussed in the [Visibility](#visibility) section.
-
-The name of a target is called a _Label_. The syntax of labels is
-`@repo//pac/kage:name`, where `repo` is the name of the repository the Label is
-in, `pac/kage` is the directory its `BUILD` file is in and `name` is the path of
-the file (if the label refers to a source file) relative to the directory of the
-package. When referring to a target on the command line, some parts of the label
-can be omitted:
-
-1. If the repository is omitted, the label is taken to be in the main
-   repository.
-2. If the package part is omitted (such as `name` or `:name`), the label is
-   taken to be in the package of the current working directory (relative paths
-   containing uplevel references (..) are not allowed).
-
-A kind of rule (such as "C++ library") is called a "rule class". Rule classes
-may be implemented either in Starlark (the `rule()` function) or in Java
-(so-called "native rules", type `RuleClass`). In the long term, every
-language-specific rule will be implemented in Starlark, but some legacy rule
-families (such as Java or C++) are still in Java for the time being.
-
-Starlark rule classes need to be imported at the beginning of `BUILD` files
-using the `load()` statement, whereas Java rule classes are "innately" known by
-Bazel, by virtue of being registered with the `ConfiguredRuleClassProvider`.
-
-Rule classes contain information such as:
-
-1. Their attributes (such as `srcs`, `deps`): their types, default values,
-   constraints, etc.
-2. The configuration transitions and aspects attached to each attribute, if any
-3. The implementation of the rule
-4. The transitive info providers the rule "usually" creates
-
-**Terminology note:** In the codebase, we often use "Rule" to mean the target
-created by a rule class. But in Starlark and in user-facing documentation,
-"Rule" should be used exclusively to refer to the rule class itself; the target
-is just a "target". Also note that despite `RuleClass` having "class" in its
-name, there is no Java inheritance relationship between a rule class and targets
-of that type.
-
-## Skyframe
-
-The evaluation framework underlying Bazel is called Skyframe. Its model is that
-everything that needs to be built during a build is organized into a directed
-acyclic graph with edges pointing from each piece of data to its dependencies,
-that is, the other pieces of data that need to be known to construct it.
-
-The nodes in the graph are called `SkyValue`s and their names are called
-`SkyKey`s. Both are deeply immutable; only immutable objects should be
-reachable from them. This invariant almost always holds, and in case it doesn't
-(such as for the individual options classes in `BuildOptions`, which is a member
-of `BuildConfigurationValue` and its `SkyKey`) we try really hard not to change
-them or to change them in only ways that are not observable from the outside.
-From this it follows that everything that is computed within Skyframe (such as
-configured targets) must also be immutable.
-
-The most convenient way to observe the Skyframe graph is to run `bazel dump
---skyframe=deps`, which dumps the graph, one `SkyValue` per line. It's best
-to do it for tiny builds, since it can get pretty large.
-
-Skyframe lives in the `com.google.devtools.build.skyframe` package.
-The similarly-named package `com.google.devtools.build.lib.skyframe` contains
-the implementation of Bazel on top of Skyframe. More information about Skyframe
-is available [here](/reference/skyframe).
-
-To evaluate a given `SkyKey` into a `SkyValue`, Skyframe will invoke the
-`SkyFunction` corresponding to the type of the key. During the function's
-evaluation, it may request other dependencies from Skyframe by calling the
-various overloads of `SkyFunction.Environment.getValue()`. This has the
-side-effect of registering those dependencies into Skyframe's internal graph, so
-that Skyframe will know to re-evaluate the function when any of its dependencies
-change. In other words, Skyframe's caching and incremental computation work at
-the granularity of `SkyFunction`s and `SkyValue`s.
-
-Whenever a `SkyFunction` requests a dependency that is unavailable, `getValue()`
-will return null. The function should then yield control back to Skyframe by
-itself returning null. At some later point, Skyframe will evaluate the
-unavailable dependency, then restart the function from the beginning; only this
-time the `getValue()` call will succeed with a non-null result.
-
-A consequence of this is that any computation performed inside the `SkyFunction`
-prior to the restart must be repeated. But this does not include work done to
-evaluate dependency `SkyValues`, which are cached. Therefore, we commonly work
-around this issue by:
-
-1. Declaring dependencies in batches (by using `getValuesAndExceptions()`) to
-   limit the number of restarts.
-2. Breaking up a `SkyValue` into separate pieces computed by different
-   `SkyFunction`s, so that they can be computed and cached independently. This
-   should be done strategically, since it has the potential to increase memory
-   usage.
-3. Storing state between restarts, either using
-   `SkyFunction.Environment.getState()`, or keeping an ad hoc static cache
-   "behind the back of Skyframe". With complex SkyFunctions, state management
-   between restarts can get tricky, so
-   [`StateMachine`s](/contribute/statemachine-guide) were introduced for a
-   structured approach to logical concurrency, including hooks to suspend and
-   resume hierarchical computations within a `SkyFunction`. Example:
-   [`DependencyResolver#computeDependencies`][statemachine_example]
-   uses a `StateMachine` with `getState()` to compute the potentially huge set
-   of direct dependencies of a configured target, which otherwise can result in
-   expensive restarts.
-
-[statemachine_example]: https://developers.google.com/devsite/reference/markdown/links#reference_links
-
-Fundamentally, Bazel needs these types of workarounds because hundreds of
-thousands of in-flight Skyframe nodes is common, and Java's support of
-lightweight threads [does not outperform][virtual_threads] the
-`StateMachine` implementation as of 2023.
-
-[virtual_threads]: /contribute/statemachine-guide#epilogue_eventually_removing_callbacks
-
-## Starlark
-
-Starlark is the domain-specific language people use to configure and extend
-Bazel. It's conceived as a restricted subset of Python that has far fewer types,
-more restrictions on control flow, and most importantly, strong immutability
-guarantees to enable concurrent reads. It is not Turing-complete, which
-discourages some (but not all) users from trying to accomplish general
-programming tasks within the language.
-
-Starlark is implemented in the `net.starlark.java` package.
-It also has an independent Go implementation
-[here](https://github.com/google/starlark-go).
-The Java implementation used in Bazel is currently an interpreter.
-
-Starlark is used in several contexts, including:
-
-1. **`BUILD` files.** This is where new build targets are defined. Starlark
-   code running in this context only has access to the contents of the `BUILD`
-   file itself and `.bzl` files loaded by it.
-2. **The `MODULE.bazel` file.** This is where external dependencies are
-   defined. Starlark code running in this context only has very limited access
-   to a few predefined directives.
-3. **`.bzl` files.** This is where new build rules, repo rules, and module
-   extensions are defined. Starlark code here can define new functions and load
-   from other `.bzl` files.
-
-The dialects available for `BUILD` and `.bzl` files are slightly different
-because they express different things. A list of differences is available
-[here](/rules/language#differences-between-build-and-bzl-files).
-
-More information about Starlark is available [here](/rules/language).
-
-## The loading/analysis phase
-
-The loading/analysis phase is where Bazel determines what actions are needed to
-build a particular rule. Its basic unit is a "configured target", which is,
-quite sensibly, a (target, configuration) pair.
-
-It's called the "loading/analysis phase" because it can be split into two
-distinct parts, which used to be serialized, but can now overlap in time:
-
-1. Loading packages, that is, turning `BUILD` files into the `Package` objects
-   that represent them
-2. Analyzing configured targets, that is, running the implementation of the
-   rules to produce the action graph
-
-Each configured target in the transitive closure of the configured targets
-requested on the command line must be analyzed bottom-up; that is, leaf nodes
-first, then up to the ones on the command line. The inputs to the analysis of
-a single configured target are:
-
-1. **The configuration.** ("How" to build that rule; for example, the target
-   platform, but also things like command line options the user wants to be
-   passed to the C++ compiler.)
-2. **The direct dependencies.** Their transitive info providers are available
-   to the rule being analyzed. They are called like that because they provide a
-   "roll-up" of the information in the transitive closure of the configured
-   target, such as all the .jar files on the classpath or all the .o files that
-   need to be linked into a C++ binary.
-3. **The target itself.** This is the result of loading the package the target
-   is in. For rules, this includes its attributes, which is usually what
-   matters.
-4. **The implementation of the configured target.** For rules, this can either
-   be in Starlark or in Java. All non-rule configured targets are implemented
-   in Java.
-
-The output of analyzing a configured target is:
-
-1. The transitive info providers that configured targets that depend on it can
-   access
-2. The artifacts it can create and the actions that produce them
-
-The API offered to Java rules is `RuleContext`, which is the equivalent of the
-`ctx` argument of Starlark rules. Its API is more powerful, but at the same
-time, it's easier to do Bad Things™, for example to write code whose time or
-space complexity is quadratic (or worse), to make the Bazel server crash with a
-Java exception or to violate invariants (such as by inadvertently modifying an
-`Options` instance or by making a configured target mutable).
-
-The algorithm that determines the direct dependencies of a configured target
-lives in `DependencyResolver.dependentNodeMap()`.
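-
-To make this concrete, here is a minimal Starlark rule (a hypothetical sketch,
-not a rule that exists in Bazel): the implementation function receives `ctx`
-(the Starlark counterpart of `RuleContext`), registers an action, and returns
-transitive info providers, mirroring the analysis inputs and outputs described
-above.
-
-```py
-# echo.bzl (hypothetical): a minimal rule implementation.
-def _echo_impl(ctx):
-    # Declare a derived artifact and register the action that produces it.
-    out = ctx.actions.declare_file(ctx.label.name + ".txt")
-    ctx.actions.write(output = out, content = ctx.attr.message)
-    # Providers are the only things dependent configured targets can see.
-    return [DefaultInfo(files = depset([out]))]
-
-echo_rule = rule(
-    implementation = _echo_impl,
-    attrs = {"message": attr.string(default = "hello")},
-)
-```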
-
-### Configurations
-
-Configurations are the "how" of building a target: for what platform, with what
-command line options, etc.
-
-The same target can be built for multiple configurations in the same build.
-This is useful, for example, when the same code is used for a tool that's run
-during the build and for the target code and we are cross-compiling, or when we
-are building a fat Android app (one that contains native code for multiple CPU
-architectures).
-
-Conceptually, the configuration is a `BuildOptions` instance. However, in
-practice, `BuildOptions` is wrapped by `BuildConfiguration` that provides
-additional sundry pieces of functionality. It propagates from the top of the
-dependency graph to the bottom. If it changes, the build needs to be
-re-analyzed.
-
-This results in anomalies like having to re-analyze the whole build if, for
-example, the number of requested test runs changes, even though that only
-affects test targets (we have plans to "trim" configurations so that this is
-not the case, but it's not ready yet).
-
-When a rule implementation needs part of the configuration, it needs to declare
-it in its definition using `RuleClass.Builder.requiresConfigurationFragments()`.
-This is both to avoid mistakes (such as Python rules using the Java fragment)
-and to facilitate configuration trimming so that, for example, if Python options
-change, C++ targets don't need to be re-analyzed.
-
-The configuration of a rule is not necessarily the same as that of its "parent"
-rule. The process of changing the configuration in a dependency edge is called a
-"configuration transition". It can happen in two places:
-
-1. On a dependency edge. These transitions are specified in
-   `Attribute.Builder.cfg()` and are functions from a `Rule` (where the
-   transition happens) and a `BuildOptions` (the original configuration) to one
-   or more `BuildOptions` (the output configuration).
-2. On any incoming edge to a configured target. These are specified in
-   `RuleClass.Builder.cfg()`.
-
-The relevant classes are `TransitionFactory` and `ConfigurationTransition`.
-
-Configuration transitions are used, for example:
-
-1. To declare that a particular dependency is used during the build and it
-   should thus be built in the execution architecture
-2. To declare that a particular dependency must be built for multiple
-   architectures (such as for native code in fat Android APKs)
-
-If a configuration transition results in multiple configurations, it's called a
-_split transition._
-
-Configuration transitions can also be implemented in Starlark (documentation
-[here](/extending/config)).
-
-### Transitive info providers
-
-Transitive info providers are a way (and the _only_ way) for configured targets
-to learn things about other configured targets that they depend on, and the only
-way to tell things about themselves to other configured targets that depend on
-them. The reason why "transitive" is in their name is that this is usually some
-sort of roll-up of the transitive closure of a configured target.
-
-There is generally a 1:1 correspondence between Java transitive info providers
-and Starlark ones (the exception is `DefaultInfo`, which is an amalgamation of
-`FileProvider`, `FilesToRunProvider` and `RunfilesProvider` because that API was
-deemed to be more Starlark-ish than a direct transliteration of the Java one).
-Their key is one of the following things:
-
-1. A Java Class object. This is only available for providers that are not
-   accessible from Starlark.
-   These providers are a subclass of `TransitiveInfoProvider`.
-2. A string. This is legacy and heavily discouraged since it's susceptible to
-   name clashes. Such transitive info providers are direct subclasses of
-   `build.lib.packages.Info`.
-3. A provider symbol. This can be created from Starlark using the `provider()`
-   function and is the recommended way to create new providers. The symbol is
-   represented by a `Provider.Key` instance in Java.
-
-New providers implemented in Java should be implemented using `BuiltinProvider`.
-`NativeProvider` is deprecated (we haven't had time to remove it yet) and
-`TransitiveInfoProvider` subclasses cannot be accessed from Starlark.
-
-### Configured targets
-
-Configured targets are implemented as `RuleConfiguredTargetFactory`. There is a
-subclass for each rule class implemented in Java. Starlark configured targets
-are created through `StarlarkRuleConfiguredTargetUtil.buildRule()`.
-
-Configured target factories should use `RuleConfiguredTargetBuilder` to
-construct their return value. It consists of the following things:
-
-1. Their `filesToBuild`, the hazy concept of "the set of files this rule
-   represents." These are the files that get built when the configured target
-   is on the command line or in the `srcs` of a genrule.
-2. Their runfiles, regular and data.
-3. Their output groups. These are various "other sets of files" the rule can
-   build. They can be accessed using the `output_group` attribute of the
-   `filegroup` rule in `BUILD` and using the `OutputGroupInfo` provider in Java.
-
-### Runfiles
-
-Some binaries need data files to run. A prominent example is tests that need
-input files. This is represented in Bazel by the concept of "runfiles". A
-"runfiles tree" is a directory tree of the data files for a particular binary.
-It is created in the file system as a symlink tree with individual symlinks
-pointing to the files in the source or output trees.
-
-A set of runfiles is represented as a `Runfiles` instance. It is conceptually a
-map from the path of a file in the runfiles tree to the `Artifact` instance that
-represents it. It's a little more complicated than a single `Map` for two
-reasons:
-
-* Most of the time, the runfiles path of a file is the same as its execpath.
-  We use this to save some RAM.
-* There are various legacy kinds of entries in runfiles trees, which also need
-  to be represented.
-
-Runfiles are collected using `RunfilesProvider`: an instance of this class
-represents the runfiles a configured target (such as a library) and its
-transitive closure need, and they are gathered like a nested set (in fact, they
-are implemented using nested sets under the covers): each target unions the
-runfiles of its dependencies, adds some of its own, then sends the resulting set
-upwards in the dependency graph. A `RunfilesProvider` instance contains two
-`Runfiles` instances, one for when the rule is depended on through the "data"
-attribute and one for every other kind of incoming dependency. This is because a
-target sometimes presents different runfiles when depended on through a data
-attribute than otherwise. This is undesired legacy behavior that we haven't
-gotten around to removing yet.
-
-Runfiles of binaries are represented as an instance of `RunfilesSupport`. This
-is different from `Runfiles` because `RunfilesSupport` has the capability of
-actually being built (unlike `Runfiles`, which is just a mapping).
-This necessitates the following additional components:
-
-* **The input runfiles manifest.** This is a serialized description of the
-  runfiles tree. It is used as a proxy for the contents of the runfiles tree
-  and Bazel assumes that the runfiles tree changes if and only if the contents
-  of the manifest change.
-* **The output runfiles manifest.** This is used by runtime libraries that
-  handle runfiles trees, notably on Windows, which sometimes doesn't support
-  symbolic links.
-* **The runfiles middleman.** In order for a runfiles tree to exist, one needs
-  to build the symlink tree and the artifacts the symlinks point to. In order
-  to decrease the number of dependency edges, the runfiles middleman can be
-  used to represent all these.
-* **Command line arguments** for running the binary whose runfiles the
-  `RunfilesSupport` object represents.
-
-### Aspects
-
-Aspects are a way to "propagate computation down the dependency graph". They are
-described for users of Bazel
-[here](/extending/aspects). A good
-motivating example is protocol buffers: a `proto_library` rule should not know
-about any particular language, but building the implementation of a protocol
-buffer message (the "basic unit" of protocol buffers) in any programming
-language should be coupled to the `proto_library` rule so that if two targets in
-the same language depend on the same protocol buffer, it gets built only once.
-
-Just like configured targets, they are represented in Skyframe as a `SkyValue`
-and the way they are constructed is very similar to how configured targets are
-built: they have a factory class called `ConfiguredAspectFactory` that has
-access to a `RuleContext`, but unlike configured target factories, it also knows
-about the configured target it is attached to and its providers.
-
-The set of aspects propagated down the dependency graph is specified for each
-attribute using the `Attribute.Builder.aspects()` function. There are a few
-confusingly-named classes that participate in the process:
-
-1. `AspectClass` is the implementation of the aspect. It can be either in Java
-   (in which case it's a subclass) or in Starlark (in which case it's an
-   instance of `StarlarkAspectClass`). It's analogous to
-   `RuleConfiguredTargetFactory`.
-2. `AspectDefinition` is the definition of the aspect; it includes the
-   providers it requires, the providers it provides and contains a reference to
-   its implementation, such as the appropriate `AspectClass` instance. It's
-   analogous to `RuleClass`.
-3. `AspectParameters` is a way to parametrize an aspect that is propagated down
-   the dependency graph. It's currently a string-to-string map. A good example
-   of why it's useful is protocol buffers: if a language has multiple APIs, the
-   information as to which API the protocol buffers should be built for should
-   be propagated down the dependency graph.
-4. `Aspect` represents all the data that's needed to compute an aspect that
-   propagates down the dependency graph. It consists of the aspect class, its
-   definition and its parameters.
-5. `RuleAspect` is the function that determines which aspects a particular rule
-   should propagate. It's a `Rule` -> `Aspect` function.
-
-A somewhat unexpected complication is that aspects can attach to other aspects;
-for example, an aspect collecting the classpath for a Java IDE will probably
-want to know about all the .jar files on the classpath, but some of them are
-protocol buffers.
-In that case, the IDE aspect will want to attach to the
-(`proto_library` rule + Java proto aspect) pair.
-
-The complexity of aspects on aspects is captured in the class
-`AspectCollection`.
-
-### Platforms and toolchains
-
-Bazel supports multi-platform builds, that is, builds where there may be
-multiple architectures where build actions run and multiple architectures for
-which code is built. These architectures are referred to as _platforms_ in Bazel
-parlance (full documentation
-[here](/extending/platforms)).
-
-A platform is described by a key-value mapping from _constraint settings_ (such
-as the concept of "CPU architecture") to _constraint values_ (such as a
-particular CPU like x86\_64). We have a "dictionary" of the most commonly used
-constraint settings and values in the `@platforms` repository.
-
-The concept of _toolchain_ comes from the fact that depending on what platforms
-the build is running on and what platforms are targeted, one may need to use
-different compilers; for example, a particular C++ toolchain may run on a
-specific OS and be able to target some other OSes. Bazel must determine the C++
-compiler that is used based on the set execution and target platform
-(documentation for toolchains
-[here](/extending/toolchains)).
-
-To support this, toolchains are annotated with the set of execution and target
-platform constraints they support. The definition of a toolchain is accordingly
-split into two parts:
-
-1. A `toolchain()` rule that describes the set of execution and target
-   constraints a toolchain supports and tells what kind (such as C++ or Java) of
-   toolchain it is (the latter is represented by the `toolchain_type()` rule)
-2. A language-specific rule that describes the actual toolchain (such as
-   `cc_toolchain()`)
-
-This is done in this way because we need to know the constraints for every
-toolchain in order to do toolchain resolution, and language-specific
-`*_toolchain()` rules contain much more information than that, so they take more
-time to load.
-
-Execution platforms are specified in one of the following ways:
-
-1. In the `MODULE.bazel` file using the `register_execution_platforms()`
-   function
-2. On the command line using the `--extra_execution_platforms` command line
-   option
-
-The set of available execution platforms is computed in
-`RegisteredExecutionPlatformsFunction`.
-
-The target platform for a configured target is determined by
-`PlatformOptions.computeTargetPlatform()`. It's a list of platforms because we
-eventually want to support multiple target platforms, but it's not implemented
-yet.
-
-The set of toolchains to be used for a configured target is determined by
-`ToolchainResolutionFunction`. It is a function of:
-
-* The set of registered toolchains (in the `MODULE.bazel` file and the
-  configuration)
-* The desired execution and target platforms (in the configuration)
-* The set of toolchain types that are required by the configured target (in
-  `UnloadedToolchainContextKey`)
-* The set of execution platform constraints of the configured target (the
-  `exec_compatible_with` attribute) and the configuration
-  (`--experimental_add_exec_constraints_to_targets`), in
-  `UnloadedToolchainContextKey`
-
-Its result is an `UnloadedToolchainContext`, which is essentially a map from
-toolchain type (represented as a `ToolchainTypeInfo` instance) to the label of
-the selected toolchain. It's called "unloaded" because it does not contain the
-toolchains themselves, only their labels.
-
-Then the toolchains are actually loaded using `ResolvedToolchainContext.load()`
-and used by the implementation of the configured target that requested them.
-
-We also have a legacy system that relies on there being one single "host"
-configuration and target configurations being represented by various
-configuration flags, such as `--cpu`. We are gradually transitioning to the
-above system. In order to handle cases where people rely on the legacy
-configuration values, we have implemented
-[platform mappings](https://docs.google.com/document/d/1Vg_tPgiZbSrvXcJ403vZVAGlsWhH9BUDrAxMOYnO0Ls)
-to translate between the legacy flags and the new-style platform constraints.
-Their code is in `PlatformMappingFunction` and uses a non-Starlark "little
-language".
-
-### Constraints
-
-Sometimes one wants to designate a target as being compatible with only a few
-platforms. Bazel has (unfortunately) multiple mechanisms to achieve this end:
-
-* Rule-specific constraints
-* `environment_group()` / `environment()`
-* Platform constraints
-
-Rule-specific constraints are mostly used within Google for Java rules; they are
-on their way out and they are not available in Bazel, but the source code may
-contain references to them. The attribute that governs this is called
-`constraints=`.
-
-#### environment_group() and environment()
-
-These rules are a legacy mechanism and are not widely used.
-
-All build rules can declare which "environments" they can be built for, where an
-"environment" is an instance of the `environment()` rule.
-
-There are various ways supported environments can be specified for a rule:
-
-1. Through the `restricted_to=` attribute. This is the most direct form of
-   specification; it declares the exact set of environments the rule supports.
-2. Through the `compatible_with=` attribute. This declares environments a rule
-   supports in addition to "standard" environments that are supported by
-   default.
-3. Through the package-level attributes `default_restricted_to=` and
-   `default_compatible_with=`.
-4. Through default specifications in `environment_group()` rules. Every
-   environment belongs to a group of thematically related peers (such as "CPU
-   architectures", "JDK versions" or "mobile operating systems"). The
-   definition of an environment group includes which of these environments
-   should be supported by "default" if not otherwise specified by the
-   `restricted_to=` / `environment()` attributes. A rule with no such
-   attributes inherits all defaults.
-5. Through a rule class default. This overrides global defaults for all
-   instances of the given rule class. This can be used, for example, to make
-   all `*_test` rules testable without each instance having to explicitly
-   declare this capability.
-
-`environment()` is implemented as a regular rule, whereas `environment_group()`
-is both a subclass of `Target` that is not a `Rule` (`EnvironmentGroup`) and a
-function that is available by default from Starlark
-(`StarlarkLibrary.environmentGroup()`), which eventually creates an eponymous
-target. This is to avoid a cyclic dependency that would arise because each
-environment needs to declare the environment group it belongs to and each
-environment group needs to declare its default environments.
-
-A build can be restricted to a certain environment with the
-`--target_environment` command line option.
-
-The implementation of the constraint check is in
-`RuleContextConstraintSemantics` and `TopLevelConstraintSemantics`.
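-
-As an illustration of the mechanism above, a hypothetical environment group and
-a rule restricted to one of its environments might look like this (all names
-are made up):
-
-```py
-environment(name = "x86")
-environment(name = "arm")
-
-environment_group(
-    name = "cpus",
-    defaults = [":x86"],
-    environments = [":x86", ":arm"],
-)
-
-# This library declares that it can only be built for the "arm" environment.
-cc_library(
-    name = "arm_only_lib",
-    srcs = ["arm_only.cc"],
-    restricted_to = [":arm"],
-)
-```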
-
-#### Platform constraints
-
-The current "official" way to describe what platforms a target is compatible
-with is by using the same constraints used to describe toolchains and platforms.
-It was implemented in pull request
-[#10945](https://github.com/bazelbuild/bazel/pull/10945).
-
-### Visibility
-
-If you work on a large codebase with a lot of developers (like at Google), you
-want to take care to prevent everyone else from arbitrarily depending on your
-code. Otherwise, as per [Hyrum's law](https://www.hyrumslaw.com/),
-people _will_ come to rely on behaviors that you considered to be implementation
-details.
-
-Bazel supports this with a mechanism called _visibility_: you can limit which
-targets can depend on a particular target using the
-[visibility](/reference/be/common-definitions#common-attributes) attribute. This
-attribute is a little special because, although it holds a list of labels, these
-labels may encode a pattern over package names rather than a pointer to any
-particular target. (Yes, this is a design flaw.)
-
-This is implemented in the following places:
-
-* The `RuleVisibility` interface represents a visibility declaration. It can
-  be either a constant (fully public or fully private) or a list of labels.
-* Labels can refer either to package groups (a predefined list of packages), to
-  packages directly (`//pkg:__pkg__`), or to subtrees of packages
-  (`//pkg:__subpackages__`). This is different from the command line syntax,
-  which uses `//pkg:*` or `//pkg/...`.
-* Package groups are implemented as their own target (`PackageGroup`) and
-  configured target (`PackageGroupConfiguredTarget`). We could probably
-  replace these with simple rules if we wanted to. Their logic is implemented
-  with the help of: `PackageSpecification`, which corresponds to a
-  single pattern like `//pkg/...`; `PackageGroupContents`, which corresponds
-  to a single `package_group`'s `packages` attribute; and
-  `PackageSpecificationProvider`, which aggregates over a `package_group` and
-  its transitive `includes`.
-* The conversion from visibility label lists to dependencies is done in
-  `DependencyResolver.visitTargetVisibility` and a few other miscellaneous
-  places.
-* The actual check is done in
-  `CommonPrerequisiteValidator.validateDirectPrerequisiteVisibility()`.
-
-### Nested sets
-
-Oftentimes, a configured target aggregates a set of files from its dependencies,
-adds its own, and wraps the aggregate set into a transitive info provider so
-that configured targets that depend on it can do the same. Examples:
-
-* The C++ header files used for a build
-* The object files that represent the transitive closure of a `cc_library`
-* The set of .jar files that need to be on the classpath for a Java rule to
-  compile or run
-* The set of Python files in the transitive closure of a Python rule
-
-If we did this the naive way by using, for example, `List` or `Set`, we'd end up with
-quadratic memory usage: if there is a chain of N rules and each rule adds a
-file, we'd have 1+2+...+N collection members.
-
-In order to get around this problem, we came up with the concept of a
-`NestedSet`. It's a data structure that is composed of other `NestedSet`
-instances and some members of its own, thereby forming a directed acyclic graph
-of sets. Nested sets are immutable and their members can be iterated over. We
-define multiple iteration orders (`NestedSet.Order`): preorder, postorder,
-topological (a node always comes after its ancestors) and "don't care, but it
-should be the same each time".
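-
-The sharing is easiest to see in Starlark, where the same structure is exposed
-as `depset` (as noted just below); a minimal sketch:
-
-```
-# Each level wraps its dependency's set instead of copying it, so memory
-# usage stays linear in the number of files.
-level1 = depset(["a.h"])
-level2 = depset(["b.h"], transitive = [level1])
-level3 = depset(["c.h"], transitive = [level2])
-
-# Flattening only happens when the set is iterated over; the element
-# order depends on the depset's order parameter.
-print(level3.to_list())  # ["a.h", "b.h", "c.h"]
-```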
-
-As mentioned, the same data structure is called `depset` in Starlark.
-
-### Artifacts and Actions
-
-The actual build consists of a set of commands that need to be run to produce
-the output the user wants. The commands are represented as instances of the
-class `Action` and the files are represented as instances of the class
-`Artifact`. They are arranged in a bipartite, directed, acyclic graph called the
-"action graph".
-
-Artifacts come in two kinds: source artifacts (ones that are available
-before Bazel starts executing) and derived artifacts (ones that need to be
-built). Derived artifacts can themselves be of multiple kinds:
-
-1. **Regular artifacts.** These are checked for up-to-dateness by computing
-   their checksum, with mtime as a shortcut; we don't checksum the file if its
-   ctime hasn't changed.
-2. **Unresolved symlink artifacts.** These are checked for up-to-dateness by
-   calling readlink(). Unlike regular artifacts, these can be dangling
-   symlinks. Usually used in cases where one then packs up some files into an
-   archive of some sort.
-3. **Tree artifacts.** These are not single files, but directory trees. They
-   are checked for up-to-dateness by checking the set of files in them and
-   their contents. They are represented as a `TreeArtifact`.
-4. **Constant metadata artifacts.** Changes to these artifacts don't trigger a
-   rebuild. This is used exclusively for build stamp information: we don't want
-   to do a rebuild just because the current time changed.
-
-There is no fundamental reason why source artifacts cannot be tree artifacts or
-unresolved symlink artifacts, it's just that we haven't implemented it yet (we
-should, though -- referencing a source directory in a `BUILD` file is one of the
-few known long-standing incorrectness issues with Bazel; we have an
-implementation that kind of works which is enabled by the
-`BAZEL_TRACK_SOURCE_DIRECTORIES=1` JVM property).
-
-A notable kind of `Artifact` is the middleman. Middlemen are indicated by
-`Artifact` instances that are the outputs of `MiddlemanAction`. They are used
-for one special case:
-
-* Runfiles middlemen are used to ensure the presence of a runfiles tree so
-  that one does not separately need to depend on the output manifest and every
-  single artifact referenced by the runfiles tree.
-
-An action is best understood as a command that needs to be run, the environment
-it needs and the set of outputs it produces. The following things are the main
-components of the description of an action:
-
-* The command line that needs to be run
-* The input artifacts it needs
-* The environment variables that need to be set
-* Annotations that describe the environment (such as platform) it needs to run in
-
-There are also a few other special cases, like writing a file whose content is
-known to Bazel. They are a subclass of `AbstractAction`. Most actions are
-a `SpawnAction` or a `StarlarkAction` (the two are the same; they should
-arguably not be separate classes), although Java and C++ have their own action
-types (`JavaCompileAction`, `CppCompileAction` and `CppLinkAction`).
-
-We eventually want to move everything to `SpawnAction`; `JavaCompileAction` is
-pretty close, but C++ is a bit of a special-case due to .d file parsing and
-include scanning.
-
-The action graph is mostly "embedded" into the Skyframe graph: conceptually, the
-execution of an action is represented as an invocation of
-`ActionExecutionFunction`.
The mapping from an action graph dependency edge to a
-Skyframe dependency edge is described in
-`ActionExecutionFunction.getInputDeps()` and `Artifact.key()` and has a few
-optimizations in order to keep the number of Skyframe edges low:
-
-* Derived artifacts do not have their own `SkyValue`s. Instead,
-  `Artifact.getGeneratingActionKey()` is used to find out the key of the
-  action that generates them.
-* Nested sets have their own Skyframe key.
-
-### Shared actions
-
-Some actions are generated by multiple configured targets; Starlark rules are
-more limited since they are only allowed to put their derived actions into a
-directory determined by their configuration and their package (but even so,
-rules in the same package can conflict), but rules implemented in Java can put
-derived artifacts anywhere.
-
-This is considered to be a misfeature, but getting rid of it is really hard
-because it produces significant savings in execution time when, for example, a
-source file needs to be processed somehow and that file is referenced by
-multiple rules (handwave-handwave). This comes at the cost of some RAM: each
-instance of a shared action needs to be stored in memory separately.
-
-If two actions generate the same output file, they must be exactly the same:
-have the same inputs, the same outputs and run the same command line. This
-equivalence relation is implemented in `Actions.canBeShared()` and it is
-verified between the analysis and execution phases by looking at every Action.
-This is implemented in `SkyframeActionExecutor.findAndStoreArtifactConflicts()`
-and is one of the few places in Bazel that requires a "global" view of the
-build.
-
-## The execution phase
-
-This is when Bazel actually starts running build actions, such as commands that
-produce outputs.
-
-The first thing Bazel does after the analysis phase is to determine what
-Artifacts need to be built. The logic for this is encoded in
-`TopLevelArtifactHelper`; roughly speaking, it's the `filesToBuild` of the
-configured targets on the command line and the contents of a special output
-group for the explicit purpose of expressing "if this target is on the command
-line, build these artifacts".
-
-The next step is creating the execution root. Since Bazel has the option to read
-source packages from different locations in the file system (`--package_path`),
-it needs to provide locally executed actions with a full source tree. This is
-handled by the class `SymlinkForest` and works by taking note of every target
-used in the analysis phase and building up a single directory tree that symlinks
-every package with a used target from its actual location. An alternative would
-be to pass the correct paths to commands (taking `--package_path` into account).
-This is undesirable because:
-
-* It changes action command lines when a package is moved from a package path
-  entry to another (which used to be a common occurrence)
-* It results in different command lines if an action is run remotely than if
-  it's run locally
-* It requires a command line transformation specific to the tool in use
-  (consider the difference between, say, Java classpaths and C++ include paths)
-* Changing the command line of an action invalidates its action cache entry
-* `--package_path` is slowly and steadily being deprecated
-
-Then, Bazel starts traversing the action graph (the bipartite, directed graph
-composed of actions and their input and output artifacts) and running actions.
-
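-Before following execution further, it may help to see how the components of an
-action's description listed in the previous section (command line, input
-artifacts, environment variables) are assembled by a rule author. A minimal,
-hypothetical Starlark sketch (the `_tool` attribute and file names are
-invented):
-
-```
-def _impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name + ".out")
-    ctx.actions.run(
-        executable = ctx.executable._tool,  # the command to run
-        arguments = [out.path],             # its command line
-        inputs = ctx.files.srcs,            # the input artifacts it needs
-        outputs = [out],                    # the derived artifacts it produces
-        env = {"LC_ALL": "C"},              # environment variables to set
-    )
-    return [DefaultInfo(files = depset([out]))]
-```
-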
-The execution of each action is represented by an instance of the `SkyValue`
-class `ActionExecutionValue`.
-
-Since running an action is expensive, we have a few layers of caching that can
-be hit behind Skyframe:
-
-* `ActionExecutionFunction.stateMap` contains data to make Skyframe restarts
-  of `ActionExecutionFunction` cheap
-* The local action cache contains data about the state of the file system
-* Remote execution systems usually also contain their own cache
-
-### The local action cache
-
-This cache is another layer that sits behind Skyframe; even if an action is
-re-executed in Skyframe, it can still be a hit in the local action cache. It
-represents the state of the local file system and it's serialized to disk, which
-means that when one starts up a new Bazel server, one can get local action cache
-hits even though the Skyframe graph is empty.
-
-This cache is checked for hits using the method
-`ActionCacheChecker.getTokenIfNeedToExecute()`.
-
-Contrary to its name, it's a map from the path of a derived artifact to the
-action that emitted it. The action is described as:
-
-1. The set of its input and output files and their checksum
-2. Its "action key", which is usually the command line that was executed, but
-   in general, represents everything that's not captured by the checksum of the
-   input files (such as for `FileWriteAction`, it's the checksum of the data
-   that's written)
-
-There is also a highly experimental "top-down action cache" that is still under
-development, which uses transitive hashes to avoid going to the cache as many
-times.
-
-### Input discovery and input pruning
-
-Some actions are more complicated than just having a set of inputs. Changes to
-the set of inputs of an action come in two forms:
-
-* An action may discover new inputs before its execution or decide that some
-  of its inputs are not actually necessary. The canonical example is C++,
-  where it's better to make an educated guess about what header files a C++
-  file uses from its transitive closure so that we don't need to send every
-  file to remote executors; therefore, we have an option not to register every
-  header file as an "input", but scan the source file for transitively
-  included headers and only mark as inputs those header files that are
-  mentioned in `#include` statements (we overestimate so that we don't need to
-  implement a full C preprocessor). This option is currently hard-wired to
-  "false" in Bazel and is only used at Google.
-* An action may realize that some files were not used during its execution. In
-  C++, this is called ".d files": the compiler reports which header files were
-  used after the fact, and in order to avoid the embarrassment of having worse
-  incrementality than Make, Bazel makes use of this fact. This offers a better
-  estimate than the include scanner because it relies on the compiler.
-
-These are implemented using methods on Action:
-
-1. `Action.discoverInputs()` is called. It should return a nested set of
-   Artifacts that are determined to be required. These must be source artifacts
-   so that there are no dependency edges in the action graph that don't have an
-   equivalent in the configured target graph.
-2. The action is executed by calling `Action.execute()`.
-3. At the end of `Action.execute()`, the action can call
-   `Action.updateInputs()` to tell Bazel that not all of its inputs were
-   needed. This can result in incorrect incremental builds if a used input is
-   reported as unused.
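-
-For Starlark rules, input pruning surfaces as the `unused_inputs_list=`
-parameter mentioned below. A hedged sketch of how a rule might use it (the
-tool, its protocol and the attribute names are hypothetical):
-
-```
-def _impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name + ".out")
-    unused = ctx.actions.declare_file(ctx.label.name + ".unused")
-    ctx.actions.run(
-        executable = ctx.executable._tool,
-        arguments = [out.path, unused.path],
-        inputs = ctx.files.srcs,
-        outputs = [out, unused],
-        # The tool writes the paths of the inputs it did not use to this
-        # file, one per line; Bazel then prunes them from the action's
-        # input set.
-        unused_inputs_list = unused,
-    )
-    return [DefaultInfo(files = depset([out]))]
-```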
-
-When an action cache returns a hit on a fresh Action instance (such as one
-created after a server restart), Bazel calls `updateInputs()` itself so that the
-set of inputs reflects the result of input discovery and pruning done before.
-
-Starlark actions can make use of this facility to declare some inputs as unused
-using the `unused_inputs_list=` argument of
-`ctx.actions.run()`, as sketched above.
-
-### Various ways to run actions: Strategies/ActionContexts
-
-Some actions can be run in different ways. For example, a command line can be
-executed locally, locally but in various kinds of sandboxes, or remotely. The
-concept that embodies this is called an `ActionContext` (or `Strategy`, since we
-successfully went only halfway with a rename...)
-
-The life cycle of an action context is as follows:
-
-1. When the execution phase is started, `BlazeModule` instances are asked what
-   action contexts they have. This happens in the constructor of
-   `ExecutionTool`. Action context types are identified by a Java `Class`
-   instance that refers to a sub-interface of `ActionContext`, which is the
-   interface the action context must implement.
-2. The appropriate action context is selected from the available ones and is
-   forwarded to `ActionExecutionContext` and `BlazeExecutor`.
-3. Actions request contexts using `ActionExecutionContext.getContext()` and
-   `BlazeExecutor.getStrategy()` (there should really be only one way to do
-   it…)
-
-Strategies are free to call other strategies to do their jobs; this is used, for
-example, in the dynamic strategy that starts actions both locally and remotely,
-then uses whichever finishes first.
-
-One notable strategy is the one that implements persistent worker processes
-(`WorkerSpawnStrategy`). The idea is that some tools have a long startup time
-and should therefore be reused between actions instead of starting one anew for
-every action. (This does represent a potential correctness issue, since Bazel
-relies on the worker process's promise that it doesn't carry observable
-state between individual requests.)
-
-If the tool changes, the worker process needs to be restarted. Whether a worker
-can be reused is determined by computing a checksum for the tool in use with
-`WorkerFilesHash`. It relies on knowing which inputs of the action represent
-part of the tool and which represent inputs; this is determined by the creator
-of the Action: `Spawn.getToolFiles()` and the runfiles of the `Spawn` are
-counted as parts of the tool.
-
-More information about strategies (or action contexts!):
-
-* Information about various strategies for running actions is available
-  [here](https://jmmv.dev/2019/12/bazel-strategies.html).
-* Information about the dynamic strategy, one where we run an action both
-  locally and remotely to see which finishes first, is available
-  [here](https://jmmv.dev/series.html#Bazel%20dynamic%20execution).
-* Information about the intricacies of executing actions locally is available
-  [here](https://jmmv.dev/2019/11/bazel-process-wrapper.html).
-
-### The local resource manager
-
-Bazel _can_ run many actions in parallel. The number of local actions that
-_should_ be run in parallel differs from action to action: the more resources an
-action requires, the fewer instances should be running at the same time to avoid
-overloading the local machine.
-
-This is implemented in the class `ResourceManager`: each action has to be
-annotated with an estimate of the local resources it requires in the form of a
-`ResourceSet` instance (CPU and RAM).
Then when action contexts do something
-that requires local resources, they call `ResourceManager.acquireResources()`
-and are blocked until the required resources are available.
-
-A more detailed description of local resource management is available
-[here](https://jmmv.dev/2019/12/bazel-local-resources.html).
-
-### The structure of the output directory
-
-Each action requires a separate place in the output directory where it places
-its outputs. The location of derived artifacts is usually as follows:
-
-```
-$EXECROOT/bazel-out/<configuration>/bin/<package>/<artifact name>
-```
-
-How is the name of the directory that is associated with a particular
-configuration determined? There are two conflicting desirable properties:
-
-1. If two configurations can occur in the same build, they should have
-   different directories so that both can have their own version of the same
-   action; otherwise, if the two configurations disagree about, say, the command
-   line of an action producing the same output file, Bazel doesn't know which
-   action to choose (an "action conflict")
-2. If two configurations represent "roughly" the same thing, they should have
-   the same name so that actions executed in one can be reused for the other if
-   the command lines match: for example, changes to the command line options to
-   the Java compiler should not result in C++ compile actions being re-run.
-
-So far, we have not come up with a principled way of solving this problem, which
-has similarities to the problem of configuration trimming. A longer discussion
-of options is available
-[here](https://docs.google.com/document/d/1fZI7wHoaS-vJvZy9SBxaHPitIzXE_nL9v4sS4mErrG4/edit).
-The main problematic areas are Starlark rules (whose authors usually aren't
-intimately familiar with Bazel) and aspects, which add another dimension to the
-space of things that can produce the "same" output file.
-
-The current approach is that the path segment for the configuration is
-`<CPU>-<compilation mode>` with various suffixes added so that configuration
-transitions implemented in Java don't result in action conflicts. In addition, a
-checksum of the set of Starlark configuration transitions is added so that users
-can't cause action conflicts. It is far from perfect. This is implemented in
-`OutputDirectories.buildMnemonic()` and relies on each configuration fragment
-adding its own part to the name of the output directory.
-
-## Tests
-
-Bazel has rich support for running tests. It supports:
-
-* Running tests remotely (if a remote execution backend is available)
-* Running tests multiple times in parallel (for deflaking or gathering timing
-  data)
-* Sharding tests (splitting test cases in the same test over multiple processes
-  for speed)
-* Re-running flaky tests
-* Grouping tests into test suites
-
-Tests are regular configured targets that have a `TestProvider`, which describes
-how the test should be run:
-
-* The artifacts whose building results in the test being run. This is a "cache
-  status" file that contains a serialized `TestResultData` message
-* The number of times the test should be run
-* The number of shards the test should be split into
-* Some parameters about how the test should be run (such as the test timeout)
-
-### Determining which tests to run
-
-Determining which tests are run is an elaborate process.
-
-First, during target pattern parsing, test suites are recursively expanded. The
-expansion is implemented in `TestsForTargetPatternFunction`. A somewhat
-surprising wrinkle is that if a test suite declares no tests, it refers to
-_every_ test in its package.
This is implemented in `Package.beforeBuild()` by
-adding an implicit attribute called `$implicit_tests` to test suite rules.
-
-Then, tests are filtered for size, tags, timeout and language according to the
-command line options. This is implemented in `TestFilter` and is called from
-`TargetPatternPhaseFunction.determineTests()` during target parsing; the
-result is put into `TargetPatternPhaseValue.getTestsToRunLabels()`. The reason
-why rule attributes which can be filtered for are not configurable is that this
-happens before the analysis phase and, therefore, the configuration is not
-available.
-
-This is then processed further in `BuildView.createResult()`: targets whose
-analysis failed are filtered out and tests are split into exclusive and
-non-exclusive tests. It's then put into `AnalysisResult`, which is how
-`ExecutionTool` knows which tests to run.
-
-In order to lend some transparency to this elaborate process, the `tests()`
-query operator (implemented in `TestsFunction`) is available to tell which tests
-are run when a particular target is specified on the command line. It's
-unfortunately a reimplementation, so it probably deviates from the above in
-multiple subtle ways.
-
-### Running tests
-
-The way the tests are run is by requesting cache status artifacts. This then
-results in the execution of a `TestRunnerAction`, which eventually calls the
-`TestActionContext` chosen by the `--test_strategy` command line option that
-runs the test in the requested way.
-
-Tests are run according to an elaborate protocol that uses environment variables
-to tell tests what's expected from them. A detailed description of what Bazel
-expects from tests and what tests can expect from Bazel is available
-[here](/reference/test-encyclopedia). At the
-simplest, an exit code of 0 means success, anything else means failure.
-
-In addition to the cache status file, each test process emits a number of other
-files. They are put in the "test log directory", which is the subdirectory called
-`testlogs` of the output directory of the target configuration:
-
-* `test.xml`, a JUnit-style XML file detailing the individual test cases in
-  the test shard
-* `test.log`, the console output of the test. stdout and stderr are not
-  separated.
-* `test.outputs`, the "undeclared outputs directory"; this is used by tests
-  that want to output files in addition to what they print to the terminal.
-
-There are two things that can happen during test execution that cannot when
-building regular targets: exclusive test execution and output streaming.
-
-Some tests need to be executed in exclusive mode, that is, not in parallel with
-other tests. This can be elicited either by adding `tags=["exclusive"]` to the
-test rule or running the test with `--test_strategy=exclusive`. Each exclusive
-test is run by a separate Skyframe invocation requesting the execution of the
-test after the "main" build. This is implemented in
-`SkyframeExecutor.runExclusiveTest()`.
-
-Unlike regular actions, whose terminal output is dumped when the action
-finishes, the user can request the output of tests to be streamed so that they
-get informed about the progress of a long-running test. This is specified by the
-`--test_output=streamed` command line option and implies exclusive test
-execution so that outputs of different tests are not interspersed.
-
-This is implemented in the aptly-named `StreamedTestOutput` class and works by
-polling changes to the `test.log` file of the test in question and dumping new
-bytes to the terminal where Bazel runs.
-
-Results of the executed tests are available on the event bus by observing
-various events (such as `TestAttempt`, `TestResult` or `TestingCompleteEvent`).
-They are dumped to the Build Event Protocol and they are emitted to the console
-by `AggregatingTestListener`.
-
-### Coverage collection
-
-Coverage is reported by the tests in LCOV format in the files
-`bazel-testlogs/$PACKAGE/$TARGET/coverage.dat`.
-
-To collect coverage, each test execution is wrapped in a script called
-`collect_coverage.sh`.
-
-This script sets up the environment of the test to enable coverage collection
-and determines where the coverage files are written by the coverage runtime(s).
-It then runs the test. A test may itself run multiple subprocesses and consist
-of parts written in multiple different programming languages (with separate
-coverage collection runtimes). The wrapper script is responsible for converting
-the resulting files to LCOV format if necessary and merging them into a single
-file.
-
-The interposition of `collect_coverage.sh` is done by the test strategies and
-requires `collect_coverage.sh` to be on the inputs of the test. This is
-accomplished by the implicit attribute `:coverage_support`, which is resolved to
-the value of the configuration flag `--coverage_support` (see
-`TestConfiguration.TestOptions.coverageSupport`).
-
-Some languages do offline instrumentation, meaning that the coverage
-instrumentation is added at compile time (such as C++), and others do online
-instrumentation, meaning that coverage instrumentation is added at execution
-time.
-
-Another core concept is _baseline coverage_. This is the coverage of a library,
-binary, or test if no code in it was run. The problem it solves is that if you
-want to compute the test coverage for a binary, it is not enough to merge the
-coverage of all of the tests because there may be code in the binary that is not
-linked into any test. Therefore, we emit a coverage file for every binary that
-contains only the files we collect coverage for, with no covered lines. The
-baseline coverage file for a target is at
-`bazel-testlogs/$PACKAGE/$TARGET/baseline_coverage.dat`. It is also generated
-for binaries and libraries in addition to tests if you pass the
-`--nobuild_tests_only` flag to Bazel.
-
-Baseline coverage is currently broken.
-
-We track two groups of files for coverage collection for each rule: the set of
-instrumented files and the set of instrumentation metadata files.
-
-The set of instrumented files is just that, a set of files to instrument. For
-online coverage runtimes, this can be used at runtime to decide which files to
-instrument. It is also used to implement baseline coverage.
-
-The set of instrumentation metadata files is the set of extra files a test needs
-to generate the LCOV files Bazel requires from it. In practice, this consists of
-runtime-specific files; for example, gcc emits .gcno files during compilation.
-These are added to the set of inputs of test actions if coverage mode is
-enabled.
-
-Whether or not coverage is being collected is stored in the
-`BuildConfiguration`.
This is handy because it is an easy way to change the test
-action and the action graph depending on this bit, but it also means that if
-this bit is flipped, all targets need to be re-analyzed (some languages, such as
-C++, require different compiler options to emit code that can collect coverage,
-which mitigates this issue somewhat, since then a re-analysis is needed anyway).
-
-The coverage support files are depended on through labels in an implicit
-dependency so that they can be overridden by the invocation policy, which allows
-them to differ between the different versions of Bazel. Ideally, these
-differences would be removed, and we would standardize on one of them.
-
-We also generate a "coverage report" which merges the coverage collected for
-every test in a Bazel invocation. This is handled by
-`CoverageReportActionFactory` and is called from `BuildView.createResult()`. It
-gets access to the tools it needs by looking at the `:coverage_report_generator`
-attribute of the first test that is executed.
-
-## The query engine
-
-Bazel has a
-[little language](/query/guide)
-used to ask it various things about various graphs. The following query kinds
-are provided:
-
-* `bazel query` is used to investigate the target graph
-* `bazel cquery` is used to investigate the configured target graph
-* `bazel aquery` is used to investigate the action graph
-
-Each of these is implemented by subclassing `AbstractBlazeQueryEnvironment`.
-Additional query functions can be implemented by subclassing `QueryFunction`.
-In order to allow streaming query results, instead of collecting them to some
-data structure, a `query2.engine.Callback` is passed to `QueryFunction`, which
-calls it for results it wants to return.
-
-The result of a query can be emitted in various ways: labels, labels and rule
-classes, XML, protobuf and so on. These are implemented as subclasses of
-`OutputFormatter`.
-
-A subtle requirement of some query output formats (proto, definitely) is that
-Bazel needs to emit _all_ the information that package loading provides so that
-one can diff the output and determine whether a particular target has changed.
-As a consequence, attribute values need to be serializable, which is why there
-are so few attribute types and why no attribute holds complex Starlark
-values. The usual workaround is to use a label, and attach the complex
-information to the rule with that label. It's not a very satisfying workaround
-and it would be very nice to lift this requirement.
-
-## The module system
-
-Bazel can be extended by adding modules to it. Each module must subclass
-`BlazeModule` (the name is a relic of the history of Bazel when it used to be
-called Blaze) and gets information about various events during the execution of
-a command.
-
-They are mostly used to implement various pieces of "non-core" functionality
-that only some versions of Bazel (such as the one we use at Google) need:
-
-* Interfaces to remote execution systems
-* New commands
-
-The set of extension points `BlazeModule` offers is somewhat haphazard. Don't
-use it as an example of good design principles.
-
-## The event bus
-
-The main way `BlazeModule`s communicate with the rest of Bazel is by an event bus
-(`EventBus`): a new instance is created for every build, various parts of Bazel
-can post events to it and modules can register listeners for the events they are
-interested in.
For example, the following things are represented as events:
-
-* The list of build targets to be built has been determined
-  (`TargetParsingCompleteEvent`)
-* The top-level configurations have been determined
-  (`BuildConfigurationEvent`)
-* A target was built, successfully or not (`TargetCompleteEvent`)
-* A test was run (`TestAttempt`, `TestSummary`)
-
-Some of these events are represented outside of Bazel in the
-[Build Event Protocol](/remote/bep)
-(they are `BuildEvent`s). This allows not only `BlazeModule`s, but also things
-outside the Bazel process to observe the build. They are accessible either as a
-file that contains protocol messages or Bazel can connect to a server (called
-the Build Event Service) to stream events.
-
-This is implemented in the `build.lib.buildeventservice` and
-`build.lib.buildeventstream` Java packages.
-
-## External repositories
-
-Note: The information in this section is out of date, as code in this area has
-undergone extensive change in the past couple of years. Please refer to
-[external dependencies overview](/external/overview) for more up-to-date
-information.
-
-Whereas Bazel was originally designed to be used in a monorepo (a single source
-tree containing everything one needs to build), Bazel lives in a world where
-this is not necessarily true. "External repositories" are an abstraction used to
-bridge these two worlds: they represent code that is necessary for the build but
-is not in the main source tree.
-
-### The WORKSPACE file
-
-The set of external repositories is determined by parsing the WORKSPACE file.
-For example, a declaration like this:
-
-```
- local_repository(name="foo", path="/foo/bar")
-```
-
-results in the repository called `@foo` being available. Where this gets
-complicated is that one can define new repository rules in Starlark files, which
-can then be used to load new Starlark code, which can be used to define new
-repository rules and so on…
-
-To handle this case, the parsing of the WORKSPACE file (in
-`WorkspaceFileFunction`) is split up into chunks delineated by `load()`
-statements. The chunk index is indicated by `WorkspaceFileKey.getIndex()` and
-computing `WorkspaceFileFunction` until index X means evaluating it until the
-Xth `load()` statement.
-
-### Fetching repositories
-
-Before the code of the repository is available to Bazel, it needs to be
-_fetched_. This results in Bazel creating a directory under
-`$OUTPUT_BASE/external/`.
-
-Fetching the repository happens in the following steps:
-
-1. `PackageLookupFunction` realizes that it needs a repository and creates a
-   `RepositoryName` as a `SkyKey`, which invokes `RepositoryLoaderFunction`
-2. `RepositoryLoaderFunction` forwards the request to
-   `RepositoryDelegatorFunction` for unclear reasons (the code says it's to
-   avoid re-downloading things in case of Skyframe restarts, but the reasoning
-   is not very solid)
-3. `RepositoryDelegatorFunction` finds out the repository rule it's asked to
-   fetch by iterating over the chunks of the WORKSPACE file until the requested
-   repository is found
-4. The appropriate `RepositoryFunction` is found that implements the repository
-   fetching; it's either the Starlark implementation of the repository or a
-   hard-coded map for repositories that are implemented in Java.
-
-There are various layers of caching since fetching a repository can be very
-expensive:
-
-1. There is a cache for downloaded files that is keyed by their checksum
-   (`RepositoryCache`).
This requires the checksum to be available in the
-   WORKSPACE file, but that's good for hermeticity anyway. This is shared by
-   every Bazel server instance on the same workstation, regardless of which
-   workspace or output base they are running in.
-2. A "marker file" is written for each repository under `$OUTPUT_BASE/external`
-   that contains a checksum of the rule that was used to fetch it. If the Bazel
-   server restarts but the checksum does not change, it's not re-fetched. This
-   is implemented in `RepositoryDelegatorFunction.DigestWriter`.
-3. The `--distdir` command line option designates another cache that is used to
-   look up artifacts to be downloaded. This is useful in enterprise settings
-   where Bazel should not fetch random things from the Internet. This is
-   implemented by `DownloadManager`.
-
-Once a repository is downloaded, the artifacts in it are treated as source
-artifacts. This poses a problem because Bazel usually checks for up-to-dateness
-of source artifacts by calling stat() on them, and these artifacts are also
-invalidated when the definition of the repository they are in changes. Thus,
-`FileStateValue`s for an artifact in an external repository need to depend on
-their external repository. This is handled by `ExternalFilesHelper`.
-
-### Repository mappings
-
-It can happen that multiple repositories want to depend on the same repository,
-but in different versions (this is an instance of the "diamond dependency
-problem"). For example, if two binaries in separate repositories in the build
-want to depend on Guava, they will presumably both refer to Guava with labels
-starting with `@guava//` and expect that to mean different versions of it.
-
-Therefore, Bazel allows one to re-map external repository labels so that the
-string `@guava//` can refer to one Guava repository (such as `@guava1//`) in the
-repository of one binary and another Guava repository (such as `@guava2//`) in
-the repository of the other.
-
-Alternatively, this can also be used to **join** diamonds. If a repository
-depends on `@guava1//`, and another depends on `@guava2//`, repository mapping
-allows one to re-map both repositories to use a canonical `@guava//` repository.
-
-The mapping is specified in the WORKSPACE file as the `repo_mapping` attribute
-of individual repository definitions. It then appears in Skyframe as a member of
-`WorkspaceFileValue`, where it is plumbed to:
-
-* `Package.Builder.repositoryMapping`, which is used to transform label-valued
-  attributes of rules in the package by
-  `RuleClass.populateRuleAttributeValues()`
-* `Package.repositoryMapping`, which is used in the analysis phase (for
-  resolving things like `$(location)` which are not parsed in the loading
-  phase)
-* `BzlLoadFunction` for resolving labels in load() statements
-
-## JNI bits
-
-The server of Bazel is _mostly_ written in Java. The exceptions are the parts
-that Java cannot do by itself, or couldn't do by itself when we implemented
-them. This is mostly limited to interaction with the file system, process
-control and various other low-level things.
-
-The C++ code lives under `src/main/native` and the Java classes with native
-methods are:
-
-* `NativePosixFiles` and `NativePosixFileSystem`
-* `ProcessUtils`
-* `WindowsFileOperations` and `WindowsFileProcesses`
-* `com.google.devtools.build.lib.platform`
-
-## Console output
-
-Emitting console output seems like a simple thing, but the confluence of running
-multiple processes (sometimes remotely), fine-grained caching, the desire to
-have a nice and colorful terminal output and having a long-running server makes
-it non-trivial.
-
-Right after the RPC call comes in from the client, two `RpcOutputStream`
-instances are created (for stdout and stderr) that forward the data printed into
-them to the client. These are then wrapped in an `OutErr` (an (stdout, stderr)
-pair). Anything that needs to be printed on the console goes through these
-streams. Then these streams are handed over to
-`BlazeCommandDispatcher.execExclusively()`.
-
-Output is by default printed with ANSI escape sequences. When these are not
-desired (`--color=no`), they are stripped by an `AnsiStrippingOutputStream`. In
-addition, `System.out` and `System.err` are redirected to these output streams.
-This is so that debugging information can be printed using
-`System.err.println()` and still end up in the terminal output of the client
-(which is different from that of the server). Care is taken that if a process
-produces binary output (such as `bazel query --output=proto`), no munging of stdout
-takes place.
-
-Short messages (errors, warnings and the like) are expressed through the
-`EventHandler` interface. Notably, these are different from what one posts to
-the `EventBus` (this is confusing). Each `Event` has an `EventKind` (error,
-warning, info, and a few others) and they may have a `Location` (the place in
-the source code that caused the event to happen).
-
-Some `EventHandler` implementations store the events they received. This is used
-to replay information to the UI caused by various kinds of cached processing,
-for example, the warnings emitted by a cached configured target.
-
-Some `EventHandler`s also allow posting events that eventually find their way to
-the event bus (regular `Event`s do _not_ appear there). These are
-implementations of `ExtendedEventHandler` and their main use is to replay cached
-`EventBus` events. These `EventBus` events all implement `Postable`, but not
-everything that is posted to `EventBus` necessarily implements this interface;
-only those that are cached by an `ExtendedEventHandler` (it would be nice if
-everything did, and most things do, but it's not enforced).
-
-Terminal output is _mostly_ emitted through `UiEventHandler`, which is
-responsible for all the fancy output formatting and progress reporting Bazel
-does. It has two inputs:
-
-* The event bus
-* The event stream piped into it through `Reporter`
-
-The only direct connection the command execution machinery (that is, the rest of
-Bazel) has to the RPC stream to the client is through `Reporter.getOutErr()`,
-which allows direct access to these streams. It's only used when a command needs
-to dump large amounts of possibly binary data (such as `bazel query`).
It's turned on by default, although it records only
-abridged data so that its overhead is tolerable; the
-`--record_full_profiler_data` command line option makes it record everything it
-can.
-
-It emits a profile in the Chrome profiler format; it's best viewed in Chrome.
-Its data model is that of task stacks: one can start tasks and end tasks and
-they are supposed to be neatly nested within each other. Each Java thread gets
-its own task stack. **TODO:** How does this work with actions and
-continuation-passing style?
-
-The profiler is started and stopped in `BlazeRuntime.initProfiler()` and
-`BlazeRuntime.afterCommand()` respectively and attempts to be live for as long
-as possible so that we can profile everything. To add something to the profile,
-call `Profiler.instance().profile()`. It returns a `Closeable`, whose closure
-represents the end of the task. It's best used with try-with-resources
-statements.
-
-We also do rudimentary memory profiling in `MemoryProfiler`. It's also always on
-and it mostly records maximum heap sizes and GC behavior.
-
-## Testing Bazel
-
-Bazel has two main kinds of tests: ones that observe Bazel as a "black box" and
-ones that only run the analysis phase. We call the former "integration tests"
-and the latter "unit tests", although they are more like integration tests that
-are, well, less integrated. We also have some actual unit tests, where they are
-necessary.
-
-Of integration tests, we have two kinds:
-
-1. Ones implemented using a very elaborate bash test framework under
-   `src/test/shell`
-2. Ones implemented in Java. These are implemented as subclasses of
-   `BuildIntegrationTestCase`
-
-`BuildIntegrationTestCase` is the preferred integration testing framework as it
-is well-equipped for most testing scenarios. As it is a Java framework, it
-provides debuggability and seamless integration with many common development
-tools. There are many examples of `BuildIntegrationTestCase` classes in the
-Bazel repository.
-
-Analysis tests are implemented as subclasses of `BuildViewTestCase`. There is a
-scratch file system you can use to write `BUILD` files, then various helper
-methods can request configured targets, change the configuration and assert
-various things about the result of the analysis.
diff --git a/8.3.1/contribute/design-documents.mdx b/8.3.1/contribute/design-documents.mdx
deleted file mode 100644
index 1fe70b9..0000000
--- a/8.3.1/contribute/design-documents.mdx
+++ /dev/null
@@ -1,254 +0,0 @@
----
-title: 'Design Documents'
----
-
-
-
-If you're planning to add, change, or remove a user-facing feature, or make a
-*significant architectural change* to Bazel, you **must** write a design
-document and have it reviewed before you can submit the change.
-
-Here are some examples of significant changes:
-
-* Addition or deletion of native build rules
-* Breaking changes to native rules
-* Changes to native build rule semantics that affect the behavior of more
-  than a single rule
-* Changes to Bazel's rule definition API
-* Changes to the APIs that Bazel uses to connect to other systems
-* Changes to the Starlark language, semantics, or APIs
-* Changes that could have a pervasive effect on Bazel performance or memory
-  usage (for better or for worse)
-* Changes to widely used internal APIs
-* Changes to flags and the command-line interface.
-
-## Reasons for design reviews
-
-When you write a design document, you can coordinate with other Bazel developers
-and seek guidance from Bazel's core team.
For example, when a proposal adds,
-removes, or modifies any function or object available in BUILD, MODULE.bazel, or
-bzl files, add the [Starlark team](maintainers-guide.md) as reviewers.
-Design documents are reviewed before submission because:
-
-* Bazel is a very complex system; seemingly innocuous local changes can have
-  significant global consequences.
-* The team gets many feature requests from users; such requests need to be
-  evaluated not only for technical feasibility but also for importance with
-  regard to other feature requests.
-* Bazel features are frequently implemented by people outside the core team;
-  such contributors have widely varying levels of Bazel expertise.
-* The Bazel team itself has varying levels of expertise; no single team member
-  has a complete understanding of every corner of Bazel.
-* Changes to Bazel must account for backward compatibility and avoid breaking
-  changes.
-
-Bazel's design review policy helps to maximize the likelihood that:
-
-* all feature requests get a baseline level of scrutiny.
-* the right people will weigh in on designs before we've invested in an
-  implementation that may not work.
-
-To help you get started, take a look at the design documents in the
-[Bazel Proposals Repository](https://github.com/bazelbuild/proposals).
-Designs are works in progress, so implementation details can change over time
-and with feedback. The published design documents capture the initial design,
-and *not* the ongoing changes as designs are implemented. Always go to the
-documentation for descriptions of current Bazel functionality.
-
-## Contributor Workflow
-
-As a contributor, you can write a design document, send pull requests and
-request reviewers for your proposal.
-
-### Write the design document
-
-All design documents must have a header that includes:
-
-* author
-* date of last major change
-* list of reviewers, including one (and only one)
-  [lead reviewer](#lead-reviewer)
-* current status (_draft_, _in review_, _approved_, _rejected_,
-  _being implemented_, _implemented_)
-* link to discussion thread (_to be added after the announcement_)
-
-The document can be written either [as a world-readable Google Doc](#gdocs)
-or [using Markdown](#markdown). Read below for a
-[Markdown / Google Docs comparison](#markdown-versus-gdocs).
-
-Proposals that have a user-visible impact must have a section documenting the
-impact on backward compatibility (and a rollout plan if needed).
-
-### Create a Pull Request
-
-Share your design doc by creating a pull request (PR) to add the document to
-[the design index](https://github.com/bazelbuild/proposals). Add
-your markdown file or a document link to your PR.
-
-When possible, [choose a lead reviewer](#lead-reviewer)
-and cc other reviewers. If you don't choose a lead reviewer, a Bazel
-maintainer will assign one to your PR.
-
-After you create your PR, reviewers can make preliminary comments during the
-code review. For example, the lead reviewer can suggest extra reviewers, or
-point out missing information. The lead reviewer approves the PR when they
-believe the review process can start. This doesn't mean the proposal is perfect
-or will be approved; it means that the proposal contains enough information to
-start the discussion.
-
-### Announce the new proposal
-
-Send an announcement to
-[bazel-dev](https://groups.google.com/forum/#!forum/bazel-dev) when
-the PR is submitted.
-
-You may copy other groups (for example,
-[bazel-discuss](https://groups.google.com/forum/#!forum/bazel-discuss),
-to get feedback from Bazel end-users).
-
-### Iterate with reviewers
-
-Anyone interested can comment on your proposal. Try to answer questions,
-clarify the proposal, and address concerns.
-
-Discussion should happen on the announcement thread. If the proposal is in a
-Google Doc, comments may be used instead (note that anonymous comments are
-allowed).
-
-### Update the status
-
-When iteration is complete, create a new PR to update the status of the
-proposal. Send the PR to the same lead reviewer and cc the other reviewers.
-
-To officially accept the proposal, the lead reviewer approves the PR after
-ensuring that the other reviewers agree with the decision.
-
-There must be at least 1 week between the first announcement and the approval of
-a proposal. This ensures that users have had enough time to read the document
-and share their concerns.
-
-Implementation can begin before the proposal is accepted, for example as a
-proof-of-concept or an experiment. However, you cannot submit the change
-before the review is complete.
-
-### Choosing a lead reviewer
-
-A lead reviewer should be a domain expert who is:
-
-* Knowledgeable about the relevant subsystems
-* Objective and capable of providing constructive feedback
-* Available for the entire review period to lead the process
-
-Consider checking the contacts for various [team
-labels](/contribute/maintainers-guide#team-labels).
-
-## Markdown vs Google Docs
-
-Decide what works best for you, since both are accepted.
-
-Benefits of using Google Docs:
-
-* Effective for brainstorming, since it is easy to get started with.
-* Collaborative editing.
-* Quick iteration.
-* Easy way to suggest edits.
-
-Benefits of using Markdown files:
-
-* Clean URLs for linking.
-* Explicit record of revisions.
-* No forgetting to set up access rights before publicizing a link.
-* Easily searchable with search engines.
-* Future-proof: Plain text is not at the mercy of any specific tool
-  and doesn't require an Internet connection.
-* It is possible to update them even if the author is not around anymore.
-* They can be processed automatically (update/detect dead links, fetch
-  list of authors, etc.).
-
-You can choose to first iterate on a Google Doc, and then convert it to
-Markdown for posterity.
-
-### Using Google Docs
-
-For consistency, use the [Bazel design doc template](
-https://docs.google.com/document/d/1cE5zrjrR40RXNg64XtRFewSv6FrLV6slGkkqxBumS1w/edit).
-It includes the necessary header and creates visual
-consistency with other Bazel related documents. To do that, click on **File** >
-**Make a copy** or click this link to [make a copy of the design doc
-template](https://docs.google.com/document/d/1cE5zrjrR40RXNg64XtRFewSv6FrLV6slGkkqxBumS1w/copy).
-
-To make your document readable to the world, click on
-**Share** > **Advanced** > **Change…**, and
-choose "On - Anyone with the link". If you allow comments on the document,
-anyone can comment anonymously, even without a Google account.
-
-### Using Markdown
-
-Documents are stored on GitHub and use the
-[GitHub flavor of Markdown](https://guides.github.com/features/mastering-markdown/)
-([Specification](https://github.github.com/gfm/)).
-
-Create a PR to update an existing document. Significant changes should be
-reviewed by the document reviewers. Trivial changes (such as typos, formatting)
-can be approved by anyone.
- -## Reviewer workflow - -A reviewer comments, reviews and approves design documents. - -### General reviewer responsibilities - -You're responsible for reviewing design documents, asking for additional -information if needed, and approving a design that passes the review process. - -#### When you receive a new proposal - -1. Take a quick look at the document. -1. Comment if critical information is missing, or if the design doesn't fit - with the goals of the project. -1. Suggest additional reviewers. -1. Approve the PR when it is ready for review. - -#### During the review process - -1. Engage in a dialogue with the design author about issues that are problematic - or require clarification. -1. If appropriate, invite comments from non-reviewers who should be aware of - the design. -1. Decide which comments must be addressed by the author as a prerequisite to - approval. -1. Write "LGTM" (_Looks Good To Me_) in the discussion thread when you are - happy with the current state of the proposal. - -Follow this process for all design review requests. Do not approve designs -affecting Bazel if they are not in the -[design index](https://github.com/bazelbuild/proposals). - -### Lead reviewer responsibilities - -You're responsible for making the go / no-go decision on implementation -of a pending design. If you're not able to do this, you should identify a -suitable delegate (reassign the PR to the delegate), or reassign the bug to a -Bazel manager for further disposition. - -#### During the review process - -1. Ensure that the comment and design iteration process moves forward - constructively. -1. Prior to approval, ensure that concerns from other reviewers have been - resolved. - -#### After approval by all reviewers - -1. Make sure there has been at least 1 week since the announcement on the - mailing list. -1. Make sure the PR updates the status. -1. Approve the PR sent by the proposal author. - -#### Rejecting designs - -1. Make sure the PR author sends a PR; or send them a PR. -1. The PR updates the status of the document. -1. Add a comment to the document explaining why the design can't be approved in - its current state, and outlining next steps, if any (such as "revisit invalid - assumptions and resubmit"). diff --git a/8.3.1/contribute/docs-style-guide.mdx b/8.3.1/contribute/docs-style-guide.mdx deleted file mode 100644 index f50c9eb..0000000 --- a/8.3.1/contribute/docs-style-guide.mdx +++ /dev/null @@ -1,217 +0,0 @@ ---- -title: 'Bazel docs style guide' ---- - - - -Thank you for contributing to Bazel's documentation. This serves as a quick -documentation style guide to get you started. For any style questions not -answered by this guide, follow the -[Google developer documentation style guide](https://developers.google.com/style). - -## Defining principles - -Bazel docs should uphold these principles: - -- **Concise.** Use as few words as possible. -- **Clear.** Use plain language. Write without jargon for a fifth-grade - reading level. -- **Consistent.** Use the same words or phrases for repeated concepts - throughout the docs. -- **Correct.** Write in a way where the content stays correct for as long as - possible by avoiding time-based information and promises for the future. - -## Writing - -This section contains basic writing tips. - -### Headings - -- Page-level headings start at H2. (H1 headings are used as page titles.) -- Make headers as short as is sensible. This way, they fit in the TOC - without wrapping. 
-
-  - Yes: Permissions
-  - No: A brief note on permissions
-
-- Use sentence case for headings
-
-  - Yes: Set up your workspace
-  - No: Set Up Your Workspace
-
-- Try to make headings task-based or actionable. If headings are conceptual,
-  they may be based around understanding, but write to what the user does.
-
-  - Yes: Preserving graph order
-  - No: On the preservation of graph order
-
-### Names
-
-- Capitalize proper nouns, such as Bazel and Starlark.
-
-  - Yes: At the end of the build, Bazel prints the requested targets.
-  - No: At the end of the build, bazel prints the requested targets.
-
-- Keep it consistent. Don't introduce new names for existing concepts. Where
-  applicable, use the term defined in the
-  [Glossary](/reference/glossary).
-
-  - For example, if you're writing about issuing commands on a
-    terminal, don't use both terminal and command line on the page.
-
-### Page scope
-
-- Each page should have one purpose and that should be defined at the
-  beginning. This helps readers find what they need more quickly.
-
-  - Yes: This page covers how to install Bazel on Windows.
-  - No: (No introductory sentence.)
-
-- At the end of the page, tell the reader what to do next. For pages where
-  there is no clear action, you can include links to similar concepts,
-  examples, or other avenues for exploration.
-
-### Subject
-
-In Bazel documentation, the audience should primarily be users—the people using
-Bazel to build their software.
-
-- Address your reader as "you". (If for some reason you can't use "you",
-  use gender-neutral language, such as they.)
-  - Yes: To build Java code using Bazel,
-    you must install a JDK.
-  - **MAYBE:** For users to build Java code with Bazel, they must install a JDK.
-  - No: For a user to build Java code with
-    Bazel, he or she must install a JDK.
-
-- If your audience is NOT general Bazel users, define the audience at the
-  beginning of the page or in the section. Other audiences can include
-  maintainers, contributors, migrators, or other roles.
-- Avoid "we". In user docs, there is no author; just tell people what's
-  possible.
-  - Yes: As Bazel evolves, you should update your code base to maintain
-    compatibility.
-  - No: Bazel is evolving, and we will make changes to Bazel that at
-    times will be incompatible and require some changes from Bazel users.
-
-### Temporal
-
-Where possible, avoid terms that orient things in time, such as referencing
-specific dates (Q2 2022) or saying "now", "currently", or "soon." These go
-stale quickly and could be incorrect if it's a future projection. Instead,
-specify a version level, such as "Bazel X.x and higher supports \<feature\>",
-or link to a GitHub issue.
-
-- Yes: Bazel 0.10.0 or later supports
-  remote caching.
-- No: Bazel will soon support remote
-  caching, likely in October 2017.
-
-### Tense
-
-- Use present tense. Avoid past or future tense unless absolutely necessary
-  for clarity.
-  - Yes: Bazel issues an error when it
-    finds dependencies that don't conform to this rule.
-  - No: If Bazel finds a dependency that
-    does not conform to this rule, Bazel will issue an error.
-
-- Where possible, use active voice (where a subject acts upon an object) not
-  passive voice (where an object is acted upon by a subject). Generally,
-  active voice makes sentences clearer because it shows who is responsible. If
-  using active voice detracts from clarity, use passive voice.
-  - Yes: Bazel initiates X and uses the
-    output to build Y.
-  - No: X is initiated by Bazel and then
-    afterward Y will be built with the output.
-
-### Tone
-
-Write with a business-friendly tone.
-
-- Avoid colloquial language. It's harder to translate phrases that are
-  specific to English.
-
-    - Yes: Good rulesets
-    - No: So what is a good ruleset?
-
-- Avoid overly formal language. Write as though you're explaining the
-  concept to someone who is curious about tech, but doesn't know the details.
-
-## Formatting
-
-### File type
-
-For readability, wrap lines at 80 characters. Long links or code snippets
-may be longer, but should start on a new line.
-
-Note: Where possible, use Markdown instead of HTML in your files. Follow the
-[GitHub Markdown Syntax Guide](https://guides.github.com/features/mastering-markdown/#syntax)
-for recommended Markdown style.
-
-### Links
-
-- Use descriptive link text instead of "here" or "below". This practice
-  makes it easier to scan a doc and is better for screen readers.
-
-    - Yes: For more details, see [Installing Bazel].
-    - No: For more details, see [here].
-
-- End the sentence with the link, if possible.
-
-    - Yes: For more details, see [link].
-    - No: See [link] for more information.
-
-### Lists
-
-- Use an ordered list to describe how to accomplish a task with steps.
-- Use an unordered list to list things that aren't task based. (There should
-  still be an order of sorts, such as alphabetical, importance, etc.)
-- Write with parallel structure. For example:
-    1. Make all the list items sentences.
-    1. Start with verbs that are the same tense.
-    1. Use an ordered list if there are steps to follow.
-
-### Placeholders
-
-- Use angle brackets to denote a variable that users should change.
-  In Markdown, escape the angle brackets with a backslash: `\<` `\>`.
-
-    - Yes: `bazel help <command>`: Prints
-      help and options for `<command>`
-    - No: bazel help _command_: Prints help
-      and options for "command"
-
-- Especially for complicated code samples, use placeholders that make sense
-  in context.
-
-### Table of contents
-
-Use the auto-generated TOC supported by the site. Don't add a manual TOC.
-
-## Code
-
-Code samples are developers' best friends. You probably know how to write these
-already, but here are a few tips.
-
-If you're referencing a small snippet of code, you can embed it in a sentence.
-If you want the reader to use the code, such as copying a command, use a code
-block.
-
-### Code blocks
-
-- Keep it short. Eliminate all redundant or unnecessary text from a code
-  sample.
-- In Markdown, specify the type of code block by adding the sample's language:
-
-````
-```shell
-...
-```
-````
-
-- Separate commands and output into different code blocks.
-
-### Inline code formatting
-
-- Use code style for filenames, directories, paths, and small bits of code.
-- Use inline code styling instead of _italics_, "quotes," or **bolding**.
-
-    - Yes: `bazel help <command>`: Prints
-      help and options for `<command>`
-    - No: bazel help _command_: Prints help
-      and options for "command"
diff --git a/8.3.1/contribute/docs.mdx b/8.3.1/contribute/docs.mdx
deleted file mode 100644
index cc240cc..0000000
--- a/8.3.1/contribute/docs.mdx
+++ /dev/null
@@ -1,43 +0,0 @@
----
-title: 'Contribute to Bazel documentation'
----
-
-
-Thank you for contributing to Bazel's documentation! There are a few ways to
-help create better docs for our community.
-
-## Documentation types
-
-This site includes a few types of content.
-
- - *Narrative documentation*, which is written by technical writers and
-   engineers. Most of this site is narrative documentation that covers
-   conceptual and task-based guides.
- - *Reference documentation*, which is generated from code comments.
-   You can't make changes to the reference doc pages directly, but instead need
-   to change their source.
-
-## Documentation infrastructure
-
-Bazel documentation is served from Google, and the source files are mirrored in
-Bazel's GitHub repository. You can make changes to the source files in GitHub.
-If approved, you can merge the changes and a Bazel maintainer will update the
-website source to publish your updates.
-
-
-## Small changes
-
-You can approach small changes, such as fixing errors or typos, in a couple of
-ways.
-
- - **Pull request**. You can create a pull request in GitHub with the
-   [web-based editor](https://docs.github.com/repositories/working-with-files/managing-files/editing-files) or on a branch.
- - **Bug**. You can file a bug with details and suggested changes and the Bazel
-   documentation owners will make the update.
-
-## Large changes
-
-If you want to make substantial changes to existing documentation or propose
-new documentation, you can either create a pull request or start with a Google
-doc and contact the Bazel Owners to collaborate.
diff --git a/8.3.1/contribute/index.mdx b/8.3.1/contribute/index.mdx
deleted file mode 100644
index ee66772..0000000
--- a/8.3.1/contribute/index.mdx
+++ /dev/null
@@ -1,82 +0,0 @@
----
-title: 'Contributing to Bazel'
----
-
-
-There are many ways to help the Bazel project and ecosystem.
-
-## Provide feedback
-
-As you use Bazel, you may find things that can be improved.
-You can help by [reporting issues](http://github.com/bazelbuild/bazel/issues)
-when:
-
- - Bazel crashes or you encounter a bug that can [only be resolved using `bazel
-   clean`](/run/build#correct-incremental-rebuilds).
- - The documentation is incomplete or unclear. You can also report issues
-   from the page you are viewing by using the "Create issue"
-   link at the top right corner of the page.
- - An error message could be improved.
-
-## Participate in the community
-
-You can engage with the Bazel community by:
-
- - Answering questions [on Stack Overflow](
-   https://stackoverflow.com/questions/tagged/bazel).
- - Helping other users [on Slack](https://slack.bazel.build).
- - Improving documentation or [contributing examples](
-   https://github.com/bazelbuild/examples).
- - Sharing your experience or your tips, for example, on a blog or social media.
-
-## Contribute code
-
-Bazel is a large project and making a change to the Bazel source code
-can be difficult.
-
-You can contribute to the Bazel ecosystem by:
-
- - Helping rules maintainers by contributing pull requests.
- - Creating new rules and open-sourcing them.
- - Contributing to Bazel-related tools, for example, migration tools.
- - Improving Bazel integration with other IDEs and tools.
-
-Before making a change, [create a GitHub
-issue](http://github.com/bazelbuild/bazel/issues)
-or email [bazel-discuss@](mailto:bazel-discuss@googlegroups.com).
-
-The most helpful contributions fix bugs or add features (as opposed
-to stylistic, refactoring, or "cleanup" changes). Your change should
-include tests and documentation, keeping in mind backward-compatibility,
-portability, and the impact on memory usage and performance.
-
-To learn about how to submit a change, see the
-[patch acceptance process](/contribute/patch-acceptance).
-
-## Bazel's code description
-
-Bazel has a large codebase with code in multiple locations. See the [codebase guide](/contribute/codebase) for more details.
-
-Bazel is organized as follows:
-
-* Client code is in `src/main/cpp` and provides the command-line interface.
-* Protocol buffers are in `src/main/protobuf`.
-* Server code is in `src/main/java` and `src/test/java`.
-    * Core code, which is mostly composed of [SkyFrame](/reference/skyframe)
-      and some utilities.
-    * Built-in rules are in `com.google.devtools.build.lib.rules` and in
-      `com.google.devtools.build.lib.bazel.rules`. You might want to read about
-      the [Challenges of Writing Rules](/rules/challenges) first.
-* Java native interfaces are in `src/main/native`.
-* Various tooling for language support is listed in the
-  [compiling Bazel](/install/compile-source) section.
-
-
-### Searching Bazel's source code
-
-To quickly search through Bazel's source code, use
-[Bazel Code Search](https://source.bazel.build/). You can navigate Bazel's
-repositories, branches, and files. You can also view history, diffs, and blame
-information. To learn more, see the
-[Bazel Code Search User Guide](/contribute/search).
diff --git a/8.3.1/contribute/maintainers-guide.mdx b/8.3.1/contribute/maintainers-guide.mdx
deleted file mode 100644
index d5edf45..0000000
--- a/8.3.1/contribute/maintainers-guide.mdx
+++ /dev/null
@@ -1,213 +0,0 @@
----
-title: 'Guide for Bazel Maintainers'
----
-
-
-This is a guide for the maintainers of the Bazel open source project.
-
-If you are looking to contribute to Bazel, please read [Contributing to
-Bazel](/contribute) instead.
-
-The objectives of this page are to:
-
-1. Serve as the maintainers' source of truth for the project’s contribution
-   process.
-1. Set expectations between the community contributors and the project
-   maintainers.
-
-Bazel's [core group of contributors](/contribute/policy) has dedicated
-subteams to manage aspects of the open source project. These are:
-
-* **Release Process**: Manage Bazel's release process.
-* **Green Team**: Grow a healthy ecosystem of rules and tools.
-* **Developer Experience Gardeners**: Encourage external contributions, review
-  issues and pull requests, and make our development workflow more open.
-
-## Releases
-
-* [Release Playbook](https://github.com/bazelbuild/continuous-integration/blob/master/docs/release-playbook.md)
-* [Testing local changes with downstream projects](https://github.com/bazelbuild/continuous-integration/blob/master/docs/downstream-testing.md)
-
-## Continuous Integration
-
-Read the Green team's guide to Bazel's CI infrastructure in the
-[bazelbuild/continuous-integration](https://github.com/bazelbuild/continuous-integration/blob/master/buildkite/README.md)
-repository.
-
-## Lifecycle of an Issue
-
-1. A user creates an issue by choosing one of the
-   [issue templates](https://github.com/bazelbuild/bazel/issues/new/choose)
-   and it enters the pool of [unreviewed open
-   issues](https://github.com/bazelbuild/bazel/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+-label%3Auntriaged+-label%3Ap2+-label%3Ap1+-label%3Ap3+-label%3Ap4+-label%3Ateam-Starlark+-label%3Ateam-Rules-CPP+-label%3Ateam-Rules-Java+-label%3Ateam-XProduct+-label%3Ateam-Android+-label%3Ateam-Apple+-label%3Ateam-Configurability++-label%3Ateam-Performance+-label%3Ateam-Rules-Server+-label%3Ateam-Core+-label%3Ateam-Rules-Python+-label%3Ateam-Remote-Exec+-label%3Ateam-Local-Exec+-label%3Ateam-Bazel).
-1. A member on the Developer Experience (DevEx) subteam rotation reviews the
-   issue.
-    1. If the issue is **not a bug** or a **feature request**, the DevEx member
-       will usually close the issue and redirect the user to
-       [StackOverflow](https://stackoverflow.com/questions/tagged/bazel) and
-       [bazel-discuss](https://groups.google.com/forum/#!forum/bazel-discuss) for
-       higher visibility on the question.
-    1. If the issue belongs in one of the rules repositories owned by the
-       community, like [rules_apple](https://github.com/bazelbuild/rules_apple),
-       the DevEx member will [transfer this issue](https://docs.github.com/en/free-pro-team@latest/github/managing-your-work-on-github/transferring-an-issue-to-another-repository)
-       to the correct repository.
-    1. If the issue is vague or has missing information, the DevEx member will
-       assign the issue back to the user to request more information before
-       continuing. This usually occurs when the user does not choose the right
-       [issue template](https://github.com/bazelbuild/bazel/issues/new/choose)
-       or provides incomplete information.
-1. After reviewing the issue, the DevEx member decides if the issue requires
-   immediate attention. If it does, they will assign the **P0**
-   [priority](#priority) label and an owner from the list of team leads.
-1. The DevEx member assigns the `untriaged` label and exactly one [team
-   label](#team-labels) for routing.
-1. The DevEx member also assigns exactly one `type:` label, such as `type: bug`
-   or `type: feature request`, according to the type of the issue.
-1. For platform-specific issues, the DevEx member assigns one `platform:` label,
-   such as `platform:apple` for Mac-specific issues.
-1. If the issue is low priority and can be worked on by a new community
-   contributor, the DevEx member assigns the `good first issue` label.
-At this stage, the issue enters the pool of [untriaged open
-issues](https://github.com/bazelbuild/bazel/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged).
-
-Each Bazel subteam will triage all issues under labels they own, preferably on a
-weekly basis. The subteam will review and evaluate the issue and provide a
-resolution, if possible. If you are an owner of a team label, see [this section
-](#label-own) for more information.
-
-When an issue is resolved, it can be closed.
-
-## Lifecycle of a Pull Request
-
-1. A user creates a pull request.
-1. If you are a member of a Bazel team and are sending a PR against your own
-   area, you are responsible for assigning your team label and finding the best
-   reviewer.
-1. Otherwise, during daily triage, a DevEx member assigns one
-   [team label](#team-labels) and the team's technical lead (TL) for routing.
-    1. The TL may optionally assign someone else to review the PR.
-1. The assigned reviewer reviews the PR and works with the author until it is
-   approved or dropped.
-1. If approved, the reviewer **imports** the PR's commit(s) into Google's
-   internal version control system for further tests. As Bazel is the same build
-   system used internally at Google, we need to test all PR commits against the
-   internal test suite. This is the reason why we do not merge PRs directly.
-1. If the imported commit passes all internal tests, the commit will be squashed
-   and exported back out to GitHub.
-1. When the commit merges into master, GitHub automatically closes the PR.
-
-
-## My team owns a label. What should I do?
-
-Subteams need to triage all issues in the [labels they own](#team-labels),
-preferably on a weekly basis.
-
-### Issues
-
-1. Filter the list of issues by your team label **and** the `untriaged` label.
-1. Review the issue.
-1. Identify a [priority level](#priority) and assign the label.
-    1. The issue may have already been prioritized by the DevEx subteam if it's a
-       P0. Re-prioritize if needed.
-    1. Each issue needs to have exactly one [priority label](#priority). If an
-       issue is either P0 or P1, we assume that it is actively being worked on.
-1. Remove the `untriaged` label.
-
-Note that you need to be in the [bazelbuild
-organization](https://github.com/bazelbuild) to be able to add or remove labels.
-
-### Pull Requests
-
-1. Filter the list of pull requests by your team label.
-1. Review open pull requests.
-    1. **Optional**: If you are assigned for the review but are not the right fit
-       for it, re-assign the appropriate reviewer to perform a code review.
-1. Work with the pull request creator to complete a code review.
-1. Approve the PR.
-1. Ensure that all tests pass.
-1. Import the patch to the internal version control system and run the internal
-   presubmits.
-1. Submit the internal patch. If the patch submits and exports successfully, the
-   PR will be closed automatically by GitHub.
-
-## Priority
-
-The following definitions for priority will be used by the maintainers to triage
-issues.
-
-* [**P0**](https://github.com/bazelbuild/bazel/labels/P0) - Major broken
-  functionality that causes a Bazel release (minus release candidates) to be
-  unusable, or a downed service that severely impacts development of the Bazel
-  project. This includes regressions introduced in a new release that block a
-  significant number of users, or an incompatible breaking change that was not
-  compliant with the [Breaking
-  Change](https://docs.google.com/document/d/1q5GGRxKrF_mnwtaPKI487P8OdDRh2nN7jX6U-FXnHL0/edit?pli=1#heading=h.ceof6vpkb3ik)
-  policy. No practical workaround exists.
-* [**P1**](https://github.com/bazelbuild/bazel/labels/P1) - Critical defect or
-  feature which should be addressed in the next release, or a serious issue that
-  impacts many users (including the development of the Bazel project), but a
-  practical workaround exists. Typically does not require immediate action. In
-  high demand and planned in the current quarter's roadmap.
-* [**P2**](https://github.com/bazelbuild/bazel/labels/P2) - Defect or feature
-  that should be addressed but that we aren't currently working on. A moderate
-  issue in a released Bazel version that is inconvenient for users and needs to
-  be addressed in a future release, or that has an easy workaround.
-* [**P3**](https://github.com/bazelbuild/bazel/labels/P3) - Desirable minor bug
-  fix or enhancement with small impact. Not prioritized into Bazel roadmaps or
-  any imminent release; however, community contributions are encouraged.
-* [**P4**](https://github.com/bazelbuild/bazel/labels/P4) - Low priority defect
-  or feature request that is unlikely to get closed. Can also be kept open for a
-  potential re-prioritization if more users are impacted.
-* [**ice-box**](https://github.com/bazelbuild/bazel/issues?q=label%3Aice-box+is%3Aclosed)
-  - Issues that we currently don't have time to deal with or to accept
-  contributions for. We will close these issues to indicate that nobody is
-  working on them, but will continue to monitor their validity over time and
-  revive them if enough people are impacted and if we happen to have resources
-  to deal with them. As always, feel free to comment or add reactions to these
-  issues even when closed.
-
-## Team labels
-
-* [`team-Android`](https://github.com/bazelbuild/bazel/labels/team-Android): Issues for Android team
-    * Contact: [ahumesky](https://github.com/ahumesky)
-* [`team-Bazel`](https://github.com/bazelbuild/bazel/labels/team-Bazel): General Bazel product/strategy issues
-    * Contact: [meisterT](https://github.com/meisterT)
-* [`team-CLI`](https://github.com/bazelbuild/bazel/labels/team-CLI): Console UI
-    * Contact: [meisterT](https://github.com/meisterT)
-* [`team-Configurability`](https://github.com/bazelbuild/bazel/labels/team-Configurability): Issues for Configurability team. Includes: Core build configuration and transition system. Does *not* include: Changes to new or existing flags
-    * Contact: [gregestren](https://github.com/gregestren)
-* [`team-Core`](https://github.com/bazelbuild/bazel/labels/team-Core): Skyframe, bazel query, BEP, options parsing, bazelrc
-    * Contact: [haxorz](https://github.com/haxorz)
-* [`team-Documentation`](https://github.com/bazelbuild/bazel/labels/team-Documentation): Issues for Documentation team
-* [`team-ExternalDeps`](https://github.com/bazelbuild/bazel/labels/team-ExternalDeps): External dependency handling, Bzlmod, remote repositories, WORKSPACE file
-    * Contact: [meteorcloudy](https://github.com/meteorcloudy)
-* [`team-Loading-API`](https://github.com/bazelbuild/bazel/labels/team-Loading-API): BUILD file and macro processing: labels, package(), visibility, glob
-    * Contact: [brandjon](https://github.com/brandjon)
-* [`team-Local-Exec`](https://github.com/bazelbuild/bazel/labels/team-Local-Exec): Issues for Execution (Local) team
-    * Contact: [meisterT](https://github.com/meisterT)
-* [`team-OSS`](https://github.com/bazelbuild/bazel/labels/team-OSS): Issues for Bazel OSS team: installation, release process, Bazel packaging, website, docs infrastructure
-    * Contact: [meteorcloudy](https://github.com/meteorcloudy)
-* [`team-Performance`](https://github.com/bazelbuild/bazel/labels/team-Performance): Issues for Bazel Performance team
-    * Contact: [meisterT](https://github.com/meisterT)
-* [`team-Remote-Exec`](https://github.com/bazelbuild/bazel/labels/team-Remote-Exec): Issues for Execution (Remote) team
-    * Contact: [coeuvre](https://github.com/coeuvre)
-* [`team-Rules-API`](https://github.com/bazelbuild/bazel/labels/team-Rules-API): API for writing rules/aspects: providers, runfiles, actions, artifacts
-    * Contact: [comius](https://github.com/comius)
-* [`team-Rules-CPP`](https://github.com/bazelbuild/bazel/labels/team-Rules-CPP) / [`team-Rules-ObjC`](https://github.com/bazelbuild/bazel/labels/team-Rules-ObjC): Issues for C++/Objective-C rules, including native Apple rule logic
-    * Contact: [buildbreaker2021](https://github.com/buildbreaker2021)
-* [`team-Rules-Java`](https://github.com/bazelbuild/bazel/labels/team-Rules-Java): Issues for Java rules
-    * Contact: [hvadehra](https://github.com/hvadehra)
-* [`team-Rules-Python`](https://github.com/bazelbuild/bazel/labels/team-Rules-Python): Issues for the native Python rules
-    * Contact: [rickeylev](https://github.com/rickeylev)
-* [`team-Rules-Server`](https://github.com/bazelbuild/bazel/labels/team-Rules-Server): Issues for server-side rules included with Bazel
-    * Contact: [comius](https://github.com/comius)
-* [`team-Starlark-Integration`](https://github.com/bazelbuild/bazel/labels/team-Starlark-Integration): Non-API Bazel + Starlark integration. Includes: how Bazel triggers the Starlark interpreter, Stardoc, builtins injection, character encoding.
-  Does *not* include: BUILD or .bzl language issues.
-    * Contact: [brandjon](https://github.com/brandjon)
-* [`team-Starlark-Interpreter`](https://github.com/bazelbuild/bazel/labels/team-Starlark-Interpreter): Issues for the Starlark interpreter (anything in [java.net.starlark](https://github.com/bazelbuild/bazel/tree/master/src/main/java/net/starlark/java)). BUILD and .bzl API issues (which represent Bazel's *integration* with Starlark) go in `team-Build-Language`.
-    * Contact: [brandjon](https://github.com/brandjon)
-
-For new issues, the `category: *` labels are deprecated in favor of the team
-labels.
-
-See the [full list of labels](https://github.com/bazelbuild/bazel/labels).
diff --git a/8.3.1/contribute/naming.mdx b/8.3.1/contribute/naming.mdx
deleted file mode 100644
index 144b08a..0000000
--- a/8.3.1/contribute/naming.mdx
+++ /dev/null
@@ -1,73 +0,0 @@
----
-title: 'Naming a Bazel related project'
----
-
-
-First, thank you for contributing to the Bazel ecosystem! Please reach out to
-the Bazel community on the
-[bazel-discuss mailing list](https://groups.google.com/forum/#!forum/bazel-discuss
-) to share your project and its suggested name.
-
-If you are building a Bazel-related tool or sharing your Starlark rules,
-we recommend following these guidelines for the name of your project:
-
-## Naming Starlark rules
-
-See [Deploying new Starlark rules](/rules/deploying)
-in the docs.
-
-## Naming other Bazel related tools
-
-This section applies if you are building a tool to enrich the Bazel ecosystem.
-For example, a new IDE plugin or a new build system migrator.
-
-Picking a good name for your tool can be hard. If we’re not careful and use too
-many codenames, the Bazel ecosystem could become very difficult to understand
-for newcomers.
-
-Follow these guidelines for naming Bazel tools:
-
-1. Prefer **not introducing a new brand name**: "*Bazel*" is already a new brand
-for our users; we should avoid confusing them with too many new names.
-
-2. Prefer **using a name that includes "Bazel"**: This helps to express that it
-is a Bazel-related tool and helps people find it with a search engine.
-
-3. Prefer **using names that are descriptive about what the tool is doing**:
-Ideally, the name should not need a subtitle for users to have a good first
-guess at what the tool does. Using English words separated by spaces is a good
-way to achieve this.
-
-4. **It is not a requirement to use a floral or food theme**: Bazel evokes
-[basil](https://en.wikipedia.org/wiki/Basil), the plant. You do not need to
-look for a name that is a plant, food or that relates to "basil."
-
-5. **If your tool relates to another third party brand, use it only as a
-descriptor**: For example, use "Bazel migrator for CMake" instead of
-"CMake Bazel migrator".
-
-These guidelines also apply to the GitHub repository URL. Reading the repository
-URL should help people understand what the tool does. Of course, the repository
-name can be shorter, and it must use lowercase letters and dashes instead of
-spaces.
-
-
-Examples of good names:
-
-* *Bazel for Eclipse*: Users will understand that if they want to use Bazel
-  with Eclipse, this is where they should be looking. It uses a third party brand
-  as a descriptor.
-* *Bazel buildfarm*: A "buildfarm" is a
-  [compile farm](https://en.wikipedia.org/wiki/Compile_farm). Users
-  will understand that this project relates to building on servers.
-
-Examples of names to avoid:
-
-* *Ocimum*: The [scientific name of basil](https://en.wikipedia.org/wiki/Ocimum)
-  does not relate enough to the Bazel project.
-* *Bazelizer*: The tool behind this name could do a lot of things; this name is
-  not descriptive enough.
-
-Note that these recommendations are aligned with the
-[guidelines](https://opensource.google.com/docs/releasing/preparing/#name)
-Google uses when open sourcing a project.
diff --git a/8.3.1/contribute/patch-acceptance.mdx b/8.3.1/contribute/patch-acceptance.mdx
deleted file mode 100644
index 87376af..0000000
--- a/8.3.1/contribute/patch-acceptance.mdx
+++ /dev/null
@@ -1,52 +0,0 @@
----
-title: 'Patch Acceptance Process'
----
-
-
-This page outlines how contributors can propose and make changes to the Bazel
-code base.
-
-1. Read the [Bazel Contribution policy](/contribute/policy).
-1. Create a [GitHub issue](https://github.com/bazelbuild/bazel/) to
-   discuss your plan and design. Pull requests that change or add behavior
-   need a corresponding issue for tracking.
-1. If you're proposing significant changes, write a
-   [design document](/contribute/design-documents).
-1. Ensure you've signed a [Contributor License
-   Agreement](https://cla.developers.google.com).
-1. Prepare a git commit that implements the feature. Don't forget to add tests
-   and update the documentation. If your change has user-visible effects, please
-   [add release notes](/contribute/release-notes). If it is an incompatible change,
-   read the [guide for rolling out breaking changes](/contribute/breaking-changes).
-1. Create a pull request on
-   [GitHub](https://github.com/bazelbuild/bazel/pulls). If you're new to GitHub,
-   read [about pull
-   requests](https://help.github.com/articles/about-pull-requests/). Note that
-   we restrict permissions to create branches on the main Bazel repository, so
-   you will need to push your commit to [your own fork of the
-   repository](https://help.github.com/articles/working-with-forks/).
-1. A Bazel maintainer should assign you a reviewer within two business days
-   (excluding holidays in the USA and Germany). If you aren't assigned a
-   reviewer in that time, you can request one by emailing
-   [bazel-discuss@googlegroups.com](mailto:bazel-discuss@googlegroups.com).
-1. Work with the reviewer to complete a code review. For each change, create a
-   new commit and push it to make changes to your pull request. If the review
-   takes too long (for instance, if the reviewer is unresponsive), send an email to
-   [bazel-discuss@googlegroups.com](mailto:bazel-discuss@googlegroups.com).
-1. After your review is complete, a Bazel maintainer applies your patch to
-   Google's internal version control system.
-
-   This triggers internal presubmit checks
-   that may suggest more changes. If you haven't expressed a preference, the
-   maintainer submitting your change adds "trivial" changes (such as
-   [linting](https://en.wikipedia.org/wiki/Lint_(software))) that don't affect
-   design. If deeper changes are required or you'd prefer to apply
-   changes directly, you and the reviewer should communicate preferences
-   clearly in review comments.
-
-   After internal submission, the patch is exported as a Git commit,
-   at which point the GitHub pull request is closed. All final changes
-   are attributed to you.
diff --git a/8.3.1/contribute/policy.mdx b/8.3.1/contribute/policy.mdx
deleted file mode 100644
index 1bf0029..0000000
--- a/8.3.1/contribute/policy.mdx
+++ /dev/null
@@ -1,78 +0,0 @@
----
-translation: human
-page_type: lcat
-title: 'Contribution policy'
----
-
-
-This page covers Bazel's governance model and contribution policy.
-
-## Governance model
-
-The [Bazel project](https://github.com/bazelbuild) is led and managed by Google
-and has a large community of contributors outside of Google. Some Bazel
-components (such as specific rules repositories under the
-[bazelbuild](https://github.com/bazelbuild) organization) are led,
-maintained, and managed by members of the community. The Google Bazel team
-reviews suggestions to add community-owned repositories (such as rules) to the
-[bazelbuild](https://github.com/bazelbuild) GitHub organization.
-
-### Contributor roles
-
-Here are outlines of the roles in the Bazel project, including their
-responsibilities:
-
-* **Owners**: The Google Bazel team. Owners are responsible for:
-    * Strategy, maintenance, and leadership of the Bazel project.
-    * Building and maintaining Bazel's core functionality.
-    * Appointing Maintainers and approving new repositories.
-* **Maintainers**: The Google Bazel team and designated GitHub users.
-  Maintainers are responsible for:
-    * Building and maintaining the primary functionality of their repository.
-    * Reviewing and approving contributions to areas of the Bazel code base.
-    * Supporting users and contributors with timely and transparent issue
-      management, PR review, and documentation.
-    * Releasing, testing and collaborating with Bazel Owners.
-* **Contributors**: All users who contribute code or documentation to the
-  Bazel project.
-    * Creating well-written PRs to contribute to Bazel's codebase and
-      documentation.
-    * Using standard channels, such as GitHub Issues, to propose changes and
-      report issues.
-
-### Becoming a Maintainer
-
-Bazel Owners may appoint Maintainers to lead well-defined areas of code, such as
-rule sets. Contributors with a record of consistent, responsible past
-contributions who are planning major contributions in the future could be
-considered to become qualified Maintainers.
-
-## Contribution policy
-
-The Bazel project accepts contributions from external contributors. Here are the
-contribution policies for Google-managed and Community-managed areas of code.
-
-* **Licensing**. All Maintainers and Contributors must sign
-  [Google’s Contributor License Agreement](https://cla.developers.google.com/clas).
-* **Contributions**. Owners and Maintainers should make every effort to accept
-  worthwhile contributions. All contributions must be:
-    * Well written and well tested.
-    * Discussed and approved by the Maintainers of the relevant area of code.
-      Discussions and approvals happen on GitHub Issues and in GitHub PRs.
-      Larger contributions require a
-      [design review](/contribute/design-documents).
-    * Added to Bazel's Continuous Integration system if not already present.
-    * Supportable and aligned with Bazel product direction.
-* **Code review**. All changes in all `bazelbuild` repositories require
-  review:
-    * All PRs must be approved by an Owner or Maintainer.
-    * Only Owners and Maintainers can merge PRs.
-* **Compatibility**. Owners may need to reject or request modifications to PRs
-  in the unlikely event that the change requires substantial modifications to
-  internal Google systems.
-* **Documentation**. Where relevant, feature contributions should include
-  documentation updates.
-
-For more details on contributing to Bazel, see our
-[contribution guidelines](/contribute/).
diff --git a/8.3.1/contribute/release-notes.mdx b/8.3.1/contribute/release-notes.mdx
deleted file mode 100644
index 83e1d75..0000000
--- a/8.3.1/contribute/release-notes.mdx
+++ /dev/null
@@ -1,78 +0,0 @@
----
-title: 'Writing release notes'
----
-
-
-This document is targeted at Bazel contributors.
-
-Commit descriptions in Bazel include a `RELNOTES:` tag followed by a release
-note. This is used by the Bazel team to track changes in each release and write
-the release announcement.
-
-## Overview
-
-* Is your change a bugfix? In that case, you don't need a release note. Please
-  include a reference to the GitHub issue.
-
-* If the change adds, removes, or changes Bazel in a user-visible way, then it
-  may be advantageous to mention it.
-
-If the change is significant, follow the [design document
-policy](/contribute/design-documents) first.
-
-## Guidelines
-
-The release notes will be read by our users, so they should be short (ideally
-one sentence), avoid jargon (Bazel-internal terminology), and focus on what the
-change is about.
-
-* Include a link to the relevant documentation. Almost any release note should
-  contain a link. If the description mentions a flag, a feature, or a command
-  name, users will probably want to know more about it.
-
-* Use backquotes around code, symbols, flags, or any word containing an
-  underscore.
-
-* Do not just copy and paste bug descriptions. They are often cryptic and only
-  make sense to us, and leave the user scratching their head. Release notes are
-  meant to explain what has changed and why in user-understandable language.
-
-* Always use present tense and the format "Bazel now supports Y" or "X now does
-  Z." We don't want our release notes to sound like bug entries. All release
-  note entries should be informative and use a consistent style and language.
-
-* If something has been deprecated or removed, use "X has been deprecated" or "X
-  has been removed." Not "is removed" or "was removed."
-
-* If Bazel now does something differently, use "X now $newBehavior instead of
-  $oldBehavior" in present tense. This lets the user know in detail what to
-  expect when they use the new release.
-
-* If Bazel now supports or no longer supports something, use "Bazel now supports
-  / no longer supports X".
-
-* Explain why something has been removed / deprecated / changed. One sentence is
-  enough, but we want the user to be able to evaluate impact on their builds.
-
-* Do NOT make any promises about future functionality. Avoid "this flag will be
-  removed" or "this will be changed." It introduces uncertainty. The first thing
-  the user will wonder is "when?" and we don't want them to start worrying about
-  their current builds breaking at some unknown time.
-
-## Process
-
-As part of the [release
-process](https://github.com/bazelbuild/continuous-integration/blob/master/docs/release-playbook.md),
-we collect the `RELNOTES` tags of every commit. We copy everything into a [Google
-Doc](https://docs.google.com/document/d/1wDvulLlj4NAlPZamdlEVFORks3YXJonCjyuQMUQEmB0/edit)
-where we review, edit, and organize the notes.
-
-The release manager sends an email to the
-[bazel-dev](https://groups.google.com/forum/#!forum/bazel-dev) mailing list.
-Bazel contributors are invited to contribute to the document and make sure
-their changes are correctly reflected in the announcement.
-
-Later, the announcement will be submitted to the [Bazel
-blog](https://blog.bazel.build/), using the [bazel-blog
-repository](https://github.com/bazelbuild/bazel-blog/tree/master/_posts).
diff --git a/8.3.1/contribute/statemachine-guide.mdx b/8.3.1/contribute/statemachine-guide.mdx
deleted file mode 100644
index e98a96e..0000000
--- a/8.3.1/contribute/statemachine-guide.mdx
+++ /dev/null
@@ -1,1236 +0,0 @@
----
-title: 'A Guide to Skyframe `StateMachine`s'
----
-
-
-## Overview
-
-A Skyframe `StateMachine` is a *deconstructed* function-object that resides on
-the heap. It supports flexible evaluation without redundancy[^1] when
-required values are not immediately available but computed asynchronously. The
-`StateMachine` cannot tie up a thread resource while waiting, but instead has to
-be suspended and resumed. The deconstruction thus exposes explicit re-entry
-points so that prior computations can be skipped.
-
-`StateMachine`s can be used to express sequences, branching, structured logical
-concurrency and are tailored specifically for Skyframe interaction.
-`StateMachine`s can be composed into larger `StateMachine`s and share
-sub-`StateMachine`s. Concurrency is always hierarchical by construction and
-purely logical. Every concurrent subtask runs in the single shared parent
-SkyFunction thread.
-
-## Introduction
-
-This section briefly motivates and introduces `StateMachine`s, found in the
-[`java.com.google.devtools.build.skyframe.state`](https://github.com/bazelbuild/bazel/tree/master/src/main/java/com/google/devtools/build/skyframe/state)
-package.
-
-### A brief introduction to Skyframe restarts
-
-Skyframe is a framework that performs parallel evaluation of dependency graphs.
-Each node in the graph corresponds to the evaluation of a SkyFunction with a
-SkyKey specifying its parameters and SkyValue specifying its result. The
-computational model is such that a SkyFunction may look up SkyValues by SkyKey,
-triggering recursive, parallel evaluation of additional SkyFunctions. Instead of
-blocking, which would tie up a thread, when a requested SkyValue is not yet
-ready because some subgraph of computation is incomplete, the requesting
-SkyFunction observes a `null` `getValue` response and should return `null`
-instead of a SkyValue, signaling that it is incomplete due to missing inputs.
-Skyframe *restarts* the SkyFunctions when all previously requested SkyValues
-become available.
-
-Before the introduction of `SkyKeyComputeState`, the traditional way of handling
-a restart was to fully rerun the computation. Although this has quadratic
-complexity, functions written this way eventually complete because on each
-rerun, fewer lookups return `null`. With `SkyKeyComputeState` it is possible to
-associate hand-specified check-point data with a SkyFunction, saving significant
-recomputation.
-
-`StateMachine`s are objects that live inside `SkyKeyComputeState` and eliminate
-virtually all recomputation when a SkyFunction restarts (assuming that
-`SkyKeyComputeState` does not fall out of cache) by exposing suspend and resume
-execution hooks.
-
-### Stateful computations inside `SkyKeyComputeState`
-
-From an object-oriented design standpoint, it makes sense to consider storing
-computational objects inside `SkyKeyComputeState` instead of pure data values.
-In *Java*, the bare minimum description of a behavior-carrying object is a
-*functional interface* and it turns out to be sufficient. A `StateMachine` has
-the following, curiously recursive, definition[^2].
-
-```
-@FunctionalInterface
-public interface StateMachine {
-  StateMachine step(Tasks tasks) throws InterruptedException;
-}
-```
-
-The `Tasks` interface is analogous to `SkyFunction.Environment` but it is
-designed for asynchrony and adds support for logically concurrent subtasks[^3].
-
-The return value of `step` is another `StateMachine`, allowing the specification
-of a sequence of steps, inductively. `step` returns `DONE` when the
-`StateMachine` is done. For example:
-
-```
-class HelloWorld implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    System.out.println("hello");
-    return this::step2;  // The next step is HelloWorld.step2.
-  }
-
-  private StateMachine step2(Tasks tasks) {
-    System.out.println("world");
-    // DONE is a special value defined in the `StateMachine` interface signaling
-    // that the computation is done.
-    return DONE;
-  }
-}
-```
-
-describes a `StateMachine` with the following output.
-
-```
-hello
-world
-```
-
-Note that the method reference `this::step2` is also a `StateMachine` due to
-`step2` satisfying `StateMachine`'s functional interface definition. Method
-references are the most common way to specify the next state in a
-`StateMachine`.
-
-![Suspending and resuming](/contribute/images/suspend-resume.svg)
-
-Intuitively, breaking a computation down into `StateMachine` steps, instead of a
-monolithic function, provides the hooks needed to *suspend* and *resume* a
-computation. When `StateMachine.step` returns, there is an explicit *suspension*
-point. The continuation specified by the returned `StateMachine` value is an
-explicit *resume* point. Recomputation can thus be avoided because the
-computation can be picked up exactly where it left off.
-
-### Callbacks, continuations and asynchronous computation
-
-In technical terms, a `StateMachine` serves as a *continuation*, determining the
-subsequent computation to be executed. Instead of blocking, a `StateMachine` can
-voluntarily *suspend* by returning from the `step` function, which transfers
-control back to a [`Driver`](#drivers-and-bridging) instance. The `Driver` can
-then switch to a ready `StateMachine` or relinquish control back to Skyframe.
-
-Traditionally, *callbacks* and *continuations* are conflated into one concept.
-However, `StateMachine`s maintain a distinction between the two.
-
-* *Callback* - describes where to store the result of an asynchronous
-  computation.
-* *Continuation* - specifies the next execution state.
-
-Callbacks are required when invoking an asynchronous operation, which means that
-the actual operation doesn't occur immediately upon calling the method, as in
-the case of a SkyValue lookup. Callbacks should be kept as simple as possible.
-
-Caution: A common pitfall of callbacks is that the asynchronous computation must
-ensure the callback is called by the end of every reachable path. It's possible
-to overlook some branches and the compiler doesn't give warnings about this.
-
-*Continuations* are the `StateMachine` return values of `StateMachine`s and
-encapsulate the complex execution that follows once all asynchronous
-computations resolve. This structured approach helps to keep the complexity of
-callbacks manageable.
-
-## Tasks
-
-The `Tasks` interface provides `StateMachine`s with an API to look up SkyValues
-by SkyKey and to schedule concurrent subtasks.
-
-```
-interface Tasks {
-  void enqueue(StateMachine subtask);
-
-  void lookUp(SkyKey key, Consumer<SkyValue> sink);
-
-  <E extends Exception>
-  void lookUp(SkyKey key, Class<E> exceptionClass, ValueOrExceptionSink<E> sink);
-
-  // lookUp overloads for 2 and 3 exception types exist, but are elided here.
-}
-```
-
-Tip: When any state uses the `Tasks` interface to perform lookups or create
-subtasks, those lookups and subtasks will complete before the next state begins.
-
-Tip: (Corollary) If subtasks are complex `StateMachine`s or recursively create
-subtasks, they all *transitively* complete before the next state begins.
-
-### SkyValue lookups
-
-`StateMachine`s use `Tasks.lookUp` overloads to look up SkyValues. They are
-analogous to `SkyFunction.Environment.getValue` and
-`SkyFunction.Environment.getValueOrThrow` and have similar exception handling
-semantics. The implementation does not immediately perform the lookup, but
-instead, batches[^4] as many lookups as possible before doing so. The value
-might not be immediately available, for example, requiring a Skyframe restart,
-so the caller specifies what to do with the resulting value using a callback.
-
-The `StateMachine` processor ([`Driver`s and bridging to
-SkyFrame](#drivers-and-bridging)) guarantees that the value is available before
-the next state begins. An example follows.
-
-```
-class DoesLookup implements StateMachine, Consumer<SkyValue> {
-  private Value value;
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    tasks.lookUp(new Key(), (Consumer<SkyValue>) this);
-    return this::processValue;
-  }
-
-  // The `lookUp` call in `step` causes this to be called before `processValue`.
-  @Override  // Implementation of Consumer<SkyValue>.
-  public void accept(SkyValue value) {
-    this.value = (Value) value;
-  }
-
-  private StateMachine processValue(Tasks tasks) {
-    System.out.println(value);  // Prints the string representation of `value`.
-    return DONE;
-  }
-}
-```
-
-In the above example, the first step does a lookup for `new Key()`, passing
-`this` as the consumer. That is possible because `DoesLookup` implements
-`Consumer<SkyValue>`.
-
-Tip: When passing `this` as a value sink, it's helpful to readers to upcast it
-to the receiver type to narrow down the purpose of passing `this`. The example
-passes `(Consumer<SkyValue>) this`.
-
-By contract, before the next state `DoesLookup.processValue` begins, all the
-lookups of `DoesLookup.step` are complete. Therefore `value` is available when
-it is accessed in `processValue`.
-
-### Subtasks
-
-`Tasks.enqueue` requests the execution of logically concurrent subtasks.
-Subtasks are also `StateMachine`s and can do anything regular `StateMachine`s
-can do, including recursively creating more subtasks or looking up SkyValues.
-Much like `lookUp`, the state machine driver ensures that all subtasks are
-complete before proceeding to the next step. An example follows.
-
-```
-class Subtasks implements StateMachine {
-  private int i = 0;
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    tasks.enqueue(new Subtask1());
-    tasks.enqueue(new Subtask2());
-    // The next step is Subtasks.processResults. It won't be called until both
-    // Subtask1 and Subtask2 are complete.
-    return this::processResults;
-  }
-
-  private StateMachine processResults(Tasks tasks) {
-    System.out.println(i);  // Prints "3".
-    return DONE;  // Subtasks is done.
-  }
-
-  private class Subtask1 implements StateMachine {
-    @Override
-    public StateMachine step(Tasks tasks) {
-      i += 1;
-      return DONE;  // Subtask1 is done.
-    }
-  }
-
-  private class Subtask2 implements StateMachine {
-    @Override
-    public StateMachine step(Tasks tasks) {
-      i += 2;
-      return DONE;  // Subtask2 is done.
-    }
-  }
-}
-```
-
-Though `Subtask1` and `Subtask2` are logically concurrent, everything runs in a
-single thread so the "concurrent" update of `i` does not need any
-synchronization.
-
-### Structured concurrency
-
-Because every `lookUp` and `enqueue` must resolve before advancing to the next
-state, concurrency is naturally limited to tree structures. It's possible to
-create hierarchical[^5] concurrency as shown in the following example.
-
-![Structured Concurrency](/contribute/images/structured-concurrency.svg)
-
-It's hard to tell from the *UML* that the concurrency structure forms a tree.
-There's an [alternate view](#concurrency-tree-diagram) that better shows the
-tree structure.
-
-![Unstructured Concurrency](/contribute/images/unstructured-concurrency.svg)
-
-Structured concurrency is much easier to reason about.
-
-## Composition and control flow patterns
-
-This section presents examples for how multiple `StateMachine`s can be composed
-and solutions to certain control flow problems.
-
-### Sequential states
-
-This is the most common and straightforward control flow pattern. An example of
-this is shown in [Stateful computations inside
-`SkyKeyComputeState`](#stateful-computations).
-
-### Branching
-
-Branching states in `StateMachine`s can be achieved by returning different
-values using regular *Java* control flow, as shown in the following example.
-
-```
-class Branch implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    // Returns different state machines, depending on condition.
-    if (shouldUseA()) {
-      return this::performA;
-    }
-    return this::performB;
-  }
-  …
-}
-```
-
-It’s very common for certain branches to return `DONE`, for early completion.
-
-### Advanced sequential composition
-
-Since the `StateMachine` control structure is memoryless, sharing `StateMachine`
-definitions as subtasks can sometimes be awkward. Let *M1* and
-*M2* be `StateMachine` instances that share a `StateMachine`, *S*,
-with *M1* and *M2* being the sequences *<A, S, B>* and
-*<X, S, Y>* respectively. The problem is that *S* doesn’t know whether to
-continue to *B* or *Y* after it completes, and `StateMachine`s don't keep a
-call stack. This section reviews some techniques for achieving this.
-
-#### `StateMachine` as terminal sequence element
-
-This doesn’t solve the initial problem posed. It only demonstrates sequential
-composition when the shared `StateMachine` is terminal in the sequence.
-
-```
-// S is the shared state machine.
-class S implements StateMachine { … }
-
-class M1 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performA();
-    return new S();
-  }
-}
-
-class M2 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performX();
-    return new S();
-  }
-}
-```
-
-This works even if *S* is itself a complex state machine.
-
-#### Subtask for sequential composition
-
-Since enqueued subtasks are guaranteed to complete before the next state, it’s
-sometimes possible to slightly abuse[^6] the subtask mechanism.
-
-```
-class M1 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performA();
-    // S starts after `step` returns and by contract must complete before `doB`
-    // begins. It is effectively sequential, inducing the sequence < A, S, B >.
-    tasks.enqueue(new S());
-    return this::doB;
-  }
-
-  private StateMachine doB(Tasks tasks) {
-    performB();
-    return DONE;
-  }
-}
-
-class M2 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performX();
-    // Similarly, this induces the sequence < X, S, Y >.
-    tasks.enqueue(new S());
-    return this::doY;
-  }
-
-  private StateMachine doY(Tasks tasks) {
-    performY();
-    return DONE;
-  }
-}
-```
-
-#### `runAfter` injection
-
-Sometimes, abusing `Tasks.enqueue` is impossible because there are other
-parallel subtasks or `Tasks.lookUp` calls that must be completed before *S*
-executes. In this case, injecting a `runAfter` parameter into *S* can be used to
-inform *S* of what to do next.
-
-```
-class S implements StateMachine {
-  // Specifies what to run after S completes.
-  private final StateMachine runAfter;
-
-  S(StateMachine runAfter) {
-    this.runAfter = runAfter;
-  }
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    … // Performs some computations.
-    return this::processResults;
-  }
-
-  @Nullable
-  private StateMachine processResults(Tasks tasks) {
-    … // Does some additional processing.
-
-    // Executes the state machine defined by `runAfter` after S completes.
-    return runAfter;
-  }
-}
-
-class M1 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performA();
-    // Passes `this::doB` as the `runAfter` parameter of S, resulting in the
-    // sequence < A, S, B >.
-    return new S(/* runAfter= */ this::doB);
-  }
-
-  private StateMachine doB(Tasks tasks) {
-    performB();
-    return DONE;
-  }
-}
-
-class M2 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performX();
-    // Passes `this::doY` as the `runAfter` parameter of S, resulting in the
-    // sequence < X, S, Y >.
-    return new S(/* runAfter= */ this::doY);
-  }
-
-  private StateMachine doY(Tasks tasks) {
-    performY();
-    return DONE;
-  }
-}
-```
-
-This approach is cleaner than abusing subtasks. However, applying this too
-liberally, for example, by nesting multiple `StateMachine`s with `runAfter`, is
-the road to [Callback Hell](#callback-hell). It’s better to break up sequential
-`runAfter`s with ordinary sequential states instead.
-
-```
-  return new S(/* runAfter= */ new T(/* runAfter= */ this::nextStep))
-```
-
-can be replaced with the following.
-
-```
-  private StateMachine step1(Tasks tasks) {
-    doStep1();
-    return new S(/* runAfter= */ this::intermediateStep);
-  }
-
-  private StateMachine intermediateStep(Tasks tasks) {
-    return new T(/* runAfter= */ this::nextStep);
-  }
-```
-
-Note: It's possible to pass `DONE` as the `runAfter` parameter when there's
-nothing to run afterwards.
-
-Tip: When using `runAfter`, always annotate the parameter with `/* runAfter= */`
-to let the reader know the meaning at the callsite.
-
-#### *Forbidden* alternative: `runAfterUnlessError`
-
-In an earlier draft, we had considered a `runAfterUnlessError` that would abort
-early on errors. This was motivated by the fact that errors often end up getting
-checked twice, once by the `StateMachine` that has a `runAfter` reference and
-once by the `runAfter` machine itself.
-
-After some deliberation, we decided that uniformity of the code is more
-important than deduplicating the error checking. It would be confusing if the
-`runAfter` mechanism did not work in a consistent manner with the
-`tasks.enqueue` mechanism, which always requires error checking.
-
-Warning: When using `runAfter`, the machine that has the injected `runAfter`
-should invoke it unconditionally at completion, even on error, for consistency.
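-
-To make the warning concrete, here is a minimal sketch (not from the Bazel
-codebase; the `error` field is hypothetical) in which the machine with the
-injected `runAfter` returns it unconditionally, mirroring the `tasks.enqueue`
-contract:
-
-```
-class S implements StateMachine {
-  private final StateMachine runAfter;
-  private MyException error;  // Hypothetical: set by an error handling callback.
-
-  S(StateMachine runAfter) {
-    this.runAfter = runAfter;
-  }
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    … // Performs some computations, possibly recording `error`.
-    return this::complete;
-  }
-
-  private StateMachine complete(Tasks tasks) {
-    // No early return when `error` is set: `runAfter` is invoked either way,
-    // and the downstream machine performs its own error check, just as it
-    // would after a subtask enqueued with `tasks.enqueue`.
-    return runAfter;
-  }
-}
-```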
-
-### Direct delegation
-
-Each time there is a formal state transition, the main `Driver` loop advances.
-As per contract, advancing states means that all previously enqueued SkyValue
-lookups and subtasks resolve before the next state executes. Sometimes the logic
-of a delegate `StateMachine` makes a phase advance unnecessary or
-counterproductive. For example, if the first `step` of the delegate performs
-SkyKey lookups that could be parallelized with lookups of the delegating state
-then a phase advance would make them sequential. It could make more sense to
-perform direct delegation, as shown in the example below.
-
-```
-class Parent implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    tasks.lookUp(new Key1(), this);
-    // Directly delegates to `Delegate`.
-    //
-    // The (valid) alternative:
-    //   return new Delegate(this::afterDelegation);
-    // would cause `Delegate.step` to execute after `step` completes which would
-    // cause lookups of `Key1` and `Key2` to be sequential instead of parallel.
-    return new Delegate(this::afterDelegation).step(tasks);
-  }
-
-  private StateMachine afterDelegation(Tasks tasks) {
-    …
-  }
-}
-
-class Delegate implements StateMachine {
-  private final StateMachine runAfter;
-
-  Delegate(StateMachine runAfter) {
-    this.runAfter = runAfter;
-  }
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    tasks.lookUp(new Key2(), this);
-    return …;
-  }
-
-  // Rest of implementation.
-  …
-
-  private StateMachine complete(Tasks tasks) {
-    …
-    return runAfter;
-  }
-}
-```
-
-## Data flow
-
-The focus of the previous discussion has been on managing control flow. This
-section describes the propagation of data values.
-
-### Implementing `Tasks.lookUp` callbacks
-
-There’s an example of implementing a `Tasks.lookUp` callback in [SkyValue
-lookups](#skyvalue-lookups). This section provides rationale and suggests
-approaches for handling multiple SkyValues.
-
-#### `Tasks.lookUp` callbacks
-
-The `Tasks.lookUp` method takes a callback, `sink`, as a parameter.
-
-```
-  void lookUp(SkyKey key, Consumer<SkyValue> sink);
-```
-
-The idiomatic approach would be to use a *Java* lambda to implement this:
-
-```
-  tasks.lookUp(key, value -> myValue = (MyValueClass) value);
-```
-
-with `myValue` being a member variable of the `StateMachine` instance doing the
-lookup. However, the lambda requires an extra memory allocation compared to
-implementing the `Consumer<SkyValue>` interface in the `StateMachine`
-implementation. The lambda is still useful when there are multiple lookups that
-would be ambiguous.
-
-Note: Bikeshed warning. There is a noticeable difference of approximately 1%
-end-to-end CPU usage when implementing callbacks systematically in
-`StateMachine` implementations compared to using lambdas, which makes this
-recommendation debatable. To avoid unnecessary debates, it is advised to leave
-the decision up to the individual implementing the solution.
-
-There are also error handling overloads of `Tasks.lookUp`, that are analogous to
-`SkyFunction.Environment.getValueOrThrow`.
-
-```
-  <E extends Exception> void lookUp(
-      SkyKey key, Class<E> exceptionClass, ValueOrExceptionSink<E> sink);
-
-  interface ValueOrExceptionSink<E extends Exception> {
-    void acceptValueOrException(@Nullable SkyValue value, @Nullable E exception);
-  }
-```
-
-An example implementation is shown below.
-
-```
-class PerformLookupWithError implements StateMachine, ValueOrExceptionSink<MyException> {
-  private MyValue value;
-  private MyException error;
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    tasks.lookUp(new MyKey(), MyException.class, (ValueOrExceptionSink<MyException>) this);
-    return this::processResult;
-  }
-
-  @Override
-  public void acceptValueOrException(@Nullable SkyValue value, @Nullable MyException exception) {
-    if (value != null) {
-      this.value = (MyValue) value;
-      return;
-    }
-    if (exception != null) {
-      this.error = exception;
-      return;
-    }
-    throw new IllegalArgumentException("Both parameters were unexpectedly null.");
-  }
-
-  private StateMachine processResult(Tasks tasks) {
-    if (error != null) {
-      // Handles the error.
-      …
-      return DONE;
-    }
-    // Processes `value`, which is non-null.
-    …
-  }
-}
-```
-
-As with lookups without error handling, having the `StateMachine` class directly
-implement the callback saves a memory allocation for the lambda.
-
-[Error handling](#error-handling) provides a bit more detail, but essentially,
-there's not much difference between the propagation of errors and normal values.
-
-#### Consuming multiple SkyValues
-
-Multiple SkyValue lookups are often required. An approach that works much of the
-time is to switch on the type of SkyValue. The following is an example that has
-been simplified from prototype production code.
-
-```
-  @Nullable
-  private StateMachine fetchConfigurationAndPackage(Tasks tasks) {
-    var configurationKey = configuredTarget.getConfigurationKey();
-    if (configurationKey != null) {
-      tasks.lookUp(configurationKey, (Consumer<SkyValue>) this);
-    }
-
-    var packageId = configuredTarget.getLabel().getPackageIdentifier();
-    tasks.lookUp(PackageValue.key(packageId), (Consumer<SkyValue>) this);
-
-    return this::constructResult;
-  }
-
-  @Override  // Implementation of `Consumer<SkyValue>`.
-  public void accept(SkyValue value) {
-    if (value instanceof BuildConfigurationValue) {
-      this.configurationValue = (BuildConfigurationValue) value;
-      return;
-    }
-    if (value instanceof PackageValue) {
-      this.pkg = ((PackageValue) value).getPackage();
-      return;
-    }
-    throw new IllegalArgumentException("unexpected value: " + value);
-  }
-```
-
-The `Consumer<SkyValue>` callback implementation can be shared unambiguously
-because the value types are different. When that’s not the case, falling back to
-lambda-based implementations or full inner-class instances that implement the
-appropriate callbacks is viable.
-
-### Propagating values between `StateMachine`s
-
-So far, this document has only explained how to arrange work in a subtask, but
-subtasks also need to report values back to the caller. Since subtasks are
-logically asynchronous, their results are communicated back to the caller using
-a *callback*. To make this work, the subtask defines a sink interface that is
-injected via its constructor.
-
-```
-class BarProducer implements StateMachine {
-  // Callers of BarProducer implement the following interface to accept its
-  // results. Exactly one of the two methods will be called by the time
-  // BarProducer completes.
-  interface ResultSink {
-    void acceptBarValue(Bar value);
-    void acceptBarError(BarException exception);
-  }
-
-  private final ResultSink sink;
-
-  BarProducer(ResultSink sink) {
-    this.sink = sink;
-  }
-
-  … // StateMachine steps that end with this::complete.
-  private StateMachine complete(Tasks tasks) {
-    if (hasError()) {
-      sink.acceptBarError(getError());
-      return DONE;
-    }
-    sink.acceptBarValue(getValue());
-    return DONE;
-  }
-}
-```
-
-Tip: It would be tempting to use the more concise signature `void accept(Bar
-value)` rather than the stuttery `void acceptBarValue(Bar value)` above.
-However, `Consumer<SkyValue>` is a common overload of `void accept(Bar value)`,
-so doing this often leads to violations of the [Overloads: never
-split](https://google.github.io/styleguide/javaguide.html#s3.4.2-ordering-class-contents)
-style-guide rule.
-
-Tip: Using a custom `ResultSink` type instead of a generic one from
-`java.util.function` makes it easy to find implementations in the code base,
-improving readability.
-
-A caller `StateMachine` would then look like the following.
-
-```
-class Caller implements StateMachine, BarProducer.ResultSink {
-  interface ResultSink {
-    void acceptCallerValue(Bar value);
-    void acceptCallerError(BarException error);
-  }
-
-  private final ResultSink sink;
-
-  private Bar value;
-
-  Caller(ResultSink sink) {
-    this.sink = sink;
-  }
-
-  @Override
-  @Nullable
-  public StateMachine step(Tasks tasks) {
-    tasks.enqueue(new BarProducer((BarProducer.ResultSink) this));
-    return this::processResult;
-  }
-
-  @Override
-  public void acceptBarValue(Bar value) {
-    this.value = value;
-  }
-
-  @Override
-  public void acceptBarError(BarException error) {
-    sink.acceptCallerError(error);
-  }
-
-  private StateMachine processResult(Tasks tasks) {
-    // Since all enqueued subtasks resolve before `processResult` starts, one of
-    // the `BarProducer.ResultSink` callbacks must have been called by this point.
-    if (value == null) {
-      return DONE;  // There was a previously reported error.
-    }
-    var finalResult = computeResult(value);
-    sink.acceptCallerValue(finalResult);
-    return DONE;
-  }
-}
-```
-
-The preceding example demonstrates a few things. `Caller` has to propagate its
-results back and defines its own `Caller.ResultSink`. `Caller` implements the
-`BarProducer.ResultSink` callbacks. Upon resumption, `processResult` checks if
-`value` is null to determine if an error occurred. This is a common behavior
-pattern after accepting output from either a subtask or SkyValue lookup.
-
-Note that the implementation of `acceptBarError` eagerly forwards the result to
-the `Caller.ResultSink`, as required by [Error bubbling](#error-bubbling).
-
-Alternatives for top-level `StateMachine`s are described in [`Driver`s and
-bridging to SkyFunctions](#drivers-and-bridging).
-
-### Error handling
-
-There are a couple of examples of error handling already in [`Tasks.lookUp`
-callbacks](#tasks-lookup-callbacks) and [Propagating values between
-`StateMachines`](#propagating-values). Exceptions, other than
-`InterruptedException`, are not thrown, but instead passed around through
-callbacks as values. Such callbacks often have exclusive-or semantics, with
-exactly one of a value or error being passed.
-
-The next section describes a subtle, but important interaction with Skyframe
-error handling.
-
-#### Error bubbling (--nokeep_going)
-
-Warning: Errors need to be eagerly propagated all the way back to the
-SkyFunction for error bubbling to function correctly.
-
-During error bubbling, a SkyFunction may be restarted even if not all requested
-SkyValues are available. In such cases, the subsequent state will never be
-reached due to the `Tasks` API contract. However, the `StateMachine` should
-still propagate the exception.
-
-Since propagation must occur regardless of whether the next state is reached,
-the error handling callback must perform this task. For an inner `StateMachine`,
-this is achieved by invoking the parent callback.
-
-At the top-level `StateMachine`, which interfaces with the SkyFunction, this can
-be done by calling the `setException` method of `ValueOrExceptionProducer`.
-`ValueOrExceptionProducer.tryProduceValue` will then throw the exception, even
-if there are missing SkyValues.
-
-If a `Driver` is being utilized directly, it is essential to check for
-propagated errors from the SkyFunction, even if the machine has not finished
-processing.
-
-### Event handling
-
-For SkyFunctions that need to emit events, a `StoredEventHandler` is injected
-into `SkyKeyComputeState` and further injected into `StateMachine`s that require
-them. Historically, the `StoredEventHandler` was needed because Skyframe dropped
-certain events unless they were replayed, but this was subsequently fixed.
-`StoredEventHandler` injection is preserved because it simplifies the
-implementation of events emitted from error handling callbacks.
-
-## `Driver`s and bridging to SkyFunctions
-
-A `Driver` is responsible for managing the execution of `StateMachine`s,
-beginning with a specified root `StateMachine`. As `StateMachine`s can
-recursively enqueue subtask `StateMachine`s, a single `Driver` can manage
-numerous subtasks. These subtasks create a tree structure, a result of
-[Structured concurrency](#structured-concurrency). The `Driver` batches SkyValue
-lookups across subtasks for improved efficiency.
-
-There are a number of classes built around the `Driver`, with the following API.
-
-```
-public final class Driver {
-  public Driver(StateMachine root);
-  public boolean drive(SkyFunction.Environment env) throws InterruptedException;
-}
-```
-
-`Driver` takes a single root `StateMachine` as a parameter. Calling
-`Driver.drive` executes the `StateMachine` as far as it can go without a
-Skyframe restart. It returns true when the `StateMachine` completes and false
-otherwise, indicating that not all values were available.
-
-`Driver` maintains the concurrent state of the `StateMachine` and it is well
-suited for embedding in `SkyKeyComputeState`.
-
-### Directly instantiating `Driver`
-
-`StateMachine` implementations conventionally communicate their results via
-callbacks. It's possible to directly instantiate a `Driver` as shown in the
-following example.
-
-The `Driver` is embedded in the `SkyKeyComputeState` implementation along with
-an implementation of the corresponding `ResultSink` to be defined a bit further
-down. At the top level, the `State` object is an appropriate receiver for the
-result of the computation as it is guaranteed to outlive `Driver`.
-
-```
-class State implements SkyKeyComputeState, ResultProducer.ResultSink {
-  // The `Driver` instance, containing the full tree of all `StateMachine`
-  // states. Responsible for calling `StateMachine.step` implementations when
-  // asynchronous values are available and performing batched SkyFrame lookups.
-  //
-  // Non-null while `result` is being computed.
-  private Driver resultProducer;
-
-  // Variable for storing the result of the `StateMachine`.
-  //
-  // Will be non-null after the computation completes.
-  private ResultType result;
-
-  // Implements `ResultProducer.ResultSink`.
-  //
-  // `ResultProducer` propagates its final value through a callback that is
-  // implemented here.
-  @Override
-  public void acceptResult(ResultType result) {
-    this.result = result;
-  }
-}
-```
-
-The code below sketches the `ResultProducer`.
-
-```
-class ResultProducer implements StateMachine {
-  interface ResultSink {
-    void acceptResult(ResultType value);
-  }
-
-  private final Parameters parameters;
-  private final ResultSink sink;
-
-  … // Other internal state.
-
-  ResultProducer(Parameters parameters, ResultSink sink) {
-    this.parameters = parameters;
-    this.sink = sink;
-  }
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    … // Implementation.
-    return this::complete;
-  }
-
-  private StateMachine complete(Tasks tasks) {
-    sink.acceptResult(getResult());
-    return DONE;
-  }
-}
-```
-
-Then the code for lazily computing the result could look like the following.
-
-```
-@Nullable
-private Result computeResult(State state, SkyFunction.Environment env)
-    throws InterruptedException {
-  if (state.result != null) {
-    return state.result;
-  }
-  if (state.resultProducer == null) {
-    state.resultProducer = new Driver(new ResultProducer(
-        new Parameters(), (ResultProducer.ResultSink) state));
-  }
-  if (state.resultProducer.drive(env)) {
-    // Clears the `Driver` instance as it is no longer needed.
-    state.resultProducer = null;
-  }
-  return state.result;
-}
-```
-
-### Embedding `Driver`
-
-If the `StateMachine` produces a value and raises no exceptions, embedding
-`Driver` is another possible implementation, as shown in the following example.
-
-```
-class ResultProducer implements StateMachine {
-  private final Parameters parameters;
-  private final Driver driver;
-
-  private ResultType result;
-
-  ResultProducer(Parameters parameters) {
-    this.parameters = parameters;
-    this.driver = new Driver(this);
-  }
-
-  @Nullable  // Null when a Skyframe restart is needed.
-  public ResultType tryProduceValue(SkyFunction.Environment env)
-      throws InterruptedException {
-    if (!driver.drive(env)) {
-      return null;
-    }
-    return result;
-  }
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    … // Implementation.
-  }
-}
-```
-
-The SkyFunction may have code that looks like the following (where `State` is
-the function-specific type of `SkyKeyComputeState`).
-
-```
-@Nullable  // Null when a Skyframe restart is needed.
-Result computeResult(SkyFunction.Environment env, State state)
-    throws InterruptedException {
-  if (state.result != null) {
-    return state.result;
-  }
-  if (state.resultProducer == null) {
-    state.resultProducer = new ResultProducer(new Parameters());
-  }
-  var result = state.resultProducer.tryProduceValue(env);
-  if (result == null) {
-    return null;
-  }
-  state.resultProducer = null;
-  return state.result = result;
-}
-```
-
-Embedding `Driver` in the `StateMachine` implementation is a better fit for
-Skyframe's synchronous coding style.
-
-### StateMachines that may produce exceptions
-
-Otherwise, there are `SkyKeyComputeState`-embeddable `ValueOrExceptionProducer`
-and `ValueOrException2Producer` classes that have synchronous APIs to match
-synchronous SkyFunction code.
-
-The `ValueOrExceptionProducer` abstract class includes the following methods.
-
-```
-public abstract class ValueOrExceptionProducer<V, E extends Exception>
-    implements StateMachine {
-  @Nullable
-  public final V tryProduceValue(Environment env)
-      throws InterruptedException, E {
-    … // Implementation.
-  }
-
-  protected final void setValue(V value) { … // Implementation. }
-  protected final void setException(E exception) { … // Implementation. }
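-
-  // Note: if both `setValue` and `setException` are called, which can happen
-  // during error bubbling, the exception takes priority in `tryProduceValue`.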
-}
-```
-
-It includes an embedded `Driver` instance and closely resembles the
-`ResultProducer` class in [Embedding driver](#embedding-driver) and interfaces
-with the SkyFunction in a similar manner. Instead of defining a `ResultSink`,
-implementations call `setValue` or `setException` when either of those occur.
-When both occur, the exception takes priority. The `tryProduceValue` method
-bridges the asynchronous callback code to synchronous code and throws an
-exception when one is set.
-
-As previously noted, during error bubbling, it's possible for an error to occur
-even if the machine is not yet done because not all inputs are available. To
-accommodate this, `tryProduceValue` throws any set exceptions, even before the
-machine is done.
-
-## Epilogue: Eventually removing callbacks
-
-`StateMachine`s are a highly efficient, but boilerplate-intensive way to perform
-asynchronous computation. Continuations (particularly in the form of `Runnable`s
-passed to `ListenableFuture`) are widespread in certain parts of *Bazel* code,
-but aren't prevalent in analysis SkyFunctions. Analysis is mostly CPU bound and
-there are no efficient asynchronous APIs for disk I/O. Eventually, it would be
-good to optimize away callbacks as they have a learning curve and impede
-readability.
-
-One of the most promising alternatives is *Java* virtual threads. Instead of
-having to write callbacks, everything is replaced with synchronous, blocking
-calls. This is possible because tying up a virtual thread resource, unlike a
-platform thread, is supposed to be cheap. However, even with virtual threads,
-replacing simple synchronous operations with thread creation and
-synchronization primitives is too expensive. We performed a migration from
-`StateMachine`s to *Java* virtual threads and they were orders of magnitude
-slower, leading to almost a 3x increase in end-to-end analysis latency. Since
-virtual threads are still a preview feature, it's possible that this migration
-can be performed at a later date when performance improves.
-
-Another approach to consider is waiting for *Loom* coroutines, if they ever
-become available. The advantage here is that it might be possible to reduce
-synchronization overhead by using cooperative multitasking.
-
-If all else fails, low-level bytecode rewriting could also be a viable
-alternative. With enough optimization, it might be possible to achieve
-performance that approaches hand-written callback code.
-
-## Appendix
-
-### Callback Hell
-
-Callback hell is an infamous problem in asynchronous code that uses callbacks.
-It stems from the fact that the continuation for a subsequent step is nested
-within the previous step. If there are many steps, this nesting can be
-extremely deep. If coupled with control flow, the code becomes unmanageable.
-
-```
-class CallbackHell implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    doA();
-    return t -> {
-      doB();
-      return t1 -> {
-        doC();
-        return DONE;
-      };
-    };
-  }
-}
-```
-
-One of the advantages of nested implementations is that the stack frame of the
-outer step can be preserved. In *Java*, captured lambda variables must be
-effectively final so using such variables can be cumbersome. Deep nesting is
-avoided by returning method references as continuations instead of lambdas, as
-shown below.
-
-```
-class CallbackHellAvoided implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    doA();
-    return this::step2;
-  }
-
-  private StateMachine step2(Tasks tasks) {
-    doB();
-    return this::step3;
-  }
-
-  private StateMachine step3(Tasks tasks) {
-    doC();
-    return DONE;
-  }
-}
-```
-
-Callback hell may also occur if the [`runAfter` injection](#runafter-injection)
-pattern is used too densely, but this can be avoided by interspersing injections
-with sequential steps.
-
-#### Example: Chained SkyValue lookups
-
-It is often the case that the application logic requires dependent chains of
-SkyValue lookups, for example, if a second SkyKey depends on the first
-SkyValue. Approached naively, this would result in a complex, deeply nested
-callback structure.
-
-```
-private ValueType1 value1;
-private ValueType2 value2;
-
-private StateMachine step1(...) {
-  tasks.lookUp(key1, (Consumer<SkyValue>) this);  // key1 has type KeyType1.
-  return this::step2;
-}
-
-@Override
-public void accept(SkyValue value) {
-  this.value1 = (ValueType1) value;
-}
-
-private StateMachine step2(...) {
-  KeyType2 key2 = computeKey(value1);
-  tasks.lookUp(key2, this::acceptValueType2);
-  return this::step3;
-}
-
-private void acceptValueType2(SkyValue value) {
-  this.value2 = (ValueType2) value;
-}
-```
-
-However, since continuations are specified as method references, the code looks
-procedural across state transitions: `step2` follows `step1`. Note that here, a
-method reference is used to assign `value2`. This makes the ordering of the
-code match the ordering of the computation from top-to-bottom.
-
-### Miscellaneous Tips
-
-#### Readability: Execution Ordering
-
-To improve readability, strive to keep the `StateMachine.step` implementations
-in execution order and callback implementations immediately following where
-they are passed in the code. This isn't always possible where the control flow
-branches. Additional comments might be helpful in such cases.
-
-In [Example: Chained SkyValue lookups](#chained-skyvalue-lookups), an
-intermediate method reference is created to achieve this. This trades a small
-amount of performance for readability, which is likely worthwhile here.
-
-#### Generational Hypothesis
-
-Medium-lived *Java* objects break the generational hypothesis of the *Java*
-garbage collector, which is designed to handle objects that live for a very
-short time or objects that live forever. By definition, objects in
-`SkyKeyComputeState` violate this hypothesis. Such objects, containing the
-constructed tree of all still-running `StateMachine`s, rooted at `Driver`, have
-an intermediate lifespan as they suspend, waiting for asynchronous computations
-to complete.
-
-It seems less bad in JDK19, but when using `StateMachine`s, it's sometimes
-possible to observe an increase in GC time, even with dramatic decreases in
-actual garbage generated. Since `StateMachine`s have an intermediate lifespan
-they could be promoted to old gen, causing it to fill up more quickly, thus
-necessitating more expensive major or full GCs to clean up.
-
-The initial precaution is to minimize the use of `StateMachine` variables, but
-it is not always feasible, for example, if a value is needed across multiple
-states. Where it is possible, local stack `step` variables are young generation
-variables and efficiently GC'd.
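-
-For example, a value consumed within a single `step` can stay on the stack
-instead of becoming a member variable. The following minimal sketch is
-hypothetical (`BigIntermediate` and the helper methods are illustrative, not
-part of any API):
-
-```
-class StackFriendly implements StateMachine {
-  // Hypothetical anti-pattern: storing `intermediate` in a field would keep it
-  // alive, and possibly promoted to old gen, for the lifetime of this
-  // `StateMachine`.
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    // A local suffices here: it becomes young-generation garbage as soon as
-    // `step` returns.
-    BigIntermediate intermediate = computeIntermediate();
-    emit(intermediate.summarize());
-    return DONE;
-  }
-}
-```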
-
-For `StateMachine` variables, breaking things down into subtasks and following
-the recommended pattern for [Propagating values between
-`StateMachine`s](#propagating-values) is also helpful. Observe that when
-following the pattern, only child `StateMachine`s have references to parent
-`StateMachine`s and not vice versa. This means that as children complete and
-update the parents using result callbacks, the children naturally fall out of
-scope and become eligible for GC.
-
-Finally, in some cases, a `StateMachine` variable is needed in earlier states
-but not in later states. It can be beneficial to null out references of large
-objects once it is known that they are no longer needed.
-
-#### Naming states
-
-When naming a method, it's usually possible to name a method for the behavior
-that happens within that method. It's less clear how to do this in
-`StateMachine`s because there is no stack. For example, suppose method `foo`
-calls a sub-method `bar`. In a `StateMachine`, this could be translated into the
-state sequence `foo`, followed by `bar`. `foo` no longer includes the behavior
-`bar`. As a result, method names for states tend to be narrower in scope,
-potentially reflecting local behavior.
-
-### Concurrency tree diagram
-
-The following is an alternative view of the diagram in [Structured
-concurrency](#structured-concurrency) that better depicts the tree structure.
-The blocks form a small tree.
-
-![Structured Concurrency 3D](/contribute/images/structured-concurrency-3d.svg)
-
-[^1]: In contrast to Skyframe's convention of restarting from the beginning when
-    values are not available.
-[^2]: Note that `step` is permitted to throw `InterruptedException`, but the
-    examples omit this. There are a few low-level methods in *Bazel* code that
-    throw this exception and it propagates up to the `Driver`, to be described
-    later, that runs the `StateMachine`. It's fine to not declare it to be
-    thrown when unneeded.
-[^3]: Concurrent subtasks were motivated by the `ConfiguredTargetFunction` which
-    performs *independent* work for each dependency. Instead of manipulating
-    complex data structures that process all the dependencies at once,
-    introducing inefficiencies, each dependency has its own independent
-    `StateMachine`.
-[^4]: Multiple `tasks.lookUp` calls within a single step are batched together.
-    Additional batching can be created by lookups occurring within concurrent
-    subtasks.
-[^5]: This is conceptually similar to Java’s structured concurrency
-    [jeps/428](https://openjdk.org/jeps/428).
-[^6]: Doing this is similar to spawning a thread and joining it to achieve
-    sequential composition.
diff --git a/8.3.1/contribute/windows-chocolatey-maintenance.mdx b/8.3.1/contribute/windows-chocolatey-maintenance.mdx
deleted file mode 100644
index c6aee8f..0000000
--- a/8.3.1/contribute/windows-chocolatey-maintenance.mdx
+++ /dev/null
@@ -1,72 +0,0 @@
----
-title: 'Maintaining Bazel Chocolatey package on Windows'
----
-
-
-
-Note: The Chocolatey package is experimental; please provide feedback
-(`@petemounce` in issue tracker).
-
-## Prerequisites
-
-You need:
-
-* [chocolatey package manager](https://chocolatey.org) installed
-* (to publish) a chocolatey API key granting you permission to publish the
-  `bazel` package
-  * [@petemounce](https://github.com/petemounce) currently
-    maintains this unofficial package.
-
-* (to publish) to have set up that API key for the chocolatey source locally
-  via `choco apikey -k <your key> -s https://chocolatey.org/`
-
-## Build
-
-Compile bazel with msys2 shell and `compile.sh`.
-
-```powershell
-pushd scripts/packages/chocolatey
-  ./build.ps1 -version 0.3.2 -mode local
-popd
-```
-
-Should result in `scripts/packages/chocolatey/bazel.<version>.nupkg` being
-created.
-
-The `build.ps1` script supports `mode` values `local`, `rc` and `release`.
-
-## Test
-
-0. Build the package (with `-mode local`)
-
-    * run a webserver (`python -m SimpleHTTPServer` in
-      `scripts/packages/chocolatey` is convenient and starts one on
-      `http://localhost:8000`)
-
-0. Test the install
-
-    The `test.ps1` should install the package cleanly (and error if it did not
-    install cleanly), then tell you what to do next.
-
-0. Test the uninstall
-
-    ```sh
-    choco uninstall bazel
-    # should remove bazel from the system
-    ```
-
-Chocolatey's moderation process automates checks here as well.
-
-## Release
-
-Modify `tools/parameters.json` for the new release's URI and checksum once the
-release has been published to github releases.
-
-```powershell
-./build.ps1 -version <version> -isRelease
-./test.ps1 -version <version>
-# if the test.ps1 passes
-choco push bazel.x.y.z.nupkg --source https://chocolatey.org/
-```
-
-Chocolatey.org will then run automated checks and respond to the push via email
-to the maintainers.
diff --git a/8.3.1/contribute/windows-scoop-maintenance.mdx b/8.3.1/contribute/windows-scoop-maintenance.mdx
deleted file mode 100644
index 58e2a6c..0000000
--- a/8.3.1/contribute/windows-scoop-maintenance.mdx
+++ /dev/null
@@ -1,55 +0,0 @@
----
-title: 'Maintaining Bazel Scoop package on Windows'
----
-
-
-
-Note: The Scoop package is experimental. To provide feedback, go to
-`@excitoon` in issue tracker.
-
-## Prerequisites
-
-You need:
-
-* [Scoop package manager](https://scoop.sh/) installed
-* GitHub account in order to publish and create pull requests to
-  [scoopinstaller/scoop-main](https://github.com/scoopinstaller/scoop-main)
-  * [@excitoon](https://github.com/excitoon) currently maintains this
-    unofficial package. Feel free to ask questions by
-    [e-mail](mailto:vladimir.chebotarev@gmail.com) or
-    [Telegram](http://telegram.me/excitoon).
-
-## Release process
-
-Scoop packages are very easy to maintain. Once you have the URL of released
-Bazel, you need to make appropriate changes in
-[this file](https://github.com/scoopinstaller/scoop-main/blob/master/bucket/bazel.json):
-
-- update version
-- update dependencies if needed
-- update URL
-- update hash (`sha256` by default)
-
-In your filesystem, `bazel.json` is located in the directory
-`%UserProfile%/scoop/buckets/main/bucket` by default. This directory belongs to
-your clone of the Git repository
-[scoopinstaller/scoop-main](https://github.com/scoopinstaller/scoop-main).
-
-Test the result:
-
-```
-scoop uninstall bazel
-scoop install bazel
-bazel version
-bazel something_else
-```
-
-The first time, make a fork of
-[scoopinstaller/scoop-main](https://github.com/scoopinstaller/scoop-main) and
-specify it as your own remote for `%UserProfile%/scoop/buckets/main`:
-
-```
-git remote add mine FORK_URL
-```
-
-Push your changes to your fork and create a pull request.
diff --git a/8.3.1/docs/android-build-performance.mdx b/8.3.1/docs/android-build-performance.mdx
deleted file mode 100644
index 0d5edc7..0000000
--- a/8.3.1/docs/android-build-performance.mdx
+++ /dev/null
@@ -1,97 +0,0 @@
----
-title: 'Android Build Performance'
----
-
-
-
-This page contains information on optimizing build performance for Android
-apps specifically. For general build performance optimization with Bazel, see
-[Optimizing Performance](/rules/performance).
-
-## Recommended flags
-
-The flags are in the
-[`bazelrc` configuration syntax](/run/bazelrc#bazelrc-syntax-semantics), so
-they can be pasted directly into a `bazelrc` file and invoked with
-`--config=<configuration name>` on the command line.
-
-**Profiling performance**
-
-Bazel writes a JSON trace profile by default to a file called
-`command.profile.gz` in Bazel's output base.
-See the [JSON Profile documentation](/rules/performance#performance-profiling) for
-how to read and interact with the profile.
-
-**Persistent workers for Android build actions**
-
-A subset of Android build actions has support for
-[persistent workers](https://blog.bazel.build/2015/12/10/java-workers.html).
-
-These actions' mnemonics are:
-
-* DexBuilder
-* Javac
-* Desugar
-* AaptPackage
-* AndroidResourceParser
-* AndroidResourceValidator
-* AndroidResourceCompiler
-* RClassGenerator
-* AndroidResourceLink
-* AndroidAapt2
-* AndroidAssetMerger
-* AndroidResourceMerger
-* AndroidCompiledResourceMerger
-
-Enabling workers can result in better build performance by saving on JVM
-startup costs from invoking each of these tools, but at the cost of increased
-memory usage on the system by persisting them.
-
-To enable workers for these actions, apply these flags with
-`--config=android_workers` on the command line:
-
-```
-build:android_workers --strategy=DexBuilder=worker
-build:android_workers --strategy=Javac=worker
-build:android_workers --strategy=Desugar=worker
-
-# A wrapper flag for these resource processing actions:
-# - AndroidResourceParser
-# - AndroidResourceValidator
-# - AndroidResourceCompiler
-# - RClassGenerator
-# - AndroidResourceLink
-# - AndroidAapt2
-# - AndroidAssetMerger
-# - AndroidResourceMerger
-# - AndroidCompiledResourceMerger
-build:android_workers --persistent_android_resource_processor
-```
-
-The default number of persistent workers created per action is `4`. We have
-[measured improved build performance](https://github.com/bazelbuild/bazel/issues/8586#issuecomment-500070549)
-by capping the number of instances for each action to `1` or `2`, although this
-may vary depending on the system Bazel is running on, and the project being
-built.
-
-To cap the number of instances for an action, apply these flags:
-
-```
-build:android_workers --worker_max_instances=DexBuilder=2
-build:android_workers --worker_max_instances=Javac=2
-build:android_workers --worker_max_instances=Desugar=2
-build:android_workers --worker_max_instances=AaptPackage=2
-# .. and so on for each action you're interested in.
-```
-
-**Using AAPT2**
-
-[`aapt2`](https://developer.android.com/studio/command-line/aapt2) has improved
-performance over `aapt` and also creates smaller APKs. To use `aapt2`, use the
-`--android_aapt=aapt2` flag or set the `aapt_version` attribute to `aapt2` on
-`android_binary` and `android_local_test`.
-
-**SSD optimizations**
-
-The `--experimental_multi_threaded_digest` flag is useful for optimizing digest
-computation on SSDs.
diff --git a/8.3.1/docs/android-instrumentation-test.mdx b/8.3.1/docs/android-instrumentation-test.mdx deleted file mode 100644 index bf0ff76..0000000 --- a/8.3.1/docs/android-instrumentation-test.mdx +++ /dev/null @@ -1,579 +0,0 @@ ---- -title: 'Android Instrumentation Tests' ---- - - - -_If you're new to Bazel, start with the [Building Android with -Bazel](/start/android-app ) tutorial._ - -![Running Android instrumentation tests in parallel](/docs/images/android_test.gif "Android instrumentation test") - -**Figure 1.** Running parallel Android instrumentation tests. - -[`android_instrumentation_test`](/reference/be/android#android_instrumentation_test) -allows developers to test their apps on Android emulators and devices. -It utilizes real Android framework APIs and the Android Test Library. - -For hermeticity and reproducibility, Bazel creates and launches Android -emulators in a sandbox, ensuring that tests always run from a clean state. Each -test gets an isolated emulator instance, allowing tests to run in parallel -without passing states between them. - -For more information on Android instrumentation tests, check out the [Android -developer -documentation](https://developer.android.com/training/testing/unit-testing/instrumented-unit-tests.html). - -Please file issues in the [GitHub issue tracker](https://github.com/bazelbuild/bazel/issues). - -## How it works - -When you run `bazel test` on an `android_instrumentation_test` target for the -first time, Bazel performs the following steps: - -1. Builds the test APK, APK under test, and their transitive dependencies -2. Creates, boots, and caches clean emulator states -3. Starts the emulator -4. Installs the APKs -5. Runs tests utilizing the [Android Test Orchestrator](https://developer.android.com/training/testing/junit-runner.html#using-android-test-orchestrator) -6. Shuts down the emulator -7. Reports the results - -In subsequent test runs, Bazel boots the emulator from the clean, cached state -created in step 2, so there are no leftover states from previous runs. Caching -emulator state also speeds up test runs. - -## Prerequisites - -Ensure your environment satisfies the following prerequisites: - -- **Linux**. Tested on Ubuntu 16.04, and 18.04. - -- **Bazel 0.12.0** or later. Verify the version by running `bazel info release`. - -```posix-terminal -bazel info release -``` -This results in output similar to the following: - -```none {:.devsite-disable-click-to-copy} -release 4.1.0 -``` - -- **KVM**. Bazel requires emulators to have [hardware - acceleration](https://developer.android.com/studio/run/emulator-acceleration.html#accel-check) - with KVM on Linux. You can follow these - [installation instructions](https://help.ubuntu.com/community/KVM/Installation) - for Ubuntu. - -To verify that KVM has the correct configuration, run: - -```posix-terminal -apt-get install cpu-checker && kvm-ok -``` - -If it prints the following message, you have the correct configuration: - -```none {:.devsite-disable-click-to-copy} -INFO: /dev/kvm exists -KVM acceleration can be used -``` - -- **Xvfb**. To run headless tests (for example, on CI servers), Bazel requires - the [X virtual framebuffer](https://www.x.org/archive/X11R7.6/doc/man/man1/Xvfb.1.xhtml). 
-
-To install it, run:
-
-```posix-terminal
-apt-get install xvfb
-```
-Verify that `Xvfb` is installed correctly and is installed at `/usr/bin/Xvfb`
-by running:
-
-```posix-terminal
-which Xvfb
-```
-The output is the following:
-
-```{:.devsite-disable-click-to-copy}
-/usr/bin/Xvfb
-```
-
-- **32-bit Libraries**. Some of the binaries used by the test infrastructure are
-  32-bit, so on 64-bit machines, ensure that 32-bit binaries can be run. For
-  Ubuntu, install these 32-bit libraries:
-
-```posix-terminal
-sudo apt-get install libc6:i386 libncurses5:i386 libstdc++6:i386 lib32z1 libbz2-1.0:i386
-```
-
-## Getting started
-
-Here is a typical target dependency graph of an `android_instrumentation_test`:
-
-![The target dependency graph on an Android instrumentation test](/docs/images/android_instrumentation_test.png "Target dependency graph")
-
-**Figure 2.** Target dependency graph of an `android_instrumentation_test`.
-
-
-### BUILD file
-
-The graph translates into a `BUILD` file like this:
-
-```python
-android_instrumentation_test(
-    name = "my_test",
-    test_app = ":my_test_app",
-    target_device = "@android_test_support//tools/android/emulated_devices/generic_phone:android_23_x86",
-)
-
-# Test app and library
-android_binary(
-    name = "my_test_app",
-    instruments = ":my_app",
-    manifest = "AndroidTestManifest.xml",
-    deps = [":my_test_lib"],
-    # ...
-)
-
-android_library(
-    name = "my_test_lib",
-    srcs = glob(["javatest/**/*.java"]),
-    deps = [
-        ":my_app_lib",
-        "@maven//:androidx_test_core",
-        "@maven//:androidx_test_runner",
-        "@maven//:androidx_test_espresso_espresso_core",
-    ],
-    # ...
-)
-
-# Target app and library under test
-android_binary(
-    name = "my_app",
-    manifest = "AndroidManifest.xml",
-    deps = [":my_app_lib"],
-    # ...
-)
-
-android_library(
-    name = "my_app_lib",
-    srcs = glob(["java/**/*.java"]),
-    deps = [
-        "@maven//:androidx_appcompat_appcompat",
-        "@maven//:androidx_annotation_annotation",
-    ],
-    # ...
-)
-```
-
-The main attributes of the rule `android_instrumentation_test` are:
-
-- `test_app`: An `android_binary` target. This target contains test code and
-  dependencies like Espresso and UIAutomator. The selected `android_binary`
-  target is required to specify an `instruments` attribute pointing to another
-  `android_binary`, which is the app under test.
-
-- `target_device`: An `android_device` target. This target describes the
-  specifications of the Android emulator which Bazel uses to create, launch and
-  run the tests. See the [section on choosing an Android
-  device](#android-device-target) for more information.
-
-The test app's `AndroidManifest.xml` must include [an `<instrumentation>`
-tag](https://developer.android.com/studio/test/#configure_instrumentation_manifest_settings).
-This tag must specify the attributes for the **package of the target app** and
-the **fully qualified class name of the instrumentation test runner**,
-`androidx.test.runner.AndroidJUnitRunner`.
-
-Here is an example `AndroidTestManifest.xml` for the test app (the package
-names are illustrative):
-
-```xml
-<?xml version="1.0" encoding="UTF-8"?>
-<manifest xmlns:android="http://schemas.android.com/apk/res/android"
-          package="com.example.android.bazel.test">
-
-    <!-- Declares the instrumentation test runner and the package of the
-         target app under test. -->
-    <instrumentation
-        android:name="androidx.test.runner.AndroidJUnitRunner"
-        android:targetPackage="com.example.android.bazel" />
-
-    <application />
-
-</manifest>
-```
-
-### WORKSPACE dependencies
-
-In order to use this rule, your project needs to depend on these external
-repositories:
-
-- `@androidsdk`: The Android SDK. Download this through Android Studio.
-
-- `@android_test_support`: Hosts the test runner, emulator launcher, and
-  `android_device` targets. You can find the [latest release
-  here](https://github.com/android/android-test/releases).
-
-Enable these dependencies by adding the following lines to your `WORKSPACE`
-file:
-
-```python
-# Android SDK
-android_sdk_repository(
-    name = "androidsdk",
-    path = "/path/to/sdk",  # or set ANDROID_HOME
-)
-
-# Android Test Support
-ATS_COMMIT = "$COMMIT_HASH"
-http_archive(
-    name = "android_test_support",
-    strip_prefix = "android-test-%s" % ATS_COMMIT,
-    urls = ["https://github.com/android/android-test/archive/%s.tar.gz" % ATS_COMMIT],
-)
-load("@android_test_support//:repo.bzl", "android_test_repositories")
-android_test_repositories()
-```
-
-## Maven dependencies
-
-For managing dependencies on Maven artifacts from repositories, such as [Google
-Maven](https://maven.google.com) or [Maven Central](https://central.maven.org),
-you should use a Maven resolver, such as
-[`rules_jvm_external`](https://github.com/bazelbuild/rules_jvm_external).
-
-The rest of this page shows how to use `rules_jvm_external` to
-resolve and fetch dependencies from Maven repositories.
-
-## Choosing an android_device target
-
-`android_instrumentation_test.target_device` specifies which Android device to
-run the tests on. These `android_device` targets are defined in
-[`@android_test_support`](https://github.com/google/android-testing-support-library/tree/master/tools/android/emulated_devices).
-
-For example, you can query for the sources for a particular target by running:
-
-```posix-terminal
-bazel query --output=build @android_test_support//tools/android/emulated_devices/generic_phone:android_23_x86
-```
-Which results in output that looks similar to:
-
-```python
-# .../external/android_test_support/tools/android/emulated_devices/generic_phone/BUILD:43:1
-android_device(
-    name = "android_23_x86",
-    visibility = ["//visibility:public"],
-    tags = ["requires-kvm"],
-    generator_name = "generic_phone",
-    generator_function = "make_device",
-    generator_location = "tools/android/emulated_devices/generic_phone/BUILD:43",
-    vertical_resolution = 800,
-    horizontal_resolution = 480,
-    ram = 2048,
-    screen_density = 240,
-    cache = 32,
-    vm_heap = 256,
-    system_image = "@android_test_support//tools/android/emulated_devices/generic_phone:android_23_x86_images",
-    default_properties = "@android_test_support//tools/android/emulated_devices/generic_phone:_android_23_x86_props",
-)
-```
-
-The device target names use this template:
-
-```
-@android_test_support//tools/android/emulated_devices/<device_type>:<system>_<api_level>_x86_qemu2
-```
-
-In order to launch an `android_device`, the `system_image` for the selected API
-level is required. To download the system image, use Android SDK's
-`tools/bin/sdkmanager`. For example, to download the system image for
-`generic_phone:android_23_x86`, run `$sdk/tools/bin/sdkmanager
-"system-images;android-23;default;x86"`.
-
-To see the full list of supported `android_device` targets in
-`@android_test_support`, run the following command:
-
-```posix-terminal
-bazel query 'filter("x86_qemu2$", kind(android_device, @android_test_support//tools/android/emulated_devices/...:*))'
-```
-
-Bazel currently supports x86-based emulators only. For better performance, use
-`QEMU2` `android_device` targets instead of `QEMU` ones.
-
-## Running tests
-
-To run tests, add these lines to your project's
-`<project root>/.bazelrc` file.
- -``` -# Configurations for testing with Bazel -# Select a configuration by running -# `bazel test //my:target --config={headless, gui, local_device}` - -# Headless instrumentation tests (No GUI) -test:headless --test_arg=--enable_display=false - -# Graphical instrumentation tests. Ensure that $DISPLAY is set. -test:gui --test_env=DISPLAY -test:gui --test_arg=--enable_display=true - -# Testing with a local emulator or device. Ensure that `adb devices` lists the -# device. -# Run tests serially. -test:local_device --test_strategy=exclusive -# Use the local device broker type, as opposed to WRAPPED_EMULATOR. -test:local_device --test_arg=--device_broker_type=LOCAL_ADB_SERVER -# Uncomment and set $device_id if there is more than one connected device. -# test:local_device --test_arg=--device_serial_number=$device_id -``` - -Then, use one of the configurations to run tests: - -- `bazel test //my/test:target --config=gui` -- `bazel test //my/test:target --config=headless` -- `bazel test //my/test:target --config=local_device` - -Use __only one configuration__ or tests will fail. - -### Headless testing - -With `Xvfb`, it is possible to test with emulators without the graphical -interface, also known as headless testing. To disable the graphical interface -when running tests, pass the test argument `--enable_display=false` to Bazel: - -```posix-terminal -bazel test //my/test:target --test_arg=--enable_display=false -``` - -### GUI testing - -If the `$DISPLAY` environment variable is set, it's possible to enable the -graphical interface of the emulator while the test is running. To do this, pass -these test arguments to Bazel: - -```posix-terminal -bazel test //my/test:target --test_arg=--enable_display=true --test_env=DISPLAY -``` - -### Testing with a local emulator or device - -Bazel also supports testing directly on a locally launched emulator or connected -device. Pass the flags -`--test_strategy=exclusive` and -`--test_arg=--device_broker_type=LOCAL_ADB_SERVER` to enable local testing mode. -If there is more than one connected device, pass the flag -`--test_arg=--device_serial_number=$device_id` where `$device_id` is the id of -the device/emulator listed in `adb devices`. - -## Sample projects - -If you are looking for canonical project samples, see the [Android testing -samples](https://github.com/googlesamples/android-testing#experimental-bazel-support) -for projects using Espresso and UIAutomator. 
-
-## Espresso setup
-
-If you write UI tests with [Espresso](https://developer.android.com/training/testing/espresso/)
-(`androidx.test.espresso`), you can use the following snippets to set up your
-Bazel workspace with the list of commonly used Espresso artifacts and their
-dependencies:
-
-```
-androidx.test.espresso:espresso-core
-androidx.test:rules
-androidx.test:runner
-javax.inject:javax.inject
-org.hamcrest:java-hamcrest
-junit:junit
-```
-
-One way to organize these dependencies is to create a `//:test_deps` shared
-library in your `<project root>/BUILD.bazel` file:
-
-```python
-java_library(
-    name = "test_deps",
-    visibility = ["//visibility:public"],
-    exports = [
-        "@maven//:androidx_test_espresso_espresso_core",
-        "@maven//:androidx_test_rules",
-        "@maven//:androidx_test_runner",
-        "@maven//:javax_inject_javax_inject",
-        "@maven//:org_hamcrest_java_hamcrest",
-        "@maven//:junit_junit",
-    ],
-)
-```
-
-Then, add the required dependencies in `<project root>/WORKSPACE`:
-
-```python
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-
-RULES_JVM_EXTERNAL_TAG = "2.8"
-RULES_JVM_EXTERNAL_SHA = "79c9850690d7614ecdb72d68394f994fef7534b292c4867ce5e7dec0aa7bdfad"
-
-http_archive(
-    name = "rules_jvm_external",
-    strip_prefix = "rules_jvm_external-%s" % RULES_JVM_EXTERNAL_TAG,
-    sha256 = RULES_JVM_EXTERNAL_SHA,
-    url = "https://github.com/bazelbuild/rules_jvm_external/archive/%s.zip" % RULES_JVM_EXTERNAL_TAG,
-)
-
-load("@rules_jvm_external//:defs.bzl", "maven_install")
-
-maven_install(
-    artifacts = [
-        "junit:junit:4.12",
-        "javax.inject:javax.inject:1",
-        "org.hamcrest:java-hamcrest:2.0.0.0",
-        "androidx.test.espresso:espresso-core:3.1.1",
-        "androidx.test:rules:aar:1.1.1",
-        "androidx.test:runner:aar:1.1.1",
-    ],
-    repositories = [
-        "https://maven.google.com",
-        "https://repo1.maven.org/maven2",
-    ],
-)
-```
-
-Finally, in your test `android_binary` target, add the `//:test_deps`
-dependency:
-
-```python
-android_binary(
-    name = "my_test_app",
-    instruments = "//path/to:app",
-    deps = [
-        "//:test_deps",
-        # ...
-    ],
-    # ...
-)
-```
-
-## Tips
-
-### Reading test logs
-
-Use `--test_output=errors` to print logs for failing tests, or
-`--test_output=all` to print all test output. If you're looking for an
-individual test log, go to
-`$PROJECT_ROOT/bazel-testlogs/path/to/InstrumentationTestTargetName`.
-
-For example, the test logs for the `BasicSample` canonical project are in
-`bazel-testlogs/ui/espresso/BasicSample/BasicSampleInstrumentationTest`. To
-view them, run:
-
-```posix-terminal
-tree bazel-testlogs/ui/espresso/BasicSample/BasicSampleInstrumentationTest
-```
-This results in the following output:
-
-```none
-
-$ tree bazel-testlogs/ui/espresso/BasicSample/BasicSampleInstrumentationTest
-.
-├── adb.409923.log -├── broker_logs -│   ├── aapt_binary.10.ok.txt -│   ├── aapt_binary.11.ok.txt -│   ├── adb.12.ok.txt -│   ├── adb.13.ok.txt -│   ├── adb.14.ok.txt -│   ├── adb.15.fail.txt -│   ├── adb.16.ok.txt -│   ├── adb.17.fail.txt -│   ├── adb.18.ok.txt -│   ├── adb.19.fail.txt -│   ├── adb.20.ok.txt -│   ├── adb.21.ok.txt -│   ├── adb.22.ok.txt -│   ├── adb.23.ok.txt -│   ├── adb.24.fail.txt -│   ├── adb.25.ok.txt -│   ├── adb.26.fail.txt -│   ├── adb.27.ok.txt -│   ├── adb.28.fail.txt -│   ├── adb.29.ok.txt -│   ├── adb.2.ok.txt -│   ├── adb.30.ok.txt -│   ├── adb.3.ok.txt -│   ├── adb.4.ok.txt -│   ├── adb.5.ok.txt -│   ├── adb.6.ok.txt -│   ├── adb.7.ok.txt -│   ├── adb.8.ok.txt -│   ├── adb.9.ok.txt -│   ├── android_23_x86.1.ok.txt -│   └── exec-1 -│   ├── adb-2.txt -│   ├── emulator-2.txt -│   └── mksdcard-1.txt -├── device_logcat -│   └── logcat1635880625641751077.txt -├── emulator_itCqtc.log -├── outputs.zip -├── pipe.log.txt -├── telnet_pipe.log.txt -└── tmpuRh4cy - ├── watchdog.err - └── watchdog.out - -4 directories, 41 files -``` - -### Reading emulator logs - -The emulator logs for `android_device` targets are stored in the `/tmp/` -directory with the name `emulator_xxxxx.log`, where `xxxxx` is a -randomly-generated sequence of characters. - -Use this command to find the latest emulator log: - -```posix-terminal -ls -1t /tmp/emulator_*.log | head -n 1 -``` - -### Testing against multiple API levels - -If you would like to test against multiple API levels, you can use a list -comprehension to create test targets for each API level. For example: - -```python -API_LEVELS = [ - "19", - "20", - "21", - "22", -] - -[android_instrumentation_test( - name = "my_test_%s" % API_LEVEL, - test_app = ":my_test_app", - target_device = "@android_test_support//tools/android/emulated_devices/generic_phone:android_%s_x86_qemu2" % API_LEVEL, -) for API_LEVEL in API_LEVELS] -``` - -## Known issues - -- [Forked adb server processes are not terminated after - tests](https://github.com/bazelbuild/bazel/issues/4853) -- While APK building works on all platforms (Linux, macOS, Windows), testing - only works on Linux. -- Even with `--config=local_adb`, users still need to specify - `android_instrumentation_test.target_device`. -- If using a local device or emulator, Bazel does not uninstall the APKs after - the test. Clean the packages by running this command: - -```posix-terminal -adb shell pm list -packages com.example.android.testing | cut -d ':' -f 2 | tr -d '\r' | xargs --L1 -t adb uninstall -``` diff --git a/8.3.1/docs/android-ndk.mdx b/8.3.1/docs/android-ndk.mdx deleted file mode 100644 index b10a566..0000000 --- a/8.3.1/docs/android-ndk.mdx +++ /dev/null @@ -1,292 +0,0 @@ ---- -title: 'Using the Android Native Development Kit with Bazel' ---- - - - -_If you're new to Bazel, please start with the [Building Android with -Bazel](/start/android-app ) tutorial._ - -## Overview - -Bazel can run in many different build configurations, including several that use -the Android Native Development Kit (NDK) toolchain. This means that normal -`cc_library` and `cc_binary` rules can be compiled for Android directly within -Bazel. Bazel accomplishes this by using the `android_ndk_repository` repository -rule. - -## Prerequisites - -Please ensure that you have installed the Android SDK and NDK. - -To set up the SDK and NDK, add the following snippet to your `WORKSPACE`: - -```python -android_sdk_repository( - name = "androidsdk", # Required. Name *must* be "androidsdk". 
path = "/path/to/sdk", # Optional. Can be omitted if `ANDROID_HOME` environment variable is set.
-)
-
-android_ndk_repository(
-    name = "androidndk", # Required. Name *must* be "androidndk".
-    path = "/path/to/ndk", # Optional. Can be omitted if `ANDROID_NDK_HOME` environment variable is set.
-)
-```
-
-For more information about the `android_ndk_repository` rule, see the [Build
-Encyclopedia entry](/reference/be/android#android_ndk_repository).
-
-If you're using a recent version of the Android NDK (r22 and beyond), use the
-Starlark implementation of `android_ndk_repository`.
-Follow the instructions in
-[its README](https://github.com/bazelbuild/rules_android_ndk).
-
-## Quick start
-
-To build C++ for Android, simply add `cc_library` dependencies to your
-`android_binary` or `android_library` rules.
-
-For example, given the following `BUILD` file for an Android app:
-
-```python
-# In <project>/app/src/main/BUILD.bazel
-
-cc_library(
-    name = "jni_lib",
-    srcs = ["cpp/native-lib.cpp"],
-)
-
-android_library(
-    name = "lib",
-    srcs = ["java/com/example/android/bazel/MainActivity.java"],
-    resource_files = glob(["res/**/*"]),
-    custom_package = "com.example.android.bazel",
-    manifest = "LibraryManifest.xml",
-    deps = [":jni_lib"],
-)
-
-android_binary(
-    name = "app",
-    deps = [":lib"],
-    manifest = "AndroidManifest.xml",
-)
-```
-
-This `BUILD` file results in the following target graph:
-
-![Example results](/docs/images/android_ndk.png "Build graph results")
-
-**Figure 1.** Build graph of Android project with cc_library dependencies.
-
-To build the app, simply run:
-
-```posix-terminal
-bazel build //app/src/main:app
-```
-
-The `bazel build` command compiles the Java files, Android resource files, and
-`cc_library` rules, and packages everything into an APK:
-
-```posix-terminal
-$ zipinfo -1 bazel-bin/app/src/main/app.apk
-nativedeps
-lib/armeabi-v7a/libapp.so
-classes.dex
-AndroidManifest.xml
-...
-res/...
-...
-META-INF/CERT.SF
-META-INF/CERT.RSA
-META-INF/MANIFEST.MF
-```
-
-Bazel compiles all of the cc_libraries into a single shared object (`.so`) file,
-targeted for the `armeabi-v7a` ABI by default. To change this or build for
-multiple ABIs at the same time, see the section on [configuring the target
-ABI](#configuring-target-abi).
-
-## Example setup
-
-This example is available in the [Bazel examples
-repository](https://github.com/bazelbuild/examples/tree/master/android/ndk).
-
-In the `BUILD.bazel` file, three targets are defined with the `android_binary`,
-`android_library`, and `cc_library` rules.
-
-The `android_binary` top-level target builds the APK.
-
-The `cc_library` target contains a single C++ source file with a JNI function
-implementation:
-
-```c++
-#include <jni.h>
-#include <string>
-
-extern "C"
-JNIEXPORT jstring
-JNICALL
-Java_com_example_android_bazel_MainActivity_stringFromJNI(
-        JNIEnv *env,
-        jobject /* this */) {
-    std::string hello = "Hello from C++";
-    return env->NewStringUTF(hello.c_str());
-}
-```
-
-The `android_library` target specifies the Java sources, resource files, and the
-dependency on a `cc_library` target. For this example, `MainActivity.java` loads
-the shared object file `libapp.so`, and defines the method signature for the JNI
-function:
-
-```java
-public class MainActivity extends AppCompatActivity {
-
-    static {
-        System.loadLibrary("app");
-    }
-
-    @Override
-    protected void onCreate(Bundle savedInstanceState) {
-        // ...
-
-    }
-
-    public native String stringFromJNI();
-
-}
-```
-
-Note: The name of the native library is derived from the name of the top
-level `android_binary` target. In this example, it is `app`.
-
-## Configuring the target ABI
-
-To configure the target ABI, use the `--android_platforms` flag as follows:
-
-```posix-terminal
-bazel build //:app --android_platforms=<comma-separated list of platforms>
-```
-
-Just like the `--platforms` flag, the values passed to `--android_platforms` are
-the labels of [`platform`](https://bazel.build/reference/be/platforms-and-toolchains#platform)
-targets, using standard constraint values to describe your device.
-
-For example, for an Android device with a 64-bit ARM processor, you'd define
-your platform like this:
-
-```py
-platform(
-    name = "android_arm64",
-    constraint_values = [
-        "@platforms//os:android",
-        "@platforms//cpu:arm64",
-    ],
-)
-```
-
-Every Android `platform` should use the [`@platforms//os:android`](https://github.com/bazelbuild/platforms/blob/33a3b209f94856193266871b1545054afb90bb28/os/BUILD#L36)
-OS constraint. To migrate the CPU constraint, check this chart:
-
-CPU Value     | Platform
-------------- | ------------------------------------------
-`armeabi-v7a` | `@platforms//cpu:armv7`
-`arm64-v8a`   | `@platforms//cpu:arm64`
-`x86`         | `@platforms//cpu:x86_32`
-`x86_64`      | `@platforms//cpu:x86_64`
-
-And, of course, for a multi-architecture APK, you pass multiple labels, for
-example: `--android_platforms=//:arm64,//:x86_64` (assuming you defined those in
-your top-level `BUILD.bazel` file).
-
-Bazel is unable to select a default Android platform, so one must be defined and
-specified with `--android_platforms`.
-
-Depending on the NDK revision and Android API level, the following ABIs are
-available:
-
-| NDK revision | ABIs                                                        |
-|--------------|-------------------------------------------------------------|
-| 16 and lower | armeabi, armeabi-v7a, arm64-v8a, mips, mips64, x86, x86\_64 |
-| 17 and above | armeabi-v7a, arm64-v8a, x86, x86\_64                        |
-
-See [the NDK docs](https://developer.android.com/ndk/guides/abis.html)
-for more information on these ABIs.
-
-Multi-ABI Fat APKs are not recommended for release builds since they increase
-the size of the APK, but can be useful for development and QA builds.
-
-## Selecting a C++ standard
-
-Use the following flags to build according to a C++ standard:
-
-| C++ Standard | Flag                    |
-|--------------|-------------------------|
-| C++98        | Default, no flag needed |
-| C++11        | `--cxxopt=-std=c++11`   |
-| C++14        | `--cxxopt=-std=c++14`   |
-| C++17        | `--cxxopt=-std=c++17`   |
-
-For example:
-
-```posix-terminal
-bazel build //:app --cxxopt=-std=c++11
-```
-
-Read more about passing compiler and linker flags with `--cxxopt`, `--copt`, and
-`--linkopt` in the [User Manual](/docs/user-manual#cxxopt).
-
-Compiler and linker flags can also be specified as attributes in `cc_library`
-using `copts` and `linkopts`. For example:
-
-```python
-cc_library(
-    name = "jni_lib",
-    srcs = ["cpp/native-lib.cpp"],
-    copts = ["-std=c++11"],
-    linkopts = ["-ldl"],  # link against libdl
-)
-```
-
-## Building a `cc_library` for Android without using `android_binary`
-
-To build a standalone `cc_binary` or `cc_library` for Android without using an
-`android_binary`, use the `--platforms` flag.
-
-For example, assuming you have defined Android platforms in
-`my/platforms/BUILD`:
-
-```posix-terminal
-bazel build //my/cc/jni:target \
-      --platforms=//my/platforms:x86_64
-```
-
-With this approach, the entire build tree is affected.
-
-Note: All of the targets on the command line must be compatible with
-building for Android when specifying these flags, which may make it difficult to
-use [Bazel wild-cards](/run/build#specifying-build-targets) like
-`/...` and `:all`.
-
-These flags can be put into a `bazelrc` config (one for each ABI), in
-`<project>/.bazelrc`:
-
-```
-common:android_x86 --platforms=//my/platforms:x86
-
-common:android_armeabi-v7a --platforms=//my/platforms:armeabi-v7a
-
-# In general
-common:android_<abi> --platforms=//my/platforms:<abi>
-```
-
-Then, to build a `cc_library` for `x86` for example, run:
-
-```posix-terminal
-bazel build //my/cc/jni:target --config=android_x86
-```
-
-In general, use this method for low-level targets (like `cc_library`) or when
-you know exactly what you're building; rely on the automatic configuration
-transitions from `android_binary` for high-level targets where you're expecting
-to build a lot of targets you don't control.
diff --git a/8.3.1/docs/bazel-and-android.mdx b/8.3.1/docs/bazel-and-android.mdx
deleted file mode 100644
index bf3625c..0000000
--- a/8.3.1/docs/bazel-and-android.mdx
+++ /dev/null
@@ -1,45 +0,0 @@
----
-title: 'Android and Bazel'
----
-
-
-
-This page contains resources that help you use Bazel with Android projects. It
-links to a tutorial, build rules, and other information specific to building
-Android projects with Bazel.
-
-## Getting started
-
-The following resources will help you work with Bazel on Android projects:
-
-* [Tutorial: Building an Android app](/start/android-app). This
-  tutorial is a good place to start learning about Bazel commands and concepts,
-  and how to build Android apps with Bazel.
-* [Codelab: Building Android Apps with Bazel](https://developer.android.com/codelabs/bazel-android-intro#0).
-  This codelab explains how to build Android apps with Bazel.
-
-## Features
-
-Bazel has Android rules for building and testing Android apps, integrating with
-the SDK/NDK, and creating emulator images. There are also Bazel plugins for
-Android Studio and IntelliJ.
-
-* [Android rules](/reference/be/android). The Build Encyclopedia describes the rules
-  for building and testing Android apps with Bazel.
-* [Integration with Android Studio](/install/ide). Bazel is compatible with
-  Android Studio using the [Android Studio with Bazel](https://ij.bazel.build/)
-  plugin.
-* [`mobile-install` for Android](/docs/mobile-install). Bazel's `mobile-install`
-  feature provides automated build-and-deploy functionality for building and
-  testing Android apps directly on Android devices and emulators.
-* [Android instrumentation testing](/docs/android-instrumentation-test) on
-  emulators and devices.
-* [Android NDK integration](/docs/android-ndk). Bazel supports compiling to
-  native code through direct NDK integration and the C++ rules.
-* [Android build performance](/docs/android-build-performance). This page
-  provides information on optimizing build performance for Android apps.
-
-## Further reading
-
-* Integrating with dependencies from Google Maven and Maven Central with [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external).
-* Learn [How Android Builds Work in Bazel](https://blog.bazel.build/2018/02/14/how-android-builds-work-in-bazel.html).
diff --git a/8.3.1/docs/bazel-and-apple.mdx b/8.3.1/docs/bazel-and-apple.mdx deleted file mode 100644 index 6e4a06f..0000000 --- a/8.3.1/docs/bazel-and-apple.mdx +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: 'Apple Apps and Bazel' ---- - - - -This page contains resources that help you use Bazel to build macOS and iOS -projects. It links to a tutorial, build rules, and other information specific to -using Bazel to build and test for those platforms. - -## Working with Bazel - -The following resources will help you work with Bazel on macOS and iOS projects: - -* [Tutorial: Building an iOS app](/start/ios-app) -* [Objective-C build rules](/reference/be/objective-c) -* [General Apple rules](https://github.com/bazelbuild/rules_apple) -* [Integration with Xcode](/install/ide) - -## Migrating to Bazel - -If you currently build your macOS and iOS projects with Xcode, follow the steps -in the migration guide to start building them with Bazel: - -* [Migrating from Xcode to Bazel](/migrate/xcode) - -## Apple apps and new rules - -**Note**: Creating new rules is for advanced build and test scenarios. -You do not need it when getting started with Bazel. - -The following modules, configuration fragments, and providers will help you -[extend Bazel's capabilities](/extending/concepts) -when building your macOS and iOS projects: - -* Modules: - - * [`apple_bitcode_mode`](/rules/lib/builtins/apple_bitcode_mode) - * [`apple_common`](/rules/lib/toplevel/apple_common) - * [`apple_platform`](/rules/lib/builtins/apple_platform) - * [`apple_platform_type`](/rules/lib/builtins/apple_platform_type) - * [`apple_toolchain`](/rules/lib/builtins/apple_toolchain) - -* Configuration fragments: - - * [`apple`](/rules/lib/fragments/apple) - -* Providers: - - * [`ObjcProvider`](/rules/lib/providers/ObjcProvider) - * [`XcodeVersionConfig`](/rules/lib/providers/XcodeVersionConfig) - -## Xcode selection - -If your build requires Xcode, Bazel will select an appropriate version based on -the `--xcode_config` and `--xcode_version` flags. The `--xcode_config` consumes -the set of available Xcode versions and sets a default version if -`--xcode_version` is not passed. This default is overridden by the -`--xcode_version` flag, as long as it is set to an Xcode version that is -represented in the `--xcode_config` target. - -If you do not pass `--xcode_config`, Bazel will use the autogenerated -[`XcodeVersionConfig`](/rules/lib/providers/XcodeVersionConfig) that represents the -Xcode versions available on your host machine. The default version is -the newest available Xcode version. This is appropriate for local execution. - -If you are performing remote builds, you should set `--xcode_config` to an -[`xcode_config`](/reference/be/objective-c#xcode_config) -target whose `versions` attribute is a list of remotely available -[`xcode_version`](/reference/be/objective-c#xcode_version) -targets, and whose `default` attribute is one of these -[`xcode_versions`](/reference/be/objective-c#xcode_version). - -If you are using dynamic execution, you should set `--xcode_config` to an -[`xcode_config`](/reference/be/objective-c#xcode_config) -target whose `remote_versions` attribute is an -[`available_xcodes`](/reference/be/workspace#available_xcodes) -target containing the remotely available Xcode versions, and whose -`local_versions` attribute is an -[`available_xcodes`](/reference/be/workspace#available_xcodes) -target containing the locally available Xcode versions. 
For `local_versions`,
-you probably want to use the autogenerated
-`@local_config_xcode//:host_available_xcodes`. The default Xcode version is the
-newest mutually available version, if there is one, otherwise the default of the
-`local_versions` target. If you prefer to use the `local_versions` default
-as the default, you can pass `--experimental_prefer_mutual_default=false`.
diff --git a/8.3.1/docs/bazel-and-cpp.mdx b/8.3.1/docs/bazel-and-cpp.mdx
deleted file mode 100644
index 9ade384..0000000
--- a/8.3.1/docs/bazel-and-cpp.mdx
+++ /dev/null
@@ -1,102 +0,0 @@
----
-title: 'C++ and Bazel'
----
-
-
-This page contains resources that help you use Bazel with C++ projects. It links
-to a tutorial, build rules, and other information specific to building C++
-projects with Bazel.
-
-## Working with Bazel
-
-The following resources will help you work with Bazel on C++ projects:
-
-* [Tutorial: Building a C++ project](/start/cpp)
-* [C++ common use cases](/tutorials/cpp-use-cases)
-* [C/C++ rules](/reference/be/c-cpp)
-* Essential Libraries
-  - [Abseil](https://abseil.io/docs/cpp/quickstart)
-  - [Boost](https://github.com/nelhage/rules_boost)
-  - [HTTPS Requests: CPR and libcurl](https://github.com/hedronvision/bazel-make-cc-https-easy)
-* [C++ toolchain configuration](/docs/cc-toolchain-config-reference)
-* [Tutorial: Configuring C++ toolchains](/tutorials/ccp-toolchain-config)
-* [Integrating with C++ rules](/configure/integrate-cpp)
-
-## Best practices
-
-In addition to [general Bazel best practices](/configure/best-practices), below are
-best practices specific to C++ projects.
-
-### BUILD files
-
-Follow the guidelines below when creating your BUILD files:
-
-* Each `BUILD` file should contain one [`cc_library`](/reference/be/c-cpp#cc_library)
-  rule target per compilation unit in the directory.
-
-* You should granularize your C++ libraries as much as
-  possible to maximize incrementality and parallelize the build.
-
-* If there is a single source file in `srcs`, name the library the same as
-  that C++ file's name. This library should contain C++ file(s), any matching
-  header file(s), and the library's direct dependencies. For example:
-
-  ```python
-  cc_library(
-      name = "mylib",
-      srcs = ["mylib.cc"],
-      hdrs = ["mylib.h"],
-      deps = [":lower-level-lib"]
-  )
-  ```
-
-* Use one `cc_test` rule target per `cc_library` target in the file. Name the
-  target `[library-name]_test` and the source file `[library-name]_test.cc`.
-  For example, a test target for the `mylib` library target shown above would
-  look like this:
-
-  ```python
-  cc_test(
-      name = "mylib_test",
-      srcs = ["mylib_test.cc"],
-      deps = [":mylib"]
-  )
-  ```
-
-### Include paths
-
-Follow these guidelines for include paths:
-
-* Make all include paths relative to the workspace directory.
-
-* Use quoted includes (`#include "foo/bar/baz.h"`) for non-system headers, not
-  angle-brackets (`#include <foo/bar/baz.h>`).
-
-* Avoid using UNIX directory shortcuts, such as `.` (current directory) or `..`
-  (parent directory).
-
-* For legacy or `third_party` code that requires includes pointing outside the
-  project repository, such as external repository includes requiring a prefix,
-  use the [`include_prefix`](/reference/be/c-cpp#cc_library.include_prefix) and
-  [`strip_include_prefix`](/reference/be/c-cpp#cc_library.strip_include_prefix)
-  arguments on the `cc_library` rule target.
-
-### Toolchain features
-
-The following optional [features](/docs/cc-toolchain-config-reference#features)
-can improve the hygiene of a C++ project.
They can be enabled using the
-`--features` command-line flag or the `features` attribute of
-[`repo`](/external/overview#repo.bazel),
-[`package`](/reference/be/functions#package) or `cc_*` rules:
-
-* The `parse_headers` feature makes it so that the C++ compiler is used to parse
-  (but not compile) all header files in the built targets and their dependencies
-  when using the
-  [`--process_headers_in_dependencies`](/reference/command-line-reference#flag--process_headers_in_dependencies)
-  flag. This can help catch issues in header-only libraries and ensure that
-  headers are self-contained and independent of the order in which they are
-  included.
-* The `layering_check` feature enforces that targets only include headers
-  provided by their direct dependencies. The default toolchain supports this
-  feature on Linux with `clang` as the compiler.
diff --git a/8.3.1/docs/bazel-and-java.mdx b/8.3.1/docs/bazel-and-java.mdx
deleted file mode 100644
index e9476aa..0000000
--- a/8.3.1/docs/bazel-and-java.mdx
+++ /dev/null
@@ -1,343 +0,0 @@
----
-title: 'Java and Bazel'
----
-
-
-This page contains resources that help you use Bazel with Java projects. It
-links to a tutorial, build rules, and other information specific to building
-Java projects with Bazel.
-
-## Working with Bazel
-
-The following resources will help you work with Bazel on Java projects:
-
-* [Tutorial: Building a Java Project](/start/java)
-* [Java rules](/reference/be/java)
-
-## Migrating to Bazel
-
-If you currently build your Java projects with Maven, follow the steps in the
-migration guide to start building your Maven projects with Bazel:
-
-* [Migrating from Maven to Bazel](/migrate/maven)
-
-## Java versions
-
-There are two relevant versions of Java that are set with configuration flags:
-
-* the version of the source files in the repository
-* the version of the Java runtime that is used to execute the code and to test
-  it
-
-### Configuring the version of the source code in your repository
-
-Without additional configuration, Bazel assumes all Java source files in the
-repository are written in a single Java version. To specify the version of the
-sources in the repository, add `build --java_language_version={ver}` to the
-`.bazelrc` file, where `{ver}` is for example `11`. Bazel repository owners
-should set this flag so that Bazel and its users can reference the source code's
-Java version number. For more details, see
-[Java language version flag](/docs/user-manual#java-language-version).
-
-### Configuring the JVM used to execute and test the code
-
-Bazel uses one JDK for compilation and another JVM to execute and test the code.
-
-By default Bazel compiles the code using a JDK it downloads, and it executes and
-tests the code with the JVM installed on the local machine. Bazel searches for
-that JVM using `JAVA_HOME` or the `PATH`.
-
-The resulting binaries use the locally installed JVM and its system
-libraries, which means the resulting binaries depend on what is installed on the
-machine.
-
-To configure the JVM used for execution and testing, use the
-`--java_runtime_version` flag. The default value is `local_jdk`.
-
-### Hermetic testing and compilation
-
-To create a hermetic compile, you can use the command-line flag
-`--java_runtime_version=remotejdk_11`. The code is compiled for, executed, and
-tested on a JVM downloaded from a remote repository. For more details, see
-[Java runtime version flag](/docs/user-manual#java_runtime_version).
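-
-For example, a minimal hermetic invocation might look like the sketch below
-(the test target `//java/com/example:my_test` is hypothetical; the two flags are
-the ones described above):
-
-```posix-terminal
-bazel test //java/com/example:my_test \
-  --java_language_version=11 \
-  --java_runtime_version=remotejdk_11
-```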
-
-### Configuring compilation and execution of build tools in Java
-
-There is a second pair of JDK and JVM used to build and execute tools, which are
-used in the build process but are not part of the build results. That JDK and
-JVM are controlled using `--tool_java_language_version` and
-`--tool_java_runtime_version`. The default values are `11` and `remotejdk_11`,
-respectively.
-
-#### Compiling using locally installed JDK
-
-By default, Bazel compiles using a remote JDK, because it overrides the JDK's
-internals. Compilation toolchains using the locally installed JDK are
-configured, but not used.
-
-To compile using the locally installed JDK (that is, to use the compilation
-toolchains for the local JDK), pass the additional flag
-`--extra_toolchains=@local_jdk//:all`. Note that this may not work with JDKs
-from arbitrary vendors.
-
-For more details, see
-[configuring Java toolchains](#config-java-toolchains).
-
-## Best practices
-
-In addition to [general Bazel best practices](/configure/best-practices), below are
-best practices specific to Java projects.
-
-### Directory structure
-
-Prefer Maven's standard directory layout (sources under `src/main/java`, tests
-under `src/test/java`).
-
-### BUILD files
-
-Follow these guidelines when creating your `BUILD` files:
-
-* Use one `BUILD` file per directory containing Java sources, because this
-  improves build performance.
-
-* Every `BUILD` file should contain one `java_library` rule that looks like
-  this:
-
-  ```python
-  java_library(
-      name = "directory-name",
-      srcs = glob(["*.java"]),
-      deps = [...],
-  )
-  ```
-
-* The name of the library should be the name of the directory containing the
-  `BUILD` file. This makes the label of the library shorter: use
-  `"//package"` instead of `"//package:package"`.
-
-* The sources should be a non-recursive [`glob`](/reference/be/functions#glob) of
-  all Java files in the directory.
-
-* Tests should be in a matching directory under `src/test` and depend on this
-  library.
-
-## Creating new rules for advanced Java builds
-
-**Note**: Creating new rules is for advanced build and test scenarios. You do
-not need it when getting started with Bazel.
-
-The following modules, configuration fragments, and providers will help you
-[extend Bazel's capabilities](/extending/concepts) when building your Java
-projects:
-
-* Main Java module: [`java_common`](/rules/lib/toplevel/java_common)
-* Main Java provider: [`JavaInfo`](/rules/lib/providers/JavaInfo)
-* Configuration fragment: [`java`](/rules/lib/fragments/java)
-* Other modules:
-
-  * [`java_annotation_processing`](/rules/lib/builtins/java_annotation_processing)
-  * [`java_compilation_info`](/rules/lib/providers/java_compilation_info)
-  * [`java_output_jars`](/rules/lib/providers/java_output_jars)
-  * [`JavaRuntimeInfo`](/rules/lib/providers/JavaRuntimeInfo)
-  * [`JavaToolchainInfo`](/rules/lib/providers/JavaToolchainInfo)
-
-## Configuring the Java toolchains
-
-Bazel uses two types of Java toolchains:
-
-- execution, used to execute and test Java binaries, controlled with the
-  `--java_runtime_version` flag
-- compilation, used to compile Java sources, controlled with the
-  `--java_language_version` flag
-
-### Configuring additional execution toolchains
-
-An execution toolchain is the JVM, either local or from a repository, together
-with some additional information about its version, operating system, and CPU
-architecture.
-
-Java execution toolchains may be added using the `local_java_repository` or
-`remote_java_repository` repo rules in a module extension.
Adding the rule makes -the JVM available using a flag. When multiple definitions for the same operating -system and CPU architecture are given, the first one is used. - -Example configuration of local JVM: - -```python -load("@rules_java//toolchains:local_java_repository.bzl", "local_java_repository") - -local_java_repository( - name = "additionaljdk", # Can be used with --java_runtime_version=additionaljdk, --java_runtime_version=11 or --java_runtime_version=additionaljdk_11 - version = 11, # Optional, if not set it is autodetected - java_home = "/usr/lib/jdk-15/", # Path to directory containing bin/java -) -``` - -Example configuration of remote JVM: - -```python -load("@rules_java//toolchains:remote_java_repository.bzl", "remote_java_repository") - -remote_java_repository( - name = "openjdk_canary_linux_arm", - prefix = "openjdk_canary", # Can be used with --java_runtime_version=openjdk_canary_11 - version = "11", # or --java_runtime_version=11 - target_compatible_with = [ # Specifies constraints this JVM is compatible with - "@platforms//cpu:arm", - "@platforms//os:linux", - ], - urls = ..., # Other parameters are from http_repository rule. - sha256 = ..., - strip_prefix = ... -) -``` - -### Configuring additional compilation toolchains - -Compilation toolchain is composed of JDK and multiple tools that Bazel uses -during the compilation and that provides additional features, such as: Error -Prone, strict Java dependencies, header compilation, Android desugaring, -coverage instrumentation, and genclass handling for IDEs. - -JavaBuilder is a Bazel-bundled tool that executes compilation, and provides the -aforementioned features. Actual compilation is executed using the internal -compiler by the JDK. The JDK used for compilation is specified by `java_runtime` -attribute of the toolchain. - -Bazel overrides some JDK internals. In case of JDK version > 9, -`java.compiler` and `jdk.compiler` modules are patched using JDK's flag -`--patch_module`. In case of JDK version 8, the Java compiler is patched using -`-Xbootclasspath` flag. - -VanillaJavaBuilder is a second implementation of JavaBuilder, -which does not modify JDK's internal compiler and does not have any of the -additional features. VanillaJavaBuilder is not used by any of the built-in -toolchains. - -In addition to JavaBuilder, Bazel uses several other tools during compilation. - -The `ijar` tool processes `jar` files to remove everything except call -signatures. Resulting jars are called header jars. They are used to improve the -compilation incrementality by only recompiling downstream dependents when the -body of a function changes. - -The `singlejar` tool packs together multiple `jar` files into a single one. - -The `genclass` tool post-processes the output of a Java compilation, and produces -a `jar` containing only the class files for sources that were generated by -annotation processors. - -The `JacocoRunner` tool runs Jacoco over instrumented files and outputs results in -LCOV format. - -The `TestRunner` tool executes JUnit 4 tests in a controlled environment. - -You can reconfigure the compilation by adding `default_java_toolchain` macro to -a `BUILD` file and registering it either by adding `register_toolchains` rule to -the `MODULE.bazel` file or by using -[`--extra_toolchains`](/docs/user-manual#extra-toolchains) flag. - -The toolchain is only used when the `source_version` attribute matches the -value specified by `--java_language_version` flag. 
-
-Example toolchain configuration:
-
-```python
-load(
-    "@rules_java//toolchains:default_java_toolchain.bzl",
-    "default_java_toolchain", "DEFAULT_TOOLCHAIN_CONFIGURATION", "BASE_JDK9_JVM_OPTS", "DEFAULT_JAVACOPTS"
-)
-
-default_java_toolchain(
-    name = "repository_default_toolchain",
-    configuration = DEFAULT_TOOLCHAIN_CONFIGURATION,        # One of predefined configurations
-    # Other parameters are from java_toolchain rule:
-    java_runtime = "@rules_java//toolchains:remote_jdk11",  # JDK to use for compilation and toolchain's tools execution
-    jvm_opts = BASE_JDK9_JVM_OPTS + ["--enable_preview"],   # Additional JDK options
-    javacopts = DEFAULT_JAVACOPTS + ["--enable_preview"],   # Additional javac options
-    source_version = "9",
-)
-```
-
-The toolchain can be enabled with
-`--extra_toolchains=//:repository_default_toolchain_definition`
-or by adding `register_toolchains("//:repository_default_toolchain_definition")`
-to the workspace.
-
-Predefined configurations:
-
-- `DEFAULT_TOOLCHAIN_CONFIGURATION`: all features, supports JDK versions >= 9
-- `VANILLA_TOOLCHAIN_CONFIGURATION`: no additional features, supports JDKs of
-  arbitrary vendors.
-- `PREBUILT_TOOLCHAIN_CONFIGURATION`: same as default, but only use prebuilt
-  tools (`ijar`, `singlejar`)
-- `NONPREBUILT_TOOLCHAIN_CONFIGURATION`: same as default, but all tools are
-  built from sources (this may be useful on operating systems with a different
-  libc)
-
-#### Configuring JVM and Java compiler flags
-
-You may configure JVM and javac flags either with command-line flags or with
-`default_java_toolchain` attributes.
-
-The relevant flags are `--jvmopt`, `--host_jvmopt`, `--javacopt`, and
-`--host_javacopt`.
-
-The relevant `default_java_toolchain` attributes are `javacopts`, `jvm_opts`,
-`javabuilder_jvm_opts`, and `turbine_jvm_opts`.
-
-#### Package-specific Java compiler flags configuration
-
-You can configure different Java compiler flags for specific source
-files using the `package_configuration` attribute of `default_java_toolchain`.
-Please refer to the example below.
-
-```python
-load("@rules_java//toolchains:default_java_toolchain.bzl", "default_java_toolchain")
-
-# This is a convenience macro that inherits values from Bazel's default java_toolchain
-default_java_toolchain(
-    name = "toolchain",
-    package_configuration = [
-        ":error_prone",
-    ],
-    visibility = ["//visibility:public"],
-)
-
-# This associates a set of javac flags with a set of packages
-java_package_configuration(
-    name = "error_prone",
-    javacopts = [
-        "-Xep:MissingOverride:ERROR",
-    ],
-    packages = ["error_prone_packages"],
-)
-
-# This is a regular package_group, which is used to specify a set of packages to apply flags to
-package_group(
-    name = "error_prone_packages",
-    packages = [
-        "//foo/...",
-        "-//foo/bar/...",  # this is an exclusion
-    ],
-)
-```
-
-#### Multiple versions of Java source code in a single repository
-
-Bazel only supports compiling a single version of Java sources in a build.
-This means that when building a Java test or an application, all
-dependencies are built against the same Java version.
-
-However, separate builds may be executed using different flags.
-
-To make the task of using different flags easier, sets of flags for a specific
-version may be grouped with `.bazelrc` configs:
-
-```
-build:java8 --java_language_version=8
-build:java8 --java_runtime_version=local_jdk_8
-build:java11 --java_language_version=11
-build:java11 --java_runtime_version=remotejdk_11
-```
-
-These configs can be used with the `--config` flag, for example
-`bazel test --config=java11 //:java11_test`.
diff --git a/8.3.1/docs/bazel-and-javascript.mdx b/8.3.1/docs/bazel-and-javascript.mdx
deleted file mode 100644
index 63d8018..0000000
--- a/8.3.1/docs/bazel-and-javascript.mdx
+++ /dev/null
@@ -1,24 +0,0 @@
----
-title: 'JavaScript and Bazel'
----
-
-
-This page contains resources that help you use Bazel with JavaScript projects.
-It links to build rules and other information specific to building JavaScript
-with Bazel.
-
-The following resources will help you work with Bazel on JavaScript projects:
-
-* [NodeJS toolchain](https://github.com/bazelbuild/rules_nodejs)
-* [rules_js](https://github.com/aspect-build/rules_js) - Bazel rules for building JavaScript programs
-* [rules_esbuild](https://github.com/aspect-build/rules_esbuild) - Bazel rules for the [esbuild](https://esbuild.github.io) JS bundler
-* [rules_terser](https://github.com/aspect-build/rules_terser) - Bazel rules for [Terser](https://terser.org) - a JavaScript minifier
-* [rules_swc](https://github.com/aspect-build/rules_swc) - Bazel rules for [swc](https://swc.rs)
-* [rules_ts](https://github.com/aspect-build/rules_ts) - Bazel rules for [TypeScript](http://typescriptlang.org)
-* [rules_webpack](https://github.com/aspect-build/rules_webpack) - Bazel rules for [Webpack](https://webpack.js.org)
-* [rules_rollup](https://github.com/aspect-build/rules_rollup) - Bazel rules for [Rollup](https://rollupjs.org) - a JavaScript bundler
-* [rules_jest](https://github.com/aspect-build/rules_jest) - Bazel rules to run tests using [Jest](https://jestjs.io)
-* [rules_jasmine](https://github.com/aspect-build/rules_jasmine) - Bazel rules to run tests using [Jasmine](https://jasmine.github.io/)
-* [rules_cypress](https://github.com/aspect-build/rules_cypress) - Bazel rules to run tests using [Cypress](https://cypress.io)
-* [rules_deno](https://github.com/aspect-build/rules_deno) - Bazel rules for [Deno](http://deno.land)
diff --git a/8.3.1/docs/configurable-attributes.mdx b/8.3.1/docs/configurable-attributes.mdx
deleted file mode 100644
index 3515852..0000000
--- a/8.3.1/docs/configurable-attributes.mdx
+++ /dev/null
@@ -1,1099 +0,0 @@
----
-title: 'Configurable Build Attributes'
----
-
-
-**_Configurable attributes_**, commonly known as [`select()`](
-/reference/be/functions#select), is a Bazel feature that lets users toggle the values
-of build rule attributes at the command line.
-
-This can be used, for example, for a multiplatform library that automatically
-chooses the appropriate implementation for the architecture, or for a
-feature-configurable binary that can be customized at build time.
- -## Example - -```python -# myapp/BUILD - -cc_binary( - name = "mybinary", - srcs = ["main.cc"], - deps = select({ - ":arm_build": [":arm_lib"], - ":x86_debug_build": [":x86_dev_lib"], - "//conditions:default": [":generic_lib"], - }), -) - -config_setting( - name = "arm_build", - values = {"cpu": "arm"}, -) - -config_setting( - name = "x86_debug_build", - values = { - "cpu": "x86", - "compilation_mode": "dbg", - }, -) -``` - -This declares a `cc_binary` that "chooses" its deps based on the flags at the -command line. Specifically, `deps` becomes: - - - - - - - - - - - - - - - - - - - - - - -
| Command | `deps =` |
-| ------- | -------- |
-| `bazel build //myapp:mybinary --cpu=arm` | `[":arm_lib"]` |
-| `bazel build //myapp:mybinary -c dbg --cpu=x86` | `[":x86_dev_lib"]` |
-| `bazel build //myapp:mybinary --cpu=ppc` | `[":generic_lib"]` |
-| `bazel build //myapp:mybinary -c dbg --cpu=ppc` | `[":generic_lib"]` |
- -`select()` serves as a placeholder for a value that will be chosen based on -*configuration conditions*, which are labels referencing [`config_setting`](/reference/be/general#config_setting) -targets. By using `select()` in a configurable attribute, the attribute -effectively adopts different values when different conditions hold. - -Matches must be unambiguous: if multiple conditions match then either -* They all resolve to the same value. For example, when running on linux x86, this is unambiguous - `{"@platforms//os:linux": "Hello", "@platforms//cpu:x86_64": "Hello"}` because both branches resolve to "hello". -* One's `values` is a strict superset of all others'. For example, `values = {"cpu": "x86", "compilation_mode": "dbg"}` - is an unambiguous specialization of `values = {"cpu": "x86"}`. - -The built-in condition [`//conditions:default`](#default-condition) automatically matches when -nothing else does. - -While this example uses `deps`, `select()` works just as well on `srcs`, -`resources`, `cmd`, and most other attributes. Only a small number of attributes -are *non-configurable*, and these are clearly annotated. For example, -`config_setting`'s own -[`values`](/reference/be/general#config_setting.values) attribute is non-configurable. - -## `select()` and dependencies - -Certain attributes change the build parameters for all transitive dependencies -under a target. For example, `genrule`'s `tools` changes `--cpu` to the CPU of -the machine running Bazel (which, thanks to cross-compilation, may be different -than the CPU the target is built for). This is known as a -[configuration transition](/reference/glossary#transition). - -Given - -```python -#myapp/BUILD - -config_setting( - name = "arm_cpu", - values = {"cpu": "arm"}, -) - -config_setting( - name = "x86_cpu", - values = {"cpu": "x86"}, -) - -genrule( - name = "my_genrule", - srcs = select({ - ":arm_cpu": ["g_arm.src"], - ":x86_cpu": ["g_x86.src"], - }), - tools = select({ - ":arm_cpu": [":tool1"], - ":x86_cpu": [":tool2"], - }), -) - -cc_binary( - name = "tool1", - srcs = select({ - ":arm_cpu": ["armtool.cc"], - ":x86_cpu": ["x86tool.cc"], - }), -) -``` - -running - -```sh -$ bazel build //myapp:my_genrule --cpu=arm -``` - -on an `x86` developer machine binds the build to `g_arm.src`, `tool1`, and -`x86tool.cc`. Both of the `select`s attached to `my_genrule` use `my_genrule`'s -build parameters, which include `--cpu=arm`. The `tools` attribute changes -`--cpu` to `x86` for `tool1` and its transitive dependencies. The `select` on -`tool1` uses `tool1`'s build parameters, which include `--cpu=x86`. - -## Configuration conditions - -Each key in a configurable attribute is a label reference to a -[`config_setting`](/reference/be/general#config_setting) or -[`constraint_value`](/reference/be/platforms-and-toolchains#constraint_value). - -`config_setting` is just a collection of -expected command line flag settings. By encapsulating these in a target, it's -easy to maintain "standard" conditions users can reference from multiple places. - -`constraint_value` provides support for [multi-platform behavior](#platforms). - -### Built-in flags - -Flags like `--cpu` are built into Bazel: the build tool natively understands -them for all builds in all projects. 
These are specified with -[`config_setting`](/reference/be/general#config_setting)'s -[`values`](/reference/be/general#config_setting.values) attribute: - -```python -config_setting( - name = "meaningful_condition_name", - values = { - "flag1": "value1", - "flag2": "value2", - ... - }, -) -``` - -`flagN` is a flag name (without `--`, so `"cpu"` instead of `"--cpu"`). `valueN` -is the expected value for that flag. `:meaningful_condition_name` matches if -*every* entry in `values` matches. Order is irrelevant. - -`valueN` is parsed as if it was set on the command line. This means: - -* `values = { "compilation_mode": "opt" }` matches `bazel build -c opt` -* `values = { "force_pic": "true" }` matches `bazel build --force_pic=1` -* `values = { "force_pic": "0" }` matches `bazel build --noforce_pic` - -`config_setting` only supports flags that affect target behavior. For example, -[`--show_progress`](/docs/user-manual#show-progress) isn't allowed because -it only affects how Bazel reports progress to the user. Targets can't use that -flag to construct their results. The exact set of supported flags isn't -documented. In practice, most flags that "make sense" work. - -### Custom flags - -You can model your own project-specific flags with -[Starlark build settings][BuildSettings]. Unlike built-in flags, these are -defined as build targets, so Bazel references them with target labels. - -These are triggered with [`config_setting`](/reference/be/general#config_setting)'s -[`flag_values`](/reference/be/general#config_setting.flag_values) -attribute: - -```python -config_setting( - name = "meaningful_condition_name", - flag_values = { - "//myflags:flag1": "value1", - "//myflags:flag2": "value2", - ... - }, -) -``` - -Behavior is the same as for [built-in flags](#built-in-flags). See [here](https://github.com/bazelbuild/examples/tree/HEAD/configurations/select_on_build_setting) -for a working example. - -[`--define`](/reference/command-line-reference#flag--define) -is an alternative legacy syntax for custom flags (for example -`--define foo=bar`). This can be expressed either in the -[values](/reference/be/general#config_setting.values) attribute -(`values = {"define": "foo=bar"}`) or the -[define_values](/reference/be/general#config_setting.define_values) attribute -(`define_values = {"foo": "bar"}`). `--define` is only supported for backwards -compatibility. Prefer Starlark build settings whenever possible. - -`values`, `flag_values`, and `define_values` evaluate independently. The -`config_setting` matches if all values across all of them match. - -## The default condition - -The built-in condition `//conditions:default` matches when no other condition -matches. - -Because of the "exactly one match" rule, a configurable attribute with no match -and no default condition emits a `"no matching conditions"` error. This can -protect against silent failures from unexpected settings: - -```python -# myapp/BUILD - -config_setting( - name = "x86_cpu", - values = {"cpu": "x86"}, -) - -cc_library( - name = "x86_only_lib", - srcs = select({ - ":x86_cpu": ["lib.cc"], - }), -) -``` - -```sh -$ bazel build //myapp:x86_only_lib --cpu=arm -ERROR: Configurable attribute "srcs" doesn't match this configuration (would -a default condition help?). -Conditions checked: - //myapp:x86_cpu -``` - -For even clearer errors, you can set custom messages with `select()`'s -[`no_match_error`](#custom-error-messages) attribute. 
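-
-Alternatively, if failing is not the desired behavior, a default branch can be
-added to the example above. The sketch below falls back to an empty `srcs` list
-on non-x86 CPUs (an empty list is just one reasonable choice of default):
-
-```python
-cc_library(
-    name = "x86_only_lib",
-    srcs = select({
-        ":x86_cpu": ["lib.cc"],
-        # Matches when no other condition does, so non-x86 builds
-        # get an empty srcs list instead of a "no matching conditions" error.
-        "//conditions:default": [],
-    }),
-)
-```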
-
-## Platforms
-
-While the ability to specify multiple flags on the command line provides
-flexibility, it can also be burdensome to individually set each one every time
-you want to build a target. [Platforms](/extending/platforms)
-let you consolidate these into simple bundles.
-
-```python
-# myapp/BUILD
-
-sh_binary(
-    name = "my_rocks",
-    srcs = select({
-        ":basalt": ["pyroxene.sh"],
-        ":marble": ["calcite.sh"],
-        "//conditions:default": ["feldspar.sh"],
-    }),
-)
-
-config_setting(
-    name = "basalt",
-    constraint_values = [
-        ":black",
-        ":igneous",
-    ],
-)
-
-config_setting(
-    name = "marble",
-    constraint_values = [
-        ":white",
-        ":metamorphic",
-    ],
-)
-
-# constraint_setting acts as an enum type, and constraint_value as an enum value.
-constraint_setting(name = "color")
-constraint_value(name = "black", constraint_setting = "color")
-constraint_value(name = "white", constraint_setting = "color")
-constraint_setting(name = "texture")
-constraint_value(name = "smooth", constraint_setting = "texture")
-constraint_setting(name = "type")
-constraint_value(name = "igneous", constraint_setting = "type")
-constraint_value(name = "metamorphic", constraint_setting = "type")
-
-platform(
-    name = "basalt_platform",
-    constraint_values = [
-        ":black",
-        ":igneous",
-    ],
-)
-
-platform(
-    name = "marble_platform",
-    constraint_values = [
-        ":white",
-        ":smooth",
-        ":metamorphic",
-    ],
-)
-```
-
-The platform can be specified on the command line. It activates the
-`config_setting`s that contain a subset of the platform's `constraint_values`,
-allowing those `config_setting`s to match in `select()` expressions.
-
-For example, in order to set the `srcs` attribute of `my_rocks` to `calcite.sh`,
-you can simply run
-
-```sh
-bazel build //myapp:my_rocks --platforms=//myapp:marble_platform
-```
-
-Without platforms, this might look something like
-
-```sh
-bazel build //myapp:my_rocks --define color=white --define texture=smooth --define type=metamorphic
-```
-
-`select()` can also directly read `constraint_value`s:
-
-```python
-constraint_setting(name = "type")
-constraint_value(name = "igneous", constraint_setting = "type")
-constraint_value(name = "metamorphic", constraint_setting = "type")
-sh_binary(
-    name = "my_rocks",
-    srcs = select({
-        ":igneous": ["igneous.sh"],
-        ":metamorphic": ["metamorphic.sh"],
-    }),
-)
-```
-
-This saves the need for boilerplate `config_setting`s when you only need to
-check against single values.
-
-Platforms are still under development. See the
-[documentation](/concepts/platforms) for details.
-
-## Combining `select()`s
-
-`select` can appear multiple times in the same attribute:
-
-```python
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"] +
-           select({
-               ":armeabi_mode": ["armeabi_src.sh"],
-               ":x86_mode": ["x86_src.sh"],
-           }) +
-           select({
-               ":opt_mode": ["opt_extras.sh"],
-               ":dbg_mode": ["dbg_extras.sh"],
-           }),
-)
-```
-
-Note: Some restrictions apply to what can be combined in the `select`s' values:
- - Duplicate labels can appear in different paths of the same `select`.
- - Duplicate labels can *not* appear within the same path of a `select`.
- - Duplicate labels can *not* appear across multiple combined `select`s (no matter what path).
-
-`select` cannot appear inside another `select`. If you need to nest `selects`
-and your attribute takes other targets as values, use an intermediate target:
-
-```python
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"],
-    deps = select({
-        ":armeabi_mode": [":armeabi_lib"],
-        ...
- }), -) - -sh_library( - name = "armeabi_lib", - srcs = select({ - ":opt_mode": ["armeabi_with_opt.sh"], - ... - }), -) -``` - -If you need a `select` to match when multiple conditions match, consider [AND -chaining](#and-chaining). - -## OR chaining - -Consider the following: - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1": [":standard_lib"], - ":config2": [":standard_lib"], - ":config3": [":standard_lib"], - ":config4": [":special_lib"], - }), -) -``` - -Most conditions evaluate to the same dep. But this syntax is hard to read and -maintain. It would be nice to not have to repeat `[":standard_lib"]` multiple -times. - -One option is to predefine the value as a BUILD variable: - -```python -STANDARD_DEP = [":standard_lib"] - -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1": STANDARD_DEP, - ":config2": STANDARD_DEP, - ":config3": STANDARD_DEP, - ":config4": [":special_lib"], - }), -) -``` - -This makes it easier to manage the dependency. But it still causes unnecessary -duplication. - -For more direct support, use one of the following: - -### `selects.with_or` - -The -[with_or](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectswith_or) -macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md) -module supports `OR`ing conditions directly inside a `select`: - -```python -load("@bazel_skylib//lib:selects.bzl", "selects") -``` - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = selects.with_or({ - (":config1", ":config2", ":config3"): [":standard_lib"], - ":config4": [":special_lib"], - }), -) -``` - -### `selects.config_setting_group` - - -The -[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group) -macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md) -module supports `OR`ing multiple `config_setting`s: - -```python -load("@bazel_skylib//lib:selects.bzl", "selects") -``` - - -```python -config_setting( - name = "config1", - values = {"cpu": "arm"}, -) -config_setting( - name = "config2", - values = {"compilation_mode": "dbg"}, -) -selects.config_setting_group( - name = "config1_or_2", - match_any = [":config1", ":config2"], -) -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1_or_2": [":standard_lib"], - "//conditions:default": [":other_lib"], - }), -) -``` - -Unlike `selects.with_or`, different targets can share `:config1_or_2` across -different attributes. - -It's an error for multiple conditions to match unless one is an unambiguous -"specialization" of the others or they all resolve to the same value. See [here](#configurable-build-example) for details. 
-
-## AND chaining
-
-If you need a `select` branch to match when multiple conditions match, use the
-[Skylib](https://github.com/bazelbuild/bazel-skylib) macro
-[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group):

-```python
-config_setting(
-    name = "config1",
-    values = {"cpu": "arm"},
-)
-config_setting(
-    name = "config2",
-    values = {"compilation_mode": "dbg"},
-)
-selects.config_setting_group(
-    name = "config1_and_2",
-    match_all = [":config1", ":config2"],
-)
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"],
-    deps = select({
-        ":config1_and_2": [":standard_lib"],
-        "//conditions:default": [":other_lib"],
-    }),
-)
-```
-
-Unlike OR chaining, existing `config_setting`s can't be directly `AND`ed
-inside a `select`. You have to explicitly wrap them in a `config_setting_group`.
-
-## Custom error messages
-
-By default, when no condition matches, the target the `select()` is attached to
-fails with the error:
-
-```sh
-ERROR: Configurable attribute "deps" doesn't match this configuration (would
-a default condition help?).
-Conditions checked:
- //tools/cc_target_os:darwin
- //tools/cc_target_os:android
-```
-
-This can be customized with the [`no_match_error`](/reference/be/functions#select)
-attribute:
-
-```python
-cc_library(
-    name = "my_lib",
-    deps = select(
-        {
-            "//tools/cc_target_os:android": [":android_deps"],
-            "//tools/cc_target_os:windows": [":windows_deps"],
-        },
-        no_match_error = "Please build with an Android or Windows toolchain",
-    ),
-)
-```
-
-```sh
-$ bazel build //myapp:my_lib
-ERROR: Configurable attribute "deps" doesn't match this configuration: Please
-build with an Android or Windows toolchain
-```
-
-## Rules compatibility
-
-Rule implementations receive the *resolved values* of configurable
-attributes. For example, given:
-
-```python
-# myapp/BUILD
-
-some_rule(
-    name = "my_target",
-    some_attr = select({
-        ":foo_mode": [":foo"],
-        ":bar_mode": [":bar"],
-    }),
-)
-```
-
-```sh
-$ bazel build //myapp:my_target --define mode=foo
-```
-
-Rule implementation code sees `ctx.attr.some_attr` as `[":foo"]`.
-
-Macros can accept `select()` clauses and pass them through to native
-rules. But *they cannot directly manipulate them*. For example, there's no way
-for a macro to convert
-
-```python
-select({"foo": "val"}, ...)
-```
-
-to
-
-```python
-select({"foo": "val_with_suffix"}, ...)
-```
-
-This is for two reasons.
-
-First, macros that need to know which path a `select` will choose *cannot work*
-because macros are evaluated in Bazel's [loading phase](/run/build#loading),
-which occurs before flag values are known.
-This is a core Bazel design restriction that's unlikely to change any time soon.
-
-Second, macros that just need to iterate over *all* `select` paths, while
-technically feasible, lack a coherent UI. Further design is necessary to change
-this.
-
-## Bazel query and cquery
-
-Bazel [`query`](/query/guide) operates over Bazel's
-[loading phase](/reference/glossary#loading-phase).
-This means it doesn't know what command line flags a target uses since those
-flags aren't evaluated until later in the build (in the
-[analysis phase](/reference/glossary#analysis-phase)).
-So it can't determine which `select()` branches are chosen.
-
-Bazel [`cquery`](/query/cquery) operates after Bazel's analysis phase, so it has
-all this information and can accurately resolve `select()`s.
- -Consider: - -```python -load("@bazel_skylib//rules:common_settings.bzl", "string_flag") -``` -```python -# myapp/BUILD - -string_flag( - name = "dog_type", - build_setting_default = "cat" -) - -cc_library( - name = "my_lib", - deps = select({ - ":long": [":foo_dep"], - ":short": [":bar_dep"], - }), -) - -config_setting( - name = "long", - flag_values = {":dog_type": "dachshund"}, -) - -config_setting( - name = "short", - flag_values = {":dog_type": "pug"}, -) -``` - -`query` overapproximates `:my_lib`'s dependencies: - -```sh -$ bazel query 'deps(//myapp:my_lib)' -//myapp:my_lib -//myapp:foo_dep -//myapp:bar_dep -``` - -while `cquery` shows its exact dependencies: - -```sh -$ bazel cquery 'deps(//myapp:my_lib)' --//myapp:dog_type=pug -//myapp:my_lib -//myapp:bar_dep -``` - -## FAQ - -### Why doesn't select() work in macros? - -select() *does* work in rules! See [Rules compatibility](#rules-compatibility) for -details. - -The key issue this question usually means is that select() doesn't work in -*macros*. These are different than *rules*. See the -documentation on [rules](/extending/rules) and [macros](/extending/macros) -to understand the difference. -Here's an end-to-end example: - -Define a rule and macro: - -```python -# myapp/defs.bzl - -# Rule implementation: when an attribute is read, all select()s have already -# been resolved. So it looks like a plain old attribute just like any other. -def _impl(ctx): - name = ctx.attr.name - allcaps = ctx.attr.my_config_string.upper() # This works fine on all values. - print("My name is " + name + " with custom message: " + allcaps) - -# Rule declaration: -my_custom_bazel_rule = rule( - implementation = _impl, - attrs = {"my_config_string": attr.string()}, -) - -# Macro declaration: -def my_custom_bazel_macro(name, my_config_string): - allcaps = my_config_string.upper() # This line won't work with select(s). - print("My name is " + name + " with custom message: " + allcaps) -``` - -Instantiate the rule and macro: - -```python -# myapp/BUILD - -load("//myapp:defs.bzl", "my_custom_bazel_rule") -load("//myapp:defs.bzl", "my_custom_bazel_macro") - -my_custom_bazel_rule( - name = "happy_rule", - my_config_string = select({ - "//third_party/bazel_platforms/cpu:x86_32": "first string", - "//third_party/bazel_platforms/cpu:ppc": "second string", - }), -) - -my_custom_bazel_macro( - name = "happy_macro", - my_config_string = "fixed string", -) - -my_custom_bazel_macro( - name = "sad_macro", - my_config_string = select({ - "//third_party/bazel_platforms/cpu:x86_32": "first string", - "//third_party/bazel_platforms/cpu:ppc": "other string", - }), -) -``` - -Building fails because `sad_macro` can't process the `select()`: - -```sh -$ bazel build //myapp:all -ERROR: /myworkspace/myapp/BUILD:17:1: Traceback - (most recent call last): -File "/myworkspace/myapp/BUILD", line 17 -my_custom_bazel_macro(name = "sad_macro", my_config_stri..."})) -File "/myworkspace/myapp/defs.bzl", line 4, in - my_custom_bazel_macro -my_config_string.upper() -type 'select' has no method upper(). -ERROR: error loading package 'myapp': Package 'myapp' contains errors. -``` - -Building succeeds when you comment out `sad_macro`: - -```sh -# Comment out sad_macro so it doesn't mess up the build. -$ bazel build //myapp:all -DEBUG: /myworkspace/myapp/defs.bzl:5:3: My name is happy_macro with custom message: FIXED STRING. -DEBUG: /myworkspace/myapp/hi.bzl:15:3: My name is happy_rule with custom message: FIRST STRING. 
-``` - -This is impossible to change because *by definition* macros are evaluated before -Bazel reads the build's command line flags. That means there isn't enough -information to evaluate select()s. - -Macros can, however, pass `select()`s as opaque blobs to rules: - -```python -# myapp/defs.bzl - -def my_custom_bazel_macro(name, my_config_string): - print("Invoking macro " + name) - my_custom_bazel_rule( - name = name + "_as_target", - my_config_string = my_config_string, - ) -``` - -```sh -$ bazel build //myapp:sad_macro_less_sad -DEBUG: /myworkspace/myapp/defs.bzl:23:3: Invoking macro sad_macro_less_sad. -DEBUG: /myworkspace/myapp/defs.bzl:15:3: My name is sad_macro_less_sad with custom message: FIRST STRING. -``` - -### Why does select() always return true? - -Because *macros* (but not rules) by definition -[can't evaluate `select()`s](#faq-select-macro), any attempt to do so -usually produces an error: - -```sh -ERROR: /myworkspace/myapp/BUILD:17:1: Traceback - (most recent call last): -File "/myworkspace/myapp/BUILD", line 17 -my_custom_bazel_macro(name = "sad_macro", my_config_stri..."})) -File "/myworkspace/myapp/defs.bzl", line 4, in - my_custom_bazel_macro -my_config_string.upper() -type 'select' has no method upper(). -``` - -Booleans are a special case that fail silently, so you should be particularly -vigilant with them: - -```sh -$ cat myapp/defs.bzl -def my_boolean_macro(boolval): - print("TRUE" if boolval else "FALSE") - -$ cat myapp/BUILD -load("//myapp:defs.bzl", "my_boolean_macro") -my_boolean_macro( - boolval = select({ - "//third_party/bazel_platforms/cpu:x86_32": True, - "//third_party/bazel_platforms/cpu:ppc": False, - }), -) - -$ bazel build //myapp:all --cpu=x86 -DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE. -$ bazel build //mypro:all --cpu=ppc -DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE. -``` - -This happens because macros don't understand the contents of `select()`. -So what they're really evaluting is the `select()` object itself. According to -[Pythonic](https://docs.python.org/release/2.5.2/lib/truth.html) design -standards, all objects aside from a very small number of exceptions -automatically return true. - -### Can I read select() like a dict? - -Macros [can't](#faq-select-macro) evaluate select(s) because macros evaluate before -Bazel knows what the build's command line parameters are. Can they at least read -the `select()`'s dictionary to, for example, add a suffix to each value? - -Conceptually this is possible, but [it isn't yet a Bazel feature](https://github.com/bazelbuild/bazel/issues/8419). 
-What you *can* do today is prepare a straight dictionary, then feed it into a
-`select()`:
-
-```sh
-$ cat myapp/defs.bzl
-def selecty_genrule(name, select_cmd):
-  for key in select_cmd.keys():
-    select_cmd[key] += " WITH SUFFIX"
-  native.genrule(
-      name = name,
-      outs = [name + ".out"],
-      srcs = [],
-      # Starlark dicts are merged with "|" (dict union).
-      cmd = "echo " + select(select_cmd | {"//conditions:default": "default"})
-        + " > $@"
-  )

-$ cat myapp/BUILD
-selecty_genrule(
-    name = "selecty",
-    select_cmd = {
-        "//third_party/bazel_platforms/cpu:x86_32": "x86 mode",
-    },
-)
-
-$ bazel build //myapp:selecty --cpu=x86 && cat bazel-genfiles/myapp/selecty.out
-x86 mode WITH SUFFIX
-```
-
-If you'd like to support both `select()` and native types, you can do this:
-
-```sh
-$ cat myapp/defs.bzl
-def selecty_genrule(name, select_cmd):
-    cmd_suffix = ""
-    if type(select_cmd) == "string":
-        cmd_suffix = select_cmd + " WITH SUFFIX"
-    elif type(select_cmd) == "dict":
-        for key in select_cmd.keys():
-            select_cmd[key] += " WITH SUFFIX"
-        cmd_suffix = select(select_cmd | {"//conditions:default": "default"})

-    native.genrule(
-        name = name,
-        outs = [name + ".out"],
-        srcs = [],
-        cmd = "echo " + cmd_suffix + " > $@",
-    )
-```
-
-### Why doesn't select() work with bind()?
-
-First of all, do not use `bind()`. It is deprecated in favor of `alias()`.
-
-The technical answer is that [`bind()`](/reference/be/workspace#bind) is a repo
-rule, not a BUILD rule.
-
-Repo rules do not have a specific configuration, and aren't evaluated in
-the same way as BUILD rules. Therefore, a `select()` in a `bind()` can't
-actually evaluate to any specific branch.
-
-Instead, you should use [`alias()`](/reference/be/general#alias), with a `select()` in
-the `actual` attribute, to perform this type of run-time determination. This
-works correctly, since `alias()` is a BUILD rule, and is evaluated with a
-specific configuration.

-You can even have a `bind()` target point to an `alias()`, if needed.
-
-```sh
-$ cat WORKSPACE
-workspace(name = "myapp")
-bind(name = "openssl", actual = "//:ssl")
-http_archive(name = "alternative", ...)
-http_archive(name = "boringssl", ...)
-
-$ cat BUILD
-config_setting(
-    name = "alt_ssl",
-    define_values = {
-        "ssl_library": "alternative",
-    },
-)
-
-alias(
-    name = "ssl",
-    actual = select({
-        "//:alt_ssl": "@alternative//:ssl",
-        "//conditions:default": "@boringssl//:ssl",
-    }),
-)
-```
-
-With this setup, you can pass `--define ssl_library=alternative`, and any target
-that depends on either `//:ssl` or `//external:ssl` will see the alternative
-located at `@alternative//:ssl`.
-
-But really, stop using `bind()`.
-
-### Why doesn't my select() choose what I expect?
-
-If `//myapp:foo` has a `select()` that doesn't choose the condition you expect,
-use [cquery](/query/cquery) and `bazel config` to debug:
-
-If `//myapp:foo` is the top-level target you're building, run:
-
-```sh
-$ bazel cquery //myapp:foo
-//myapp:foo (12e23b9a2b534a)
-```
-
-If you're building some other target `//bar` that depends on
-`//myapp:foo` somewhere in its subgraph, run:
-
-```sh
-$ bazel cquery 'somepath(//bar, //myapp:foo)'
-//bar:bar (3ag3193fee94a2)
-//bar:intermediate_dep (12e23b9a2b534a)
-//myapp:foo (12e23b9a2b534a)
-```
-
-The `(12e23b9a2b534a)` next to `//myapp:foo` is a *hash* of the
-configuration that resolves `//myapp:foo`'s `select()`.
You can inspect its -values with `bazel config`: - -```sh -$ bazel config 12e23b9a2b534a -BuildConfigurationValue 12e23b9a2b534a -Fragment com.google.devtools.build.lib.analysis.config.CoreOptions { - cpu: darwin - compilation_mode: fastbuild - ... -} -Fragment com.google.devtools.build.lib.rules.cpp.CppOptions { - linkopt: [-Dfoo=bar] - ... -} -... -``` - -Then compare this output against the settings expected by each `config_setting`. - -`//myapp:foo` may exist in different configurations in the same build. See the -[cquery docs](/query/cquery) for guidance on using `somepath` to get the right -one. - -Caution: To prevent restarting the Bazel server, invoke `bazel config` with the -same command line flags as the `bazel cquery`. The `config` command relies on -the configuration nodes from the still-running server of the previous command. - -### Why doesn't `select()` work with platforms? - -Bazel doesn't support configurable attributes checking whether a given platform -is the target platform because the semantics are unclear. - -For example: - -```py -platform( - name = "x86_linux_platform", - constraint_values = [ - "@platforms//cpu:x86", - "@platforms//os:linux", - ], -) - -cc_library( - name = "lib", - srcs = [...], - linkopts = select({ - ":x86_linux_platform": ["--enable_x86_optimizations"], - "//conditions:default": [], - }), -) -``` - -In this `BUILD` file, which `select()` should be used if the target platform has both the -`@platforms//cpu:x86` and `@platforms//os:linux` constraints, but is **not** the -`:x86_linux_platform` defined here? The author of the `BUILD` file and the user -who defined the separate platform may have different ideas. - -#### What should I do instead? - -Instead, define a `config_setting` that matches **any** platform with -these constraints: - -```py -config_setting( - name = "is_x86_linux", - constraint_values = [ - "@platforms//cpu:x86", - "@platforms//os:linux", - ], -) - -cc_library( - name = "lib", - srcs = [...], - linkopts = select({ - ":is_x86_linux": ["--enable_x86_optimizations"], - "//conditions:default": [], - }), -) -``` - -This process defines specific semantics, making it clearer to users what -platforms meet the desired conditions. - -#### What if I really, really want to `select` on the platform? - -If your build requirements specifically require checking the platform, you -can flip the value of the `--platforms` flag in a `config_setting`: - -```py -config_setting( - name = "is_specific_x86_linux_platform", - values = { - "platforms": ["//package:x86_linux_platform"], - }, -) - -cc_library( - name = "lib", - srcs = [...], - linkopts = select({ - ":is_specific_x86_linux_platform": ["--enable_x86_optimizations"], - "//conditions:default": [], - }), -) -``` - -The Bazel team doesn't endorse doing this; it overly constrains your build and -confuses users when the expected condition does not match. - -[BuildSettings]: /extending/config#user-defined-build-settings diff --git a/8.3.1/docs/sandboxing.mdx b/8.3.1/docs/sandboxing.mdx deleted file mode 100644 index 6869795..0000000 --- a/8.3.1/docs/sandboxing.mdx +++ /dev/null @@ -1,174 +0,0 @@ ---- -title: 'Sandboxing' ---- - - - -This article covers sandboxing in Bazel and debugging your sandboxing -environment. - -*Sandboxing* is a permission restricting strategy that isolates processes from -each other or from resources in a system. For Bazel, this means restricting file -system access. 
- -Bazel's file system sandbox runs processes in a working directory that only -contains known inputs, such that compilers and other tools don't see source -files they should not access, unless they know the absolute paths to them. - -Sandboxing doesn't hide the host environment in any way. Processes can freely -access all files on the file system. However, on platforms that support user -namespaces, processes can't modify any files outside their working directory. -This ensures that the build graph doesn't have hidden dependencies that could -affect the reproducibility of the build. - -More specifically, Bazel constructs an `execroot/` directory for each action, -which acts as the action's work directory at execution time. `execroot/` -contains all input files to the action and serves as the container for any -generated outputs. Bazel then uses an operating-system-provided technique, -containers on Linux and `sandbox-exec` on macOS, to constrain the action within -`execroot/`. - -## Reasons for sandboxing - -- Without action sandboxing, Bazel doesn't know if a tool uses undeclared - input files (files that are not explicitly listed in the dependencies of an - action). When one of the undeclared input files changes, Bazel still - believes that the build is up-to-date and won’t rebuild the action. This can - result in an incorrect incremental build. - -- Incorrect reuse of cache entries creates problems during remote caching. A - bad cache entry in a shared cache affects every developer on the project, - and wiping the entire remote cache is not a feasible solution. - -- Sandboxing mimics the behavior of remote execution — if a build works well - with sandboxing, it will likely also work with remote execution. By making - remote execution upload all necessary files (including local tools), you can - significantly reduce maintenance costs for compile clusters compared to - having to install the tools on every machine in the cluster every time you - want to try out a new compiler or make a change to an existing tool. - -## What sandbox strategy to use - -You can choose which kind of sandboxing to use, if any, with the -[strategy flags](user-manual.html#strategy-options). Using the `sandboxed` -strategy makes Bazel pick one of the sandbox implementations listed below, -preferring an OS-specific sandbox to the less hermetic generic one. -[Persistent workers](/remote/persistent) run in a generic sandbox if you pass -the `--worker_sandboxing` flag. - -The `local` (a.k.a. `standalone`) strategy does not do any kind of sandboxing. -It simply executes the action's command line with the working directory set to -the execroot of your workspace. - -`processwrapper-sandbox` is a sandboxing strategy that does not require any -"advanced" features - it should work on any POSIX system out of the box. It -builds a sandbox directory consisting of symlinks that point to the original -source files, executes the action's command line with the working directory set -to this directory instead of the execroot, then moves the known output artifacts -out of the sandbox into the execroot and deletes the sandbox. This prevents the -action from accidentally using any input files that are not declared and from -littering the execroot with unknown output files. - -`linux-sandbox` goes one step further and builds on top of the -`processwrapper-sandbox`. Similar to what Docker does under the hood, it uses -Linux Namespaces (User, Mount, PID, Network and IPC namespaces) to isolate the -action from the host. 
That is, it makes the entire filesystem read-only except
-for the sandbox directory, so the action cannot accidentally modify anything on
-the host filesystem. This prevents situations like a buggy test accidentally
-`rm -rf`'ing your `$HOME` directory. Optionally, you can also prevent the action from
-accessing the network. `linux-sandbox` uses PID namespaces to prevent the action
-from seeing any other processes and to reliably kill all processes (even daemons
-spawned by the action) at the end.
-
-`darwin-sandbox` is similar, but for macOS. It uses Apple's `sandbox-exec` tool
-to achieve roughly the same as the Linux sandbox.
-
-Both the `linux-sandbox` and the `darwin-sandbox` do not work in a "nested"
-scenario due to restrictions in the mechanisms provided by the operating
-systems. Because Docker also uses Linux namespaces for its container magic, you
-cannot easily run `linux-sandbox` inside a Docker container, unless you use
-`docker run --privileged`. On macOS, you cannot run `sandbox-exec` inside a
-process that's already being sandboxed. Thus, in these cases, Bazel
-automatically falls back to using `processwrapper-sandbox`.
-
-If you would rather get a build error, such as to not accidentally build with a
-less strict execution strategy, explicitly modify the list of execution
-strategies that Bazel tries to use (for example, `bazel build
---spawn_strategy=worker,linux-sandbox`).
-
-Dynamic execution usually requires sandboxing for local execution. To opt out,
-pass the `--experimental_local_lockfree_output` flag. Dynamic execution silently
-sandboxes [persistent workers](/remote/persistent).
-
-## Downsides to sandboxing
-
-- Sandboxing incurs extra setup and teardown cost. How big this cost is
-  depends on many factors, including the shape of the build and the
-  performance of the host OS. For Linux, sandboxed builds are rarely more than
-  a few percent slower. Setting `--reuse_sandbox_directories` can
-  mitigate the setup and teardown cost.
-
-- Sandboxing effectively disables any cache the tool may have. You can
-  mitigate this by using [persistent workers](/remote/persistent), at
-  the cost of weaker sandbox guarantees.
-
-- [Multiplex workers](/remote/multiplex) require explicit worker support
-  to be sandboxed. Workers that do not support multiplex sandboxing run as
-  singleplex workers under dynamic execution, which can cost extra memory.
-
-## Debugging
-
-Follow the strategies below to debug issues with sandboxing.
-
-### Deactivated namespaces
-
-On some platforms, such as
-[Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/)
-cluster nodes or Debian, user namespaces are deactivated by default due to
-security concerns. If the `/proc/sys/kernel/unprivileged_userns_clone` file
-exists and contains a 0, you can activate user namespaces by running:
-
-```posix-terminal
-sudo sysctl kernel.unprivileged_userns_clone=1
-```
-
-### Rule execution failures
-
-The sandbox may fail to execute rules because of the system setup. If you see a
-message like `namespace-sandbox.c:633: execvp(argv[0], argv): No such file or
-directory`, try to deactivate the sandbox with `--strategy=Genrule=local` for
-genrules, and `--spawn_strategy=local` for other rules.
-
-### Detailed debugging for build failures
-
-If your build failed, use `--verbose_failures` and `--sandbox_debug` to make
-Bazel show the exact command it ran when your build failed, including the part
-that sets up the sandbox.
-
-Example error message:
-
-```
-ERROR: path/to/your/project/BUILD:1:1: compilation of rule
-'//path/to/your/project:all' failed:
-
-Sandboxed execution failed, which may be legitimate (such as a compiler error),
-or due to missing dependencies. To enter the sandbox environment for easier
-debugging, run the following command in parentheses. On command failure, a bash
-shell running inside the sandbox will then automatically be spawned
-
-namespace-sandbox failed: error executing command
-  (cd /some/path && \
-  exec env - \
-    LANG=en_US \
-    PATH=/some/path/bin:/bin:/usr/bin \
-    PYTHONPATH=/usr/local/some/path \
-  /some/path/namespace-sandbox @/sandbox/root/path/this-sandbox-name.params --
-  /some/path/to/your/some-compiler --some-params some-target)
-```
-
-You can now inspect the generated sandbox directory, see which files Bazel
-created, and run the command again to see how it behaves.
-
-Note that Bazel does not delete the sandbox directory when you use
-`--sandbox_debug`. Unless you are actively debugging, you should disable
-`--sandbox_debug` because it fills up your disk over time.
diff --git a/8.3.1/extending/aspects.mdx b/8.3.1/extending/aspects.mdx
deleted file mode 100644
index 4e25125..0000000
--- a/8.3.1/extending/aspects.mdx
+++ /dev/null
@@ -1,412 +0,0 @@
----
-title: 'Aspects'
----
-
-
-
-This page explains the basics and benefits of using
-[aspects](/rules/lib/globals/bzl#aspect) and provides simple and advanced
-examples.
-
-Aspects allow augmenting build dependency graphs with additional information
-and actions. Some typical scenarios when aspects can be useful:
-
-* IDEs that integrate Bazel can use aspects to collect information about the
-  project.
-* Code generation tools can leverage aspects to execute on their inputs in a
-  *target-agnostic* manner. As an example, `BUILD` files can specify a
-  hierarchy of [protobuf](https://developers.google.com/protocol-buffers/)
-  library definitions, and language-specific rules can use aspects to attach
-  actions generating protobuf support code for a particular language.
-
-## Aspect basics
-
-`BUILD` files provide a description of a project's source code: what source
-files are part of the project, what artifacts (_targets_) should be built
-from those files, what the dependencies between those files are, etc. Bazel
-uses this information to perform a build, that is, it figures out the set of
-actions needed to produce the artifacts (such as running a compiler or
-linker) and executes those actions. Bazel accomplishes this by constructing a
-_dependency graph_ between targets and visiting this graph to collect those
-actions.
-
-Consider the following `BUILD` file:
-
-```python
-java_library(name = 'W', ...)
-java_library(name = 'Y', deps = [':W'], ...)
-java_library(name = 'Z', deps = [':W'], ...)
-java_library(name = 'Q', ...)
-java_library(name = 'T', deps = [':Q'], ...)
-java_library(name = 'X', deps = [':Y',':Z'], runtime_deps = [':T'], ...)
-```
-
-This `BUILD` file defines a dependency graph shown in the following figure:
-
-![Build graph](/rules/build-graph.png "Build graph")
-
-**Figure 1.** `BUILD` file dependency graph.
-
-Bazel analyzes this dependency graph by calling an implementation function of
-the corresponding [rule](/extending/rules) (in this case "java_library") for
-every target in the above example.
-Rule implementation functions generate actions that build artifacts, such as
-`.jar` files, and pass information, such as locations and names of those
-artifacts, to the reverse dependencies of those targets in
-[providers](/extending/rules#providers).
-
-Aspects are similar to rules in that they have an implementation function
-that generates actions and returns providers. However, their power comes
-from the way the dependency graph is built for them. An aspect has an
-implementation and a list of all attributes it propagates along. Consider an
-aspect A that propagates along attributes named "deps". This aspect can be
-applied to a target X, yielding an aspect application node A(X). During its
-application, aspect A is applied recursively to all targets that X refers to
-in its "deps" attribute (all attributes in A's propagation list).
-
-Thus a single act of applying aspect A to a target X yields a "shadow graph"
-of the original dependency graph of targets shown in the following figure:
-
-![Build Graph with Aspect](/rules/build-graph-aspects.png "Build graph with aspects")
-
-**Figure 2.** Build graph with aspects.
-
-The only edges that are shadowed are the edges along the attributes in
-the propagation set; thus the `runtime_deps` edge is not shadowed in this
-example. An aspect implementation function is then invoked on all nodes in
-the shadow graph, similar to how rule implementations are invoked on the
-nodes of the original graph.
-
-## Simple example
-
-This example demonstrates how to recursively print the source files for a
-rule and all of its dependencies that have a `deps` attribute. It shows
-an aspect implementation, an aspect definition, and how to invoke the aspect
-from the Bazel command line.
-
-```python
-def _print_aspect_impl(target, ctx):
-    # Make sure the rule has a srcs attribute.
-    if hasattr(ctx.rule.attr, 'srcs'):
-        # Iterate through the files that make up the sources and
-        # print their paths.
-        for src in ctx.rule.attr.srcs:
-            for f in src.files.to_list():
-                print(f.path)
-    return []
-
-print_aspect = aspect(
-    implementation = _print_aspect_impl,
-    attr_aspects = ['deps'],
-)
-```
-
-Let's break the example up into its parts and examine each one individually.
-
-### Aspect definition
-
-```python
-print_aspect = aspect(
-    implementation = _print_aspect_impl,
-    attr_aspects = ['deps'],
-)
-```
-Aspect definitions are similar to rule definitions, and are defined using
-the [`aspect`](/rules/lib/globals/bzl#aspect) function.
-
-Just like a rule, an aspect has an implementation function, which in this
-case is ``_print_aspect_impl``.
-
-``attr_aspects`` is a list of rule attributes along which the aspect
-propagates. In this case, the aspect will propagate along the ``deps``
-attribute of the rules that it is applied to.
-
-Another common argument for `attr_aspects` is `['*']`, which would propagate
-the aspect to all attributes of a rule.
-
-### Aspect implementation
-
-```python
-def _print_aspect_impl(target, ctx):
-    # Make sure the rule has a srcs attribute.
-    if hasattr(ctx.rule.attr, 'srcs'):
-        # Iterate through the files that make up the sources and
-        # print their paths.
-        for src in ctx.rule.attr.srcs:
-            for f in src.files.to_list():
-                print(f.path)
-    return []
-```
-
-Aspect implementation functions are similar to rule implementation
-functions. They return [providers](/extending/rules#providers), can generate
-[actions](/extending/rules#actions), and take two arguments:
-
-* `target`: the [target](/rules/lib/builtins/Target) the aspect is being
-  applied to.
-* `ctx`: a [`ctx`](/rules/lib/builtins/ctx) object that can be used to
-  access attributes and generate outputs and actions.
-
-The implementation function can access the attributes of the target rule via
-[`ctx.rule.attr`](/rules/lib/builtins/ctx#rule). It can examine providers
-that are provided by the target to which it is applied (via the `target`
-argument).
-
-Aspects are required to return a list of providers. In this example, the
-aspect does not provide anything, so it returns an empty list.
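-
-Beyond reading attributes, an aspect implementation can also check which
-providers the visited target offers through the `target` argument. The
-following is a hedged sketch of an aspect that only reports targets
-advertising the built-in `CcInfo` provider (the aspect and function names are
-illustrative, not part of the example above):
-
-```python
-def _report_cc_aspect_impl(target, ctx):
-    # `target` exposes the providers returned by the rule it is applied to.
-    if CcInfo in target:
-        print("C++ providers found on", target.label)
-    return []
-
-report_cc_aspect = aspect(
-    implementation = _report_cc_aspect_impl,
-    attr_aspects = ['deps'],
-)
-```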
-
-### Invoking the aspect using the command line
-
-The simplest way to apply an aspect is from the command line using the
-[`--aspects`](/reference/command-line-reference#flag--aspects)
-argument. Assuming the aspect above were defined in a file named
-`print.bzl`, this:
-
-```bash
-bazel build //MyExample:example --aspects print.bzl%print_aspect
-```
-
-would apply the `print_aspect` to the target `example` and all of the
-target rules that are accessible recursively via the `deps` attribute.
-
-The `--aspects` flag takes one argument, which is a specification of the
-aspect in the format `<extension file label>%<aspect top-level name>`.
-
-## Advanced example
-
-The following example demonstrates using an aspect from a target rule
-that counts files in targets, potentially filtering them by extension.
-It shows how to use a provider to return values, how to use parameters to
-pass an argument into an aspect implementation, and how to invoke an aspect
-from a rule.
-
-Note: Aspects added in rules' attributes are called *rule-propagated aspects*
-as opposed to *command-line aspects* that are specified using the
-``--aspects`` flag.
-
-`file_count.bzl` file:
-
-```python
-FileCountInfo = provider(
-    fields = {
-        'count' : 'number of files'
-    }
-)
-
-def _file_count_aspect_impl(target, ctx):
-    count = 0
-    # Make sure the rule has a srcs attribute.
-    if hasattr(ctx.rule.attr, 'srcs'):
-        # Iterate through the sources counting files.
-        for src in ctx.rule.attr.srcs:
-            for f in src.files.to_list():
-                if ctx.attr.extension == '*' or ctx.attr.extension == f.extension:
-                    count = count + 1
-    # Get the counts from our dependencies.
-    for dep in ctx.rule.attr.deps:
-        count = count + dep[FileCountInfo].count
-    return [FileCountInfo(count = count)]
-
-file_count_aspect = aspect(
-    implementation = _file_count_aspect_impl,
-    attr_aspects = ['deps'],
-    attrs = {
-        'extension' : attr.string(values = ['*', 'h', 'cc']),
-    }
-)
-
-def _file_count_rule_impl(ctx):
-    for dep in ctx.attr.deps:
-        print(dep[FileCountInfo].count)
-
-file_count_rule = rule(
-    implementation = _file_count_rule_impl,
-    attrs = {
-        'deps' : attr.label_list(aspects = [file_count_aspect]),
-        'extension' : attr.string(default = '*'),
-    },
-)
-```
-
-`BUILD.bazel` file:
-
-```python
-load('//:file_count.bzl', 'file_count_rule')
-
-cc_library(
-    name = 'lib',
-    srcs = [
-        'lib.h',
-        'lib.cc',
-    ],
-)
-
-cc_binary(
-    name = 'app',
-    srcs = [
-        'app.h',
-        'app.cc',
-        'main.cc',
-    ],
-    deps = [':lib'],
-)
-
-file_count_rule(
-    name = 'file_count',
-    deps = [':app'],
-    extension = 'h',
-)
-```
-
-### Aspect definition
-
-```python
-file_count_aspect = aspect(
-    implementation = _file_count_aspect_impl,
-    attr_aspects = ['deps'],
-    attrs = {
-        'extension' : attr.string(values = ['*', 'h', 'cc']),
-    }
-)
-```
-
-This example shows how the aspect propagates through the ``deps`` attribute.
-
-``attrs`` defines a set of attributes for an aspect. Public aspect attributes
-define parameters and can only be of types ``bool``, ``int`` or ``string``.
-For rule-propagated aspects, ``int`` and ``string`` parameters must have
-``values`` specified on them. This example has a parameter called
-``extension`` that is allowed to have '``*``', '``h``', or '``cc``' as a
-value.
-
-For rule-propagated aspects, parameter values are taken from the rule
-requesting the aspect, using the attribute of the rule that has the same name
-and type (see the definition of ``file_count_rule``).
-
-For command-line aspects, the parameter values can be passed using the
-[``--aspects_parameters``](/reference/command-line-reference#flag--aspects_parameters)
-flag. The ``values`` restriction of ``int`` and ``string`` parameters may be
-omitted.
-
-Aspects are also allowed to have private attributes of types ``label`` or
-``label_list``. Private label attributes can be used to specify dependencies
-on tools or libraries that are needed for actions generated by aspects. This
-example does not define a private attribute, but the following code snippet
-demonstrates how you could pass in a tool to an aspect:
-
-```python
-...
-    attrs = {
-        '_protoc' : attr.label(
-            default = Label('//tools:protoc'),
-            executable = True,
-            cfg = "exec"
-        )
-    }
-...
-```
-
-### Aspect implementation
-
-```python
-FileCountInfo = provider(
-    fields = {
-        'count' : 'number of files'
-    }
-)
-
-def _file_count_aspect_impl(target, ctx):
-    count = 0
-    # Make sure the rule has a srcs attribute.
-    if hasattr(ctx.rule.attr, 'srcs'):
-        # Iterate through the sources counting files.
-        for src in ctx.rule.attr.srcs:
-            for f in src.files.to_list():
-                if ctx.attr.extension == '*' or ctx.attr.extension == f.extension:
-                    count = count + 1
-    # Get the counts from our dependencies.
-    for dep in ctx.rule.attr.deps:
-        count = count + dep[FileCountInfo].count
-    return [FileCountInfo(count = count)]
-```
-
-Just like a rule implementation function, an aspect implementation function
-returns a list of providers that are accessible to the targets that depend
-on it.
-
-In this example, the ``FileCountInfo`` is defined as a provider that has one
-field ``count``. It is best practice to explicitly define the fields of a
-provider using the ``fields`` attribute.
-
-The set of providers for an aspect application A(X) is the union of providers
-that come from the implementation of a rule for target X and from the
-implementation of aspect A. The providers that a rule implementation
-propagates are created and frozen before aspects are applied and cannot be
-modified from an aspect. It is an error if a target and an aspect that is
-applied to it each provide a provider with the same type, with the exceptions
-of [`OutputGroupInfo`](/rules/lib/providers/OutputGroupInfo)
-(which is merged, so long as the rule and aspect specify different output
-groups) and
-[`InstrumentedFilesInfo`](/rules/lib/providers/InstrumentedFilesInfo)
-(which is taken from the aspect). This means that aspect implementations may
-never return [`DefaultInfo`](/rules/lib/providers/DefaultInfo).
-
-The parameters and private attributes are passed in the attributes of the
-``ctx``. This example references the ``extension`` parameter to determine
-which files to count.
-
-In the attributes along which the aspect is propagated (those in the
-`attr_aspects` list), the values are replaced with the results of applying
-the aspect to them.
-For example, if target X has Y and Z in its deps, `ctx.rule.attr.deps` for
-A(X) will be [A(Y), A(Z)]. In this example, ``ctx.rule.attr.deps`` are Target
-objects that are the results of applying the aspect to the 'deps' of the
-original target to which the aspect has been applied.
-
-In the example, the aspect accesses the ``FileCountInfo`` provider from the
-target's dependencies to accumulate the total transitive number of files.
-
-### Invoking the aspect from a rule
-
-```python
-def _file_count_rule_impl(ctx):
-    for dep in ctx.attr.deps:
-        print(dep[FileCountInfo].count)
-
-file_count_rule = rule(
-    implementation = _file_count_rule_impl,
-    attrs = {
-        'deps' : attr.label_list(aspects = [file_count_aspect]),
-        'extension' : attr.string(default = '*'),
-    },
-)
-```
-
-The rule implementation demonstrates how to access the ``FileCountInfo``
-via the ``ctx.attr.deps``.
-
-The rule definition demonstrates how to define a parameter (``extension``)
-and give it a default value (``*``). Note that having a default value that
-was not one of '``cc``', '``h``', or '``*``' would be an error due to the
-restrictions placed on the parameter in the aspect definition.
-
-### Invoking an aspect through a target rule
-
-```python
-load('//:file_count.bzl', 'file_count_rule')
-
-cc_binary(
-    name = 'app',
-...
-)
-
-file_count_rule(
-    name = 'file_count',
-    deps = ['app'],
-    extension = 'h',
-)
-```
-
-This demonstrates how to pass the ``extension`` parameter into the aspect
-via the rule. Since the ``extension`` parameter has a default value in the
-rule implementation, ``extension`` would be considered an optional parameter.
-
-When the ``file_count`` target is built, the aspect is evaluated for it and
-for all of the targets accessible recursively via ``deps``.
-
-## References
-
-* [`aspect` API reference](/rules/lib/globals/bzl#aspect)
diff --git a/8.3.1/extending/auto-exec-groups.mdx b/8.3.1/extending/auto-exec-groups.mdx
deleted file mode 100644
index abba3d5..0000000
--- a/8.3.1/extending/auto-exec-groups.mdx
+++ /dev/null
@@ -1,181 +0,0 @@
----
-title: 'Automatic Execution Groups (AEGs)'
----
-
-
-Automatic execution groups select an [execution platform][exec_platform]
-for each toolchain type. In other words, one target can have multiple
-execution platforms without defining execution groups.
-
-## Quick summary
-
-Automatic execution groups are closely connected to toolchains. If you are
-using toolchains, you need to set them on the affected actions (actions which
-use an executable or a tool from a toolchain) by adding the `toolchain`
-parameter. For example:
-
-```python
-ctx.actions.run(
-    ...,
-    executable = ctx.toolchains['@bazel_tools//tools/jdk:toolchain_type'].tool,
-    ...,
-    toolchain = '@bazel_tools//tools/jdk:toolchain_type',
-)
-```
-If the action does not use a tool or executable from a toolchain, and Bazel
-doesn't detect that ([the error](#first-error-message) is raised), you can
-set `toolchain = None`.
-
-If you need to use multiple toolchains on a single execution platform (an
-action uses executables or tools from two or more toolchains), you need to
-manually define [exec_groups][exec_groups] (check the
-[When should I use a custom exec_group?][multiple_toolchains_exec_groups]
-section).
-
-## History
-
-Before AEGs, the execution platform was selected at the rule level. For
-example:
-
-```python
-my_rule = rule(
-    _impl,
-    toolchains = ['//tools:toolchain_type_1', '//tools:toolchain_type_2'],
-)
-```
-
-Rule `my_rule` registers two toolchain types.
-This means that [Toolchain
-Resolution](https://bazel.build/extending/toolchains#toolchain-resolution)
-was used to find an execution platform which supports both toolchain types.
-The selected execution platform was used for each registered action inside
-the rule, unless specified differently with [exec_groups][exec_groups].
-In other words, all actions inside the rule used to have a single execution
-platform even if they used tools from different toolchains (the execution
-platform was selected once per target). This resulted in failures when there
-was no execution platform supporting all toolchains.
-
-## Current state
-
-With AEGs, the execution platform is selected for each toolchain type. The
-implementation function of the earlier example, `my_rule`, would look like:
-
-```python
-def _impl(ctx):
-    ctx.actions.run(
-        mnemonic = "FirstAction",
-        executable = ctx.toolchains['//tools:toolchain_type_1'].tool,
-        toolchain = '//tools:toolchain_type_1',
-    )
-
-    ctx.actions.run(
-        mnemonic = "SecondAction",
-        executable = ctx.toolchains['//tools:toolchain_type_2'].tool,
-        toolchain = '//tools:toolchain_type_2',
-    )
-```
-
-This rule creates two actions: `FirstAction`, which uses an executable from
-the `//tools:toolchain_type_1` toolchain, and `SecondAction`, which uses an
-executable from the `//tools:toolchain_type_2` toolchain. Before AEGs, both
-of these actions would be executed on a single execution platform which
-supports both toolchain types. With AEGs, by adding the `toolchain` parameter
-inside the actions, each action executes on the execution platform that
-provides the toolchain. The actions may be executed on different execution
-platforms.
-
-The same applies to [ctx.actions.run_shell][run_shell]: add the `toolchain`
-parameter when its `tools` come from a toolchain.
-
-## Difference between custom exec groups and automatic exec groups
-
-As the name suggests, AEGs are exec groups created automatically for each
-toolchain type registered on a rule. There is no need to manually specify
-them, unlike the "classic" exec groups.
-
-### When should I use a custom exec_group?
-
-Custom exec_groups are needed only in cases where multiple toolchains need to
-execute on a single execution platform. In all other cases there's no need to
-define custom exec_groups. For example:
-
-```python
-def _impl(ctx):
-    ctx.actions.run(
-        ...,
-        executable = ctx.toolchains['//tools:toolchain_type_1'].tool,
-        tools = [ctx.toolchains['//tools:toolchain_type_2'].tool],
-        exec_group = 'two_toolchains',
-    )
-```
-
-```python
-my_rule = rule(
-    _impl,
-    exec_groups = {
-        "two_toolchains": exec_group(
-            toolchains = ['//tools:toolchain_type_1', '//tools:toolchain_type_2'],
-        ),
-    }
-)
-```
-
-## Migration of AEGs
-
-Internally in google3, Blaze is already using AEGs. Externally for Bazel, the
-migration is in progress. Some rules are already using this feature (e.g.
-Java and C++ rules).
-
-### Which Bazel versions support this migration?
-
-AEGs are fully supported from Bazel 7.
-
-### How to enable AEGs?
-
-Set `--incompatible_auto_exec_groups` to true. More information about the
-flag is available in [the GitHub issue][github_flag].
-
-### How to enable AEGs inside a particular rule?
-
-Set the `_use_auto_exec_groups` attribute on a rule.
-
-```python
-my_rule = rule(
-    _impl,
-    attrs = {
-        "_use_auto_exec_groups": attr.bool(default = True),
-    }
-)
-```
-This enables AEGs only in `my_rule`; its actions start using the new logic
-when selecting the execution platform. The attribute overrides the
-`--incompatible_auto_exec_groups` flag.
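-
-Conversely, to enable AEGs everywhere rather than per rule, you can put the
-incompatible flag in your `.bazelrc` file (a minimal sketch):
-
-```
-build --incompatible_auto_exec_groups
-```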
-
-### How to disable AEGs in case of an error?
-
-Set `--incompatible_auto_exec_groups` to false to completely disable AEGs in
-your project ([flag's GitHub issue][github_flag]), or disable a particular
-rule by setting its `_use_auto_exec_groups` attribute to `False`
-([more details about the attribute](#how-enable-particular-rule)).
-
-### Error messages while migrating to AEGs
-
-#### Couldn't identify if tools are from implicit dependencies or a toolchain. Please set the toolchain parameter. If you're not using a toolchain, set it to 'None'.
-  * In this case, you get the stack of calls that led to the error, so you
-    can see exactly which action needs the `toolchain` parameter. Check which
-    toolchain is used for the action and set it with the `toolchain`
-    parameter. If no toolchain is used inside the action for tools or
-    executable, set it to `None`.
-
-#### Action declared for non-existent toolchain '[toolchain_type]'.
-  * This means that you've set the `toolchain` parameter on the action but
-    didn't register the toolchain on the rule. Register the toolchain or set
-    `None` inside the action.
-
-## Additional material
-
-For more information, check the design document:
-[Automatic exec groups for toolchains][aegs_design_doc].
-
-[exec_platform]: https://bazel.build/extending/platforms#:~:text=Execution%20%2D%20a%20platform%20on%20which%20build%20tools%20execute%20build%20actions%20to%20produce%20intermediate%20and%20final%20outputs.
-[exec_groups]: https://bazel.build/extending/exec-groups
-[github_flag]: https://github.com/bazelbuild/bazel/issues/17134
-[aegs_design_doc]: https://docs.google.com/document/d/1-rbP_hmKs9D639YWw5F_JyxPxL2bi6dSmmvj_WXak9M/edit#heading=h.5mcn15i0e1ch
-[run_shell]: https://bazel.build/rules/lib/builtins/actions#run_shell
-[multiple_toolchains_exec_groups]: /extending/auto-exec-groups#when-should-use-exec-groups
diff --git a/8.3.1/extending/concepts.mdx b/8.3.1/extending/concepts.mdx
deleted file mode 100644
index eb1d6b8..0000000
--- a/8.3.1/extending/concepts.mdx
+++ /dev/null
@@ -1,111 +0,0 @@
----
-title: 'Extension Overview'
----
-
-
-
-
-This page describes how to extend the BUILD language using macros
-and rules.
-
-Bazel extensions are files ending in `.bzl`. Use a
-[load statement](/concepts/build-files#load) to import a symbol from an
-extension.
-
-Before learning the more advanced concepts, first:
-
-* Read about the [Starlark language](/rules/language), used in both the
-  `BUILD` and `.bzl` files.
-
-* Learn how you can [share variables](/build/share-variables)
-  between two `BUILD` files.
-
-## Macros and rules
-
-A macro is a function that instantiates rules. Macros come in two flavors:
-[symbolic macros](/extending/macros) (new in Bazel 8) and [legacy
-macros](/extending/legacy-macros). The two flavors of macros are defined
-differently, but behave almost the same from the point of view of a user. A
-macro is useful when a `BUILD` file is getting too repetitive or too complex,
-as it lets you reuse some code. The function is evaluated as soon as the
-`BUILD` file is read. After the evaluation of the `BUILD` file, Bazel has
-little information about macros. If your macro generates a `genrule`, Bazel
-will behave *almost* as if you declared that `genrule` in the `BUILD` file.
-(The one exception is that targets declared in a symbolic macro have [special
-visibility semantics](/extending/macros#visibility): a symbolic macro can
-hide its internal targets from the rest of the package.)
-
-A [rule](/extending/rules) is more powerful than a macro. It can access Bazel
-internals and have full control over what is going on. It may, for example,
-pass information to other rules.
-
-If you want to reuse simple logic, start with a macro; we recommend a
-symbolic macro, unless you need to support older Bazel versions. If a macro
-becomes complex, it is often a good idea to make it a rule. Support for a new
-language is typically done with a rule. Rules are for advanced users, and
-most users will never have to write one; they will only load and call
-existing rules.
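-
-As an illustration, a minimal legacy macro that wraps a `genrule` might look
-like this (a sketch; the file, function, and target names are illustrative):
-
-```python
-# copy.bzl
-def copy_file(name, src):
-    # Each call from a BUILD file instantiates one genrule target.
-    native.genrule(
-        name = name,
-        srcs = [src],
-        outs = [name + ".out"],
-        cmd = "cp $< $@",
-    )
-```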
-
-## Evaluation model
-
-A build consists of three phases.
-
-* **Loading phase**. First, load and evaluate all extensions and all `BUILD`
-  files that are needed for the build. The execution of the `BUILD` files
-  simply instantiates rules (each time a rule is called, it gets added to a
-  graph). This is where macros are evaluated.
-
-* **Analysis phase**. The code of the rules is executed (their
-  `implementation` function), and actions are instantiated. An action
-  describes how to generate a set of outputs from a set of inputs, such as
-  "run gcc on hello.c and get hello.o". You must list explicitly which files
-  will be generated before executing the actual commands. In other words,
-  the analysis phase takes the graph generated by the loading phase and
-  generates an action graph.
-
-* **Execution phase**. Actions are executed when at least one of their
-  outputs is required. If a file is missing or if a command fails to generate
-  one output, the build fails. Tests are also run during this phase.
-
-Bazel uses parallelism to read, parse and evaluate the `.bzl` files and
-`BUILD` files. A file is read at most once per build and the result of the
-evaluation is cached and reused. A file is evaluated only once all its
-dependencies (`load()` statements) have been resolved. By design, loading a
-`.bzl` file has no visible side effects; it only defines values and
-functions.
-
-Bazel tries to be clever: it uses dependency analysis to know which files
-must be loaded, which rules must be analyzed, and which actions must be
-executed. For example, if a rule generates actions that you don't need for
-the current build, they will not be executed.
-
-## Creating extensions
-
-* [Create your first macro](/rules/macro-tutorial) in order to reuse some
-  code. Then [learn more about macros](/extending/macros) and [using them to
-  create "custom verbs"](/rules/verbs-tutorial).
-
-* [Follow the rules tutorial](/rules/rules-tutorial) to get started with
-  rules. Next, you can read more about the
-  [rules concepts](/extending/rules).
-
-The two links below will be very useful when writing your own extensions.
-Keep them within reach:
-
-* The [API reference](/rules/lib)
-
-* [Examples](https://github.com/bazelbuild/examples/tree/master/rules)
-
-## Going further
-
-In addition to [macros](/extending/macros) and [rules](/extending/rules), you
-may want to write [aspects](/extending/aspects) and [repository
-rules](/extending/repo).
-
-* Use [Buildifier](https://github.com/bazelbuild/buildtools)
-  consistently to format and lint your code.
-
-* Follow the [`.bzl` style guide](/rules/bzl-style).
-
-* [Test](/rules/testing) your code.
-
-* [Generate documentation](https://skydoc.bazel.build/) to help your users.
-
-* [Optimize the performance](/rules/performance) of your code.
-
-* [Deploy](/rules/deploying) your extensions to other people.
diff --git a/8.3.1/extending/depsets.mdx b/8.3.1/extending/depsets.mdx
deleted file mode 100644
index 2aa8a1f..0000000
--- a/8.3.1/extending/depsets.mdx
+++ /dev/null
@@ -1,346 +0,0 @@
----
-title: 'Depsets'
----
-
-
-
-[Depsets](/rules/lib/builtins/depset) are a specialized data structure for
-efficiently collecting data across a target's transitive dependencies. They
-are an essential element of rule processing.
-
-The defining feature of a depset is its time- and space-efficient union
-operation. The depset constructor accepts a list of elements ("direct") and a
-list of other depsets ("transitive"), and returns a depset representing a set
-containing all the direct elements and the union of all the transitive sets.
-Conceptually, the constructor creates a new graph node that has the direct
-and transitive nodes as its successors. Depsets have well-defined ordering
-semantics, based on traversal of this graph.
-
-Example uses of depsets include:
-
-* Storing the paths of all object files for a program's libraries, which can
-  then be passed to a linker action through a provider.
-
-* For an interpreted language, storing the transitive source files that are
-  included in an executable's runfiles.
-
-## Description and operations
-
-Conceptually, a depset is a directed acyclic graph (DAG) that typically looks
-similar to the target graph. It is constructed from the leaves up to the
-root. Each target in a dependency chain can add its own contents on top of
-the previous without having to read or copy them.
-
-Each node in the DAG holds a list of direct elements and a list of child
-nodes. The contents of the depset are the transitive elements, that is, the
-direct elements of all the nodes. A new depset can be created using the
-[depset](/rules/lib/globals/bzl#depset) constructor: it accepts a list of
-direct elements and another list of child depsets.
-
-```python
-s = depset(["a", "b", "c"])
-t = depset(["d", "e"], transitive = [s])
-
-print(s)    # depset(["a", "b", "c"])
-print(t)    # depset(["d", "e", "a", "b", "c"])
-```
-
-To retrieve the contents of a depset, use the
-[to_list()](/rules/lib/builtins/depset#to_list) method. It returns a list of
-all transitive elements, not including duplicates. There is no way to
-directly inspect the precise structure of the DAG, although this structure
-does affect the order in which the elements are returned.
-
-```python
-s = depset(["a", "b", "c"])
-
-print("c" in s.to_list())              # True
-print(s.to_list() == ["a", "b", "c"])  # True
-```
-
-The allowed items in a depset are restricted, just as the allowed keys in
-dictionaries are restricted. In particular, depset contents may not be
-mutable.
-
-Depsets use reference equality: a depset is equal to itself, but unequal to
-any other depset, even if they have the same contents and same internal
-structure.
-
-```python
-s = depset(["a", "b", "c"])
-t = s
-print(s == t)  # True
-
-t = depset(["a", "b", "c"])
-print(s == t)  # False
-
-d = {}
-d[s] = None
-d[t] = None
-print(len(d))  # 2
-```
-
-To compare depsets by their contents, convert them to sorted lists.
-
-```python
-s = depset(["a", "b", "c"])
-t = depset(["c", "b", "a"])
-print(sorted(s.to_list()) == sorted(t.to_list()))  # True
-```
-
-There is no ability to remove elements from a depset. If this is needed, you
-must read out the entire contents of the depset, filter out the elements you
-want to remove, and reconstruct a new depset. This is not particularly
-efficient.
- -```python -s = depset(["a", "b", "c"]) -t = depset(["b", "c"]) - -# Compute set difference s - t. Precompute t.to_list() so it's not done -# in a loop, and convert it to a dictionary for fast membership tests. -t_items = {e: None for e in t.to_list()} -diff_items = [x for x in s.to_list() if x not in t_items] -# Convert back to depset if it's still going to be used for union operations. -s = depset(diff_items) -print(s) # depset(["a"]) -``` - -### Order - -The `to_list` operation performs a traversal over the DAG. The kind of traversal -depends on the *order* that was specified at the time the depset was -constructed. It is useful for Bazel to support multiple orders because sometimes -tools care about the order of their inputs. For example, a linker action may -need to ensure that if `B` depends on `A`, then `A.o` comes before `B.o` on the -linker’s command line. Other tools might have the opposite requirement. - -Three traversal orders are supported: `postorder`, `preorder`, and -`topological`. The first two work exactly like [tree -traversals](https://en.wikipedia.org/wiki/Tree_traversal#Depth-first_search) -except that they operate on DAGs and skip already visited nodes. The third order -works as a topological sort from root to leaves, essentially the same as -preorder except that shared children are listed only after all of their parents. -Preorder and postorder operate as left-to-right traversals, but note that within -each node direct elements have no order relative to children. For topological -order, there is no left-to-right guarantee, and even the -all-parents-before-child guarantee does not apply in the case that there are -duplicate elements in different nodes of the DAG. - -```python -# This demonstrates different traversal orders. - -def create(order): - cd = depset(["c", "d"], order = order) - gh = depset(["g", "h"], order = order) - return depset(["a", "b", "e", "f"], transitive = [cd, gh], order = order) - -print(create("postorder").to_list()) # ["c", "d", "g", "h", "a", "b", "e", "f"] -print(create("preorder").to_list()) # ["a", "b", "e", "f", "c", "d", "g", "h"] -``` - -```python -# This demonstrates different orders on a diamond graph. - -def create(order): - a = depset(["a"], order=order) - b = depset(["b"], transitive = [a], order = order) - c = depset(["c"], transitive = [a], order = order) - d = depset(["d"], transitive = [b, c], order = order) - return d - -print(create("postorder").to_list()) # ["a", "b", "c", "d"] -print(create("preorder").to_list()) # ["d", "b", "a", "c"] -print(create("topological").to_list()) # ["d", "b", "c", "a"] -``` - -Due to how traversals are implemented, the order must be specified at the time -the depset is created with the constructor’s `order` keyword argument. If this -argument is omitted, the depset has the special `default` order, in which case -there are no guarantees about the order of any of its elements (except that it -is deterministic). - -## Full example - -This example is available at -[https://github.com/bazelbuild/examples/tree/main/rules/depsets](https://github.com/bazelbuild/examples/tree/main/rules/depsets). - -Suppose there is a hypothetical interpreted language Foo. In order to build -each `foo_binary` you need to know all the `*.foo` files that it directly or -indirectly depends on. - -```python -# //depsets:BUILD - -load(":foo.bzl", "foo_library", "foo_binary") - -# Our hypothetical Foo compiler. 
-py_binary(
-    name = "foocc",
-    srcs = ["foocc.py"],
-)
-
-foo_library(
-    name = "a",
-    srcs = ["a.foo", "a_impl.foo"],
-)
-
-foo_library(
-    name = "b",
-    srcs = ["b.foo", "b_impl.foo"],
-    deps = [":a"],
-)
-
-foo_library(
-    name = "c",
-    srcs = ["c.foo", "c_impl.foo"],
-    deps = [":a"],
-)
-
-foo_binary(
-    name = "d",
-    srcs = ["d.foo"],
-    deps = [":b", ":c"],
-)
-```
-
-```python
-# //depsets:foocc.py
-
-# "Foo compiler" that just concatenates its inputs to form its output.
-import sys
-
-if __name__ == "__main__":
-  # argv[1] is the output file; argv[2:] are the input files.
-  assert len(sys.argv) >= 2
-  output = open(sys.argv[1], "wt")
-  for path in sys.argv[2:]:
-    input = open(path, "rt")
-    output.write(input.read())
-```
-
-Here, the transitive sources of the binary `d` are all of the `*.foo` files
-in the `srcs` fields of `a`, `b`, `c`, and `d`. In order for the `foo_binary`
-target to know about any file besides `d.foo`, the `foo_library` targets need
-to pass them along in a provider. Each library receives the providers from
-its own dependencies, adds its own immediate sources, and passes on a new
-provider with the augmented contents. The `foo_binary` rule does the same,
-except that instead of returning a provider, it uses the complete list of
-sources to construct a command line for an action.
-
-Here's a complete implementation of the `foo_library` and `foo_binary` rules.
-
-```python
-# //depsets/foo.bzl
-
-# A provider with one field, transitive_sources.
-FooFiles = provider(fields = ["transitive_sources"])
-
-def get_transitive_srcs(srcs, deps):
-  """Obtain the source files for a target and its transitive dependencies.
-
-  Args:
-    srcs: a list of source files
-    deps: a list of targets that are direct dependencies
-  Returns:
-    a collection of the transitive sources
-  """
-  return depset(
-      srcs,
-      transitive = [dep[FooFiles].transitive_sources for dep in deps])
-
-def _foo_library_impl(ctx):
-  trans_srcs = get_transitive_srcs(ctx.files.srcs, ctx.attr.deps)
-  return [FooFiles(transitive_sources = trans_srcs)]
-
-foo_library = rule(
-    implementation = _foo_library_impl,
-    attrs = {
-        "srcs": attr.label_list(allow_files = True),
-        "deps": attr.label_list(),
-    },
-)
-
-def _foo_binary_impl(ctx):
-  foocc = ctx.executable._foocc
-  out = ctx.outputs.out
-  trans_srcs = get_transitive_srcs(ctx.files.srcs, ctx.attr.deps)
-  srcs_list = trans_srcs.to_list()
-  ctx.actions.run(
-      executable = foocc,
-      arguments = [out.path] + [src.path for src in srcs_list],
-      inputs = srcs_list + [foocc],
-      outputs = [out],
-  )
-
-foo_binary = rule(
-    implementation = _foo_binary_impl,
-    attrs = {
-        "srcs": attr.label_list(allow_files = True),
-        "deps": attr.label_list(),
-        "_foocc": attr.label(
-            default = Label("//depsets:foocc"),
-            allow_files = True,
-            executable = True,
-            # "exec" is the supported value in current Bazel; the old "host"
-            # configuration has been removed.
-            cfg = "exec",
-        ),
-    },
-    outputs = {"out": "%{name}.out"},
-)
-```
-
-You can test this by copying these files into a fresh package, renaming the
-labels appropriately, creating the source `*.foo` files with dummy content,
-and building the `d` target.
-
-## Performance
-
-To see the motivation for using depsets, consider what would happen if
-`get_transitive_srcs()` collected its sources in a list.
-
-```python
-def get_transitive_srcs(srcs, deps):
-  trans_srcs = []
-  for dep in deps:
-    trans_srcs += dep[FooFiles].transitive_sources
-  trans_srcs += srcs
-  return trans_srcs
-```
-
-This does not take into account duplicates, so the source files for `a`
-will appear twice on the command line and twice in the contents of the
-output file.
-
-An alternative is using a general set, which can be simulated by a
-dictionary where the keys are the elements and all the keys map to `True`.
-
-```python
-def get_transitive_srcs(srcs, deps):
-  trans_srcs = {}
-  for dep in deps:
-    for file in dep[FooFiles].transitive_sources:
-      trans_srcs[file] = True
-  for file in srcs:
-    trans_srcs[file] = True
-  return trans_srcs
-```
-
-This gets rid of the duplicates, but it makes the order of the command line
-arguments (and therefore the contents of the files) unspecified, although
-still deterministic.
-
-Moreover, both approaches are asymptotically worse than the depset-based
-approach. Consider the case where there is a long chain of dependencies on
-Foo libraries. Processing every rule requires copying all of the transitive
-sources that came before it into a new data structure. This means that the
-time and space cost for analyzing an individual library or binary target
-is proportional to its own height in the chain. For a chain of length n,
-foolib_1 ← foolib_2 ← … ← foolib_n, the overall cost is effectively O(n^2).
-
-Generally speaking, depsets should be used whenever you are accumulating
-information through your transitive dependencies. This helps ensure that
-your build scales well as your target graph grows deeper.
-
-Finally, it's important not to retrieve the contents of the depset
-unnecessarily in rule implementations. One call to `to_list()` at the end in
-a binary rule is fine, since the overall cost is just O(n). It's when many
-non-terminal targets try to call `to_list()` that quadratic behavior occurs.
-
-For more information about using depsets efficiently, see the
-[performance](/rules/performance) page.
-
-## API Reference
-
-Please see [here](/rules/lib/builtins/depset) for more details.
-
diff --git a/8.3.1/extending/exec-groups.mdx b/8.3.1/extending/exec-groups.mdx
deleted file mode 100644
index ba145e5..0000000
--- a/8.3.1/extending/exec-groups.mdx
+++ /dev/null
@@ -1,167 +0,0 @@
----
-title: 'Execution Groups'
----
-
-
-
-Execution groups allow for multiple execution platforms within a single
-target. Each execution group has its own [toolchain](/extending/toolchains)
-dependencies and performs its own
-[toolchain resolution](/extending/toolchains#toolchain-resolution).
-
-## Background
-
-Execution groups allow the rule author to define sets of actions, each with a
-potentially different execution platform. Multiple execution platforms can
-allow actions to execute differently, for example compiling an iOS app on a
-remote (Linux) worker and then linking/code-signing on a local Mac worker.
-
-Being able to define groups of actions also helps alleviate the usage of
-action mnemonics as a proxy for specifying actions. Mnemonics are not
-guaranteed to be unique and can only reference a single action. This is
-especially helpful in allocating extra resources to specific memory- and
-processing-intensive actions, like linking in C++ builds, without
-over-allocating to less demanding tasks.
-
-## Defining execution groups
-
-During rule definition, rule authors can
-[declare](/rules/lib/globals/bzl#exec_group)
-a set of execution groups. On each execution group, the rule author can
-specify everything needed to select an execution platform for that execution
-group, namely any constraints via `exec_compatible_with` and toolchain types
-via `toolchains`.
-
-```python
-# foo.bzl
-my_rule = rule(
-    _impl,
-    exec_groups = {
-        "link": exec_group(
-            exec_compatible_with = ["@platforms//os:linux"],
-            toolchains = ["//foo:toolchain_type"],
-        ),
-        "test": exec_group(
-            toolchains = ["//foo_tools:toolchain_type"],
-        ),
-    },
-    attrs = {
-        "_compiler": attr.label(cfg = config.exec("link")),
-    },
-)
-```
-
-In the code snippet above, you can see that tool dependencies can also
-specify a transition for an exec group using the
-[`cfg`](/rules/lib/toplevel/attr#label) attribute param and the
-[`config`](/rules/lib/toplevel/config) module. The module exposes an `exec`
-function that takes a single string parameter: the name of the exec group
-for which the dependency should be built.
-
-As on native rules, the `test` execution group is present by default on
-Starlark test rules.
-
-## Accessing execution groups
-
-In the rule implementation, you can declare that actions should be run on
-the execution platform of an execution group. You can do this by using the
-`exec_group` param of action generating methods, specifically
-[`ctx.actions.run`](/rules/lib/builtins/actions#run) and
-[`ctx.actions.run_shell`](/rules/lib/builtins/actions#run_shell).
-
-```python
-# foo.bzl
-def _impl(ctx):
-    ctx.actions.run(
-        inputs = [ctx.attr._some_tool, ctx.files.srcs[0]],
-        exec_group = "compile",
-        # ...
-    )
-```
-
-Rule authors can also access the [resolved
-toolchains](/extending/toolchains#toolchain-resolution) of execution groups,
-similarly to how you can access the resolved toolchain of a target:
-
-```python
-# foo.bzl
-def _impl(ctx):
-    foo_info = ctx.exec_groups["link"].toolchains["//foo:toolchain_type"].fooinfo
-    ctx.actions.run(
-        inputs = [foo_info, ctx.files.srcs[0]],
-        exec_group = "link",
-        # ...
-    )
-```
-
-Note: If an action uses a toolchain from an execution group, but doesn't
-specify that execution group in the action declaration, that may potentially
-cause issues. A mismatch like this may not immediately cause failures, but is
-a latent problem.
-
-## Using execution groups to set execution properties
-
-Execution groups are integrated with the
-[`exec_properties`](/reference/be/common-definitions#common-attributes)
-attribute that exists on every rule and allows the target writer to specify a
-string dict of properties that is then passed to the execution machinery. For
-example, if you wanted to set some property, say memory, for the target and
-give certain actions a higher memory allocation, you would write an
-`exec_properties` entry with an execution-group-augmented key, such as:
-
-```python
-# BUILD
-my_rule(
    name = 'my_target',
-    exec_properties = {
-        'mem': '12g',
-        'link.mem': '16g',
-    },
-    …
-)
-```
-
-All actions with `exec_group = "link"` would see the exec properties
-dictionary as `{"mem": "16g"}`. As you see here, execution-group-level
-settings override target-level settings.
-
-### Execution groups for native rules
-
-The following execution groups are available for actions defined by native
-rules:
-
-* `test`: Test runner actions.
-* `cpp_link`: C++ linking actions.
-
-### Execution groups and platform execution properties
-
-It is possible to define `exec_properties` for arbitrary execution groups on
-platform targets (unlike `exec_properties` set directly on a target, where
-properties for unknown execution groups are rejected). Targets then inherit
-the execution platform's `exec_properties` that affect the default execution
-group and any other relevant execution groups.
- -For example, suppose running a C++ test requires some resource to be available, -but it isn't required for compiling and linking; this can be modelled as -follows: - -```python -constraint_setting(name = "resource") -constraint_value(name = "has_resource", constraint_setting = ":resource") - -platform( - name = "platform_with_resource", - constraint_values = [":has_resource"], - exec_properties = { - "test.resource": "...", - }, -) - -cc_test( - name = "my_test", - srcs = ["my_test.cc"], - exec_compatible_with = [":has_resource"], -) -``` - -`exec_properties` defined directly on targets take precedence over those that -are inherited from the execution platform. diff --git a/8.3.1/extending/platforms.mdx b/8.3.1/extending/platforms.mdx deleted file mode 100644 index 94e6290..0000000 --- a/8.3.1/extending/platforms.mdx +++ /dev/null @@ -1,273 +0,0 @@ ---- -title: 'Platforms' ---- - - - -Bazel can build and test code on a variety of hardware, operating systems, and -system configurations, using many different versions of build tools such as -linkers and compilers. To help manage this complexity, Bazel has a concept of -*constraints* and *platforms*. A constraint is a dimension in which build or -production environments may differ, such as CPU architecture, the presence or -absence of a GPU, or the version of a system-installed compiler. A platform is a -named collection of choices for these constraints, representing the particular -resources that are available in some environment. - -Modeling the environment as a platform helps Bazel to automatically select the -appropriate -[toolchains](/extending/toolchains) -for build actions. Platforms can also be used in combination with the -[config_setting](/reference/be/general#config_setting) -rule to write [configurable attributes](/docs/configurable-attributes). - -Bazel recognizes three roles that a platform may serve: - -* **Host** - the platform on which Bazel itself runs. -* **Execution** - a platform on which build tools execute build actions to - produce intermediate and final outputs. -* **Target** - a platform on which a final output resides and executes. - -Bazel supports the following build scenarios regarding platforms: - -* **Single-platform builds** (default) - host, execution, and target platforms - are the same. For example, building a Linux executable on Ubuntu running on - an Intel x64 CPU. - -* **Cross-compilation builds** - host and execution platforms are the same, but - the target platform is different. For example, building an iOS app on macOS - running on a MacBook Pro. - -* **Multi-platform builds** - host, execution, and target platforms are all - different. - -Tip: for detailed instructions on migrating your project to platforms, see -[Migrating to Platforms](/concepts/platforms). - -## Defining constraints and platforms - -The space of possible choices for platforms is defined by using the -[`constraint_setting`][constraint_setting] and -[`constraint_value`][constraint_value] rules within `BUILD` files. -`constraint_setting` creates a new dimension, while -`constraint_value` creates a new value for a given dimension; together they -effectively define an enum and its possible values. For example, the following -snippet of a `BUILD` file introduces a constraint for the system's glibc version -with two possible values. 
-
-[constraint_setting]: /reference/be/platforms-and-toolchains#constraint_setting
-[constraint_value]: /reference/be/platforms-and-toolchains#constraint_value
-
-```python
-constraint_setting(name = "glibc_version")
-
-constraint_value(
-    name = "glibc_2_25",
-    constraint_setting = ":glibc_version",
-)
-
-constraint_value(
-    name = "glibc_2_26",
-    constraint_setting = ":glibc_version",
-)
-```
-
-Constraints and their values may be defined across different packages in the
-workspace. They are referenced by label and subject to the usual visibility
-controls. If visibility allows, you can extend an existing constraint setting
-by defining your own value for it.
-
-The [`platform`](/reference/be/platforms-and-toolchains#platform) rule
-introduces a new platform with certain choices of constraint values. The
-following creates a platform named `linux_x86`, and says that it describes
-any environment that runs a Linux operating system on an x86_64 architecture
-with a glibc version of 2.25. (See below for more on Bazel's built-in
-constraints.)
-
-```python
-platform(
-    name = "linux_x86",
-    constraint_values = [
-        "@platforms//os:linux",
-        "@platforms//cpu:x86_64",
-        ":glibc_2_25",
-    ],
-)
-```
-
-Note: It is an error for a platform to specify more than one value of the
-same constraint setting, such as `@platforms//cpu:x86_64` and
-`@platforms//cpu:arm` for `@platforms//cpu:cpu`.
-
-## Generally useful constraints and platforms
-
-To keep the ecosystem consistent, the Bazel team maintains a repository with
-constraint definitions for the most popular CPU architectures and operating
-systems. These are all located in
-[https://github.com/bazelbuild/platforms](https://github.com/bazelbuild/platforms).
-
-Bazel ships with the following special platform definition:
-`@platforms//host` (aliased as `@bazel_tools//tools:host_platform`). This is
-the autodetected host platform value; it represents the platform of the
-system Bazel is running on.
-
-## Specifying a platform for a build
-
-You can specify the host and target platforms for a build using the following
-command-line flags:
-
-* `--host_platform` - defaults to `@bazel_tools//tools:host_platform`
-    * This target is aliased to `@platforms//host`, which is backed by a repo
-      rule that detects the host OS and CPU and writes the platform target.
-    * There's also `@platforms//host:constraints.bzl`, which exposes
-      an array called `HOST_CONSTRAINTS`, which can be used in other BUILD
-      and Starlark files.
-* `--platforms` - defaults to the host platform
-    * This means that when no other flags are set,
      `@platforms//host` is the target platform.
-    * If `--host_platform` is set and not `--platforms`, the value of
-      `--host_platform` is both the host and target platform.
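-
-For example, to target the `linux_x86` platform defined above (an
-illustrative invocation; the binary target is hypothetical):
-
-```console
-$ bazel build //:my_app --platforms=//:linux_x86
-```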
-
-## Skipping incompatible targets
-
-When building for a specific target platform it is often desirable to skip
-targets that will never work on that platform. For example, your Windows
-device driver is likely going to generate lots of compiler errors when
-building on a Linux machine with `//...`. Use the
-[`target_compatible_with`](/reference/be/common-definitions#common.target_compatible_with)
-attribute to tell Bazel what target platform constraints your code has.
-
-The simplest use of this attribute restricts a target to a single platform.
-The target will not be built for any platform that doesn't satisfy all of the
-constraints. The following example restricts `win_driver_lib.cc` to 64-bit
-Windows.
-
-```python
-cc_library(
-    name = "win_driver_lib",
-    srcs = ["win_driver_lib.cc"],
-    target_compatible_with = [
-        "@platforms//cpu:x86_64",
-        "@platforms//os:windows",
-    ],
-)
-```
-
-`:win_driver_lib` is *only* compatible with building for 64-bit Windows and
-incompatible with everything else. Incompatibility is transitive. Any targets
-that transitively depend on an incompatible target are themselves considered
-incompatible.
-
-### When are targets skipped?
-
-Targets are skipped when they are considered incompatible and included in the
-build as part of a target pattern expansion. For example, the following two
-invocations skip any incompatible targets found in a target pattern
-expansion.
-
-```console
-$ bazel build --platforms=//:myplatform //...
-```
-
-```console
-$ bazel build --platforms=//:myplatform //:all
-```
-
-Incompatible tests in a [`test_suite`](/reference/be/general#test_suite) are
-similarly skipped if the `test_suite` is specified on the command line with
-[`--expand_test_suites`](/reference/command-line-reference#flag--expand_test_suites).
-In other words, `test_suite` targets on the command line behave like `:all`
-and `...`. Using `--noexpand_test_suites` prevents expansion and causes
-`test_suite` targets with incompatible tests to also be incompatible.
-
-Explicitly specifying an incompatible target on the command line results in
-an error message and a failed build.
-
-```console
-$ bazel build --platforms=//:myplatform //:target_incompatible_with_myplatform
-...
-ERROR: Target //:target_incompatible_with_myplatform is incompatible and cannot be built, but was explicitly requested.
-...
-FAILED: Build did NOT complete successfully
-```
-
-Incompatible explicit targets are silently skipped if
-`--skip_incompatible_explicit_targets` is enabled.
-
-### More expressive constraints
-
-For more flexibility in expressing constraints, use the
-`@platforms//:incompatible`
-[`constraint_value`](/reference/be/platforms-and-toolchains#constraint_value)
-that no platform satisfies.
-
-Use [`select()`](/reference/be/functions#select) in combination with
-`@platforms//:incompatible` to express more complicated restrictions. For
-example, use it to implement basic OR logic. The following marks a library
-compatible with macOS and Linux, but no other platforms.
-
-Note: An empty constraints list is equivalent to "compatible with
-everything".
-
-```python
-cc_library(
-    name = "unixish_lib",
-    srcs = ["unixish_lib.cc"],
-    target_compatible_with = select({
-        "@platforms//os:osx": [],
-        "@platforms//os:linux": [],
-        "//conditions:default": ["@platforms//:incompatible"],
-    }),
-)
-```
-
-The above can be interpreted as follows:
-
-1. When targeting macOS, the target has no constraints.
-2. When targeting Linux, the target has no constraints.
-3. Otherwise, the target has the `@platforms//:incompatible` constraint.
-   Because `@platforms//:incompatible` is not part of any platform, the
-   target is deemed incompatible.
-
-To make your constraints more readable, use
-[skylib](https://github.com/bazelbuild/bazel-skylib)'s
-[`selects.with_or()`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectswith_or).
-
-You can express inverse compatibility in a similar way. The following example
-describes a library that is compatible with everything _except_ for ARM.
-
-```python
-cc_library(
-    name = "non_arm_lib",
-    srcs = ["non_arm_lib.cc"],
-    target_compatible_with = select({
-        "@platforms//cpu:arm": ["@platforms//:incompatible"],
-        "//conditions:default": [],
-    }),
-)
-```
-
-### Detecting incompatible targets using `bazel cquery`
-
-You can use the
-[`IncompatiblePlatformProvider`](/rules/lib/providers/IncompatiblePlatformProvider)
-in `bazel cquery`'s [Starlark output
-format](/query/cquery#output-format-definition) to distinguish
-incompatible targets from compatible ones.
-
-This can be used to filter out incompatible targets. The example below will
-only print the labels for targets that are compatible. Incompatible targets
-are not printed.
-
-```console
-$ cat example.cquery
-
-def format(target):
-  if "IncompatiblePlatformProvider" not in providers(target):
-    return target.label
-  return ""
-
-
-$ bazel cquery //... --output=starlark --starlark:file=example.cquery
-```
-
-### Known Issues
-
-Incompatible targets [ignore visibility
-restrictions](https://github.com/bazelbuild/bazel/issues/16044).
diff --git a/8.3.1/extending/repo.mdx b/8.3.1/extending/repo.mdx
deleted file mode 100644
index b878f03..0000000
--- a/8.3.1/extending/repo.mdx
+++ /dev/null
@@ -1,161 +0,0 @@
----
-title: 'Repository Rules'
----
-
-
-
-This page covers how to define repository rules and provides examples that
-show the details.
-
-An [external repository](/external/overview#repository) is a directory tree,
-containing source files usable in a Bazel build, which is generated on demand
-by running its corresponding **repo rule**. Repos can be defined in a
-multitude of ways, but ultimately, each repo is defined by invoking a repo
-rule, just as build targets are defined by invoking build rules. They can be
-used to depend on third-party libraries (such as Maven packaged libraries)
-but also to generate `BUILD` files specific to the host Bazel is running on.
-
-## Repository rule definition
-
-In a `.bzl` file, use the
-[repository_rule](/rules/lib/globals/bzl#repository_rule) function to define
-a new repo rule and store it in a global variable. After a repo rule is
-defined, it can be invoked as a function to define repos. This invocation is
-usually performed from inside a [module extension](/external/extension)
-implementation function.
-
-The two major components of a repo rule definition are its attribute schema
-and implementation function. The attribute schema determines the names and
-types of attributes passed to a repo rule invocation, and the implementation
-function is run when the repo needs to be fetched.
-
-## Attributes
-
-Attributes are arguments passed to the repo rule invocation. The schema of
-attributes accepted by a repo rule is specified using the `attrs` argument
-when the repo rule is defined with a call to `repository_rule`. An example
-defining `url` and `sha256` attributes as strings:
-
-```python
-http_archive = repository_rule(
-    implementation = _impl,
-    attrs = {
-        "url": attr.string(mandatory = True),
-        "sha256": attr.string(mandatory = True),
-    }
-)
-```
-
-To access an attribute within the implementation function, use
-`repository_ctx.attr.<attribute_name>`:
-
-```python
-def _impl(repository_ctx):
-    url = repository_ctx.attr.url
-    checksum = repository_ctx.attr.sha256
-```
-
-All `repository_rule`s have the implicitly defined attribute `name`.
This is a -string attribute that behaves somewhat magically: when specified as an input to -a repo rule invocation, it takes an apparent repo name; but when read from the -repo rule's implementation function using `repository_ctx.attr.name`, it returns -the canonical repo name. - -## Implementation function - -Every repo rule requires an `implementation` function. It contains the actual -logic of the rule and is executed strictly in the loading phase. - -The function has exactly one input parameter, `repository_ctx`. The function -returns either `None` to signify that the rule is reproducible given the -specified parameters, or a dict of parameters for that rule that -would turn it into a reproducible rule generating the same repo. For -example, for a rule tracking a git repository that would mean returning a -specific commit identifier instead of a floating branch that was originally -specified. - -The input parameter `repository_ctx` can be used to access attribute values and -to call non-hermetic functions (finding a binary, executing a binary, creating a -file in the repository, or downloading a file from the Internet). See [the API -docs](/rules/lib/builtins/repository_ctx) for more context. Example: - -```python -def _impl(repository_ctx): - repository_ctx.symlink(repository_ctx.attr.path, "") - -local_repository = repository_rule( - implementation=_impl, - ...) -``` - -## When is the implementation function executed? - -The implementation function of a repo rule is executed when Bazel needs a target -from that repository, for example when another target (in another repo) depends -on it or if it is mentioned on the command line. The implementation function is -then expected to create the repo in the file system. This is called "fetching" -the repo. - -In contrast to regular targets, repos are not necessarily re-fetched when -something changes that would cause the repo to be different. This is because -there are things that Bazel either cannot detect changes to or it would cause -too much overhead on every build (for example, things that are fetched from the -network). Therefore, repos are re-fetched only if one of the following things -changes: - -* The attributes passed to the repo rule invocation. -* The Starlark code comprising the implementation of the repo rule. -* The value of any environment variable passed to `repository_ctx`'s - `getenv()` method or declared with the `environ` attribute of the - [`repository_rule`](/rules/lib/globals/bzl#repository_rule). The values of - these environment variables can be hard-wired on the command line with the - [`--repo_env`](/reference/command-line-reference#flag--repo_env) flag. -* The existence, contents, and type of any paths being - [`watch`ed](/rules/lib/builtins/repository_ctx#watch) in the implementation - function of the repo rule. - * Certain other methods of `repository_ctx` with a `watch` parameter, such - as `read()`, `execute()`, and `extract()`, can also cause paths to be - watched. - * Similarly, [`repository_ctx.watch_tree`](/rules/lib/builtins/repository_ctx#watch_tree) - and [`path.readdir`](/rules/lib/builtins/path#readdir) can cause paths - to be watched in other ways. -* When `bazel fetch --force` is executed. - -There are two parameters of `repository_rule` that control when the repositories -are re-fetched: - -* If the `configure` flag is set, the repository is re-fetched on `bazel - fetch --force --configure` (non-`configure` repositories are not - re-fetched).
- -* If the `local` flag is set, in addition to the above cases, the repo is also - re-fetched when the Bazel server restarts. - -## Forcing refetch of external repos - -Sometimes, an external repo can become outdated without any change to its -definition or dependencies. For example, a repo fetching sources might follow a -particular branch of a third-party repository, and new commits are available on -that branch. In this case, you can ask Bazel to refetch all external repos -unconditionally by calling `bazel fetch --force --all`. - -Moreover, some repo rules inspect the local machine and might become outdated if -the local machine was upgraded. To refetch only those external repos whose -[`repository_rule`](/rules/lib/globals#repository_rule) definition has the -`configure` attribute set, use `bazel fetch --force ---configure`. - -## Examples - -- [C++ auto-configured - toolchain](https://cs.opensource.google/bazel/bazel/+/master:tools/cpp/cc_configure.bzl;drc=644b7d41748e09eff9e47cbab2be2263bb71f29a;l=176): - it uses a repo rule to automatically create the C++ configuration files for - Bazel by looking for the local C++ compiler, the environment and the flags - the C++ compiler supports. - -- [Go repositories](https://github.com/bazelbuild/rules_go/blob/67bc217b6210a0922d76d252472b87e9a6118fdf/go/private/go_repositories.bzl#L195) - uses several `repository_rule`s to define the list of dependencies needed to - use the Go rules. - -- [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external) - creates an external repository called `@maven` by default that generates - build targets for every Maven artifact in the transitive dependency tree. diff --git a/8.3.1/extending/rules.mdx b/8.3.1/extending/rules.mdx deleted file mode 100644 index 609d719..0000000 --- a/8.3.1/extending/rules.mdx +++ /dev/null @@ -1,1281 +0,0 @@ ---- -title: 'Rules' ---- - - - -A **rule** defines a series of [**actions**](#actions) that Bazel performs on -inputs to produce a set of outputs, which are referenced in -[**providers**](#providers) returned by the rule's -[**implementation function**](#implementation_function). For example, a C++ -binary rule might: - -1. Take a set of `.cpp` source files (inputs). -2. Run `g++` on the source files (action). -3. Return the `DefaultInfo` provider with the executable output and other files - to make available at runtime. -4. Return the `CcInfo` provider with C++-specific information gathered from the - target and its dependencies. - -From Bazel's perspective, `g++` and the standard C++ libraries are also inputs -to this rule. As a rule writer, you must consider not only the user-provided -inputs to a rule, but also all of the tools and libraries required to execute -the actions. - -Before creating or modifying any rule, ensure you are familiar with Bazel's -[build phases](/extending/concepts). It is important to understand the three -phases of a build (loading, analysis, and execution). It is also useful to -learn about [macros](/extending/macros) to understand the difference between rules and -macros. To get started, first review the [Rules Tutorial](/rules/rules-tutorial). -Then, use this page as a reference. - -A few rules are built into Bazel itself. These *native rules*, such as -`genrule` and `filegroup`, provide some core support. -By defining your own rules, you can add support for languages and tools -that Bazel doesn't support natively.
- -Bazel provides an extensibility model for writing rules using the -[Starlark](/rules/language) language. These rules are written in `.bzl` files, which -can be loaded directly from `BUILD` files. - -When defining your own rule, you get to decide what attributes it supports and -how it generates its outputs. - -The rule's `implementation` function defines its exact behavior during the -[analysis phase](/extending/concepts#evaluation-model). This function doesn't run any -external commands. Rather, it registers [actions](#actions) that will be used -later during the execution phase to build the rule's outputs, if they are -needed. - -## Rule creation - -In a `.bzl` file, use the [rule](/rules/lib/globals/bzl#rule) function to define a new -rule, and store the result in a global variable. The call to `rule` specifies -[attributes](#attributes) and an -[implementation function](#implementation_function): - -```python -example_library = rule( - implementation = _example_library_impl, - attrs = { - "deps": attr.label_list(), - ... - }, -) -``` - -This defines a [rule kind](/query/language#kind) named `example_library`. - -The call to `rule` also must specify if the rule creates an -[executable](#executable-rules) output (with `executable = True`), or specifically -a test executable (with `test = True`). If the latter, the rule is a *test rule*, -and the name of the rule must end in `_test`. - -## Target instantiation - -Rules can be [loaded](/concepts/build-files#load) and called in `BUILD` files: - -```python -load('//some/pkg:rules.bzl', 'example_library') - -example_library( - name = "example_target", - deps = [":another_target"], - ... -) -``` - -Each call to a build rule returns no value, but has the side effect of defining -a target. This is called *instantiating* the rule. This specifies a name for the -new target and values for the target's [attributes](#attributes). - -Rules can also be called from Starlark functions and loaded in `.bzl` files. -Starlark functions that call rules are called [Starlark macros](/extending/macros). -Starlark macros must ultimately be called from `BUILD` files, and can only be -called during the [loading phase](/extending/concepts#evaluation-model), when `BUILD` -files are evaluated to instantiate targets. - -## Attributes - -An *attribute* is a rule argument. Attributes can provide specific values to a -target's [implementation](#implementation_function), or they can refer to other -targets, creating a graph of dependencies. - -Rule-specific attributes, such as `srcs` or `deps`, are defined by passing a map -from attribute names to schemas (created using the [`attr`](/rules/lib/toplevel/attr) -module) to the `attrs` parameter of `rule`. -[Common attributes](/reference/be/common-definitions#common-attributes), such as -`name` and `visibility`, are implicitly added to all rules. Additional -attributes are implicitly added to -[executable and test rules](#executable-rules) specifically. Attributes which -are implicitly added to a rule can't be included in the dictionary passed to -`attrs`. - -### Dependency attributes - -Rules that process source code usually define the following attributes to handle -various [types of dependencies](/concepts/dependencies#types_of_dependencies): - -* `srcs` specifies source files processed by a target's actions. Often, the - attribute schema specifies which file extensions are expected for the sort - of source file the rule processes. 
Rules for languages with header files - generally specify a separate `hdrs` attribute for headers processed by a - target and its consumers. -* `deps` specifies code dependencies for a target. The attribute schema should - specify which [providers](#providers) those dependencies must provide. (For - example, `cc_library` provides `CcInfo`.) -* `data` specifies files to be made available at runtime to any executable - which depends on a target. Its schema should allow arbitrary files to be - specified. - -```python -example_library = rule( - implementation = _example_library_impl, - attrs = { - "srcs": attr.label_list(allow_files = [".example"]), - "hdrs": attr.label_list(allow_files = [".header"]), - "deps": attr.label_list(providers = [ExampleInfo]), - "data": attr.label_list(allow_files = True), - ... - }, -) -``` - -These are examples of *dependency attributes*. Any attribute that specifies -an input label (those defined with -[`attr.label_list`](/rules/lib/toplevel/attr#label_list), -[`attr.label`](/rules/lib/toplevel/attr#label), or -[`attr.label_keyed_string_dict`](/rules/lib/toplevel/attr#label_keyed_string_dict)) -specifies dependencies of a certain type -between a target and the targets whose labels (or the corresponding -[`Label`](/rules/lib/builtins/Label) objects) are listed in that attribute when the target -is defined. The repository, and possibly the path, for these labels is resolved -relative to the defined target. - -```python -example_library( - name = "my_target", - deps = [":other_target"], -) - -example_library( - name = "other_target", - ... -) -``` - -In this example, `other_target` is a dependency of `my_target`, and therefore -`other_target` is analyzed first. It is an error if there is a cycle in the -dependency graph of targets. - - - -### Private attributes and implicit dependencies - -A dependency attribute with a default value creates an *implicit dependency*. It -is implicit because it's a part of the target graph that the user doesn't -specify in a `BUILD` file. Implicit dependencies are useful for hard-coding a -relationship between a rule and a *tool* (a build-time dependency, such as a -compiler), since most of the time a user is not interested in specifying what -tool the rule uses. Inside the rule's implementation function, this is treated -the same as other dependencies. - -If you want to provide an implicit dependency without allowing the user to -override that value, you can make the attribute *private* by giving it a name -that begins with an underscore (`_`). Private attributes must have default -values. It generally only makes sense to use private attributes for implicit -dependencies. - -```python -example_library = rule( - implementation = _example_library_impl, - attrs = { - ... - "_compiler": attr.label( - default = Label("//tools:example_compiler"), - allow_single_file = True, - executable = True, - cfg = "exec", - ), - }, -) -``` - -In this example, every target of type `example_library` has an implicit -dependency on the compiler `//tools:example_compiler`. This allows -`example_library`'s implementation function to generate actions that invoke the -compiler, even though the user did not pass its label as an input. Since -`_compiler` is a private attribute, it follows that `ctx.attr._compiler` -will always point to `//tools:example_compiler` in all targets of this rule -type. Alternatively, you can name the attribute `compiler` without the -underscore and keep the default value.
This allows users to substitute a -different compiler if necessary, while requiring no awareness of the compiler's -label from users who are happy with the default. - -Implicit dependencies are generally used for tools that reside in the same -repository as the rule implementation. If the tool comes from the -[execution platform](/extending/platforms) or a different repository instead, the -rule should obtain that tool from a [toolchain](/extending/toolchains). - -### Output attributes - -*Output attributes*, such as [`attr.output`](/rules/lib/toplevel/attr#output) and -[`attr.output_list`](/rules/lib/toplevel/attr#output_list), declare an output file that the -target generates. These differ from dependency attributes in two ways: - -* They define output file targets instead of referring to targets defined - elsewhere. -* The output file targets depend on the instantiated rule target, instead of - the other way around. - -Typically, output attributes are only used when a rule needs to create outputs -with user-defined names which can't be based on the target name. If a rule has -one output attribute, it is typically named `out` or `outs`. - -Output attributes are the preferred way of creating *predeclared outputs*, which -can be specifically depended upon or -[requested at the command line](#requesting_output_files). - -## Implementation function - -Every rule requires an `implementation` function. These functions are executed -strictly in the [analysis phase](/extending/concepts#evaluation-model) and transform the -graph of targets generated in the loading phase into a graph of -[actions](#actions) to be performed during the execution phase. As such, -implementation functions can't actually read or write files. - -Rule implementation functions are usually private (named with a leading -underscore). Conventionally, they are named the same as their rule, but suffixed -with `_impl`. - -Implementation functions take exactly one parameter: a -[rule context](/rules/lib/builtins/ctx), conventionally named `ctx`. They return a list of -[providers](#providers). - -### Targets - -Dependencies are represented at analysis time as [`Target`](/rules/lib/builtins/Target) -objects. These objects contain the [providers](#providers) generated when the -target's implementation function was executed. - -[`ctx.attr`](/rules/lib/builtins/ctx#attr) has fields corresponding to the names of each -dependency attribute, containing `Target` objects representing each direct -dependency using that attribute. For `label_list` attributes, this is a list of -`Targets`. For `label` attributes, this is a single `Target` or `None`. - -A list of provider objects is returned by a target's implementation function: - -```python -return [ExampleInfo(headers = depset(...))] -``` - -Those can be accessed using index notation (`[]`), with the type of provider as -a key. These can be [custom providers](#custom_providers) defined in Starlark or -[providers for native rules](/rules/lib/providers) available as Starlark -global variables. - -For example, if a rule takes header files using a `hdrs` attribute and provides -them to the compilation actions of the target and its consumers, it could -collect them like so: - -```python -def _example_library_impl(ctx): - ... - transitive_headers = [hdr[ExampleInfo].headers for hdr in ctx.attr.hdrs] -``` - -There's a legacy struct style, which is strongly discouraged; rules should be -[migrated away from it](#migrating_from_legacy_providers). - -### Files - -Files are represented by [`File`](/rules/lib/builtins/File) objects.
Since Bazel doesn't -perform file I/O during the analysis phase, these objects can't be used to -directly read or write file content. Rather, they are passed to action-emitting -functions (see [`ctx.actions`](/rules/lib/builtins/actions)) to construct pieces of the -action graph. - -A `File` can either be a source file or a generated file. Each generated file -must be an output of exactly one action. Source files can't be the output of -any action. - -For each dependency attribute, the corresponding field of -[`ctx.files`](/rules/lib/builtins/ctx#files) contains a list of the default outputs of all -dependencies using that attribute: - -```python -def _example_library_impl(ctx): - ... - headers = depset(ctx.files.hdrs, transitive = transitive_headers) - srcs = ctx.files.srcs - ... -``` - -[`ctx.file`](/rules/lib/builtins/ctx#file) contains a single `File` or `None` for -dependency attributes whose specs set `allow_single_file = True`. -[`ctx.executable`](/rules/lib/builtins/ctx#executable) behaves the same as `ctx.file`, but only -contains fields for dependency attributes whose specs set `executable = True`. - -### Declaring outputs - -During the analysis phase, a rule's implementation function can create outputs. -Since all labels have to be known during the loading phase, these additional -outputs have no labels. `File` objects for outputs can be created using -[`ctx.actions.declare_file`](/rules/lib/builtins/actions#declare_file) and -[`ctx.actions.declare_directory`](/rules/lib/builtins/actions#declare_directory). -Often, the names of outputs are based on the target's name, -[`ctx.label.name`](/rules/lib/builtins/ctx#label): - -```python -def _example_library_impl(ctx): - ... - output_file = ctx.actions.declare_file(ctx.label.name + ".output") - ... -``` - -For *predeclared outputs*, like those created for -[output attributes](#output_attributes), `File` objects instead can be retrieved -from the corresponding fields of [`ctx.outputs`](/rules/lib/builtins/ctx#outputs). - -### Actions - -An action describes how to generate a set of outputs from a set of inputs, for -example "run gcc on hello.c and get hello.o". When an action is created, Bazel -doesn't run the command immediately. It registers it in a graph of dependencies, -because an action can depend on the output of another action. For example, in C, -the linker must be called after the compiler. - -General-purpose functions that create actions are defined in -[`ctx.actions`](/rules/lib/builtins/actions): - -* [`ctx.actions.run`](/rules/lib/builtins/actions#run), to run an executable. -* [`ctx.actions.run_shell`](/rules/lib/builtins/actions#run_shell), to run a shell - command. -* [`ctx.actions.write`](/rules/lib/builtins/actions#write), to write a string to a file. -* [`ctx.actions.expand_template`](/rules/lib/builtins/actions#expand_template), to - generate a file from a template. - -[`ctx.actions.args`](/rules/lib/builtins/actions#args) can be used to efficiently -accumulate the arguments for actions. It avoids flattening depsets until -execution time: - -```python -def _example_library_impl(ctx): - ... 
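-    # Gather inputs from deps and srcs, then build the command line lazily with Args.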
- - transitive_headers = [dep[ExampleInfo].headers for dep in ctx.attr.deps] - headers = depset(ctx.files.hdrs, transitive = transitive_headers) - srcs = ctx.files.srcs - inputs = depset(srcs, transitive = [headers]) - output_file = ctx.actions.declare_file(ctx.label.name + ".output") - - args = ctx.actions.args() - args.add_joined("-h", headers, join_with = ",") - args.add_joined("-s", srcs, join_with = ",") - args.add("-o", output_file) - - ctx.actions.run( - mnemonic = "ExampleCompile", - executable = ctx.executable._compiler, - arguments = [args], - inputs = inputs, - outputs = [output_file], - ) - ... -``` - -Actions take a list or depset of input files and generate a (non-empty) list of -output files. The set of input and output files must be known during the -[analysis phase](/extending/concepts#evaluation-model). It might depend on the value of -attributes, including providers from dependencies, but it can't depend on the -result of the execution. For example, if your action runs the unzip command, you -must specify which files you expect to be inflated (before running unzip). -Actions which create a variable number of files internally can wrap those in a -single file (such as a zip, tar, or other archive format). - -Actions must list all of their inputs. Listing inputs that are not used is -permitted, but inefficient. - -Actions must create all of their outputs. They may write other files, but -anything not in outputs won't be available to consumers. All declared outputs -must be written by some action. - -Actions are comparable to pure functions: They should depend only on the -provided inputs, and avoid accessing computer information, username, clock, -network, or I/O devices (except for reading inputs and writing outputs). This is -important because the output will be cached and reused. - -Dependencies are resolved by Bazel, which decides which actions to -execute. It is an error if there is a cycle in the dependency graph. Creating -an action doesn't guarantee that it will be executed; that depends on whether -its outputs are needed for the build. - -### Providers - -Providers are pieces of information that a rule exposes to other rules that -depend on it. This data can include output files, libraries, parameters to pass -on a tool's command line, or anything else a target's consumers should know -about. - -Since a rule's implementation function can only read providers from the -instantiated target's immediate dependencies, rules need to forward any -information from a target's dependencies that needs to be known by a target's -consumers, generally by accumulating that into a [`depset`](/rules/lib/builtins/depset). - -A target's providers are specified by a list of provider objects returned by -the implementation function. - -Old implementation functions can also be written in a legacy style where the -implementation function returns a [`struct`](/rules/lib/builtins/struct) instead of a list of -provider objects. This style is strongly discouraged and rules should be -[migrated away from it](#migrating_from_legacy_providers). - -#### Default outputs - -A target's *default outputs* are the outputs that are requested by default when -the target is requested for build at the command line. For example, a -`java_library` target `//pkg:foo` has `foo.jar` as a default output, so that -will be built by the command `bazel build //pkg:foo`.
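-
-For example, requesting just that target on the command line might look like
-this (an illustrative sketch; the exact output and paths depend on the
-configuration):
-
-```console
-$ bazel build //pkg:foo
-...
-Target //pkg:foo up-to-date:
-  bazel-bin/pkg/foo.jar
-```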
- -Default outputs are specified by the `files` parameter of -[`DefaultInfo`](/rules/lib/providers/DefaultInfo): - -```python -def _example_library_impl(ctx): - ... - return [ - DefaultInfo(files = depset([output_file]), ...), - ... - ] -``` - -If `DefaultInfo` is not returned by a rule implementation or the `files` -parameter is not specified, `DefaultInfo.files` defaults to all -*predeclared outputs* (generally, those created by [output -attributes](#output_attributes)). - -Rules that perform actions should provide default outputs, even if those outputs -are not expected to be directly used. Actions that are not in the graph of the -requested outputs are pruned. If an output is only used by a target's consumers, -those actions won't be performed when the target is built in isolation. This -makes debugging more difficult because rebuilding just the failing target won't -reproduce the failure. - -#### Runfiles - -Runfiles are a set of files used by a target at runtime (as opposed to build -time). During the [execution phase](/extending/concepts#evaluation-model), Bazel creates -a directory tree containing symlinks pointing to the runfiles. This stages the -environment for the binary so it can access the runfiles at runtime. - -Runfiles can be added manually during rule creation. -[`runfiles`](/rules/lib/builtins/runfiles) objects can be created by the `runfiles` method -on the rule context, [`ctx.runfiles`](/rules/lib/builtins/ctx#runfiles), and passed to the -`runfiles` parameter on `DefaultInfo`. The executable output of -[executable rules](#executable-rules) is implicitly added to the runfiles. - -Some rules specify attributes, generally named -[`data`](/reference/be/common-definitions#common.data), whose outputs are added to -a target's runfiles. Runfiles should also be merged in from `data`, as well as -from any attributes which might provide code for eventual execution, generally -`srcs` (which might contain `filegroup` targets with associated `data`) and -`deps`. - -```python -def _example_library_impl(ctx): - ... - runfiles = ctx.runfiles(files = ctx.files.data) - transitive_runfiles = [] - for runfiles_attr in ( - ctx.attr.srcs, - ctx.attr.hdrs, - ctx.attr.deps, - ctx.attr.data, - ): - for target in runfiles_attr: - transitive_runfiles.append(target[DefaultInfo].default_runfiles) - runfiles = runfiles.merge_all(transitive_runfiles) - return [ - DefaultInfo(..., runfiles = runfiles), - ... - ] -``` - -#### Custom providers - -Providers can be defined using the [`provider`](/rules/lib/globals/bzl#provider) -function to convey rule-specific information: - -```python -ExampleInfo = provider( - "Info needed to compile/link Example code.", - fields = { - "headers": "depset of header Files from transitive dependencies.", - "files_to_link": "depset of Files from compilation.", - }, -) -``` - -Rule implementation functions can then construct and return provider instances: - -```python -def _example_library_impl(ctx): - ... - return [ - ... - ExampleInfo( - headers = headers, - files_to_link = depset( - [output_file], - transitive = [ - dep[ExampleInfo].files_to_link for dep in ctx.attr.deps - ], - ), - ) - ] -``` - -##### Custom initialization of providers - -It's possible to guard the instantiation of a provider with custom -preprocessing and validation logic. This can be used to ensure that all -provider instances satisfy certain invariants, or to give users a cleaner API for -obtaining an instance.
- -This is done by passing an `init` callback to the -[`provider`](/rules/lib/globals/bzl.html#provider) function. If this callback is given, the -return type of `provider()` changes to be a tuple of two values: the provider -symbol that is the ordinary return value when `init` is not used, and a "raw -constructor". - -In this case, when the provider symbol is called, instead of directly returning -a new instance, it will forward the arguments along to the `init` callback. The -callback's return value must be a dict mapping field names (strings) to values; -this is used to initialize the fields of the new instance. Note that the -callback may have any signature, and if the arguments don't match the signature -an error is reported as if the callback were invoked directly. - -The raw constructor, by contrast, will bypass the `init` callback. - -The following example uses `init` to preprocess and validate its arguments: - -```python -# //pkg:exampleinfo.bzl - -_core_headers = [...] # private constant representing standard library files - -# Keyword-only arguments are preferred. -def _exampleinfo_init(*, files_to_link, headers = None, allow_empty_files_to_link = False): - if not files_to_link and not allow_empty_files_to_link: - fail("files_to_link may not be empty") - all_headers = depset(_core_headers, transitive = headers) - return {"files_to_link": files_to_link, "headers": all_headers} - -ExampleInfo, _new_exampleinfo = provider( - fields = ["files_to_link", "headers"], - init = _exampleinfo_init, -) -``` - -A rule implementation may then instantiate the provider as follows: - -```python -ExampleInfo( - files_to_link = my_files_to_link, # may not be empty - headers = my_headers, # will automatically include the core headers -) -``` - -The raw constructor can be used to define alternative public factory functions -that don't go through the `init` logic. For example, `exampleinfo.bzl` -could define: - -```python -def make_barebones_exampleinfo(headers): - """Returns an ExampleInfo with no files_to_link and only the specified headers.""" - return _new_exampleinfo(files_to_link = depset(), headers = headers) -``` - -Typically, the raw constructor is bound to a variable whose name begins with an -underscore (`_new_exampleinfo` above), so that user code can't load it and -generate arbitrary provider instances. - -Another use for `init` is to prevent the user from calling the provider -symbol altogether, and force them to use a factory function instead: - -```python -def _exampleinfo_init_banned(*args, **kwargs): - fail("Do not call ExampleInfo(). Use make_exampleinfo() instead.") - -ExampleInfo, _new_exampleinfo = provider( - ... - init = _exampleinfo_init_banned) - -def make_exampleinfo(...): - ... - return _new_exampleinfo(...) -``` - - - -## Executable rules and test rules - -Executable rules define targets that can be invoked by a `bazel run` command. -Test rules are a special kind of executable rule whose targets can also be -invoked by a `bazel test` command. Executable and test rules are created by -setting the respective [`executable`](/rules/lib/globals/bzl#rule.executable) or -[`test`](/rules/lib/globals/bzl#rule.test) argument to `True` in the call to `rule`: - -```python -example_binary = rule( - implementation = _example_binary_impl, - executable = True, - ... -) - -example_test = rule( - implementation = _example_binary_impl, - test = True, - ... -) -``` - -Test rules must have names that end in `_test`.
(Test *target* names also often -end in `_test` by convention, but this is not required.) Non-test rules must not -have this suffix. - -Both kinds of rules must produce an executable output file (which may or may not -be predeclared) that will be invoked by the `run` or `test` commands. To tell -Bazel which of a rule's outputs to use as this executable, pass it as the -`executable` argument of a returned [`DefaultInfo`](/rules/lib/providers/DefaultInfo) -provider. That `executable` is added to the default outputs of the rule (so you -don't need to pass that to both `executable` and `files`). It's also implicitly -added to the [runfiles](#runfiles): - -```python -def _example_binary_impl(ctx): - executable = ctx.actions.declare_file(ctx.label.name) - ... - return [ - DefaultInfo(executable = executable, ...), - ... - ] -``` - -The action that generates this file must set the executable bit on the file. For -a [`ctx.actions.run`](/rules/lib/builtins/actions#run) or -[`ctx.actions.run_shell`](/rules/lib/builtins/actions#run_shell) action this should be done -by the underlying tool that is invoked by the action. For a -[`ctx.actions.write`](/rules/lib/builtins/actions#write) action, pass `is_executable = True`. - -As [legacy behavior](#deprecated_predeclared_outputs), executable rules have a -special `ctx.outputs.executable` predeclared output. This file serves as the -default executable if you don't specify one using `DefaultInfo`; it must not be -used otherwise. This output mechanism is deprecated because it doesn't support -customizing the executable file's name at analysis time. - -See examples of an -[executable rule](https://github.com/bazelbuild/examples/blob/main/rules/executable/fortune.bzl) -and a -[test rule](https://github.com/bazelbuild/examples/blob/main/rules/test_rule/line_length.bzl). - -[Executable rules](/reference/be/common-definitions#common-attributes-binaries) and -[test rules](/reference/be/common-definitions#common-attributes-tests) have additional -attributes implicitly defined, in addition to those added for -[all rules](/reference/be/common-definitions#common-attributes). The defaults of -implicitly-added attributes can't be changed, though this can be worked around -by wrapping a private rule in a [Starlark macro](/extending/macros) which alters the -default: - -```python -def example_test(size = "small", **kwargs): - _example_test(size = size, **kwargs) - -_example_test = rule( - ... -) -``` - -### Runfiles location - -When an executable target is run with `bazel run` (or `test`), the root of the -runfiles directory is adjacent to the executable. The paths relate as follows: - -```python -# Given launcher_path and runfile_file: -runfiles_root = launcher_path.path + ".runfiles" -workspace_name = ctx.workspace_name -runfile_path = runfile_file.short_path -execution_root_relative_path = "%s/%s/%s" % ( - runfiles_root, workspace_name, runfile_path) -``` - -The path to a `File` under the runfiles directory corresponds to -[`File.short_path`](/rules/lib/builtins/File#short_path). - -The binary executed directly by `bazel` is adjacent to the root of the -`runfiles` directory. However, binaries called *from* the runfiles can't make -the same assumption. To mitigate this, each binary should provide a way to -accept its runfiles root as a parameter using an environment variable, command -line argument, or flag. This allows a binary to pass the correct canonical -runfiles root to the binaries it calls.
If that's not set, a binary can guess that it was the -first binary called and look for an adjacent runfiles directory. - -## Advanced topics - -### Requesting output files - -A single target can have several output files. When a `bazel build` command is -run, some of the outputs of the targets given to the command are considered to -be *requested*. Bazel only builds these requested files and the files that they -directly or indirectly depend on. (In terms of the action graph, Bazel only -executes the actions that are reachable as transitive dependencies of the -requested files.) - -In addition to [default outputs](#default_outputs), any *predeclared output* can -be explicitly requested on the command line. Rules can specify predeclared -outputs using [output attributes](#output_attributes). In that case, the user -explicitly chooses labels for outputs when they instantiate the rule. To obtain -[`File`](/rules/lib/builtins/File) objects for output attributes, use the corresponding -attribute of [`ctx.outputs`](/rules/lib/builtins/ctx#outputs). Rules can -[implicitly define predeclared outputs](#deprecated_predeclared_outputs) based -on the target name as well, but this feature is deprecated. - -In addition to default outputs, there are *output groups*, which are collections -of output files that may be requested together. These can be requested with -[`--output_groups`](/reference/command-line-reference#flag--output_groups). For -example, if a target `//pkg:mytarget` is of a rule type that has a `debug_files` -output group, these files can be built by running `bazel build //pkg:mytarget ---output_groups=debug_files`. Since non-predeclared outputs don't have labels, -they can only be requested by appearing in the default outputs or an output -group. - -Output groups can be specified with the -[`OutputGroupInfo`](/rules/lib/providers/OutputGroupInfo) provider. Note that unlike many -built-in providers, `OutputGroupInfo` can take parameters with arbitrary names -to define output groups with those names: - -```python -def _example_library_impl(ctx): - ... - debug_file = ctx.actions.declare_file(ctx.label.name + ".pdb") - ... - return [ - DefaultInfo(files = depset([output_file]), ...), - OutputGroupInfo( - debug_files = depset([debug_file]), - all_files = depset([output_file, debug_file]), - ), - ... - ] -``` - -Also unlike most providers, `OutputGroupInfo` can be returned by both an -[aspect](/extending/aspects) and the rule target to which that aspect is applied, as -long as they don't define the same output groups. In that case, the resulting -providers are merged. - -Note that `OutputGroupInfo` generally shouldn't be used to convey specific sorts -of files from a target to the actions of its consumers. Define -[rule-specific providers](#custom_providers) for that instead. - -### Configurations - -Imagine that you want to build a C++ binary for a different architecture. The -build can be complex and involve multiple steps. Some of the intermediate -binaries, like compilers and code generators, have to run on -[the execution platform](/extending/platforms#overview) (which could be your host, -or a remote executor). Some binaries, like the final output, must be built for the -target architecture. - -For this reason, Bazel has a concept of "configurations" and transitions. The -topmost targets (the ones requested on the command line) are built in the -"target" configuration, while tools that should run on the execution platform -are built in an "exec" configuration.
Rules may generate different actions based -on the configuration, for instance to change the CPU architecture that is passed -to the compiler. In some cases, the same library may be needed for different -configurations. If this happens, it will be analyzed and potentially built -multiple times. - -By default, Bazel builds a target's dependencies in the same configuration as -the target itself, in other words without transitions. When a dependency is a -tool that's needed to help build the target, the corresponding attribute should -specify a transition to an exec configuration. This causes the tool and all its -dependencies to build for the execution platform. - -For each dependency attribute, you can use `cfg` to decide if dependencies -should build in the same configuration or transition to an exec configuration. -If a dependency attribute has the flag `executable = True`, `cfg` must be set -explicitly. This is to guard against accidentally building a tool for the wrong -configuration. -[See example](https://github.com/bazelbuild/examples/blob/main/rules/actions_run/execute.bzl) - -In general, sources, dependent libraries, and executables that will be needed at -runtime can use the same configuration. - -Tools that are executed as part of the build (such as compilers or code generators) -should be built for an exec configuration. In this case, specify `cfg = "exec"` in -the attribute. - -Otherwise, executables that are used at runtime (such as part of a test) should -be built for the target configuration. In this case, specify `cfg = "target"` in -the attribute. - -`cfg = "target"` doesn't actually do anything: it's purely a convenience value to -help rule designers be explicit about their intentions. When `executable = False`, -which means `cfg` is optional, only set this when it truly helps readability. - -You can also use `cfg = my_transition` to use -[user-defined transitions](/extending/config#user-defined-transitions), which allow -rule authors a great deal of flexibility in changing configurations, with the -drawback of -[making the build graph larger and less comprehensible](/extending/config#memory-and-performance-considerations). - -**Note**: Historically, Bazel didn't have the concept of execution platforms, -and instead all build actions were considered to run on the host machine. Bazel -versions before 6.0 created a distinct "host" configuration to represent this. -If you see references to "host" in code or old documentation, that's what this -refers to. We recommend using Bazel 6.0 or newer to avoid this extra conceptual -overhead. - - - -### Configuration fragments - -Rules may access -[configuration fragments](/rules/lib/fragments) such as -`cpp` and `java`. However, all required fragments must be declared in -order to avoid access errors: - -```python -def _impl(ctx): - # Using ctx.fragments.cpp leads to an error since it was not declared. - x = ctx.fragments.java - ... - -my_rule = rule( - implementation = _impl, - fragments = ["java"], # Required fragments of the target configuration - ... -) -``` - -### Runfiles symlinks - -Normally, the relative path of a file in the runfiles tree is the same as the -relative path of that file in the source tree or generated output tree. If these -need to be different for some reason, you can specify the `root_symlinks` or -`symlinks` arguments. The `root_symlinks` argument is a dictionary mapping paths to -files, where the paths are relative to the root of the runfiles directory.
The -`symlinks` dictionary is the same, but paths are implicitly prefixed with the -name of the main workspace (*not* the name of the repository containing the -current target). - -```python - ... - runfiles = ctx.runfiles( - root_symlinks = {"some/path/here.foo": ctx.file.some_data_file2}, - symlinks = {"some/path/here.bar": ctx.file.some_data_file3}, - ) - # Creates something like: - # sometarget.runfiles/ - # some/ - # path/ - # here.foo -> some_data_file2 - # <workspace_name>/ - # some/ - # path/ - # here.bar -> some_data_file3 -``` - -If `symlinks` or `root_symlinks` is used, be careful not to map two different -files to the same path in the runfiles tree. This will cause the build to fail -with an error describing the conflict. To fix, you will need to modify your -`ctx.runfiles` arguments to remove the collision. This checking will be done for -any targets using your rule, as well as targets of any kind that depend on those -targets. This is especially risky if your tool is likely to be used transitively -by another tool; symlink names must be unique across the runfiles of a tool and -all of its dependencies. - -### Code coverage - -When the [`coverage`](/reference/command-line-reference#coverage) command is run, -the build may need to add coverage instrumentation for certain targets. The -build also gathers the list of source files that are instrumented. The subset of -targets that are considered is controlled by the flag -[`--instrumentation_filter`](/reference/command-line-reference#flag--instrumentation_filter). -Test targets are excluded, unless -[`--instrument_test_targets`](/reference/command-line-reference#flag--instrument_test_targets) -is specified. - -If a rule implementation adds coverage instrumentation at build time, it needs -to account for that in its implementation function. -[ctx.coverage_instrumented](/rules/lib/builtins/ctx#coverage_instrumented) returns -`True` in coverage mode if a target's sources should be instrumented: - -```python -# Are this rule's sources instrumented? -if ctx.coverage_instrumented(): - # Do something to turn on coverage for this compile action -``` - -Logic that always needs to be on in coverage mode (whether a target's sources -specifically are instrumented or not) can be conditioned on -[ctx.configuration.coverage_enabled](/rules/lib/builtins/configuration#coverage_enabled). - -If the rule directly includes sources from its dependencies before compilation -(such as header files), it may also need to turn on compile-time instrumentation if -the dependencies' sources should be instrumented: - -```python -# Are this rule's sources or any of the sources for its direct dependencies -# in deps instrumented? -if (ctx.configuration.coverage_enabled and - (ctx.coverage_instrumented() or - any([ctx.coverage_instrumented(dep) for dep in ctx.attr.deps]))): - # Do something to turn on coverage for this compile action -``` - -Rules also should provide information about which attributes are relevant for -coverage with the `InstrumentedFilesInfo` provider, constructed using -[`coverage_common.instrumented_files_info`](/rules/lib/toplevel/coverage_common#instrumented_files_info). -The `dependency_attributes` parameter of `instrumented_files_info` should list -all runtime dependency attributes, including code dependencies like `deps` and -data dependencies like `data`. The `source_attributes` parameter should list the -rule's source files attributes if coverage instrumentation might be added: - -```python -def _example_library_impl(ctx): - ... - return [ - ...
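-        # Declare which attributes carry sources and which carry runtime dependencies: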
- coverage_common.instrumented_files_info( - ctx, - dependency_attributes = ["deps", "data"], - # Omitted if coverage is not supported for this rule: - source_attributes = ["srcs", "hdrs"], - ) - ... - ] -``` - -If `InstrumentedFilesInfo` is not returned, a default one is created with each -non-tool [dependency attribute](#dependency_attributes) that doesn't set -[`cfg`](#configuration) to `"exec"` in the attribute schema included in -`dependency_attributes`. (This isn't ideal behavior, since it puts attributes -like `srcs` in `dependency_attributes` instead of `source_attributes`, but it -avoids the need for explicit coverage configuration for all rules in the -dependency chain.) - -#### Test rules - -Test rules require additional setup to generate coverage reports. The rule -itself has to add the following implicit attributes: - -```python -my_test = rule( - ..., - attrs = { - ..., - # Implicit dependencies used by Bazel to generate coverage reports. - "_lcov_merger": attr.label( - default = configuration_field(fragment = "coverage", name = "output_generator"), - executable = True, - cfg = config.exec(exec_group = "test"), - ), - "_collect_cc_coverage": attr.label( - default = "@bazel_tools//tools/test:collect_cc_coverage", - executable = True, - cfg = config.exec(exec_group = "test"), - ) - }, - test = True, -) -``` - -By using `configuration_field`, the dependency on the Java LCOV merger tool can -be avoided as long as coverage is not requested. - -When the test is run, it should emit coverage information in the form of one or -more [LCOV files](https://manpages.debian.org/unstable/lcov/geninfo.1.en.html#TRACEFILE_FORMAT) -with unique names into the directory specified by the `COVERAGE_DIR` environment -variable. Bazel will then merge these files into a single LCOV file using the -`_lcov_merger` tool. If present, it will also collect C/C++ coverage using the -`_collect_cc_coverage` tool. - -### Validation Actions - -Sometimes you need to validate something about the build, and the -information required to do that validation is available only in artifacts -(source files or generated files). Because this information is in artifacts, -rules can't do this validation at analysis time because rules can't read -files. Instead, actions must do this validation at execution time. When -validation fails, the action will fail, and hence so will the build. - -Examples of validations that might be run are static analysis, linting, -dependency and consistency checks, and style checks. - -Validation actions can also help to improve build performance by moving parts -of actions that are not required for building artifacts into separate actions. -For example, if a single action that does compilation and linting can be -separated into a compilation action and a linting action, then the linting -action can be run as a validation action and run in parallel with other actions. - -These "validation actions" often don't produce anything that is used elsewhere -in the build, since they only need to assert things about their inputs. This -presents a problem though: If a validation action doesn't produce anything that -is used elsewhere in the build, how does a rule get the action to run? -Historically, the approach was to have the validation action output an empty -file, and artificially add that output to the inputs of some other important -action in the build: - - - -This works because Bazel will always run the validation action when the compile -action is run, but this has significant drawbacks: - -1.
The validation action is in the critical path of the build. Because Bazel -thinks the empty output is required to run the compile action, it will run the -validation action first, even though the compile action will ignore the input. -This reduces parallelism and slows down builds. - -2. If other actions in the build might run instead of the -compile action, then the empty outputs of validation actions need to be added to -those actions as well (`java_library`'s source jar output, for example). This is -also a problem if new actions that might run instead of the compile action are -added later, and the empty validation output is accidentally left off. - -The solution to these problems is to use the Validations Output Group. - -#### Validations Output Group - -The Validations Output Group is an output group designed to hold the otherwise -unused outputs of validation actions, so that they don't need to be artificially -added to the inputs of other actions. - -This group is special in that its outputs are always requested, regardless of -the value of the `--output_groups` flag, and regardless of how the target is -depended upon (for example, on the command line, as a dependency, or through -implicit outputs of the target). Note that normal caching and incrementality -still apply: if the inputs to the validation action have not changed and the -validation action previously succeeded, then the validation action won't be -run. - - - -Using this output group still requires that validation actions output some file, -even an empty one. This might require wrapping some tools that normally don't -create outputs so that a file is created. - -A target's validation actions are not run in three cases: - -* When the target is depended upon as a tool -* When the target is depended upon as an implicit dependency (for example, an - attribute that starts with "_") -* When the target is built in the exec configuration. - -It is assumed that these targets have their own -separate builds and tests that would uncover any validation failures. - -#### Using the Validations Output Group - -The Validations Output Group is named `_validation` and is used like any other -output group: - -```python -def _rule_with_validation_impl(ctx): - - ctx.actions.write(ctx.outputs.main, "main output\n") - ctx.actions.write(ctx.outputs.implicit, "implicit output\n") - - validation_output = ctx.actions.declare_file(ctx.attr.name + ".validation") - ctx.actions.run( - outputs = [validation_output], - executable = ctx.executable._validation_tool, - arguments = [validation_output.path], - ) - - return [ - DefaultInfo(files = depset([ctx.outputs.main])), - OutputGroupInfo(_validation = depset([validation_output])), - ] - - -rule_with_validation = rule( - implementation = _rule_with_validation_impl, - outputs = { - "main": "%{name}.main", - "implicit": "%{name}.implicit", - }, - attrs = { - "_validation_tool": attr.label( - default = Label("//validation_actions:validation_tool"), - executable = True, - cfg = "exec" - ), - } -) -``` - -Notice that the validation output file is not added to the `DefaultInfo` or the -inputs to any other action. The validation action for a target of this rule kind -will still run if the target is depended upon by label, or any of the target's -implicit outputs are directly or indirectly depended upon. - -It is usually important that the outputs of validation actions only go into the -validation output group, and are not added to the inputs of other actions, as -this could defeat parallelism gains. 
Note, however, that Bazel doesn't -have any special checking to enforce this. Therefore, you should test -that validation action outputs are not added to the inputs of any actions in the -tests for Starlark rules. For example: - -```python -load("@bazel_skylib//lib:unittest.bzl", "analysistest") - -def _validation_outputs_test_impl(ctx): - env = analysistest.begin(ctx) - - actions = analysistest.target_actions(env) - target = analysistest.target_under_test(env) - validation_outputs = target.output_groups._validation.to_list() - for action in actions: - for validation_output in validation_outputs: - if validation_output in action.inputs.to_list(): - analysistest.fail(env, - "%s is a validation action output, but is an input to action %s" % ( - validation_output, action)) - - return analysistest.end(env) - -validation_outputs_test = analysistest.make(_validation_outputs_test_impl) -``` - -#### Validation Actions Flag - -Running validation actions is controlled by the `--run_validations` command line -flag, which defaults to true. - -## Deprecated features - -### Deprecated predeclared outputs - -There are two **deprecated** ways of using predeclared outputs: - -* The [`outputs`](/rules/lib/globals/bzl#rule.outputs) parameter of `rule` specifies - a mapping between output attribute names and string templates for generating - predeclared output labels. Prefer using non-predeclared outputs and - explicitly adding outputs to `DefaultInfo.files`. Use the rule target's - label as input for rules which consume the output instead of a predeclared - output's label. - -* For [executable rules](#executable-rules), `ctx.outputs.executable` refers - to a predeclared executable output with the same name as the rule target. - Prefer declaring the output explicitly, for example with - `ctx.actions.declare_file(ctx.label.name)`, and ensure that the command that - generates the executable sets its permissions to allow execution. Explicitly - pass the executable output to the `executable` parameter of `DefaultInfo`. - -### Runfiles features to avoid - -[`ctx.runfiles`](/rules/lib/builtins/ctx#runfiles) and the [`runfiles`](/rules/lib/builtins/runfiles) -type have a complex set of features, many of which are kept for legacy reasons. -The following recommendations help reduce complexity: - -* **Avoid** use of the `collect_data` and `collect_default` modes of - [`ctx.runfiles`](/rules/lib/builtins/ctx#runfiles). These modes implicitly collect - runfiles across certain hardcoded dependency edges in confusing ways. - Instead, add files using the `files` or `transitive_files` parameters of - `ctx.runfiles`, or by merging in runfiles from dependencies with - `runfiles = runfiles.merge(dep[DefaultInfo].default_runfiles)`. - -* **Avoid** use of the `data_runfiles` and `default_runfiles` parameters of the - `DefaultInfo` constructor. Specify `DefaultInfo(runfiles = ...)` instead. - The distinction between "default" and "data" runfiles is maintained for - legacy reasons. For example, some rules put their default outputs in - `data_runfiles`, but not `default_runfiles`. Instead of using - `data_runfiles`, rules should *both* include default outputs and merge in - `default_runfiles` from attributes which provide runfiles (often - [`data`](/reference/be/common-definitions#common-attributes.data)). - -* When retrieving `runfiles` from `DefaultInfo` (generally only for merging - runfiles between the current rule and its dependencies), use - `DefaultInfo.default_runfiles`, **not** `DefaultInfo.data_runfiles`.
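-
-Taken together, these recommendations suggest a pattern like the following
-minimal sketch (the rule and attribute names are illustrative, not part of any
-API):
-
-```python
-def _my_rule_impl(ctx):
-    # Add this rule's own files explicitly instead of relying on
-    # collect_data / collect_default.
-    runfiles = ctx.runfiles(files = ctx.files.data)
-
-    # Merge in the default runfiles of every attribute that may provide them.
-    runfiles = runfiles.merge_all([
-        dep[DefaultInfo].default_runfiles
-        for dep in ctx.attr.srcs + ctx.attr.deps + ctx.attr.data
-    ])
-    return [DefaultInfo(runfiles = runfiles)]
-```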
- -### Migrating from legacy providers - -Historically, Bazel providers were simple fields on the `Target` object. They -were accessed using the dot operator, and they were created by putting the field -in a [`struct`](/rules/lib/builtins/struct) returned by the rule's -implementation function instead of a list of provider objects: - -```python -return struct(example_info = struct(headers = depset(...))) -``` - -Such providers can be retrieved from the corresponding field of the `Target` object: - -```python -transitive_headers = [hdr.example_info.headers for hdr in ctx.attr.hdrs] -``` - -*This style is deprecated and should not be used in new code;* see the following -for information that may help you migrate. The new provider mechanism avoids name -clashes. It also supports data hiding, by requiring any code accessing a -provider instance to retrieve it using the provider symbol. - -For the moment, legacy providers are still supported. A rule can return both -legacy and modern providers as follows: - -```python -def _old_rule_impl(ctx): - ... - legacy_data = struct(x = "foo", ...) - modern_data = MyInfo(y = "bar", ...) - # When any legacy providers are returned, the top-level returned value is a - # struct. - return struct( - # One key = value entry for each legacy provider. - legacy_info = legacy_data, - ... - # Additional modern providers: - providers = [modern_data, ...]) -``` - -If `dep` is the resulting `Target` object for an instance of this rule, the -providers and their contents can be retrieved as `dep.legacy_info.x` and -`dep[MyInfo].y`. - -In addition to `providers`, the returned struct can also take several other -fields that have special meaning (and thus don't create a corresponding legacy -provider): - -* The fields `files`, `runfiles`, `data_runfiles`, `default_runfiles`, and - `executable` correspond to the same-named fields of - [`DefaultInfo`](/rules/lib/providers/DefaultInfo). It is not allowed to specify any of - these fields while also returning a `DefaultInfo` provider. - -* The field `output_groups` takes a struct value and corresponds to an - [`OutputGroupInfo`](/rules/lib/providers/OutputGroupInfo). - -In [`provides`](/rules/lib/globals/bzl#rule.provides) declarations of rules, and in -[`providers`](/rules/lib/toplevel/attr#label_list.providers) declarations of dependency -attributes, legacy providers are passed in as strings and modern providers are -passed in by their `Info` symbol. Be sure to change from strings to symbols -when migrating. For complex or large rule sets where it is difficult to update -all rules atomically, you may have an easier time if you follow this sequence of -steps: - -1. Modify the rules that produce the legacy provider to produce both the legacy - and modern providers, using the preceding syntax. For rules that declare they - return the legacy provider, update that declaration to include both the - legacy and modern providers. - -2. Modify the rules that consume the legacy provider to instead consume the - modern provider. If any attribute declarations require the legacy provider, - also update them to instead require the modern provider. Optionally, you can - interleave this work with step 1 by having consumers accept or require either - provider: Test for the presence of the legacy provider using - `hasattr(target, 'foo')`, or the new provider using `FooInfo in target`, as - shown in the sketch after this list. - -3. Fully remove the legacy provider from all rules.
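-
-For step 2, a consumer written for the interleaved variant might look roughly
-like this sketch (`MyInfo` and the `legacy_info` field follow the example
-above; the `deps` attribute and failure message are illustrative):
-
-```python
-def _consumer_impl(ctx):
-    infos = []
-    for dep in ctx.attr.deps:
-        if MyInfo in dep:
-            # Prefer the modern provider when the dependency returns it.
-            infos.append(dep[MyInfo])
-        elif hasattr(dep, "legacy_info"):
-            # Fall back to the legacy struct-style provider during migration.
-            infos.append(dep.legacy_info)
-        else:
-            fail("%s provides neither MyInfo nor legacy_info" % dep.label)
-    ...
-```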
diff --git a/8.3.1/extending/toolchains.mdx b/8.3.1/extending/toolchains.mdx deleted file mode 100644 index b904cbe..0000000 --- a/8.3.1/extending/toolchains.mdx +++ /dev/null @@ -1,600 +0,0 @@ ---- -title: 'Toolchains' ---- - - - -This page describes the toolchain framework, which is a way for rule authors to -decouple their rule logic from platform-based selection of tools. It is -recommended to read the [rules](/extending/rules) and [platforms](/extending/platforms) -pages before continuing. This page covers why toolchains are needed, how to -define and use them, and how Bazel selects an appropriate toolchain based on -platform constraints. - -## Motivation - -Let's first look at the problem toolchains are designed to solve. Suppose you -are writing rules to support the "bar" programming language. Your `bar_binary` -rule would compile `*.bar` files using the `barc` compiler, a tool that itself -is built as another target in your workspace. Since users who write `bar_binary` -targets shouldn't have to specify a dependency on the compiler, you make it an -implicit dependency by adding it to the rule definition as a private attribute. - -```python -bar_binary = rule( - implementation = _bar_binary_impl, - attrs = { - "srcs": attr.label_list(allow_files = True), - ... - "_compiler": attr.label( - default = "//bar_tools:barc_linux", # the compiler running on linux - providers = [BarcInfo], - ), - }, -) -``` - -`//bar_tools:barc_linux` is now a dependency of every `bar_binary` target, so -it'll be built before any `bar_binary` target. It can be accessed by the rule's -implementation function just like any other attribute: - -```python -BarcInfo = provider( - doc = "Information about how to invoke the barc compiler.", - # In the real world, compiler_path and system_lib might hold File objects, - # but for simplicity they are strings for this example. arch_flags is a list - # of strings. - fields = ["compiler_path", "system_lib", "arch_flags"], -) - -def _bar_binary_impl(ctx): - ... - info = ctx.attr._compiler[BarcInfo] - command = "%s -l %s %s" % ( - info.compiler_path, - info.system_lib, - " ".join(info.arch_flags), - ) - ... -``` - -The issue here is that the compiler's label is hardcoded into `bar_binary`, yet -different targets may need different compilers depending on what platform they -are being built for and what platform they are being built on -- called the -*target platform* and *execution platform*, respectively. Furthermore, the rule -author does not necessarily even know all the available tools and platforms, so -it is not feasible to hardcode them in the rule's definition. - -A less-than-ideal solution would be to shift the burden onto users, by making -the `_compiler` attribute non-private. Then individual targets could be -hardcoded to build for one platform or another. 
- -```python -bar_binary( - name = "myprog_on_linux", - srcs = ["mysrc.bar"], - compiler = "//bar_tools:barc_linux", -) - -bar_binary( - name = "myprog_on_windows", - srcs = ["mysrc.bar"], - compiler = "//bar_tools:barc_windows", -) -``` - -You can improve on this solution by using `select` to choose the `compiler` -[based on the platform](/docs/configurable-attributes): - -```python -config_setting( - name = "on_linux", - constraint_values = [ - "@platforms//os:linux", - ], -) - -config_setting( - name = "on_windows", - constraint_values = [ - "@platforms//os:windows", - ], -) - -bar_binary( - name = "myprog", - srcs = ["mysrc.bar"], - compiler = select({ - ":on_linux": "//bar_tools:barc_linux", - ":on_windows": "//bar_tools:barc_windows", - }), -) -``` - -But this is tedious and a bit much to ask of every single `bar_binary` user. -If this style is not used consistently throughout the workspace, it leads to -builds that work fine on a single platform but fail when extended to -multi-platform scenarios. It also does not address the problem of adding support -for new platforms and compilers without modifying existing rules or targets. - -The toolchain framework solves this problem by adding an extra level of -indirection. Essentially, you declare that your rule has an abstract dependency -on *some* member of a family of targets (a toolchain type), and Bazel -automatically resolves this to a particular target (a toolchain) based on the -applicable platform constraints. Neither the rule author nor the target author -need know the complete set of available platforms and toolchains. - -## Writing rules that use toolchains - -Under the toolchain framework, instead of having rules depend directly on tools, -they instead depend on *toolchain types*. A toolchain type is a simple target -that represents a class of tools that serve the same role for different -platforms. For instance, you can declare a type that represents the bar -compiler: - -```python -# By convention, toolchain_type targets are named "toolchain_type" and -# distinguished by their package path. So the full path for this would be -# //bar_tools:toolchain_type. -toolchain_type(name = "toolchain_type") -``` - -The rule definition in the previous section is modified so that instead of -taking in the compiler as an attribute, it declares that it consumes a -`//bar_tools:toolchain_type` toolchain. - -```python -bar_binary = rule( - implementation = _bar_binary_impl, - attrs = { - "srcs": attr.label_list(allow_files = True), - ... - # No `_compiler` attribute anymore. - }, - toolchains = ["//bar_tools:toolchain_type"], -) -``` - -The implementation function now accesses this dependency under `ctx.toolchains` -instead of `ctx.attr`, using the toolchain type as the key. - -```python -def _bar_binary_impl(ctx): - ... - info = ctx.toolchains["//bar_tools:toolchain_type"].barcinfo - # The rest is unchanged. - command = "%s -l %s %s" % ( - info.compiler_path, - info.system_lib, - " ".join(info.arch_flags), - ) - ... -``` - -`ctx.toolchains["//bar_tools:toolchain_type"]` returns the -[`ToolchainInfo` provider](/rules/lib/toplevel/platform_common#ToolchainInfo) -of whatever target Bazel resolved the toolchain dependency to. The fields of the -`ToolchainInfo` object are set by the underlying tool's rule; in the next -section, this rule is defined such that there is a `barcinfo` field that wraps -a `BarcInfo` object. - -Bazel's procedure for resolving toolchains to targets is described -[below](#toolchain-resolution). 
Only the resolved toolchain target is actually
-made a dependency of the `bar_binary` target, not the whole space of candidate
-toolchains.
-
-### Mandatory and Optional Toolchains
-
-By default, when a rule expresses a toolchain type dependency using a bare label
-(as shown above), the toolchain type is considered to be **mandatory**. If Bazel
-is unable to find a matching toolchain (see
-[Toolchain resolution](#toolchain-resolution) below) for a mandatory toolchain
-type, this is an error and analysis halts.
-
-It is possible instead to declare an **optional** toolchain type dependency, as
-follows:
-
-```python
-bar_binary = rule(
-    ...
-    toolchains = [
-        config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = False),
-    ],
-)
-```
-
-When an optional toolchain type cannot be resolved, analysis continues, and the
-result of `ctx.toolchains["//bar_tools:toolchain_type"]` is `None`.
-
-The [`config_common.toolchain_type`](/rules/lib/toplevel/config_common#toolchain_type)
-function defaults to mandatory.
-
-The following forms can be used:
-
-- Mandatory toolchain types:
-    - `toolchains = ["//bar_tools:toolchain_type"]`
-    - `toolchains = [config_common.toolchain_type("//bar_tools:toolchain_type")]`
-    - `toolchains = [config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = True)]`
-- Optional toolchain types:
-    - `toolchains = [config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = False)]`
-
-You can also mix and match both forms in the same rule:
-
-```python
-bar_binary = rule(
-    ...
-    toolchains = [
-        "//foo_tools:toolchain_type",
-        config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = False),
-    ],
-)
-```
-
-However, if the same toolchain type is listed multiple times, the most strict
-form takes precedence: mandatory is stricter than optional.
-
-### Writing aspects that use toolchains
-
-Aspects have access to the same toolchain API as rules: you can define required
-toolchain types, access toolchains via the context, and use them to generate new
-actions using the toolchain.
-
-```py
-bar_aspect = aspect(
-    implementation = _bar_aspect_impl,
-    attrs = {},
-    toolchains = ['//bar_tools:toolchain_type'],
-)
-
-def _bar_aspect_impl(target, ctx):
-    toolchain = ctx.toolchains['//bar_tools:toolchain_type']
-    # Use the toolchain provider like in a rule.
-    return []
-```
-
-## Defining toolchains
-
-To define some toolchains for a given toolchain type, you need three things:
-
-1. A language-specific rule representing the kind of tool or tool suite. By
-   convention this rule's name is suffixed with "\_toolchain".
-
-   1. **Note:** The `\_toolchain` rule cannot create any build actions.
-      Rather, it collects artifacts from other rules and forwards them to the
-      rule that uses the toolchain. That rule is responsible for creating all
-      build actions.
-
-2. Several targets of this rule type, representing versions of the tool or tool
-   suite for different platforms.
-
-3. For each such target, an associated target of the generic
-   [`toolchain`](/reference/be/platforms-and-toolchains#toolchain)
-   rule, to provide metadata used by the toolchain framework. This `toolchain`
-   target also refers to the `toolchain_type` associated with this toolchain.
-   This means that a given `_toolchain` rule could be associated with any
-   `toolchain_type`, but that the rule is only associated with a
-   `toolchain_type` through a `toolchain` instance that uses it.
-
-For our running example, here's a definition for a `bar_toolchain` rule. Our
-example has only a compiler, but other tools such as a linker could also be
-grouped underneath it.
-
-```python
-def _bar_toolchain_impl(ctx):
-    toolchain_info = platform_common.ToolchainInfo(
-        barcinfo = BarcInfo(
-            compiler_path = ctx.attr.compiler_path,
-            system_lib = ctx.attr.system_lib,
-            arch_flags = ctx.attr.arch_flags,
-        ),
-    )
-    return [toolchain_info]
-
-bar_toolchain = rule(
-    implementation = _bar_toolchain_impl,
-    attrs = {
-        "compiler_path": attr.string(),
-        "system_lib": attr.string(),
-        "arch_flags": attr.string_list(),
-    },
-)
-```
-
-The rule must return a `ToolchainInfo` provider, which becomes the object that
-the consuming rule retrieves using `ctx.toolchains` and the label of the
-toolchain type. `ToolchainInfo`, like `struct`, can hold arbitrary field-value
-pairs. The specification of exactly what fields are added to the `ToolchainInfo`
-should be clearly documented at the toolchain type. In this example, the values
-are returned wrapped in a `BarcInfo` object to reuse the schema defined above;
-this style may be useful for validation and code reuse.
-
-Now you can define targets for specific `barc` compilers.
-
-```python
-bar_toolchain(
-    name = "barc_linux",
-    arch_flags = [
-        "--arch=Linux",
-        "--debug_everything",
-    ],
-    compiler_path = "/path/to/barc/on/linux",
-    system_lib = "/usr/lib/libbarc.so",
-)
-
-bar_toolchain(
-    name = "barc_windows",
-    arch_flags = [
-        "--arch=Windows",
-        # Different flags, no debug support on windows.
-    ],
-    compiler_path = "C:\\path\\on\\windows\\barc.exe",
-    system_lib = "C:\\path\\on\\windows\\barclib.dll",
-)
-```
-
-Finally, you create `toolchain` definitions for the two `bar_toolchain` targets.
-These definitions link the language-specific targets to the toolchain type and
-provide the constraint information that tells Bazel when the toolchain is
-appropriate for a given platform.
-
-```python
-toolchain(
-    name = "barc_linux_toolchain",
-    exec_compatible_with = [
-        "@platforms//os:linux",
-        "@platforms//cpu:x86_64",
-    ],
-    target_compatible_with = [
-        "@platforms//os:linux",
-        "@platforms//cpu:x86_64",
-    ],
-    toolchain = ":barc_linux",
-    toolchain_type = ":toolchain_type",
-)
-
-toolchain(
-    name = "barc_windows_toolchain",
-    exec_compatible_with = [
-        "@platforms//os:windows",
-        "@platforms//cpu:x86_64",
-    ],
-    target_compatible_with = [
-        "@platforms//os:windows",
-        "@platforms//cpu:x86_64",
-    ],
-    toolchain = ":barc_windows",
-    toolchain_type = ":toolchain_type",
-)
-```
-
-The use of relative path syntax above suggests these definitions are all in the
-same package, but there's no reason the toolchain type, language-specific
-toolchain targets, and `toolchain` definition targets can't all be in separate
-packages.
-
-See the [`go_toolchain`](https://github.com/bazelbuild/rules_go/blob/master/go/private/go_toolchain.bzl)
-rule for a real-world example.
-
-### Toolchains and configurations
-
-An important question for rule authors is, when a `bar_toolchain` target is
-analyzed, what [configuration](/reference/glossary#configuration) does it see, and what transitions
-should be used for dependencies? The example above uses string attributes, but
-what would happen for a more complicated toolchain that depends on other targets
-in the Bazel repository?
-
-Let's see a more complex version of `bar_toolchain`:
-
-```python
-def _bar_toolchain_impl(ctx):
-    # The implementation is mostly the same as above, so skipping.
- pass - -bar_toolchain = rule( - implementation = _bar_toolchain_impl, - attrs = { - "compiler": attr.label( - executable = True, - mandatory = True, - cfg = "exec", - ), - "system_lib": attr.label( - mandatory = True, - cfg = "target", - ), - "arch_flags": attr.string_list(), - }, -) -``` - -The use of [`attr.label`](/rules/lib/toplevel/attr#label) is the same as for a standard rule, -but the meaning of the `cfg` parameter is slightly different. - -The dependency from a target (called the "parent") to a toolchain via toolchain -resolution uses a special configuration transition called the "toolchain -transition". The toolchain transition keeps the configuration the same, except -that it forces the execution platform to be the same for the toolchain as for -the parent (otherwise, toolchain resolution for the toolchain could pick any -execution platform, and wouldn't necessarily be the same as for parent). This -allows any `exec` dependencies of the toolchain to also be executable for the -parent's build actions. Any of the toolchain's dependencies which use `cfg = -"target"` (or which don't specify `cfg`, since "target" is the default) are -built for the same target platform as the parent. This allows toolchain rules to -contribute both libraries (the `system_lib` attribute above) and tools (the -`compiler` attribute) to the build rules which need them. The system libraries -are linked into the final artifact, and so need to be built for the same -platform, whereas the compiler is a tool invoked during the build, and needs to -be able to run on the execution platform. - -## Registering and building with toolchains - -At this point all the building blocks are assembled, and you just need to make -the toolchains available to Bazel's resolution procedure. This is done by -registering the toolchain, either in a `MODULE.bazel` file using -`register_toolchains()`, or by passing the toolchains' labels on the command -line using the `--extra_toolchains` flag. - -```python -register_toolchains( - "//bar_tools:barc_linux_toolchain", - "//bar_tools:barc_windows_toolchain", - # Target patterns are also permitted, so you could have also written: - # "//bar_tools:all", - # or even - # "//bar_tools/...", -) -``` - -When using target patterns to register toolchains, the order in which the -individual toolchains are registered is determined by the following rules: - -* The toolchains defined in a subpackage of a package are registered before the - toolchains defined in the package itself. -* Within a package, toolchains are registered in the lexicographical order of - their names. - -Now when you build a target that depends on a toolchain type, an appropriate -toolchain will be selected based on the target and execution platforms. - -```python -# my_pkg/BUILD - -platform( - name = "my_target_platform", - constraint_values = [ - "@platforms//os:linux", - ], -) - -bar_binary( - name = "my_bar_binary", - ... -) -``` - -```sh -bazel build //my_pkg:my_bar_binary --platforms=//my_pkg:my_target_platform -``` - -Bazel will see that `//my_pkg:my_bar_binary` is being built with a platform that -has `@platforms//os:linux` and therefore resolve the -`//bar_tools:toolchain_type` reference to `//bar_tools:barc_linux_toolchain`. -This will end up building `//bar_tools:barc_linux` but not -`//bar_tools:barc_windows`. - -## Toolchain resolution - -Note: [Some Bazel rules](/concepts/platforms#status) do not yet support -toolchain resolution. 
- -For each target that uses toolchains, Bazel's toolchain resolution procedure -determines the target's concrete toolchain dependencies. The procedure takes as -input a set of required toolchain types, the target platform, the list of -available execution platforms, and the list of available toolchains. Its outputs -are a selected toolchain for each toolchain type as well as a selected execution -platform for the current target. - -The available execution platforms and toolchains are gathered from the -external dependency graph via -[`register_execution_platforms`](/rules/lib/globals/module#register_execution_platforms) -and -[`register_toolchains`](/rules/lib/globals/module#register_toolchains) calls in -`MODULE.bazel` files. -Additional execution platforms and toolchains may also be specified on the -command line via -[`--extra_execution_platforms`](/reference/command-line-reference#flag--extra_execution_platforms) -and -[`--extra_toolchains`](/reference/command-line-reference#flag--extra_toolchains). -The host platform is automatically included as an available execution platform. -Available platforms and toolchains are tracked as ordered lists for determinism, -with preference given to earlier items in the list. - -The set of available toolchains, in priority order, is created from -`--extra_toolchains` and `register_toolchains`: - -1. Toolchains registered using `--extra_toolchains` are added first. (Within - these, the **last** toolchain has highest priority.) -2. Toolchains registered using `register_toolchains` in the transitive external - dependency graph, in the following order: (Within these, the **first** - mentioned toolchain has highest priority.) - 1. Toolchains registered by the root module (as in, the `MODULE.bazel` at the - workspace root); - 2. Toolchains registered in the user's `WORKSPACE` file, including in any - macros invoked from there; - 3. Toolchains registered by non-root modules (as in, dependencies specified by - the root module, and their dependencies, and so forth); - 4. Toolchains registered in the "WORKSPACE suffix"; this is only used by - certain native rules bundled with the Bazel installation. - -**NOTE:** [Pseudo-targets like `:all`, `:*`, and -`/...`](/run/build#specifying-build-targets) are ordered by Bazel's package -loading mechanism, which uses a lexicographic ordering. - -The resolution steps are as follows. - -1. A `target_compatible_with` or `exec_compatible_with` clause *matches* a - platform if, for each `constraint_value` in its list, the platform also has - that `constraint_value` (either explicitly or as a default). - - If the platform has `constraint_value`s from `constraint_setting`s not - referenced by the clause, these do not affect matching. - -1. If the target being built specifies the - [`exec_compatible_with` attribute](/reference/be/common-definitions#common.exec_compatible_with) - (or its rule definition specifies the - [`exec_compatible_with` argument](/rules/lib/globals/bzl#rule.exec_compatible_with)), - the list of available execution platforms is filtered to remove - any that do not match the execution constraints. - -1. The list of available toolchains is filtered to remove any toolchains - specifying `target_settings` that don't match the current configuration. - -1. For each available execution platform, you associate each toolchain type with - the first available toolchain, if any, that is compatible with this execution - platform and the target platform. - -1. 
Any execution platform that failed to find a compatible mandatory toolchain - for one of its toolchain types is ruled out. Of the remaining platforms, the - first one becomes the current target's execution platform, and its associated - toolchains (if any) become dependencies of the target. - -The chosen execution platform is used to run all actions that the target -generates. - -In cases where the same target can be built in multiple configurations (such as -for different CPUs) within the same build, the resolution procedure is applied -independently to each version of the target. - -If the rule uses [execution groups](/extending/exec-groups), each execution -group performs toolchain resolution separately, and each has its own execution -platform and toolchains. - -## Debugging toolchains - -If you are adding toolchain support to an existing rule, use the -`--toolchain_resolution_debug=regex` flag. During toolchain resolution, the flag -provides verbose output for toolchain types or target names that match the regex variable. You -can use `.*` to output all information. Bazel will output names of toolchains it -checks and skips during the resolution process. - -If you'd like to see which [`cquery`](/query/cquery) dependencies are from toolchain -resolution, use `cquery`'s [`--transitions`](/query/cquery#transitions) flag: - -``` -# Find all direct dependencies of //cc:my_cc_lib. This includes explicitly -# declared dependencies, implicit dependencies, and toolchain dependencies. -$ bazel cquery 'deps(//cc:my_cc_lib, 1)' -//cc:my_cc_lib (96d6638) -@bazel_tools//tools/cpp:toolchain (96d6638) -@bazel_tools//tools/def_parser:def_parser (HOST) -//cc:my_cc_dep (96d6638) -@local_config_platform//:host (96d6638) -@bazel_tools//tools/cpp:toolchain_type (96d6638) -//:default_host_platform (96d6638) -@local_config_cc//:cc-compiler-k8 (HOST) -//cc:my_cc_lib.cc (null) -@bazel_tools//tools/cpp:grep-includes (HOST) - -# Which of these are from toolchain resolution? -$ bazel cquery 'deps(//cc:my_cc_lib, 1)' --transitions=lite | grep "toolchain dependency" - [toolchain dependency]#@local_config_cc//:cc-compiler-k8#HostTransition -> b6df211 -``` diff --git a/8.3.1/external/advanced.mdx b/8.3.1/external/advanced.mdx deleted file mode 100644 index 26ece4d..0000000 --- a/8.3.1/external/advanced.mdx +++ /dev/null @@ -1,183 +0,0 @@ ---- -title: 'Advanced topics on external dependencies' ---- - - - -## Shadowing dependencies in WORKSPACE - -Note: This section applies to the [WORKSPACE -system](/external/overview#workspace-system) only. For -[Bzlmod](/external/overview#bzlmod), use a [multiple-version -override](/external/module#multiple-version_override). - -Whenever possible, have a single version policy in your project, which is -required for dependencies that you compile against and end up in your final -binary. For other cases, you can shadow dependencies: - -myproject/WORKSPACE - -```python -workspace(name = "myproject") - -local_repository( - name = "A", - path = "../A", -) -local_repository( - name = "B", - path = "../B", -) -``` - -A/WORKSPACE - -```python -workspace(name = "A") - -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -http_archive( - name = "testrunner", - urls = ["https://github.com/testrunner/v1.zip"], - sha256 = "...", -) -``` - -B/WORKSPACE {# This is not a buganizer link okay?? 
#} - -```python -workspace(name = "B") - -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -http_archive( - name = "testrunner", - urls = ["https://github.com/testrunner/v2.zip"], - sha256 = "..." -) -``` - -Both dependencies `A` and `B` depend on different versions of `testrunner`. -Include both in `myproject` without conflict by giving them distinct names in -`myproject/WORKSPACE`: - -```python -workspace(name = "myproject") - -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -http_archive( - name = "testrunner-v1", - urls = ["https://github.com/testrunner/v1.zip"], - sha256 = "..." -) -http_archive( - name = "testrunner-v2", - urls = ["https://github.com/testrunner/v2.zip"], - sha256 = "..." -) -local_repository( - name = "A", - path = "../A", - repo_mapping = {"@testrunner" : "@testrunner-v1"} -) -local_repository( - name = "B", - path = "../B", - repo_mapping = {"@testrunner" : "@testrunner-v2"} -) -``` - -You can also use this mechanism to join diamonds. For example, if `A` and `B` -have the same dependency but call it by different names, join those dependencies -in `myproject/WORKSPACE`. - -## Overriding repositories from the command line - -To override a declared repository with a local repository from the command line, -use the -[`--override_repository`](/reference/command-line-reference#flag--override_repository) -flag. Using this flag changes the contents of external repositories without -changing your source code. - -For example, to override `@foo` to the local directory `/path/to/local/foo`, -pass the `--override_repository=foo=/path/to/local/foo` flag. - -Use cases include: - -* Debugging issues. For example, to override an `http_archive` repository to a - local directory where you can make changes more easily. -* Vendoring. If you are in an environment where you cannot make network calls, - override the network-based repository rules to point to local directories - instead. - -Note: With [Bzlmod](/external/overview#bzlmod), remember to use canonical repo -names here. Alternatively, use the -[`--override_module`](/reference/command-line-reference#flag--override_module) -flag to override a module to a local directory, similar to the -[`local_path_override`](/rules/lib/globals/module#local_path_override) directive in -`MODULE.bazel`. - -## Using proxies - -Bazel picks up proxy addresses from the `HTTPS_PROXY` and `HTTP_PROXY` -environment variables and uses these to download `HTTP` and `HTTPS` files (if -specified). - -## Support for IPv6 - -On IPv6-only machines, Bazel can download dependencies with no changes. However, -on dual-stack IPv4/IPv6 machines Bazel follows the same convention as Java, -preferring IPv4 if enabled. In some situations, for example when the IPv4 -network cannot resolve/reach external addresses, this can cause `Network -unreachable` exceptions and build failures. In these cases, you can override -Bazel's behavior to prefer IPv6 by using the -[`java.net.preferIPv6Addresses=true` system -property](https://docs.oracle.com/javase/8/docs/api/java/net/doc-files/net-properties.html). 
-Specifically: - -* Use `--host_jvm_args=-Djava.net.preferIPv6Addresses=true` [startup - option](/docs/user-manual#startup-options), for example by adding the - following line in your [`.bazelrc` file](/run/bazelrc): - - `startup --host_jvm_args=-Djava.net.preferIPv6Addresses=true` - -* When running Java build targets that need to connect to the internet (such - as for integration tests), use the - `--jvmopt=-Djava.net.preferIPv6Addresses=true` [tool - flag](/docs/user-manual#jvmopt). For example, include in your [`.bazelrc` - file](/run/bazelrc): - - `build --jvmopt=-Djava.net.preferIPv6Addresses` - -* If you are using [`rules_jvm_external`](https://github.com/bazelbuild/rules_jvm_external) - for dependency version resolution, also add - `-Djava.net.preferIPv6Addresses=true` to the `COURSIER_OPTS` environment - variable to [provide JVM options for - Coursier](https://github.com/bazelbuild/rules_jvm_external#provide-jvm-options-for-coursier-with-coursier_opts). - -## Offline builds - -Sometimes you may wish to run a build offline, such as when traveling on an -airplane. For such simple use cases, prefetch the needed repositories with -`bazel fetch` or `bazel sync`. To disable fetching further repositories during -the build, use the option `--nofetch`. - -For true offline builds, where a different entity supplies all needed files, -Bazel supports the option `--distdir`. This flag tells Bazel to look first into -the directories specified by that option when a repository rule asks Bazel to -fetch a file with [`ctx.download`](/rules/lib/builtins/repository_ctx#download) or -[`ctx.download_and_extract`](/rules/lib/builtins/repository_ctx#download_and_extract). By -providing a hash sum of the file needed, Bazel looks for a file matching the -basename of the first URL, and uses the local copy if the hash matches. - -Bazel itself uses this technique to bootstrap offline from the [distribution -artifact](https://github.com/bazelbuild/bazel-website/blob/master/designs/_posts/2016-10-11-distribution-artifact.md). -It does so by [collecting all the needed external -dependencies](https://github.com/bazelbuild/bazel/blob/5cfa0303d6ac3b5bd031ff60272ce80a704af8c2/WORKSPACE#L116) -in an internal -[`distdir_tar`](https://github.com/bazelbuild/bazel/blob/5cfa0303d6ac3b5bd031ff60272ce80a704af8c2/distdir.bzl#L44). - -Bazel allows execution of arbitrary commands in repository rules without knowing -if they call out to the network, and so cannot enforce fully offline builds. To -test if a build works correctly offline, manually block off the network (as -Bazel does in its [bootstrap -test](https://cs.opensource.google/bazel/bazel/+/master:src/test/shell/bazel/BUILD;l=1073;drc=88c426e73cc0eb0a41c0d7995e36acd94e7c9a48)). diff --git a/8.3.1/external/lockfile.mdx b/8.3.1/external/lockfile.mdx deleted file mode 100644 index af13c56..0000000 --- a/8.3.1/external/lockfile.mdx +++ /dev/null @@ -1,287 +0,0 @@ -keywords: product:Bazel,lockfile,Bzlmod ---- -title: 'Bazel Lockfile' ---- - - - -The lockfile feature in Bazel enables the recording of specific versions or -dependencies of software libraries or packages required by a project. It -achieves this by storing the result of module resolution and extension -evaluation. The lockfile promotes reproducible builds, ensuring consistent -development environments. Additionally, it enhances build efficiency by allowing -Bazel to skip the parts of the resolution process that are unaffected by changes -in project dependencies. 
Furthermore, the lockfile improves stability by -preventing unexpected updates or breaking changes in external libraries, thereby -reducing the risk of introducing bugs. - -## Lockfile Generation - -The lockfile is generated under the workspace root with the name -`MODULE.bazel.lock`. It is created or updated during the build process, -specifically after module resolution and extension evaluation. Importantly, it -only includes dependencies that are included in the current invocation of the -build. - -When changes occur in the project that affect its dependencies, the lockfile is -automatically updated to reflect the new state. This ensures that the lockfile -remains focused on the specific set of dependencies required for the current -build, providing an accurate representation of the project's resolved -dependencies. - -## Lockfile Usage - -The lockfile can be controlled by the flag -[`--lockfile_mode`](/reference/command-line-reference#flag--lockfile_mode) to -customize the behavior of Bazel when the project state differs from the -lockfile. The available modes are: - -* `update` (Default): Use the information that is present in the lockfile to - skip downloads of known registry files and to avoid re-evaluating extensions - whose results are still up-to-date. If information is missing, it will - be added to the lockfile. In this mode, Bazel also avoids refreshing - mutable information, such as yanked versions, for dependencies that haven't - changed. -* `refresh`: Like `update`, but mutable information is always refreshed when - switching to this mode and roughly every hour while in this mode. -* `error`: Like `update`, but if any information is missing or out-of-date, - Bazel will fail with an error. This mode never changes the lockfile or - performs network requests during resolution. Module extensions that marked - themselves as `reproducible` may still perform network requests, but are - expected to always produce the same result. -* `off`: The lockfile is neither checked nor updated. - -## Lockfile Benefits - -The lockfile offers several benefits and can be utilized in various ways: - -- **Reproducible builds.** By capturing the specific versions or dependencies - of software libraries, the lockfile ensures that builds are reproducible - across different environments and over time. Developers can rely on - consistent and predictable results when building their projects. - -- **Fast incremental resolutions.** The lockfile enables Bazel to avoid - downloading registry files that were already used in a previous build. - This significantly improves build efficiency, especially in scenarios where - resolution can be time-consuming. - -- **Stability and risk reduction.** The lockfile helps maintain stability by - preventing unexpected updates or breaking changes in external libraries. By - locking the dependencies to specific versions, the risk of introducing bugs - due to incompatible or untested updates is reduced. -- - -### Hidden lockfile - -Bazel also maintains another lockfile at -`"$(bazel info output_base)"/MODULE.bazel.lock`. The format and contents of this -lockfile are explicitly unspecified. It is only used as a performance -optimization. While it can be deleted together with the output base via -`bazel clean --expunge`, any need to do so is a bug in either Bazel itself or a -module extension. - -## Lockfile Contents - -The lockfile contains all the necessary information to determine whether the -project state has changed. 
It also includes the result of building the project -in the current state. The lockfile consists of two main parts: - -1. Hashes of all remote files that are inputs to module resolution. -2. For each module extension, the lockfile includes inputs that affect it, - represented by `bzlTransitiveDigest`, `usagesDigest` and other fields, as - well as the output of running that extension, referred to as - `generatedRepoSpecs` - -Here is an example that demonstrates the structure of the lockfile, along with -explanations for each section: - -```json -{ - "lockFileVersion": 10, - "registryFileHashes": { - "https://bcr.bazel.build/bazel_registry.json": "8a28e4af...5d5b3497", - "https://bcr.bazel.build/modules/foo/1.0/MODULE.bazel": "7cd0312e...5c96ace2", - "https://bcr.bazel.build/modules/foo/2.0/MODULE.bazel": "70390338... 9fc57589", - "https://bcr.bazel.build/modules/foo/2.0/source.json": "7e3a9adf...170d94ad", - "https://registry.mycorp.com/modules/foo/1.0/MODULE.bazel": "not found", - ... - }, - "selectedYankedVersions": { - "foo@2.0": "Yanked for demo purposes" - }, - "moduleExtensions": { - "//:extension.bzl%lockfile_ext": { - "general": { - "bzlTransitiveDigest": "oWDzxG/aLnyY6Ubrfy....+Jp6maQvEPxn0pBM=", - "usagesDigest": "aLmqbvowmHkkBPve05yyDNGN7oh7QE9kBADr3QIZTZs=", - ..., - "generatedRepoSpecs": { - "hello": { - "bzlFile": "@@//:extension.bzl", - ... - } - } - } - }, - "//:extension.bzl%lockfile_ext2": { - "os:macos": { - "bzlTransitiveDigest": "oWDzxG/aLnyY6Ubrfy....+Jp6maQvEPxn0pBM=", - "usagesDigest": "aLmqbvowmHkkBPve05y....yDNGN7oh7r3QIZTZs=", - ..., - "generatedRepoSpecs": { - "hello": { - "bzlFile": "@@//:extension.bzl", - ... - } - } - }, - "os:linux": { - "bzlTransitiveDigest": "eWDzxG/aLsyY3Ubrto....+Jp4maQvEPxn0pLK=", - "usagesDigest": "aLmqbvowmHkkBPve05y....yDNGN7oh7r3QIZTZs=", - ..., - "generatedRepoSpecs": { - "hello": { - "bzlFile": "@@//:extension.bzl", - ... - } - } - } - } - } -} -``` - -### Registry File Hashes - -The `registryFileHashes` section contains the hashes of all files from -remote registries accessed during module resolution. Since the resolution -algorithm is fully deterministic when given the same inputs and all remote -inputs are hashed, this ensures a fully reproducible resolution result while -avoiding excessive duplication of remote information in the lockfile. Note that -this also requires recording when a particular registry didn't contain a certain -module, but a registry with lower precedence did (see the "not found" entry in -the example). This inherently mutable information can be updated via -`bazel mod deps --lockfile_mode=refresh`. - -Bazel uses the hashes from the lockfile to look up registry files in the -repository cache before downloading them, which speeds up subsequent -resolutions. - -### Selected Yanked Versions - -The `selectedYankedVersions` section contains the yanked versions of modules -that were selected by module resolution. Since this usually result in an error -when trying to build, this section is only non-empty when yanked versions are -explicitly allowed via `--allow_yanked_versions` or -`BZLMOD_ALLOW_YANKED_VERSIONS`. - -This field is needed since, compared to module files, yanked version information -is inherently mutable and thus can't be referenced by a hash. This information -can be updated via `bazel mod deps --lockfile_mode=refresh`. 
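-
-As a sketch of how these commands fit together (borrowing the `foo@2.0` module
-from the example above), you might temporarily allow a yanked version and then
-refresh the lockfile's mutable information:
-
-```sh
-# Allow a specific yanked version for this invocation.
-bazel build //... --allow_yanked_versions=foo@2.0
-
-# Refresh mutable information such as yanked versions in the lockfile.
-bazel mod deps --lockfile_mode=refresh
-```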
- -### Module Extensions - -The `moduleExtensions` section is a map that includes only the extensions used -in the current invocation or previously invoked, while excluding any extensions -that are no longer utilized. In other words, if an extension is not being used -anymore across the dependency graph, it is removed from the `moduleExtensions` -map. - -If an extension is independent of the operating system or architecture type, -this section features only a single "general" entry. Otherwise, multiple -entries are included, named after the OS, architecture, or both, with each -corresponding to the result of evaluating the extension on those specifics. - -Each entry in the extension map corresponds to a used extension and is -identified by its containing file and name. The corresponding value for each -entry contains the relevant information associated with that extension: - -1. The `bzlTransitiveDigest` is the digest of the extension implementation - and the .bzl files transitively loaded by it. -2. The `usagesDigest` is the digest of the _usages_ of the extension in the - dependency graph, which includes all tags. -3. Further unspecified fields that track other inputs to the extension, - such as contents of files or directories it reads or environment - variables it uses. -4. The `generatedRepoSpecs` encode the repositories created by the - extension with the current input. -5. The optional `moduleExtensionMetadata` field contains metadata provided by - the extension such as whether certain repositories it created should be - imported via `use_repo` by the root module. This information powers the - `bazel mod tidy` command. - -Module extensions can opt out of being included in the lockfile by setting the -returning metadata with `reproducible = True`. By doing so, they promise that -they will always create the same repositories when given the same inputs. - -## Best Practices - -To maximize the benefits of the lockfile feature, consider the following best -practices: - -* Regularly update the lockfile to reflect changes in project dependencies or - configuration. This ensures that subsequent builds are based on the most - up-to-date and accurate set of dependencies. To lock down all extensions - at once, run `bazel mod deps --lockfile_mode=update`. - -* Include the lockfile in version control to facilitate collaboration and - ensure that all team members have access to the same lockfile, promoting - consistent development environments across the project. - -* Use [`bazelisk`](/install/bazelisk) to run Bazel, and include a - `.bazelversion` file in version control that specifies the Bazel version - corresponding to the lockfile. Because Bazel itself is a dependency of - your build, the lockfile is specific to the Bazel version, and will - change even between [backwards compatible](/release/backward-compatibility) - Bazel releases. Using `bazelisk` ensures that all developers are using - a Bazel version that matches the lockfile. - -By following these best practices, you can effectively utilize the lockfile -feature in Bazel, leading to more efficient, reliable, and collaborative -software development workflows. - -## Merge Conflicts - -The lockfile format is designed to minimize merge conflicts, but they can still -happen. - -### Automatic Resolution - -Bazel provides a custom -[git merge driver](https://git-scm.com/docs/gitattributes#_defining_a_custom_merge_driver) -to help resolve these conflicts automatically. 
- -Set up the driver by adding this line to a `.gitattributes` file in the root of -your git repository: - -```gitattributes -# A custom merge driver for the Bazel lockfile. -# https://bazel.build/external/lockfile#automatic-resolution -MODULE.bazel.lock merge=bazel-lockfile-merge -``` - -Then each developer who wants to use the driver has to register it once by -following these steps: - -1. Install [jq](https://jqlang.github.io/jq/download/) (1.5 or higher). -2. Run the following commands: - -```bash -jq_script=$(curl https://raw.githubusercontent.com/bazelbuild/bazel/master/scripts/bazel-lockfile-merge.jq) -printf '%s\n' "${jq_script}" | less # to optionally inspect the jq script -git config --global merge.bazel-lockfile-merge.name "Merge driver for the Bazel lockfile (MODULE.bazel.lock)" -git config --global merge.bazel-lockfile-merge.driver "jq -s '${jq_script}' -- %O %A %B > %A.jq_tmp && mv %A.jq_tmp %A" -``` - -### Manual Resolution - -Simple merge conflicts in the `registryFileHashes` and `selectedYankedVersions` -fields can be safely resolved by keeping all the entries from both sides of the -conflict. - -Other types of merge conflicts should not be resolved manually. Instead: - -1. Restore the previous state of the lockfile - via `git reset MODULE.bazel.lock && git checkout MODULE.bazel.lock`. -2. Resolve any conflicts in the `MODULE.bazel` file. -3. Run `bazel mod deps` to update the lockfile. diff --git a/8.3.1/external/module.mdx b/8.3.1/external/module.mdx deleted file mode 100644 index 6a9cf13..0000000 --- a/8.3.1/external/module.mdx +++ /dev/null @@ -1,223 +0,0 @@ ---- -title: 'Bazel modules' ---- - - - -A Bazel **module** is a Bazel project that can have multiple versions, each of -which publishes metadata about other modules that it depends on. This is -analogous to familiar concepts in other dependency management systems, such as a -Maven *artifact*, an npm *package*, a Go *module*, or a Cargo *crate*. - -A module must have a `MODULE.bazel` file at its repo root. This file is the -module's manifest, declaring its name, version, list of direct dependencies, and -other information. For a basic example: - -```python -module(name = "my-module", version = "1.0") - -bazel_dep(name = "rules_cc", version = "0.0.1") -bazel_dep(name = "protobuf", version = "3.19.0") -``` - -See the [full list](/rules/lib/globals/module) of directives available in -`MODULE.bazel` files. - -To perform module resolution, Bazel starts by reading the root module's -`MODULE.bazel` file, and then repeatedly requests any dependency's -`MODULE.bazel` file from a [Bazel registry](/external/registry) until it -discovers the entire dependency graph. - -By default, Bazel then [selects](#version-selection) one version of each module -to use. Bazel represents each module with a repo, and consults the registry -again to learn how to define each of the repos. - -## Version format - -Bazel has a diverse ecosystem and projects use various versioning schemes. The -most popular by far is [SemVer](https://semver.org), but there are -also prominent projects using different schemes such as -[Abseil](https://github.com/abseil/abseil-cpp/releases), whose -versions are date-based, for example `20210324.2`). - -For this reason, Bzlmod adopts a more relaxed version of the SemVer spec. The -differences include: - -* SemVer prescribes that the "release" part of the version must consist of 3 - segments: `MAJOR.MINOR.PATCH`. In Bazel, this requirement is loosened so - that any number of segments is allowed. 
-* In SemVer, each of the segments in the "release" part must be digits only. - In Bazel, this is loosened to allow letters too, and the comparison - semantics match the "identifiers" in the "prerelease" part. -* Additionally, the semantics of major, minor, and patch version increases are - not enforced. However, see [compatibility level](#compatibility_level) for - details on how we denote backwards compatibility. - -Any valid SemVer version is a valid Bazel module version. Additionally, two -SemVer versions `a` and `b` compare `a < b` if and only if the same holds when -they're compared as Bazel module versions. - -## Version selection - -Consider the diamond dependency problem, a staple in the versioned dependency -management space. Suppose you have the dependency graph: - -``` - A 1.0 - / \ - B 1.0 C 1.1 - | | - D 1.0 D 1.1 -``` - -Which version of `D` should be used? To resolve this question, Bzlmod uses the -[Minimal Version Selection](https://research.swtch.com/vgo-mvs) -(MVS) algorithm introduced in the Go module system. MVS assumes that all new -versions of a module are backwards compatible, and so picks the highest version -specified by any dependent (`D 1.1` in our example). It's called "minimal" -because `D 1.1` is the earliest version that could satisfy our requirements — -even if `D 1.2` or newer exists, we don't select them. Using MVS creates a -version selection process that is *high-fidelity* and *reproducible*. - -### Yanked versions - -The registry can declare certain versions as *yanked* if they should be avoided -(such as for security vulnerabilities). Bazel throws an error when selecting a -yanked version of a module. To fix this error, either upgrade to a newer, -non-yanked version, or use the -[`--allow_yanked_versions`](/reference/command-line-reference#flag--allow_yanked_versions) -flag to explicitly allow the yanked version. - -## Compatibility level - -In Go, MVS's assumption about backwards compatibility works because it treats -backwards incompatible versions of a module as a separate module. In terms of -SemVer, that means `A 1.x` and `A 2.x` are considered distinct modules, and can -coexist in the resolved dependency graph. This is, in turn, made possible by -encoding the major version in the package path in Go, so there aren't any -compile-time or linking-time conflicts. - -Bazel, however, cannot provide such guarantees, so it needs the "major version" -number in order to detect backwards incompatible versions. This number is called -the *compatibility level*, and is specified by each module version in its -`module()` directive. With this information, Bazel can throw an error when it -detects that versions of the same module with different compatibility levels -exist in the resolved dependency graph. - -## Overrides - -Specify overrides in the `MODULE.bazel` file to alter the behavior of Bazel -module resolution. Only the root module's overrides take effect — if a module is -used as a dependency, its overrides are ignored. - -Each override is specified for a certain module name, affecting all of its -versions in the dependency graph. Although only the root module's overrides take -effect, they can be for transitive dependencies that the root module does not -directly depend on. 
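-
-For instance, the root module can pin a transitive dependency — here the
-`protobuf` module from the earlier example stands in for any dependency — using
-the single-version override described below:
-
-```python
-# MODULE.bazel of the root module — a minimal sketch.
-single_version_override(
-    module_name = "protobuf",
-    version = "3.19.0",
-)
-```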
- -### Single-version override - -The [`single_version_override`](/rules/lib/globals/module#single_version_override) -serves multiple purposes: - -* With the `version` attribute, you can pin a dependency to a specific - version, regardless of which versions of the dependency are requested in the - dependency graph. -* With the `registry` attribute, you can force this dependency to come from a - specific registry, instead of following the normal [registry - selection](/external/registry#selecting_registries) process. -* With the `patch*` attributes, you can specify a set of patches to apply to - the downloaded module. - -These attributes are all optional and can be mixed and matched with each other. - -### Multiple-version override - -A [`multiple_version_override`](/rules/lib/globals/module#multiple_version_override) -can be specified to allow multiple versions of the same module to coexist in the -resolved dependency graph. - -You can specify an explicit list of allowed versions for the module, which must -all be present in the dependency graph before resolution — there must exist -*some* transitive dependency depending on each allowed version. After -resolution, only the allowed versions of the module remain, while Bazel upgrades -other versions of the module to the nearest higher allowed version at the same -compatibility level. If no higher allowed version at the same compatibility -level exists, Bazel throws an error. - -For example, if versions `1.1`, `1.3`, `1.5`, `1.7`, and `2.0` exist in the -dependency graph before resolution and the major version is the compatibility -level: - -* A multiple-version override allowing `1.3`, `1.7`, and `2.0` results in - `1.1` being upgraded to `1.3`, `1.5` being upgraded to `1.7`, and other - versions remaining the same. -* A multiple-version override allowing `1.5` and `2.0` results in an error, as - `1.7` has no higher version at the same compatibility level to upgrade to. -* A multiple-version override allowing `1.9` and `2.0` results in an error, as - `1.9` is not present in the dependency graph before resolution. - -Additionally, users can also override the registry using the `registry` -attribute, similarly to single-version overrides. - -### Non-registry overrides - -Non-registry overrides completely remove a module from version resolution. Bazel -does not request these `MODULE.bazel` files from a registry, but instead from -the repo itself. - -Bazel supports the following non-registry overrides: - -* [`archive_override`](/rules/lib/globals/module#archive_override) -* [`git_override`](/rules/lib/globals/module#git_override) -* [`local_path_override`](/rules/lib/globals/module#local_path_override) - -## Define repos that don't represent Bazel modules - -With `bazel_dep`, you can define repos that represent other Bazel modules. -Sometimes there is a need to define a repo that does _not_ represent a Bazel -module; for example, one that contains a plain JSON file to be read as data. - -In this case, you could use the [`use_repo_rule` -directive](/rules/lib/globals/module#use_repo_rule) to directly define a repo -by invoking a repo rule. This repo will only be visible to the module it's -defined in. - -Under the hood, this is implemented using the same mechanism as [module -extensions](/external/extension), which lets you define repos with more -flexibility. 
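-
-As a sketch of the `use_repo_rule` directive described above (the URL and hash
-are placeholders), a `MODULE.bazel` file might define such a data-only repo as
-follows:
-
-```python
-http_file = use_repo_rule("@bazel_tools//tools/build_defs/repo:http.bzl", "http_file")
-
-http_file(
-    name = "my_data_file",
-    urls = ["https://example.com/data.json"],  # placeholder
-    sha256 = "...",  # placeholder
-)
-```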
-
-## Repository names and strict deps
-
-The [apparent name](/external/overview#apparent-repo-name) of a repo backing a
-module to its direct dependents defaults to its module name, unless the
-`repo_name` attribute of the [`bazel_dep`](/rules/lib/globals/module#bazel_dep)
-directive says otherwise. Note that this means a module can only find its direct
-dependencies. This helps prevent accidental breakages due to changes in
-transitive dependencies.
-
-The [canonical name](/external/overview#canonical-repo-name) of a repo backing a
-module is either `module_name+version` (for example, `bazel_skylib+1.0.3`) or
-`module_name+` (for example, `bazel_features+`), depending on whether there are
-multiple versions of the module in the entire dependency graph (see
-[`multiple_version_override`](/rules/lib/globals/module#multiple_version_override)).
-Note that **the canonical name format** is not an API you should depend on and
-**is subject to change at any time**. Instead of hard-coding the canonical name,
-use a supported way to get it directly from Bazel:
-
-* In BUILD and `.bzl` files, use
-  [`Label.repo_name`](/rules/lib/builtins/Label#repo_name) on a `Label` instance
-  constructed from a label string given by the apparent name of the repo, e.g.,
-  `Label("@bazel_skylib").repo_name`.
-* When looking up runfiles, use
-  [`$(rlocationpath ...)`](https://bazel.build/reference/be/make-variables#predefined_label_variables)
-  or one of the runfiles libraries in
-  `@bazel_tools//tools/{bash,cpp,java}/runfiles` or, for a ruleset `rules_foo`,
-  in `@rules_foo//foo/runfiles`.
-* When interacting with Bazel from an external tool such as an IDE or language
-  server, use the `bazel mod dump_repo_mapping` command to get the mapping from
-  apparent names to canonical names for a given set of repositories.
-
-[Module extensions](/external/extension) can also introduce additional repos
-into the visible scope of a module.
diff --git a/8.3.1/help.mdx b/8.3.1/help.mdx
deleted file mode 100644
index b2976e6..0000000
--- a/8.3.1/help.mdx
+++ /dev/null
@@ -1,53 +0,0 @@
----
-title: 'Getting Help'
----
-
-
-
-This page lists Bazel resources beyond the documentation and covers how to get
-support from the Bazel team and community.
- -## Search existing material - -In addition to the documentation, you can find helpful information by searching: - -* [Bazel user group](https://groups.google.com/g/bazel-discuss) -* [Bazel GitHub Discussions](https://github.com/bazelbuild/bazel/discussions) -* [Bazel blog](https://blog.bazel.build/) -* [Stack Overflow](https://stackoverflow.com/questions/tagged/bazel) -* [`awesome-bazel` resources](https://github.com/jin/awesome-bazel) - -## Watch videos - -There are recordings of Bazel talks at various conferences, such as: - -* Bazel’s annual conference, BazelCon: - * [BazelCon 2023](https://www.youtube.com/playlist?list=PLxNYxgaZ8Rsefrwb_ySGRi_bvQejpO_Tj) - * [BazelCon 2022](https://youtube.com/playlist?list=PLxNYxgaZ8RsdH4GCIZ69dzxQCOPyuNlpF) - * [BazelCon 2021](https://www.youtube.com/playlist?list=PLxNYxgaZ8Rsc3auKhtfIB4qXAYf7whEux) - * [BazelCon 2020](https://www.youtube.com/playlist?list=PLxNYxgaZ8RseRybXNbopHRv6-wGmFr04n) - * [BazelCon 2019](https://youtu.be/eymphDN7No4?t=PLxNYxgaZ8Rsf-7g43Z8LyXct9ax6egdSj) - * [BazelCon 2018](https://youtu.be/DVYRg6b2UBo?t=PLxNYxgaZ8Rsd3Nmvl1W1B4I6nK1674ezp) - * [BazelCon 2017](https://youtu.be/3eFllvz8_0k?t=PLxNYxgaZ8RseY0KmkXQSt0StE71E7yizG) -* Bazel day on [Google Open Source Live](https://opensourcelive.withgoogle.com/events/bazel) - - -## Ask the Bazel community - -If there are no existing answers, you can ask the community by: - -* Emailing the [Bazel user group](https://groups.google.com/g/bazel-discuss) -* Starting a discussion on [GitHub](https://github.com/bazelbuild/bazel/discussions) -* Asking a question on [Stack Overflow](https://stackoverflow.com/questions/tagged/bazel) -* Chatting with other Bazel contributors on [Slack](https://slack.bazel.build/) -* Consulting a [Bazel community expert](/community/experts) - -## Understand Bazel's support level - -Please read the [release page](/release) to understand Bazel's release model and -what level of support Bazel provides. - -## File a bug - -If you encounter a bug or want to request a feature, file a [GitHub -Issue](https://github.com/bazelbuild/bazel/issues). diff --git a/8.3.1/install/bazelisk.mdx b/8.3.1/install/bazelisk.mdx deleted file mode 100644 index a3189cb..0000000 --- a/8.3.1/install/bazelisk.mdx +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: 'Installing / Updating Bazel using Bazelisk' ---- - - - -## Installing Bazel - -[Bazelisk](https://github.com/bazelbuild/bazelisk) is the -recommended way to install Bazel on Ubuntu, Windows, and macOS. It automatically -downloads and installs the appropriate version of Bazel. Use Bazelisk if you -need to switch between different versions of Bazel depending on the current -working directory, or to always keep Bazel updated to the latest release. - -For more details, see -[the official README](https://github.com/bazelbuild/bazelisk/blob/master/README.md). - -## Updating Bazel - -Bazel has a [backward compatibility policy](/release/backward-compatibility) -(see [guidance for rolling out incompatible -changes](/contribute/breaking-changes) if you -are the author of one). That page summarizes best practices on how to test and -migrate your project with upcoming incompatible changes and how to provide -feedback to the incompatible change authors. - -### Managing Bazel versions with Bazelisk - -[Bazelisk](https://github.com/bazelbuild/bazelisk) helps you manage -Bazel versions. - -Bazelisk can: - -* Auto-update Bazel to the latest LTS or rolling release. -* Build the project with a Bazel version specified in the .bazelversion - file. 
Check that file into your version control to ensure the reproducibility
-  of your builds.
-* Help migrate your project for incompatible changes (see above).
-* Easily try release candidates.
-
-### Recommended migration process
-
-Within minor updates to any LTS release, any
-project can be prepared for the next release without breaking
-compatibility with the current release. However, there may be
-backward-incompatible changes between major LTS versions.
-
-Follow this process to migrate from one major version to another:
-
-1. Read the release notes to get advice on how to migrate to the next version.
-1. Major incompatible changes should have an associated `--incompatible_*` flag
-   and a corresponding GitHub issue:
-    * Migration guidance is available in the associated GitHub issue.
-    * Tooling is available to help with the migration for some incompatible
-      changes. For example,
-      [buildifier](https://github.com/bazelbuild/buildtools/releases).
-    * Report migration problems by commenting on the associated GitHub issue.
-
-After migration, you can continue to build your projects without worrying about
-backward-compatibility until the next major release.
diff --git a/8.3.1/install/compile-source.mdx b/8.3.1/install/compile-source.mdx
deleted file mode 100644
index a228b22..0000000
--- a/8.3.1/install/compile-source.mdx
+++ /dev/null
@@ -1,299 +0,0 @@
----
-title: 'Compiling Bazel from Source'
----
-
-
-
-This page describes how to install Bazel from source and provides
-troubleshooting tips for common issues.
-
-To build Bazel from source, you can do one of the following:
-
-* Build it [using an existing Bazel binary](#build-bazel-using-bazel)
-
-* Build it [without an existing Bazel binary](#bootstrap-bazel) which is known
-  as _bootstrapping_.
-
-## Build Bazel using Bazel
-
-### Summary
-
-1. Get the latest Bazel release from the
-   [GitHub release page](https://github.com/bazelbuild/bazel/releases) or with
-   [Bazelisk](https://github.com/bazelbuild/bazelisk).
-
-2. [Download Bazel's sources from GitHub](https://github.com/bazelbuild/bazel/archive/master.zip)
-   and extract them somewhere.
-   Alternatively, you can `git clone` the source tree from https://github.com/bazelbuild/bazel
-
-3. Install the same prerequisites as for bootstrapping (see
-   [for Unix-like systems](#bootstrap-unix-prereq) or
-   [for Windows](#bootstrap-windows-prereq))
-
-4. Build a development build of Bazel using Bazel:
-   `bazel build //src:bazel-dev` (or `bazel build //src:bazel-dev.exe` on
-   Windows)
-
-5. The resulting binary is at `bazel-bin/src/bazel-dev`
-   (or `bazel-bin\src\bazel-dev.exe` on Windows). You can copy it wherever you
-   like and use it immediately without further installation.
-
-Detailed instructions follow below.
-
-### Step 1: Get the latest Bazel release
-
-**Goal**: Install or download a release version of Bazel. Make sure you can run
-it by typing `bazel` in a terminal.
-
-**Reason**: To build Bazel from a GitHub source tree, you need a pre-existing
-Bazel binary. You can install one from a package manager or download one from
-GitHub. See [Installing Bazel](/install). (Or you can [build from
-scratch (bootstrap)](#bootstrap-bazel).)
-
-**Troubleshooting**:
-
-* If you cannot run Bazel by typing `bazel` in a terminal:
-
-    * Maybe your Bazel binary's directory is not on the PATH.
-
-      This is not a big problem. Instead of typing `bazel`, you will need to
-      type the full path.
-
-    * Maybe the Bazel binary itself is not called `bazel` (on Unixes) or
-      `bazel.exe` (on Windows).
-
-      This is not a big problem.
You can either rename the binary, or type the - binary's name instead of `bazel`. - - * Maybe the binary is not executable (on Unixes). - - You must make the binary executable by running `chmod +x /path/to/bazel`. - -### Step 2: Download Bazel's sources from GitHub - -If you are familiar with Git, then just git clone https://github.com/bazelbuild/bazel - -Otherwise: - -1. Download the - [latest sources as a zip file](https://github.com/bazelbuild/bazel/archive/master.zip). - -2. Extract the contents somewhere. - - For example create a `bazel-src` directory under your home directory and - extract there. - -### Step 3: Install prerequisites - -Install the same prerequisites as for bootstrapping (see below) -- JDK, C++ -compiler, MSYS2 (if you are building on Windows), etc. - -### Step 4a: Build Bazel on Ubuntu Linux, macOS, and other Unix-like systems - -For instructions for Windows, see [Build Bazel on Windows](#build-bazel-on-windows). - -**Goal**: Run Bazel to build a custom Bazel binary (`bazel-bin/src/bazel-dev`). - -**Instructions**: - -1. Start a Bash terminal - -2. `cd` into the directory where you extracted (or cloned) Bazel's sources. - - For example if you extracted the sources under your home directory, run: - - cd ~/bazel-src - -3. Build Bazel from source: - - bazel build //src:bazel-dev - - Alternatively you can run `bazel build //src:bazel --compilation_mode=opt` - to yield a smaller binary but it's slower to build. - - You can build with `--stamp --embed_label=X.Y.Z` flag to embed a Bazel - version for the binary so that `bazel --version` outputs the given version. - -4. The output will be at `bazel-bin/src/bazel-dev` (or `bazel-bin/src/bazel`). - -### Step 4b: Build Bazel on Windows - -For instructions for Unix-like systems, see -[Ubuntu Linux, macOS, and other Unix-like systems](#build-bazel-on-unixes). - -**Goal**: Run Bazel to build a custom Bazel binary -(`bazel-bin\src\bazel-dev.exe`). - -**Instructions**: - -1. Start Command Prompt (Start Menu > Run > "cmd.exe") - -2. `cd` into the directory where you extracted (or cloned) Bazel's sources. - - For example if you extracted the sources under your home directory, run: - - cd %USERPROFILE%\bazel-src - -3. Build Bazel from source: - - bazel build //src:bazel-dev.exe - - Alternatively you can run `bazel build //src:bazel.exe - --compilation_mode=opt` to yield a smaller binary but it's slower to build. - - You can build with `--stamp --embed_label=X.Y.Z` flag to embed a Bazel - version for the binary so that `bazel --version` outputs the given version. - -4. The output will be at `bazel-bin\src\bazel-dev.exe` (or - `bazel-bin\src\bazel.exe`). - -### Step 5: Install the built binary - -Actually, there's nothing to install. - -The output of the previous step is a self-contained Bazel binary. You can copy -it to any directory and use immediately. (It's useful if that directory is on -your PATH so that you can run "bazel" everywhere.) - ---- - -## Build Bazel from scratch (bootstrapping) - -You can also build Bazel from scratch, without using an existing Bazel binary. - -### Step 1: Download Bazel's sources (distribution archive) - -(This step is the same for all platforms.) - -1. Download `bazel--dist.zip` from - [GitHub](https://github.com/bazelbuild/bazel/releases), for example - `bazel-0.28.1-dist.zip`. - - **Attention**: - - - There is a **single, architecture-independent** distribution archive. - There are no architecture-specific or OS-specific distribution archives. 
- - These sources are **not the same as the GitHub source tree**. You - have to use the distribution archive to bootstrap Bazel. You cannot - use a source tree cloned from GitHub. (The distribution archive contains - generated source files that are required for bootstrapping and are not part - of the normal Git source tree.) - -2. Unpack the distribution archive somewhere on disk. - - You should verify the signature made by Bazel's - [release key](https://bazel.build/bazel-release.pub.gpg) 3D5919B448457EE0. - -### Step 2a: Bootstrap Bazel on Ubuntu Linux, macOS, and other Unix-like systems - -For instructions for Windows, see [Bootstrap Bazel on Windows](#bootstrap-windows). - -#### 2.1. Install the prerequisites - -* **Bash** - -* **zip, unzip** - -* **C++ build toolchain** - -* **JDK.** Version 21 is required. - -* **Python**. Versions 2 and 3 are supported, installing one of them is - enough. - -For example on Ubuntu Linux you can install these requirements using the -following command: - -```sh -sudo apt-get install build-essential openjdk-21-jdk python zip unzip -``` - -#### 2.2. Bootstrap Bazel on Unix - -1. Open a shell or Terminal window. - -3. `cd` to the directory where you unpacked the distribution archive. - -3. Run the compilation script: `env EXTRA_BAZEL_ARGS="--tool_java_runtime_version=local_jdk" bash ./compile.sh`. - -The compiled output is placed into `output/bazel`. This is a self-contained -Bazel binary, without an embedded JDK. You can copy it anywhere or use it -in-place. For convenience, copy this binary to a directory that's on your -`PATH` (such as `/usr/local/bin` on Linux). - -To build the `bazel` binary in a reproducible way, also set -[`SOURCE_DATE_EPOCH`](https://reproducible-builds.org/specs/source-date-epoch/) -in the "Run the compilation script" step. - -### Step 2b: Bootstrap Bazel on Windows - -For instructions for Unix-like systems, see -[Bootstrap Bazel on Ubuntu Linux, macOS, and other Unix-like systems](#bootstrap-unix). - -#### 2.1. Install the prerequisites - -* [MSYS2 shell](https://msys2.github.io/) - -* **The MSYS2 packages for zip and unzip.** Run the following command in the MSYS2 shell: - - ``` - pacman -S zip unzip patch - ``` - -* **The Visual C++ compiler.** Install the Visual C++ compiler either as part - of Visual Studio 2015 or newer, or by installing the latest [Build Tools - for Visual Studio 2017](https://aka.ms/BuildTools). - -* **JDK.** Version 21 is required. - -* **Python**. Versions 2 and 3 are supported, installing one of them is - enough. You need the Windows-native version (downloadable from - [https://www.python.org](https://www.python.org)). Versions installed via - pacman in MSYS2 will not work. - -#### 2.2. Bootstrap Bazel on Windows - -1. Open the MSYS2 shell. - -2. Set the following environment variables: - * Either `BAZEL_VS` or `BAZEL_VC` (they are *not* the same): Set to the - path to the Visual Studio directory (BAZEL\_VS) or to the Visual - C++ directory (BAZEL\_VC). Setting one of them is enough. - * `BAZEL_SH`: Path of the MSYS2 `bash.exe`. See the command in the - examples below. - - Do not set this to `C:\Windows\System32\bash.exe`. (You have that file - if you installed Windows Subsystem for Linux.) Bazel does not support - this version of `bash.exe`. - * `PATH`: Add the Python directory. - * `JAVA_HOME`: Set to the JDK directory. 
- - **Example** (using BAZEL\_VS): - - export BAZEL_VS="C:/Program Files (x86)/Microsoft Visual Studio/2017/BuildTools" - export BAZEL_SH="$(cygpath -m $(realpath $(which bash)))" - export PATH="/c/python27:$PATH" - export JAVA_HOME="C:/Program Files/Java/jdk-21" - - or (using BAZEL\_VC): - - export BAZEL_VC="C:/Program Files (x86)/Microsoft Visual Studio/2017/BuildTools/VC" - export BAZEL_SH="$(cygpath -m $(realpath $(which bash)))" - export PATH="/c/python27:$PATH" - export JAVA_HOME="C:/Program Files/Java/jdk-21" - -3. `cd` to the directory where you unpacked the distribution archive. - -4. Run the compilation script: `env EXTRA_BAZEL_ARGS="--tool_java_runtime_version=local_jdk" ./compile.sh` - -The compiled output is placed into `output/bazel.exe`. This is a self-contained -Bazel binary, without an embedded JDK. You can copy it anywhere or use it -in-place. For convenience, copy this binary to a directory that's on -your `PATH`. - -To build the `bazel.exe` binary in a reproducible way, also set -[`SOURCE_DATE_EPOCH`](https://reproducible-builds.org/specs/source-date-epoch/) -in the "Run the compilation script" step. - -You don't need to run Bazel from the MSYS2 shell. You can run Bazel from the -Command Prompt (`cmd.exe`) or PowerShell. diff --git a/8.3.1/install/completion.mdx b/8.3.1/install/completion.mdx deleted file mode 100644 index 856784c..0000000 --- a/8.3.1/install/completion.mdx +++ /dev/null @@ -1,129 +0,0 @@ ---- -title: 'Command-Line Completion' ---- - - - -You can enable command-line completion (also known as tab-completion) in Bash -and Zsh. This lets you tab-complete command names, flags names and flag values, -and target names. - -## Bash - -Bazel comes with a Bash completion script. - -If you installed Bazel: - -* From the APT repository, then you're done -- the Bash completion script is - already installed in `/etc/bash_completion.d`. - -* From Homebrew, then you're done -- the Bash completion script is - already installed in `$(brew --prefix)/etc/bash_completion.d`. - -* From the installer downloaded from GitHub, then: - 1. Locate the absolute path of the completion file. The installer copied it - to the `bin` directory. - - Example: if you ran the installer with `--user`, this will be - `$HOME/.bazel/bin`. If you ran the installer as root, this will be - `/usr/local/lib/bazel/bin`. - 2. Do one of the following: - * Either copy this file to your completion directory (if you have - one). - - Example: on Ubuntu this is the `/etc/bash_completion.d` directory. - * Or source the completion file from Bash's RC file. - - Add a line similar to the one below to your `~/.bashrc` (on Ubuntu) - or `~/.bash_profile` (on macOS), using the path to your completion - file's absolute path: - - ``` - source /path/to/bazel-complete.bash - ``` - -* Via [bootstrapping](/install/compile-source), then: - 1. Build the completion script: - - ``` - bazel build //scripts:bazel-complete.bash - ``` - 2. The completion file is built under - `bazel-bin/scripts/bazel-complete.bash`. - - Do one of the following: - * Copy this file to your completion directory, if you have - one. - - Example: on Ubuntu this is the `/etc/bash_completion.d` directory - * Copy it somewhere on your local disk, such as to `$HOME`, and - source the completion file from Bash's RC file. 
- - Add a line similar to the one below to your `~/.bashrc` (on Ubuntu) - or `~/.bash_profile` (on macOS), using the path to your completion - file's absolute path: - - ``` - source /path/to/bazel-complete.bash - ``` - -## Zsh - -Bazel comes with a Zsh completion script. - -If you installed Bazel: - -* From the APT repository, then you're done -- the Zsh completion script is - already installed in `/usr/share/zsh/vendor-completions`. - - > If you have a heavily customized `.zshrc` and the autocomplete - > does not function, try one of the following solutions: - > - > Add the following to your `.zshrc`: - > - > ``` - > zstyle :compinstall filename '/home/tradical/.zshrc' - > - > autoload -Uz compinit - > compinit - > ``` - > - > or - > - > Follow the instructions - > [here](https://stackoverflow.com/questions/58331977/bazel-tab-auto-complete-in-zsh-not-working) - > - > If you are using `oh-my-zsh`, you may want to install and enable - > the `zsh-autocomplete` plugin. If you'd prefer not to, use one of the - > solutions described above. - -* From Homebrew, then you're done -- the Zsh completion script is - already installed in `$(brew --prefix)/share/zsh/site-functions`. - -* From the installer downloaded from GitHub, then: - 1. Locate the absolute path of the completion file. The installer copied it - to the `bin` directory. - - Example: if you ran the installer with `--user`, this will be - `$HOME/.bazel/bin`. If you ran the installer as root, this will be - `/usr/local/lib/bazel/bin`. - - 2. Add this script to a directory on your `$fpath`: - - ``` - fpath[1,0]=~/.zsh/completion/ - mkdir -p ~/.zsh/completion/ - cp /path/from/above/step/_bazel ~/.zsh/completion - ``` - - You may have to call `rm -f ~/.zcompdump; compinit` - the first time to make it work. - - 3. Optionally, add the following to your .zshrc. - - ``` - # This way the completion script does not have to parse Bazel's options - # repeatedly. The directory in cache-path must be created manually. - zstyle ':completion:*' use-cache on - zstyle ':completion:*' cache-path ~/.zsh/cache - ``` diff --git a/8.3.1/install/docker-container.mdx b/8.3.1/install/docker-container.mdx deleted file mode 100644 index 3a5d017..0000000 --- a/8.3.1/install/docker-container.mdx +++ /dev/null @@ -1,135 +0,0 @@ ---- -title: 'Getting Started with Bazel Docker Container' ---- - - - -This page provides details on the contents of the Bazel container, how to build -the [abseil-cpp](https://github.com/abseil/abseil-cpp) project using Bazel -inside the Bazel container, and how to build this project directly -from the host machine using the Bazel container with directory mounting. - -## Build Abseil project from your host machine with directory mounting - -The instructions in this section allow you to build using the Bazel container -with the sources checked out in your host environment. A container is started up -for each build command you execute. Build results are cached in your host -environment so they can be reused across builds. - -Clone the project to a directory in your host machine. - -```posix-terminal -git clone --depth 1 --branch 20220623.1 https://github.com/abseil/abseil-cpp.git /src/workspace -``` - -Create a folder that will have cached results to be shared across builds. - -```posix-terminal -mkdir -p /tmp/build_output/ -``` - -Use the Bazel container to build the project and make the build -outputs available in the output folder in your host machine. 
- -```posix-terminal -docker run \ - -e USER="$(id -u)" \ - -u="$(id -u)" \ - -v /src/workspace:/src/workspace \ - -v /tmp/build_output:/tmp/build_output \ - -w /src/workspace \ - gcr.io/bazel-public/bazel:latest \ - --output_user_root=/tmp/build_output \ - build //absl/... -``` - -Build the project with sanitizers by adding the `--config={{ "" }}asan{{ "" }}|{{ "" }}tsan{{ "" }}|{{ "" }}msan{{ "" }}` build -flag to select AddressSanitizer (asan), ThreadSanitizer (tsan) or -MemorySanitizer (msan) accordingly. - -```posix-terminal -docker run \ - -e USER="$(id -u)" \ - -u="$(id -u)" \ - -v /src/workspace:/src/workspace \ - -v /tmp/build_output:/tmp/build_output \ - -w /src/workspace \ - gcr.io/bazel-public/bazel:latest \ - --output_user_root=/tmp/build_output \ - build --config={asan | tsan | msan} -- //absl/... -//absl/types:variant_test -``` - -## Build Abseil project from inside the container - -The instructions in this section allow you to build using the Bazel container -with the sources inside the container. By starting a container at the beginning -of your development workflow and doing changes in the worskpace within the -container, build results will be cached. - -Start a shell in the Bazel container: - -```posix-terminal -docker run --interactive --entrypoint=/bin/bash gcr.io/bazel-public/bazel:latest -``` - -Each container id is unique. In the instructions below, the container was 5a99103747c6. - -Clone the project. - -```posix-terminal -ubuntu@5a99103747c6:~$ git clone --depth 1 --branch 20220623.1 https://github.com/abseil/abseil-cpp.git && cd abseil-cpp/ -``` - -Do a regular build. - -```posix-terminal -ubuntu@5a99103747c6:~/abseil-cpp$ bazel build //absl/... -``` - -Build the project with sanitizers by adding the `--config={{ "" }}asan{{ "" }}|{{ "" }}tsan{{ "" }}|{{ "" }}msan{{ "" }}` -build flag to select AddressSanitizer (asan), ThreadSanitizer (tsan) or -MemorySanitizer (msan) accordingly. - -```posix-terminal -ubuntu@5a99103747c6:~/abseil-cpp$ bazel build --config={asan | tsan | msan} -- //absl/... -//absl/types:variant_test -``` - -## Explore the Bazel container - -If you haven't already, start an interactive shell inside the Bazel container. - -```posix-terminal -docker run -it --entrypoint=/bin/bash gcr.io/bazel-public/bazel:latest -ubuntu@5a99103747c6:~$ -``` - -Explore the container contents. - -```posix-terminal -ubuntu@5a99103747c6:~$ gcc --version -gcc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0 -Copyright (C) 2019 Free Software Foundation, Inc. -This is free software; see the source for copying conditions. There is NO -warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - -ubuntu@5a99103747c6:~$ java -version -openjdk version "1.8.0_362" -OpenJDK Runtime Environment (build 1.8.0_362-8u372-ga~us1-0ubuntu1~20.04-b09) -OpenJDK 64-Bit Server VM (build 25.362-b09, mixed mode) - -ubuntu@5a99103747c6:~$ python -V -Python 3.8.10 - -ubuntu@5a99103747c6:~$ bazel version -WARNING: Invoking Bazel in batch mode since it is not invoked from within a workspace (below a directory having a WORKSPACE file). -Extracting Bazel installation... 
-Build label: 6.2.1 -Build target: bazel-out/k8-opt/bin/src/main/java/com/google/devtools/build/lib/bazel/BazelServer_deploy.jar -Build time: Fri Jun 2 16:59:58 2023 (1685725198) -Build timestamp: 1685725198 -Build timestamp as int: 1685725198 -``` - -## Explore the Bazel Dockerfile - -If you want to check how the Bazel Docker image is built, you can find its Dockerfile at [bazelbuild/continuous-integration/bazel/oci](https://github.com/bazelbuild/continuous-integration/tree/master/bazel/oci). diff --git a/8.3.1/install/ide.mdx b/8.3.1/install/ide.mdx deleted file mode 100644 index f70919b..0000000 --- a/8.3.1/install/ide.mdx +++ /dev/null @@ -1,122 +0,0 @@ ---- -title: 'Integrating Bazel with IDEs' ---- - - - -This page covers how to integrate Bazel with IDEs, such as IntelliJ, Android -Studio, and CLion (or build your own IDE plugin). It also includes links to -installation and plugin details. - -IDEs integrate with Bazel in a variety of ways, from features that allow Bazel -executions from within the IDE, to awareness of Bazel structures such as syntax -highlighting of the `BUILD` files. - -If you are interested in developing an editor or IDE plugin for Bazel, please -join the `#ide` channel on the [Bazel Slack](https://slack.bazel.build) or start -a discussion on [GitHub](https://github.com/bazelbuild/bazel/discussions). - -## IDEs and editors - -### IntelliJ, Android Studio, and CLion - -[Official plugin](http://ij.bazel.build) for IntelliJ, Android Studio, and -CLion. The plugin is [open source](https://github.com/bazelbuild/intellij). - -This is the open source version of the plugin used internally at Google. - -Features: - -* Interop with language-specific plugins. Supported languages include Java, - Scala, and Python. -* Import `BUILD` files into the IDE with semantic awareness of Bazel targets. -* Make your IDE aware of Starlark, the language used for Bazel's `BUILD` and - `.bzl`files -* Build, test, and execute binaries directly from the IDE -* Create configurations for debugging and running binaries. - -To install, go to the IDE's plugin browser and search for `Bazel`. - -To manually install older versions, download the zip files from JetBrains' -Plugin Repository and install the zip file from the IDE's plugin browser: - -* [Android Studio - plugin](https://plugins.jetbrains.com/plugin/9185-android-studio-with-bazel) -* [IntelliJ - plugin](https://plugins.jetbrains.com/plugin/8609-intellij-with-bazel) -* [CLion plugin](https://plugins.jetbrains.com/plugin/9554-clion-with-bazel) - -### Xcode - -[rules_xcodeproj](https://github.com/buildbuddy-io/rules_xcodeproj), -[Tulsi](https://tulsi.bazel.build), and -[XCHammer](https://github.com/pinterest/xchammer) generate Xcode -projects from Bazel `BUILD` files. - -### Visual Studio Code - -Official plugin for VS Code. - -Features: - -* Bazel Build Targets tree -* Starlark debugger for `.bzl` files during a build (set breakpoints, step - through code, inspect variables, and so on) - -Find [the plugin on the Visual Studio -marketplace](https://marketplace.visualstudio.com/items?itemName=BazelBuild.vscode-bazel). -The plugin is [open source](https://github.com/bazelbuild/vscode-bazel). - -See also: [Autocomplete for Source Code](#autocomplete-for-source-code) - -### Atom - -Find the [`language-bazel` package](https://atom.io/packages/language-bazel) -on the Atom package manager. 
- -See also: [Autocomplete for Source Code](#autocomplete-for-source-code) - -### Vim - -See [`bazelbuild/vim-bazel` on GitHub](https://github.com/bazelbuild/vim-bazel) - -See also: [Autocomplete for Source Code](#autocomplete-for-source-code) - -### Emacs - -See [`bazelbuild/bazel-emacs-mode` on -GitHub](https://github.com/bazelbuild/emacs-bazel-mode) - -See also: [Autocomplete for Source Code](#autocomplete-for-source-code) - -### Visual Studio - -[Lavender](https://github.com/tmandry/lavender) is an experimental project for -generating Visual Studio projects that use Bazel for building. - -### Eclipse - -[Bazel Eclipse Feature](https://github.com/salesforce/bazel-eclipse) -is a set of plugins for importing Bazel packages into an Eclipse workspace as -Eclipse projects. - -## Autocomplete for Source Code - -### C Language Family (C++, C, Objective-C, and Objective-C++) - -[`hedronvision/bazel-compile-commands-extractor`](https://github.com/hedronvision/bazel-compile-commands-extractor) enables autocomplete, smart navigation, quick fixes, and more in a wide variety of extensible editors, including VSCode, Vim, Emacs, Atom, and Sublime. It lets language servers, like clangd and ccls, and other types of tooling, draw upon Bazel's understanding of how `cc` and `objc` code will be compiled, including how it configures cross-compilation for other platforms. - -### Java - -[`georgewfraser/java-language-server`](https://github.com/georgewfraser/java-language-server) - Java Language Server (LSP) with support for Bazel-built projects - -## Automatically run build and test on file change - -[Bazel watcher](https://github.com/bazelbuild/bazel-watcher) is a -tool for building Bazel targets when source files change. - -## Building your own IDE plugin - -Read the [**IDE support** blog -post](https://blog.bazel.build/2016/06/10/ide-support.html) to learn more about -the Bazel APIs to use when building an IDE plugin. diff --git a/8.3.1/install/index.mdx b/8.3.1/install/index.mdx deleted file mode 100644 index 10f53c4..0000000 --- a/8.3.1/install/index.mdx +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: 'Installing Bazel' ---- - - - -This page describes the various platforms supported by Bazel and links -to the packages for more details. - -[Bazelisk](/install/bazelisk) is the recommended way to install Bazel on [Ubuntu Linux](/install/ubuntu), [macOS](/install/os-x), and [Windows](/install/windows). - -You can find available Bazel releases on our [release page](/release). - -## Community-supported packages - -Bazel community members maintain these packages. The Bazel team doesn't -officially support them. Contact the package maintainers for support. 
- -* [Arch Linux][arch] -* [CentOS 6](https://github.com/sub-mod/bazel-builds) -* [Debian](https://qa.debian.org/developer.php?email=team%2Bbazel%40tracker.debian.org) -* [FreeBSD](https://www.freshports.org/devel/bazel) -* [Gentoo](https://packages.gentoo.org/packages/dev-util/bazel) -* [Homebrew](https://formulae.brew.sh/formula/bazel) -* [Nixpkgs](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/tools/build-managers/bazel) -* [openSUSE](/install/suse) -* [Parabola](https://www.parabola.nu/packages/?q=bazel) -* [Scoop](https://github.com/scoopinstaller/scoop-main/blob/master/bucket/bazel.json) -* [Raspberry Pi](https://github.com/koenvervloesem/bazel-on-arm/blob/master/README.md) - -## Community-supported architectures - -* [ppc64el](https://ftp2.osuosl.org/pub/ppc64el/bazel/) - -For other platforms, you can try to [compile from source](/install/compile-source). - -[arch]: https://archlinux.org/packages/extra/x86_64/bazel/ diff --git a/8.3.1/install/os-x.mdx b/8.3.1/install/os-x.mdx deleted file mode 100644 index 9a0f3f8..0000000 --- a/8.3.1/install/os-x.mdx +++ /dev/null @@ -1,141 +0,0 @@ ---- -title: 'Installing Bazel on macOS' ---- - - - -This page describes how to install Bazel on macOS and set up your environment. - -You can install Bazel on macOS using one of the following methods: - -* *Recommended*: [Use Bazelisk](/install/bazelisk) -* [Use Homebrew](#install-on-mac-os-x-homebrew) -* [Use the binary installer](#install-with-installer-mac-os-x) -* [Compile Bazel from source](/install/compile-source) - -Bazel comes with two completion scripts. After installing Bazel, you can: - -* Access the [bash completion script](/install/completion#bash) -* Install the [zsh completion script](/install/completion#zsh) - -
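-One possible quick start with Bazelisk (a sketch, assuming you already have
-Homebrew installed; the `bazelisk` formula is separate from the `bazel`
-formula covered below):
-
-```posix-terminal
-brew install bazelisk
-
-bazelisk version
-```
-
-Bazelisk downloads an appropriate Bazel release and forwards commands to it,
-so `bazelisk version` also confirms that Bazel itself runs.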

-## Installing using Homebrew

- -### Step 1: Install Homebrew on macOS - -Install [Homebrew](https://brew.sh/) (a one-time step): - -```posix-terminal -/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" -``` - -### Step 2: Install Bazel via Homebrew - -Install the Bazel package via Homebrew as follows: - -```posix-terminal -brew install bazel -``` - -All set! You can confirm Bazel is installed successfully by running the -following command: - -```posix-terminal -bazel --version -``` - -Once installed, you can upgrade to a newer version of Bazel using the -following command: - -```posix-terminal -brew upgrade bazel -``` - -
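-Note that `brew upgrade bazel` always moves you to the newest release. If a
-project needs to stay on a fixed version, one option is a `.bazelversion`
-file, which Bazelisk honors (see the Bazelisk page above). A minimal sketch,
-using an illustrative version number:
-
-```posix-terminal
-echo "7.2.1" > .bazelversion   # illustrative version; check this file in
-
-bazelisk build //...           # Bazelisk fetches and runs that exact version
-```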

-## Installing using the binary installer

- -The binary installers are on Bazel's -[GitHub releases page](https://github.com/bazelbuild/bazel/releases). - -The installer contains the Bazel binary. Some additional libraries -must also be installed for Bazel to work. - -### Step 1: Install Xcode command line tools - -If you don't intend to use `ios_*` rules, it is sufficient to install the Xcode -command line tools package by using `xcode-select`: - -```posix-terminal -xcode-select --install -``` - -Otherwise, for `ios_*` rule support, you must have Xcode 6.1 or later with iOS -SDK 8.1 installed on your system. - -Download Xcode from the -[App Store](https://apps.apple.com/us/app/xcode/id497799835) or the -[Apple Developer site](https://developer.apple.com/download/more/?=xcode). - -Once Xcode is installed, accept the license agreement for all users with the -following command: - -```posix-terminal -sudo xcodebuild -license accept -``` - -### Step 2: Download the Bazel installer - -Next, download the Bazel binary installer named -`bazel--installer-darwin-x86_64.sh` from the -[Bazel releases page on GitHub](https://github.com/bazelbuild/bazel/releases). - -**On macOS Catalina or newer (macOS >= 11)**, due to Apple's new app signing requirements, -you need to download the installer from the terminal using `curl`, replacing -the version variable with the Bazel version you want to download: - -```posix-terminal -export BAZEL_VERSION=5.2.0 - -curl -fLO "https://github.com/bazelbuild/bazel/releases/download/{{ '' }}$BAZEL_VERSION{{ '' }}/bazel-{{ '' }}$BAZEL_VERSION{{ '' }}-installer-darwin-x86_64.sh" -``` - -This is a temporary workaround until the macOS release flow supports -signing ([#9304](https://github.com/bazelbuild/bazel/issues/9304)). - -### Step 3: Run the installer - -Run the Bazel installer as follows: - -```posix-terminal -chmod +x "bazel-{{ '' }}$BAZEL_VERSION{{ '' }}-installer-darwin-x86_64.sh" - -./bazel-{{ '' }}$BAZEL_VERSION{{ '' }}-installer-darwin-x86_64.sh --user -``` - -The `--user` flag installs Bazel to the `$HOME/bin` directory on your system and -sets the `.bazelrc` path to `$HOME/.bazelrc`. Use the `--help` command to see -additional installation options. - -If you are **on macOS Catalina or newer (macOS >= 11)** and get an error that _**“bazel-real” cannot be -opened because the developer cannot be verified**_, you need to re-download -the installer from the terminal using `curl` as a workaround; see Step 2 above. - -### Step 4: Set up your environment - -If you ran the Bazel installer with the `--user` flag as above, the Bazel -executable is installed in your `{{ '' }}HOME{{ '' }}/bin` directory. -It's a good idea to add this directory to your default paths, as follows: - -```posix-terminal -export PATH="{{ '' }}PATH{{ '' }}:{{ '' }}HOME{{ '' }}/bin" -``` - -You can also add this command to your `~/.bashrc`, `~/.zshrc`, or `~/.profile` -file. - -All set! You can confirm Bazel is installed successfully by running the -following command: - -```posix-terminal -bazel --version -``` -To update to a newer release of Bazel, download and install the desired version. - diff --git a/8.3.1/install/suse.mdx b/8.3.1/install/suse.mdx deleted file mode 100644 index a4d2e9e..0000000 --- a/8.3.1/install/suse.mdx +++ /dev/null @@ -1,35 +0,0 @@ ---- -title: 'Installing Bazel on openSUSE Tumbleweed & Leap' ---- - - - -This page describes how to install Bazel on openSUSE Tumbleweed and Leap. - -`NOTE:` The Bazel team does not officially maintain openSUSE support. 
For issues -using Bazel on openSUSE please file a ticket at [bugzilla.opensuse.org](https://bugzilla.opensuse.org/). - -Packages are provided for openSUSE Tumbleweed and Leap. You can find all -available Bazel versions via openSUSE's [software search](https://software.opensuse.org/search?utf8=%E2%9C%93&baseproject=ALL&q=bazel). - -The commands below must be run either via `sudo` or while logged in as `root`. - -## Installing Bazel on openSUSE - -Run the following commands to install the package. If you need a specific -version, you can install it via the specific `bazelXXX` package, otherwise, -just `bazel` is enough: - -To install the latest version of Bazel, run: - -```posix-terminal -zypper install bazel -``` - -You can also install a specific version of Bazel by specifying the package -version with `bazel{{ '' }}version{{ '' }}`. For example, to install -Bazel 4.2, run: - -```posix-terminal -zypper install bazel4.2 -``` diff --git a/8.3.1/install/ubuntu.mdx b/8.3.1/install/ubuntu.mdx deleted file mode 100644 index a31bd2f..0000000 --- a/8.3.1/install/ubuntu.mdx +++ /dev/null @@ -1,166 +0,0 @@ ---- -title: 'Installing Bazel on Ubuntu' ---- - - - -This page describes the options for installing Bazel on Ubuntu. -It also provides links to the Bazel completion scripts and the binary installer, -if needed as a backup option (for example, if you don't have admin access). - -Supported Ubuntu Linux platforms: - -* 22.04 (LTS) -* 20.04 (LTS) -* 18.04 (LTS) - -Bazel should be compatible with other Ubuntu releases and Debian -"stretch" and above, but is untested and not guaranteed to work. - -Install Bazel on Ubuntu using one of the following methods: - -* *Recommended*: [Use Bazelisk](/install/bazelisk) -* [Use our custom APT repository](#install-on-ubuntu) -* [Use the binary installer](#binary-installer) -* [Use the Bazel Docker container](#docker-container) -* [Compile Bazel from source](/install/compile-source) - -**Note:** For Arm-based systems, the APT repository does not contain an `arm64` -release, and there is no binary installer available. Either use Bazelisk or -compile from source. - -Bazel comes with two completion scripts. After installing Bazel, you can: - -* Access the [bash completion script](/install/completion#bash) -* Install the [zsh completion script](/install/completion#zsh) - -## Using Bazel's apt repository - -### Step 1: Add Bazel distribution URI as a package source - -**Note:** This is a one-time setup step. - -```posix-terminal -sudo apt install apt-transport-https curl gnupg -y - -curl -fsSL https://bazel.build/bazel-release.pub.gpg | gpg --dearmor >bazel-archive-keyring.gpg - -sudo mv bazel-archive-keyring.gpg /usr/share/keyrings - -echo "deb [arch=amd64 signed-by=/usr/share/keyrings/bazel-archive-keyring.gpg] https://storage.googleapis.com/bazel-apt stable jdk1.8" | sudo tee /etc/apt/sources.list.d/bazel.list -``` - -The component name "jdk1.8" is kept only for legacy reasons and doesn't relate -to supported or included JDK versions. Bazel releases are Java-version agnostic. -Changing the "jdk1.8" component name would break existing users of the repo. - -### Step 2: Install and update Bazel - -```posix-terminal -sudo apt update && sudo apt install bazel -``` - -Once installed, you can upgrade to a newer version of Bazel as part of your normal system updates: - -```posix-terminal -sudo apt update && sudo apt full-upgrade -``` - -The `bazel` package always installs the latest stable version of Bazel. 
You -can install specific, older versions of Bazel in addition to the latest one, -such as this: - -```posix-terminal -sudo apt install bazel-1.0.0 -``` - -This installs Bazel 1.0.0 as `/usr/bin/bazel-1.0.0` on your system. This -can be useful if you need a specific version of Bazel to build a project, for -example because it uses a `.bazelversion` file to explicitly state with which -Bazel version it should be built. - -Optionally, you can set `bazel` to a specific version by creating a symlink: - -```posix-terminal -sudo ln -s /usr/bin/bazel-1.0.0 /usr/bin/bazel - -bazel --version # 1.0.0 -``` - -### Step 3: Install a JDK (optional) - -Bazel includes a private, bundled JRE as its runtime and doesn't require you to -install any specific version of Java. - -However, if you want to build Java code using Bazel, you have to install a JDK. - -```posix-terminal -sudo apt install default-jdk -``` - -## Using the binary installer - -Generally, you should use the apt repository, but the binary installer -can be useful if you don't have admin permissions on your machine or -can't add custom repositories. - -The binary installers can be downloaded from Bazel's [GitHub releases page](https://github.com/bazelbuild/bazel/releases). - -The installer contains the Bazel binary and extracts it into your `$HOME/bin` -folder. Some additional libraries must be installed manually for Bazel to work. - -### Step 1: Install required packages - -Bazel needs a C++ compiler and unzip / zip in order to work: - -```posix-terminal -sudo apt install g++ unzip zip -``` - -If you want to build Java code using Bazel, install a JDK: - -```posix-terminal -sudo apt-get install default-jdk -``` - -### Step 2: Run the installer - -Next, download the Bazel binary installer named `bazel-{{ '' }}version{{ '' }}-installer-linux-x86_64.sh` -from the [Bazel releases page on GitHub](https://github.com/bazelbuild/bazel/releases). - -Run it as follows: - -```posix-terminal -chmod +x bazel-{{ '' }}version{{ '' }}-installer-linux-x86_64.sh - -./bazel-{{ '' }}version{{ '' }}-installer-linux-x86_64.sh --user -``` - -The `--user` flag installs Bazel to the `$HOME/bin` directory on your system and -sets the `.bazelrc` path to `$HOME/.bazelrc`. Use the `--help` command to see -additional installation options. - -### Step 3: Set up your environment - -If you ran the Bazel installer with the `--user` flag as above, the Bazel -executable is installed in your `$HOME/bin` directory. -It's a good idea to add this directory to your default paths, as follows: - -```posix-terminal -export PATH="$PATH:$HOME/bin" -``` - -You can also add this command to your `~/.bashrc` or `~/.zshrc` file to make it -permanent. - -## Using the Bazel Docker container - -We publish Docker container with Bazel installed for each Bazel version at `gcr.io/bazel-public/bazel`. -You can use the Docker container as follows: - -``` -$ docker pull gcr.io/bazel-public/bazel: -``` - -The Docker container is built by [these steps](https://github.com/bazelbuild/continuous-integration/tree/master/bazel/oci). - diff --git a/8.3.1/migrate/index.mdx b/8.3.1/migrate/index.mdx deleted file mode 100644 index 5d96c4a..0000000 --- a/8.3.1/migrate/index.mdx +++ /dev/null @@ -1,11 +0,0 @@ ---- -title: 'Migrating to Bazel' ---- - - - -This page links to migration guides for Bazel. 
- -* [Maven](/migrate/maven) -* [Xcode](/migrate/xcode) -* [CocoaPods](/migrate/cocoapods) diff --git a/8.3.1/migrate/maven.mdx b/8.3.1/migrate/maven.mdx deleted file mode 100644 index 38aaffc..0000000 --- a/8.3.1/migrate/maven.mdx +++ /dev/null @@ -1,241 +0,0 @@ ---- -title: 'Migrating from Maven to Bazel' ---- - - - -This page describes how to migrate from Maven to Bazel, including the -prerequisites and installation steps. It describes the differences between Maven -and Bazel, and provides a migration example using the Guava project. - -When migrating from any build tool to Bazel, it's best to have both build tools -running in parallel until you have fully migrated your development team, CI -system, and any other relevant systems. You can run Maven and Bazel in the same -repository. - -Note: While Bazel supports downloading and publishing Maven artifacts with -[rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external) -, it does not directly support Maven-based plugins. Maven plugins can't be -directly run by Bazel since there's no Maven compatibility layer. - -## Before you begin - -* [Install Bazel](/install) if it's not yet installed. -* If you're new to Bazel, go through the tutorial [Introduction to Bazel: - Build Java](/start/java) before you start migrating. The tutorial explains - Bazel's concepts, structure, and label syntax. - -## Differences between Maven and Bazel - -* Maven uses top-level `pom.xml` file(s). Bazel supports multiple build files - and multiple targets per `BUILD` file, allowing for builds that are more - incremental than Maven's. -* Maven takes charge of steps for the deployment process. Bazel does not - automate deployment. -* Bazel enables you to express dependencies between languages. -* As you add new sections to the project, with Bazel you may need to add new - `BUILD` files. Best practice is to add a `BUILD` file to each new Java - package. - -## Migrate from Maven to Bazel - -The steps below describe how to migrate your project to Bazel: - -1. [Create the MODULE.bazel file](#1-build) -2. [Create one BUILD file](#2-build) -3. [Create more BUILD files](#3-build) -4. [Build using Bazel](#4-build) - -Examples below come from a migration of the [Guava -project](https://github.com/google/guava) from Maven to Bazel. The -Guava project used is release `v31.1`. The examples using Guava do not walk -through each step in the migration, but they do show the files and contents that -are generated or added manually for the migration. - -``` -$ git clone https://github.com/google/guava.git && cd guava -$ git checkout v31.1 -``` - -### 1. Create the MODULE.bazel file - -Create a file named `MODULE.bazel` at the root of your project. If your project -has no external dependencies, this file can be empty. - -If your project depends on files or packages that are not in one of the -project's directories, specify these external dependencies in the MODULE.bazel -file. You can use `rules_jvm_external` to manage dependencies from Maven. For -instructions about using this ruleset, see [the -README](https://github.com/bazelbuild/rules_jvm_external/#rules_jvm_external) -. - -#### Guava project example: external dependencies - -You can list the external dependencies of the [Guava -project](https://github.com/google/guava) with the -[`rules_jvm_external`](https://github.com/bazelbuild/rules_jvm_external) -ruleset. 
- -Add the following snippet to the `MODULE.bazel` file: - -```python -bazel_dep(name = "rules_jvm_external", version = "6.2") -maven = use_extension("@rules_jvm_external//:extensions.bzl", "maven") -maven.install( - artifacts = [ - "com.google.code.findbugs:jsr305:3.0.2", - "com.google.errorprone:error_prone_annotations:2.11.0", - "com.google.j2objc:j2objc-annotations:1.3", - "org.codehaus.mojo:animal-sniffer-annotations:1.20", - "org.checkerframework:checker-qual:3.12.0", - ], - repositories = [ - "https://repo1.maven.org/maven2", - ], -) -use_repo(maven, "maven") -``` - -### 2. Create one BUILD file - -Now that you have your workspace defined and external dependencies (if -applicable) listed, you need to create `BUILD` files to describe how your -project should be built. Unlike Maven with its one `pom.xml` file, Bazel can use -many `BUILD` files to build a project. These files specify multiple build -targets, which allow Bazel to produce incremental builds. - -Add `BUILD` files in stages. Start with adding one `BUILD` file at the root of -your project and using it to do an initial build using Bazel. Then, you refine -your build by adding more `BUILD` files with more granular targets. - -1. In the same directory as your `MODULE.bazel` file, create a text file and - name it `BUILD`. - -2. In this `BUILD` file, use the appropriate rule to create one target to build - your project. Here are some tips: - - * Use the appropriate rule: - * To build projects with a single Maven module, use the - `java_library` rule as follows: - - ```python - java_library( - name = "everything", - srcs = glob(["src/main/java/**/*.java"]), - resources = glob(["src/main/resources/**"]), - deps = ["//:all-external-targets"], - ) - ``` - - * To build projects with multiple Maven modules, use the - `java_library` rule as follows: - - ```python - java_library( - name = "everything", - srcs = glob([ - "Module1/src/main/java/**/*.java", - "Module2/src/main/java/**/*.java", - ... - ]), - resources = glob([ - "Module1/src/main/resources/**", - "Module2/src/main/resources/**", - ... - ]), - deps = ["//:all-external-targets"], - ) - ``` - - * To build binaries, use the `java_binary` rule: - - ```python - java_binary( - name = "everything", - srcs = glob(["src/main/java/**/*.java"]), - resources = glob(["src/main/resources/**"]), - deps = ["//:all-external-targets"], - main_class = "com.example.Main" - ) - ``` - - * Specify the attributes: - * `name`: Give the target a meaningful name. In the examples - above, the target is called "everything." - * `srcs`: Use globbing to list all .java files in your project. - * `resources`: Use globbing to list all resources in your project. - * `deps`: You need to determine which external dependencies your - project needs. - * Take a look at the [example below of this top-level BUILD - file](#guava-2) from the migration of the Guava project. - -3. Now that you have a `BUILD` file at the root of your project, build your - project to ensure that it works. On the command line, from your workspace - directory, use `bazel build //:everything` to build your project with Bazel. - - The project has now been successfully built with Bazel. You will need to add - more `BUILD` files to allow incremental builds of the project. - -#### Guava project example: start with one BUILD file - -When migrating the Guava project to Bazel, initially one `BUILD` file is used to -build the entire project. 
Here are the contents of this initial `BUILD` file in -the workspace directory: - -```python -java_library( - name = "everything", - srcs = glob([ - "guava/src/**/*.java", - "futures/failureaccess/src/**/*.java", - ]), - javacopts = ["-XepDisableAllChecks"], - deps = [ - "@maven//:com_google_code_findbugs_jsr305", - "@maven//:com_google_errorprone_error_prone_annotations", - "@maven//:com_google_j2objc_j2objc_annotations", - "@maven//:org_checkerframework_checker_qual", - "@maven//:org_codehaus_mojo_animal_sniffer_annotations", - ], -) -``` - -### 3. Create more BUILD files (optional) - -Bazel does work with just one `BUILD file`, as you saw after completing your -first build. You should still consider breaking the build into smaller chunks by -adding more `BUILD` files with granular targets. - -Multiple `BUILD` files with multiple targets will give the build increased -granularity, allowing: - -* increased incremental builds of the project, -* increased parallel execution of the build, -* better maintainability of the build for future users, and -* control over visibility of targets between packages, which can prevent - issues such as libraries containing implementation details leaking into - public APIs. - -Tips for adding more `BUILD` files: - -* You can start by adding a `BUILD` file to each Java package. Start with Java - packages that have the fewest dependencies and work you way up to packages - with the most dependencies. -* As you add `BUILD` files and specify targets, add these new targets to the - `deps` sections of targets that depend on them. Note that the `glob()` - function does not cross package boundaries, so as the number of packages - grows the files matched by `glob()` will shrink. -* Any time you add a `BUILD` file to a `main` directory, ensure that you add a - `BUILD` file to the corresponding `test` directory. -* Take care to limit visibility properly between packages. -* To simplify troubleshooting errors in your setup of `BUILD` files, ensure - that the project continues to build with Bazel as you add each build file. - Run `bazel build //...` to ensure all of your targets still build. - -### 4. Build using Bazel - -You've been building using Bazel as you add `BUILD` files to validate the setup -of the build. - -When you have `BUILD` files at the desired granularity, you can use Bazel to -produce all of your builds. diff --git a/8.3.1/migrate/xcode.mdx b/8.3.1/migrate/xcode.mdx deleted file mode 100644 index 986cd11..0000000 --- a/8.3.1/migrate/xcode.mdx +++ /dev/null @@ -1,280 +0,0 @@ ---- -title: 'Migrating from Xcode to Bazel' ---- - - - -This page describes how to build or test an Xcode project with Bazel. It -describes the differences between Xcode and Bazel, and provides the steps for -converting an Xcode project to a Bazel project. It also provides troubleshooting -solutions to address common errors. - -## Differences between Xcode and Bazel - -* Bazel requires you to explicitly specify every build target and its - dependencies, plus the corresponding build settings via build rules. - -* Bazel requires all files on which the project depends to be present within - the workspace directory or specified as dependencies in the `MODULE.bazel` - file. - -* When building Xcode projects with Bazel, the `BUILD` file(s) become the - source of truth. 
If you work on the project in Xcode, you must generate a - new version of the Xcode project that matches the `BUILD` files using - [rules_xcodeproj](https://github.com/buildbuddy-io/rules_xcodeproj/) - whenever you update the `BUILD` files. Certain changes to the `BUILD` files - such as adding dependencies to a target don't require regenerating the - project which can speed up development. If you're not using Xcode, the - `bazel build` and `bazel test` commands provide build and test capabilities - with certain limitations described later in this guide. - -## Before you begin - -Before you begin, do the following: - -1. [Install Bazel](/install) if you have not already done so. - -2. If you're not familiar with Bazel and its concepts, complete the [iOS app - tutorial](/start/ios-app)). You should understand the Bazel workspace, - including the `MODULE.bazel` and `BUILD` files, as well as the concepts of - targets, build rules, and Bazel packages. - -3. Analyze and understand the project's dependencies. - -### Analyze project dependencies - -Unlike Xcode, Bazel requires you to explicitly declare all dependencies for -every target in the `BUILD` file. - -For more information on external dependencies, see [Working with external -dependencies](/docs/external). - -## Build or test an Xcode project with Bazel - -To build or test an Xcode project with Bazel, do the following: - -1. [Create the `MODULE.bazel` file](#create-workspace) - -2. [(Experimental) Integrate SwiftPM dependencies](#integrate-swiftpm) - -3. [Create a `BUILD` file:](#create-build-file) - - a. [Add the application target](#add-app-target) - - b. [(Optional) Add the test target(s)](#add-test-target) - - c. [Add the library target(s)](#add-library-target) - -4. [(Optional) Granularize the build](#granularize-build) - -5. [Run the build](#run-build) - -6. [Generate the Xcode project with rules_xcodeproj](#generate-the-xcode-project-with-rules_xcodeproj) - -### Step 1: Create the `MODULE.bazel` file - -Create a `MODULE.bazel` file in a new directory. This directory becomes the -Bazel workspace root. If the project uses no external dependencies, this file -can be empty. If the project depends on files or packages that are not in one of -the project's directories, specify these external dependencies in the -`MODULE.bazel` file. - -Note: Place the project source code within the directory tree containing the -`MODULE.bazel` file. - -### Step 2: (Experimental) Integrate SwiftPM dependencies - -To integrate SwiftPM dependencies into the Bazel workspace with -[swift_bazel](https://github.com/cgrindel/swift_bazel), you must -convert them into Bazel packages as described in the [following -tutorial](https://chuckgrindel.com/swift-packages-in-bazel-using-swift_bazel/) -. - -Note: SwiftPM support is a manual process with many variables. SwiftPM -integration with Bazel has not been fully verified and is not officially -supported. - -### Step 3: Create a `BUILD` file - -Once you have defined the workspace and external dependencies, you need to -create a `BUILD` file that tells Bazel how the project is structured. 
Create the -`BUILD` file at the root of the Bazel workspace and configure it to do an -initial build of the project as follows: - -* [Step 3a: Add the application target](#step-3a-add-the-application-target) -* [Step 3b: (Optional) Add the test target(s)](#step-3b-optional-add-the-test-target-s) -* [Step 3c: Add the library target(s)](#step-3c-add-the-library-target-s) - -**Tip:** To learn more about packages and other Bazel concepts, see [Workspaces, -packages, and targets](/concepts/build-ref). - -#### Step 3a: Add the application target - -Add a -[`macos_application`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-macos.md#macos_application) -or an -[`ios_application`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-ios.md#ios_application) -rule target. This target builds a macOS or iOS application bundle, respectively. -In the target, specify the following at the minimum: - -* `bundle_id` - the bundle ID (reverse-DNS path followed by app name) of the - binary. - -* `provisioning_profile` - provisioning profile from your Apple Developer - account (if building for an iOS device device). - -* `families` (iOS only) - whether to build the application for iPhone, iPad, - or both. - -* `infoplists` - list of .plist files to merge into the final Info.plist file. - -* `minimum_os_version` - the minimum version of macOS or iOS that the - application supports. This ensures Bazel builds the application with the - correct API levels. - -#### Step 3b: (Optional) Add the test target(s) - -Bazel's [Apple build -rules](https://github.com/bazelbuild/rules_apple) support running -unit and UI tests on all Apple platforms. Add test targets as follows: - -* [`macos_unit_test`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-macos.md#macos_unit_test) - to run library-based and application-based unit tests on a macOS. - -* [`ios_unit_test`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-ios.md#ios_unit_test) - to build and run library-based unit tests on iOS. - -* [`ios_ui_test`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-ios.md#ios_ui_test) - to build and run user interface tests in the iOS simulator. - -* Similar test rules exist for - [tvOS](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-tvos.md), - [watchOS](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-watchos.md) - and - [visionOS](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-visionos.md). - -At the minimum, specify a value for the `minimum_os_version` attribute. While -other packaging attributes, such as `bundle_identifier` and `infoplists`, -default to most commonly used values, ensure that those defaults are compatible -with the project and adjust them as necessary. For tests that require the iOS -simulator, also specify the `ios_application` target name as the value of the -`test_host` attribute. - -#### Step 3c: Add the library target(s) - -Add an [`objc_library`](/reference/be/objective-c#objc_library) target for each -Objective-C library and a -[`swift_library`](https://github.com/bazelbuild/rules_swift/blob/master/doc/rules.md#swift_library) -target for each Swift library on which the application and/or tests depend. - -Add the library targets as follows: - -* Add the application library targets as dependencies to the application - targets. - -* Add the test library targets as dependencies to the test targets. - -* List the implementation sources in the `srcs` attribute. 
- -* List the headers in the `hdrs` attribute. - -Note: You can use the [`glob`](/reference/be/functions#glob) function to include -all sources and/or headers of a certain type. Use it carefully as it might -include files you do not want Bazel to build. - -You can browse existing examples for various types of applications directly in -the [rules_apple examples -directory](https://github.com/bazelbuild/rules_apple/tree/master/examples/). For -example: - -* [macOS application targets](https://github.com/bazelbuild/rules_apple/tree/master/examples/macos) - -* [iOS applications targets](https://github.com/bazelbuild/rules_apple/tree/master/examples/ios) - -* [Multi platform applications (macOS, iOS, watchOS, tvOS)](https://github.com/bazelbuild/rules_apple/tree/master/examples/multi_platform) - -For more information on build rules, see [Apple Rules for -Bazel](https://github.com/bazelbuild/rules_apple). - -At this point, it is a good idea to test the build: - -`bazel build //:` - -### Step 4: (Optional) Granularize the build - -If the project is large, or as it grows, consider chunking it into multiple -Bazel packages. This increased granularity provides: - -* Increased incrementality of builds, - -* Increased parallelization of build tasks, - -* Better maintainability for future users, - -* Better control over source code visibility across targets and packages. This - prevents issues such as libraries containing implementation details leaking - into public APIs. - -Tips for granularizing the project: - -* Put each library in its own Bazel package. Start with those requiring the - fewest dependencies and work your way up the dependency tree. - -* As you add `BUILD` files and specify targets, add these new targets to the - `deps` attributes of targets that depend on them. - -* The `glob()` function does not cross package boundaries, so as the number of - packages grows the files matched by `glob()` will shrink. - -* When adding a `BUILD` file to a `main` directory, also add a `BUILD` file to - the corresponding `test` directory. - -* Enforce healthy visibility limits across packages. - -* Build the project after each major change to the `BUILD` files and fix build - errors as you encounter them. - -### Step 5: Run the build - -Run the fully migrated build to ensure it completes with no errors or warnings. -Run every application and test target individually to more easily find sources -of any errors that occur. - -For example: - -```posix-terminal -bazel build //:my-target -``` - -### Step 6: Generate the Xcode project with rules_xcodeproj - -When building with Bazel, the `MODULE.bazel` and `BUILD` files become the source -of truth about the build. To make Xcode aware of this, you must generate a -Bazel-compatible Xcode project using -[rules_xcodeproj](https://github.com/buildbuddy-io/rules_xcodeproj#features) -. - -### Troubleshooting - -Bazel errors can arise when it gets out of sync with the selected Xcode version, -like when you apply an update. Here are some things to try if you're -experiencing errors with Xcode, for example "Xcode version must be specified to -use an Apple CROSSTOOL". - -* Manually run Xcode and accept any terms and conditions. - -* Use Xcode select to indicate the correct version, accept the license, and - clear Bazel's state. - -```posix-terminal - sudo xcode-select -s /Applications/Xcode.app/Contents/Developer - - sudo xcodebuild -license - - bazel sync --configure -``` - -* If this does not work, you may also try running `bazel clean --expunge`. 
- -Note: If you've saved your Xcode to a different path, you can use `xcode-select --s` to point to that path. diff --git a/8.3.1/query/aquery.mdx b/8.3.1/query/aquery.mdx deleted file mode 100644 index 2176ff6..0000000 --- a/8.3.1/query/aquery.mdx +++ /dev/null @@ -1,385 +0,0 @@ ---- -title: 'Action Graph Query (aquery)' ---- - - - -The `aquery` command allows you to query for actions in your build graph. -It operates on the post-analysis Configured Target Graph and exposes -information about **Actions, Artifacts and their relationships.** - -`aquery` is useful when you are interested in the properties of the Actions/Artifacts -generated from the Configured Target Graph. For example, the actual commands run -and their inputs/outputs/mnemonics. - -The tool accepts several command-line [options](#command-options). -Notably, the aquery command runs on top of a regular Bazel build and inherits -the set of options available during a build. - -It supports the same set of functions that is also available to traditional -`query` but `siblings`, `buildfiles` and -`tests`. - -An example `aquery` output (without specific details): - -``` -$ bazel aquery 'deps(//some:label)' -action 'Writing file some_file_name' - Mnemonic: ... - Target: ... - Configuration: ... - ActionKey: ... - Inputs: [...] - Outputs: [...] -``` - -## Basic syntax - -A simple example of the syntax for `aquery` is as follows: - -`bazel aquery "aquery_function(function(//target))"` - -The query expression (in quotes) consists of the following: - -* `aquery_function(...)`: functions specific to `aquery`. - More details [below](#using-aquery-functions). -* `function(...)`: the standard [functions](/query/language#functions) - as traditional `query`. -* `//target` is the label to the interested target. - -``` -# aquery examples: -# Get the action graph generated while building //src/target_a -$ bazel aquery '//src/target_a' - -# Get the action graph generated while building all dependencies of //src/target_a -$ bazel aquery 'deps(//src/target_a)' - -# Get the action graph generated while building all dependencies of //src/target_a -# whose inputs filenames match the regex ".*cpp". -$ bazel aquery 'inputs(".*cpp", deps(//src/target_a))' -``` - -## Using aquery functions - -There are three `aquery` functions: - -* `inputs`: filter actions by inputs. -* `outputs`: filter actions by outputs -* `mnemonic`: filter actions by mnemonic - -`expr ::= inputs(word, expr)` - - The `inputs` operator returns the actions generated from building `expr`, - whose input filenames match the regex provided by `word`. - -`$ bazel aquery 'inputs(".*cpp", deps(//src/target_a))'` - -`outputs` and `mnemonic` functions share a similar syntax. - -You can also combine functions to achieve the AND operation. For example: - -``` - $ bazel aquery 'mnemonic("Cpp.*", (inputs(".*cpp", inputs("foo.*", //src/target_a))))' -``` - - The above command would find all actions involved in building `//src/target_a`, - whose mnemonics match `"Cpp.*"` and inputs match the patterns - `".*cpp"` and `"foo.*"`. - -Important: aquery functions can't be nested inside non-aquery functions. -Conceptually, this makes sense since the output of aquery functions is Actions, -not Configured Targets. 
-
-An example of the syntax error produced:
-
-```
-  $ bazel aquery 'deps(inputs(".*cpp", //src/target_a))'
-  ERROR: aquery filter functions (inputs, outputs, mnemonic) produce actions,
-  and therefore can't be the input of other function types: deps
-  deps(inputs(".*cpp", //src/target_a))
-```
-
-## Options
-
-### Build options
-
-`aquery` runs on top of a regular Bazel build and thus inherits the set of
-[options](/reference/command-line-reference#build-options)
-available during a build.
-
-### Aquery options
-
-#### `--output=(text|summary|proto|jsonproto|textproto), default=text`
-
-The default output format (`text`) is human-readable; use `proto`, `textproto`,
-or `jsonproto` for a machine-readable format. The proto message is
-`analysis.ActionGraphContainer`.
-
-#### `--include_commandline, default=true`
-
-Includes the content of the action command lines in the output (potentially large).
-
-#### `--include_artifacts, default=true`
-
-Includes names of the action inputs and outputs in the output (potentially large).
-
-#### `--include_aspects, default=true`
-
-Whether to include Aspect-generated actions in the output.
-
-#### `--include_param_files, default=false`
-
-Include the content of the param files used in the command (potentially large).
-
-Warning: Enabling this flag will automatically enable the `--include_commandline` flag.
-
-#### `--include_file_write_contents, default=false`
-
-Include the file contents for the `actions.write()` action and the contents of
-the manifest file for the `SourceSymlinkManifest` action. The file contents are
-returned in the `file_contents` field with the proto-based output formats
-(`--output=proto|textproto|jsonproto`). With `--output=text`, the output
-includes a
-```
-FileWriteContents: [...]
-```
-line.
-
-#### `--skyframe_state, default=false`
-
-Without performing extra analysis, dump the Action Graph from Skyframe.
-
-Note: Specifying a target with `--skyframe_state` is currently not supported.
-This flag is only available with `--output=proto` or `--output=textproto`.
-
-## Other tools and features
-
-### Querying against the state of Skyframe
-
-[Skyframe](/reference/skyframe) is the evaluation and
-incrementality model of Bazel. On each instance of the Bazel server, Skyframe
-stores the dependency graph constructed from the previous runs of the
-[Analysis phase](/run/build#analysis).
-
-In some cases, it is useful to query the Action Graph on Skyframe.
-An example use case would be:
-
-1. Run `bazel build //target_a`
-2. Run `bazel build //target_b`
-3. File `foo.out` was generated.
-
-_As a Bazel user, I want to determine if `foo.out` was generated from building
-`//target_a` or `//target_b`_.
-
-One could run `bazel aquery 'outputs("foo.out", //target_a)'` and
-`bazel aquery 'outputs("foo.out", //target_b)'` to figure out the action responsible
-for creating `foo.out`, and in turn the target. However, the number of different
-targets previously built can be larger than 2, which makes running multiple `aquery`
-commands a hassle.
-
-As an alternative, the `--skyframe_state` flag can be used:
-
-```
-  # List all actions on Skyframe's action graph
-  $ bazel aquery --output=proto --skyframe_state
-
-  # or
-
-  # List all actions on Skyframe's action graph, whose output matches "foo.out"
-  $ bazel aquery --output=proto --skyframe_state 'outputs("foo.out")'
-```
-
-With `--skyframe_state` mode, `aquery` takes the content of the Action Graph
-that Skyframe keeps on the Bazel instance, (optionally) performs filtering on
-it, and outputs the content, without re-running the analysis phase.
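-
-For a quick, ad-hoc check, you can also dump the Skyframe action graph in
-`textproto` form and scan it with ordinary text tools. A minimal sketch (the
-file name and the `foo.out` pattern are illustrative):
-
-```
-# Dump the action graph that Skyframe currently holds, then look for the
-# action(s) mentioning a particular output.
-$ bazel aquery --output=textproto --skyframe_state > skyframe_actions.textproto
-$ grep -n "foo.out" skyframe_actions.textproto
-```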
-
-#### Special considerations
-
-##### Output format
-
-`--skyframe_state` is currently only available for `--output=proto`
-and `--output=textproto`.
-
-##### Non-inclusion of target labels in the query expression
-
-Currently, `--skyframe_state` queries the whole action graph that exists on
-Skyframe, regardless of the targets. Having the target label specified in the
-query together with `--skyframe_state` is considered a syntax error:
-
-```
-  # WRONG: Target Included
-  $ bazel aquery --output=proto --skyframe_state //target_a
-  ERROR: Error while parsing '//target_a)': Specifying build target(s) [//target_a] with --skyframe_state is currently not supported.
-
-  # WRONG: Target Included
-  $ bazel aquery --output=proto --skyframe_state 'inputs(".*.java", //target_a)'
-  ERROR: Error while parsing '//target_a)': Specifying build target(s) [//target_a] with --skyframe_state is currently not supported.
-
-  # CORRECT: Without Target
-  $ bazel aquery --output=proto --skyframe_state
-  $ bazel aquery --output=proto --skyframe_state 'inputs(".*.java")'
-```
-
-### Comparing aquery outputs
-
-You can compare the outputs of two different aquery invocations using the
-`aquery_differ` tool. For instance, when you make some changes to your rule
-definition and want to verify that the command lines being run did not change,
-`aquery_differ` is the tool for that.
-
-The tool is available in the [bazelbuild/bazel](https://github.com/bazelbuild/bazel/tree/master/tools/aquery_differ) repository.
-To use it, clone the repository to your local machine. An example usage:
-
-```
-  $ bazel run //tools/aquery_differ -- \
-  --before=/path/to/before.proto \
-  --after=/path/to/after.proto \
-  --input_type=proto \
-  --attrs=cmdline \
-  --attrs=inputs
-```
-
-The above command returns the difference between the `before` and `after` aquery
-outputs: which actions were present in one but not the other, which actions have
-different command lines/inputs in each aquery output, and so on. The result of
-running the above command would be:
-
-```
-  Aquery output 'after' change contains an action that generates the following outputs that aquery output 'before' change doesn't:
-  ...
-  /list of output files/
-  ...
-
-  [cmdline]
-  Difference in the action that generates the following output(s):
-    /path/to/abc.out
-  --- /path/to/before.proto
-  +++ /path/to/after.proto
-  @@ -1,3 +1,3 @@
-    ...
-    /cmdline diff, in unified diff format/
-    ...
-```
-
-#### Command options
-
-`--before, --after`: The aquery output files to be compared.
-
-`--input_type=(proto|text_proto), default=proto`: the format of the input
-files. Support is provided for `proto` and `textproto` aquery output.
-
-`--attrs=(cmdline|inputs), default=cmdline`: the attributes of actions
-to be compared.
-
-### Aspect-on-aspect
-
-It is possible for [Aspects](/extending/aspects)
-to be applied on top of each other. The aquery output of the action generated by
-these Aspects would then include the _Aspect path_, which is the sequence of
-Aspects applied to the target which generated the action.
-
-An example of Aspect-on-Aspect:
-
-```
-  t0
-  ^
-  |  <- a1
-  t1
-  ^
-  |  <- a2
-  t2
-```
-
-Let t_i be a target of rule r_i, which applies an Aspect a_i to its
-dependencies.
-
-Assume that a2 generates an action X when applied to target t0. The text output
-of `bazel aquery --include_aspects 'deps(//t2)'` for action X would be:
-
-```
-  action ...
-  Mnemonic: ...
-  Target: //my_pkg:t0
-  Configuration: ...
-  AspectDescriptors: [//my_pkg:rule.bzl%a2(foo=...)
-    -> //my_pkg:rule.bzl%a1(bar=...)]
-  ...
-```
-
-This means that action `X` was generated by Aspect `a2` applied onto
-`a1(t0)`, where `a1(t0)` is the result of Aspect `a1` applied
-onto target `t0`.
-
-Each `AspectDescriptor` has the following format:
-
-```
-  AspectClass([param=value,...])
-```
-
-`AspectClass` could be the name of the Aspect class (for native Aspects) or
-`bzl_file%aspect_name` (for Starlark Aspects). `AspectDescriptor`s are
-sorted in topological order of the
-[dependency graph](/extending/aspects#aspect_basics).
-
-### Linking with the JSON profile
-
-While aquery provides information about the actions being run in a build (why
-they're being run, their inputs/outputs), the
-[JSON profile](/rules/performance#performance-profiling)
-tells us the timing and duration of their execution.
-It is possible to combine these two sets of information via a common
-denominator: an action's primary output.
-
-To include actions' outputs in the JSON profile, generate the profile with
-`--experimental_include_primary_output --noslim_profile`.
-Slim profiles are incompatible with the inclusion of primary outputs. An
-action's primary output is included by default by aquery.
-
-We don't currently provide a canonical tool to combine these two data sources,
-but you should be able to build your own script with the above information.
-
-## Known issues
-
-### Handling shared actions
-
-Sometimes actions are
-[shared](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/actions/Actions.java;l=59;drc=146d51aa1ec9dcb721a7483479ef0b1ac21d39f1)
-between configured targets.
-
-In the execution phase, those shared actions are
-[simply considered as one](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/actions/Actions.java;l=241;drc=003b8734036a07b496012730964ac220f486b61f)
-and only executed once. However, aquery operates on the pre-execution,
-post-analysis action graph, and hence treats these like separate actions whose
-output Artifacts have the exact same `execPath`. As a result, equivalent
-Artifacts appear duplicated.
-
-The list of aquery issues/planned features can be found on
-[GitHub](https://github.com/bazelbuild/bazel/labels/team-Performance).
-
-## FAQs
-
-### The ActionKey remains the same even though the content of an input file changed.
-
-In the context of aquery, the `ActionKey` refers to the `String` gotten from
-[ActionAnalysisMetadata#getKey](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/actions/ActionAnalysisMetadata.java;l=89;drc=8b856f5484f0117b2aebc302f849c2a15f273310):
-
-```
-  Returns a string encoding all of the significant behaviour of this Action that might affect the
-  output. The general contract of `getKey` is this: if the work to be performed by the
-  execution of this action changes, the key must change.
-
-  ...
-
-  Examples of changes that should affect the key are:
-
-  - Changes to the BUILD file that materially affect the rule which gave rise to this Action.
-  - Changes to the command-line options, environment, or other global configuration resources
-    which affect the behaviour of this kind of Action (other than changes to the names of the
-    input/output files, which are handled externally).
-  - An upgrade to the build tools which changes the program logic of this kind of Action
-    (typically this is achieved by incorporating a UUID into the key, which is changed each
-    time the program logic of this action changes).
-  Note the following exception: for actions that discover inputs, the key must change if any
-  input names change or else action validation may falsely validate.
-```
-
-This excludes the changes to the content of the input files, and is not to be
-confused with
-[RemoteCacheClient#ActionKey](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/remote/common/RemoteCacheClient.java;l=38;drc=21577f202eb90ce94a337ebd2ede824d609537b6).
-
-## Updates
-
-For any issues/feature requests, please file an issue [here](https://github.com/bazelbuild/bazel/issues/new).
diff --git a/8.3.1/query/cquery.mdx b/8.3.1/query/cquery.mdx
deleted file mode 100644
index bd829c8..0000000
--- a/8.3.1/query/cquery.mdx
+++ /dev/null
@@ -1,646 +0,0 @@
----
-title: 'Configurable Query (cquery)'
----
-
-
-`cquery` is a variant of [`query`](/query/language) that correctly handles
-[`select()`](/docs/configurable-attributes) and build options' effects on the
-build graph.
-
-It achieves this by running over the results of Bazel's [analysis
-phase](/extending/concepts#evaluation-model),
-which integrates these effects. `query`, by contrast, runs over the results of
-Bazel's loading phase, before options are evaluated.
-
-For example:
-
-```
-$ cat > tree/BUILD <<EOF
-sh_library(
-    name = "ash",
-    deps = select({
-        ":excelsior": [":manna-ash"],
-        ":americana": [":white-ash"],
-        "//conditions:default": [":common-ash"],
-    }),
-)
-sh_library(name = "manna-ash")
-sh_library(name = "white-ash")
-sh_library(name = "common-ash")
-config_setting(
-    name = "excelsior",
-    values = {"define": "species=excelsior"},
-)
-config_setting(
-    name = "americana",
-    values = {"define": "species=americana"},
-)
-EOF
-```
-
-```
-# Traditional query: query doesn't know which select() branch you will choose,
-# so it conservatively lists all possible choices, including all used config_settings.
-$ bazel query "deps(//tree:ash)" --noimplicit_deps
-//tree:americana
-//tree:ash
-//tree:common-ash
-//tree:excelsior
-//tree:manna-ash
-//tree:white-ash
-
-# cquery: cquery lets you set build options at the command line and chooses
-# the exact dependencies those options imply (and also the config_setting targets).
-$ bazel cquery "deps(//tree:ash)" --define species=excelsior --noimplicit_deps
-//tree:ash (9f87702)
-//tree:manna-ash (9f87702)
-//tree:americana (9f87702)
-//tree:excelsior (9f87702)
-```
-
-Each result includes a [unique identifier](#configurations) `(9f87702)` of
-the [configuration](/reference/glossary#configuration) the
-target is built with.
-
-Since `cquery` runs over the configured target graph, it doesn't have insight
-into artifacts like build actions nor access to
-[`test_suite`](/reference/be/general#test_suite)
-rules as they are not configured targets. For the former, see
-[`aquery`](/query/aquery).
-
-## Basic syntax
-
-A simple `cquery` call looks like:
-
-`bazel cquery "function(//target)"`
-
-The query expression `"function(//target)"` consists of the following:
-
-* **`function(...)`** is the function to run on the target. `cquery`
-  supports most
-  of `query`'s [functions](/query/language#functions), plus a
-  few new ones.
-* **`//target`** is the expression fed to the function. In this example, the
-  expression is a simple target. But the query language also allows nesting of
-  functions. See the [Query guide](/query/guide) for examples.
-
-
-`cquery` requires a target to run through the [loading and analysis](/extending/concepts#evaluation-model)
-phases.
Unless otherwise specified, `cquery` parses the target(s) listed in the -query expression. See [`--universe_scope`](#universe-scope) -for querying dependencies of top-level build targets. - -## Configurations - -The line: - -``` -//tree:ash (9f87702) -``` - -means `//tree:ash` was built in a configuration with ID `9f87702`. For most -targets, this is an opaque hash of the build option values defining the -configuration. - -To see the configuration's complete contents, run: - -``` -$ bazel config 9f87702 -``` - -`9f87702` is a prefix of the complete ID. This is because complete IDs are -SHA-256 hashes, which are long and hard to follow. `cquery` understands any valid -prefix of a complete ID, similar to -[Git short hashes](https://git-scm.com/book/en/v2/Git-Tools-Revision-Selection#_revision_selection). - To see complete IDs, run `$ bazel config`. - -## Target pattern evaluation - -`//foo` has a different meaning for `cquery` than for `query`. This is because -`cquery` evaluates _configured_ targets and the build graph may have multiple -configured versions of `//foo`. - -For `cquery`, a target pattern in the query expression evaluates -to every configured target with a label that matches that pattern. Output is -deterministic, but `cquery` makes no ordering guarantee beyond the -[core query ordering contract](/query/language#graph-order). - -This produces subtler results for query expressions than with `query`. -For example, the following can produce multiple results: - -``` -# Analyzes //foo in the target configuration, but also analyzes -# //genrule_with_foo_as_tool which depends on an exec-configured -# //foo. So there are two configured target instances of //foo in -# the build graph. -$ bazel cquery //foo --universe_scope=//foo,//genrule_with_foo_as_tool -//foo (9f87702) -//foo (exec) -``` - -If you want to precisely declare which instance to query over, use -the [`config`](#config) function. - -See `query`'s [target pattern -documentation](/query/language#target-patterns) for more information on target patterns. - -## Functions - -Of the [set of functions](/query/language#functions "list of query functions") -supported by `query`, `cquery` supports all but -[`allrdeps`](/query/language#allrdeps), -[`buildfiles`](/query/language#buildfiles), -[`rbuildfiles`](/query/language#rbuildfiles), -[`siblings`](/query/language#siblings), [`tests`](/query/language#tests), and -[`visible`](/query/language#visible). - -`cquery` also introduces the following new functions: - -### config - -`expr ::= config(expr, word)` - -The `config` operator attempts to find the configured target for -the label denoted by the first argument and configuration specified by the -second argument. - -Valid values for the second argument are `null` or a -[custom configuration hash](#configurations). Hashes can be retrieved from `$ -bazel config` or a previous `cquery`'s output. - -Examples: - -``` -$ bazel cquery "config(//bar, 3732cc8)" --universe_scope=//foo -``` - -``` -$ bazel cquery "deps(//foo)" -//bar (exec) -//baz (exec) - -$ bazel cquery "config(//baz, 3732cc8)" -``` - -If not all results of the first argument can be found in the specified -configuration, only those that can be found are returned. If no results -can be found in the specified configuration, the query fails. - -## Options - -### Build options - -`cquery` runs over a regular Bazel build and thus inherits the set of -[options](/reference/command-line-reference#build-options) available during a build. 
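-
-For example, the `//tree:ash` query from the introduction can be re-run with a
-different `--define` value to confirm that the chosen `select()` branch tracks
-the build options. A minimal sketch (the output below is illustrative and the
-configuration hash will differ):
-
-```
-$ bazel cquery "deps(//tree:ash)" --define species=americana --noimplicit_deps
-//tree:ash (9f87702)
-//tree:white-ash (9f87702)
-//tree:americana (9f87702)
-//tree:excelsior (9f87702)
-```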
-
-### Using cquery options
-
-#### `--universe_scope` (comma-separated list)
-
-Often, the dependencies of configured targets go through
-[transitions](/extending/rules#configurations),
-which causes their configuration to differ from that of their dependents. This
-flag allows you to query a target as if it were built as a dependency or a
-transitive dependency of another target. For example:
-
-```
-# x/BUILD
-genrule(
-    name = "my_gen",
-    srcs = ["x.in"],
-    outs = ["x.cc"],
-    cmd = "$(locations :tool) $< >$@",
-    tools = [":tool"],
-)
-cc_binary(
-    name = "tool",
-    srcs = ["tool.cpp"],
-)
-```
-
-Genrules configure their tools in the
-[exec configuration](/extending/rules#configurations)
-so the following queries would produce the following outputs:
-
-| Query | Target Built | Output |
-| ----- | ------------ | ------ |
-| `bazel cquery "//x:tool"` | `//x:tool` | `//x:tool(targetconfig)` |
-| `bazel cquery "//x:tool" --universe_scope="//x:my_gen"` | `//x:my_gen` | `//x:tool(execconfig)` |
- -If this flag is set, its contents are built. _If it's not set, all targets -mentioned in the query expression are built_ instead. The transitive closure of the -built targets are used as the universe of the query. Either way, the targets to -be built must be buildable at the top level (that is, compatible with top-level -options). `cquery` returns results in the transitive closure of these -top-level targets. - -Even if it's possible to build all targets in a query expression at the top -level, it may be beneficial to not do so. For example, explicitly setting -`--universe_scope` could prevent building targets multiple times in -configurations you don't care about. It could also help specify which configuration version of a -target you're looking for (since it's not currently possible -to fully specify this any other way). You should set this flag -if your query expression is more complex than `deps(//foo)`. - -#### `--implicit_deps` (boolean, default=True) - -Setting this flag to false filters out all results that aren't explicitly set in -the BUILD file and instead set elsewhere by Bazel. This includes filtering resolved -toolchains. - -#### `--tool_deps` (boolean, default=True) - -Setting this flag to false filters out all configured targets for which the -path from the queried target to them crosses a transition between the target -configuration and the -[non-target configurations](/extending/rules#configurations). -If the queried target is in the target configuration, setting `--notool_deps` will -only return targets that also are in the target configuration. If the queried -target is in a non-target configuration, setting `--notool_deps` will only return -targets also in non-target configurations. This setting generally does not affect filtering -of resolved toolchains. - -#### `--include_aspects` (boolean, default=True) - -Include dependencies added by [aspects](/extending/aspects). - -If this flag is disabled, `cquery somepath(X, Y)` and -`cquery deps(X) | grep 'Y'` omit Y if X only depends on it through an aspect. - -## Output formats - -By default, cquery outputs results in a dependency-ordered list of label and configuration pairs. -There are other options for exposing the results as well. - -### Transitions - -``` ---transitions=lite ---transitions=full -``` - -Configuration [transitions](/extending/rules#configurations) -are used to build targets underneath the top level targets in different -configurations than the top level targets. - -For example, a target might impose a transition to the exec configuration on all -dependencies in its `tools` attribute. These are known as attribute -transitions. Rules can also impose transitions on their own configurations, -known as rule class transitions. This output format outputs information about -these transitions such as what type they are and the effect they have on build -options. - -This output format is triggered by the `--transitions` flag which by default is -set to `NONE`. It can be set to `FULL` or `LITE` mode. `FULL` mode outputs -information about rule class transitions and attribute transitions including a -detailed diff of the options before and after the transition. `LITE` mode -outputs the same information without the options diff. - -### Protocol message output - -``` ---output=proto -``` - -This option causes the resulting targets to be printed in a binary protocol -buffer form. 
The definition of the protocol buffer can be found at
-[src/main/protobuf/analysis_v2.proto](https://github.com/bazelbuild/bazel/blob/master/src/main/protobuf/analysis_v2.proto).
-
-`CqueryResult` is the top level message containing the results of the cquery. It
-has a list of `ConfiguredTarget` messages and a list of `Configuration`
-messages. Each `ConfiguredTarget` has a `configuration_id` whose value is equal
-to that of the `id` field from the corresponding `Configuration` message.
-
-#### --[no]proto:include_configurations
-
-By default, cquery results return configuration information as part of each
-configured target. If you'd like to omit this information and get proto output
-that is formatted exactly like query's proto output, set this flag to false.
-
-See [query's proto output documentation](/query/language#output-formats)
-for more proto output-related options.
-
-Note: While selects are resolved both at the top level of returned
-targets and within attributes, all possible inputs for selects are still
-included as `rule_input` fields.
-
-### Graph output
-
-```
---output=graph
-```
-
-This option generates output as a Graphviz-compatible .dot file. See `query`'s
-[graph output documentation](/query/language#display-result-graph) for details.
-`cquery` also supports [`--graph:node_limit`](/query/language#graph-nodelimit)
-and [`--graph:factored`](/query/language#graph-factored).
-
-### Files output
-
-```
---output=files
-```
-
-This option prints a list of the output files produced by each target matched
-by the query, similar to the list printed at the end of a `bazel build`
-invocation. The output contains only the files advertised in the requested
-output groups as determined by the
-[`--output_groups`](/reference/command-line-reference#flag--output_groups) flag.
-It does include source files.
-
-All paths emitted by this output format are relative to the
-[execroot](https://bazel.build/remote/output-directories), which can be obtained
-via `bazel info execution_root`. If the `bazel-out` convenience symlink exists,
-paths to files in the main repository also resolve relative to the workspace
-directory.
-
-Note: The output of `bazel cquery --output=files //pkg:foo` contains the output
-files of `//pkg:foo` in *all* configurations that occur in the build (also see
-the [section on target pattern evaluation](#target-pattern-evaluation)). If that
-is not desired, wrap your query in [`config(..., target)`](#config).
-
-### Defining the output format using Starlark
-
-```
---output=starlark
-```
-
-This output format calls a [Starlark](/rules/language)
-function for each configured target in the query result, and prints the value
-returned by the call. The `--starlark:file` flag specifies the location of a
-Starlark file that defines a function named `format` with a single parameter,
-`target`. This function is called for each [Target](/rules/lib/builtins/Target)
-in the query result. Alternatively, for convenience, you may specify just the
-body of a function declared as `def format(target): return expr` by using the
-`--starlark:expr` flag.
-
-#### 'cquery' Starlark dialect
-
-The cquery Starlark environment differs from a BUILD or .bzl file. It includes
-all core Starlark
-[built-in constants and functions](https://github.com/bazelbuild/starlark/blob/master/spec.md#built-in-constants-and-functions),
-plus a few cquery-specific ones described below, but not (for example) `glob`,
-`native`, or `rule`, and it does not support load statements.
-
-##### build_options(target)
-
-`build_options(target)` returns a map whose keys are build option identifiers
-(see [Configurations](/extending/config))
-and whose values are their Starlark values. Build options whose values are not
-legal Starlark values are omitted from this map.
-
-If the target is an input file, `build_options(target)` returns None, as input
-file targets have a null configuration.
-
-##### providers(target)
-
-`providers(target)` returns a map whose keys are names of
-[providers](/extending/rules#providers)
-(for example, `"DefaultInfo"`) and whose values are their Starlark values.
-Providers whose values are not legal Starlark values are omitted from this map.
-
-#### Examples
-
-Print a space-separated list of the base names of all files produced by `//foo`:
-
-```
-  bazel cquery //foo --output=starlark \
-    --starlark:expr="' '.join([f.basename for f in target.files.to_list()])"
-```
-
-Print a space-separated list of the paths of all files produced by **rule** targets in
-`//bar` and its subpackages:
-
-```
-  bazel cquery 'kind(rule, //bar/...)' --output=starlark \
-    --starlark:expr="' '.join([f.path for f in target.files.to_list()])"
-```
-
-Print a list of the mnemonics of all actions registered by `//foo`:
-
-```
-  bazel cquery //foo --output=starlark \
-    --starlark:expr="[a.mnemonic for a in target.actions]"
-```
-
-Print a list of compilation outputs registered by a `cc_library` `//baz`:
-
-```
-  bazel cquery //baz --output=starlark \
-    --starlark:expr="[f.path for f in target.output_groups.compilation_outputs.to_list()]"
-```
-
-Print the value of the command line option `--javacopt` when building `//foo`:
-
-```
-  bazel cquery //foo --output=starlark \
-    --starlark:expr="build_options(target)['//command_line_option:javacopt']"
-```
-
-Print the label of each target with exactly one output. This example uses
-Starlark functions defined in a file.
-
-```
-  $ cat example.cquery
-
-  def has_one_output(target):
-    return len(target.files.to_list()) == 1
-
-  def format(target):
-    if has_one_output(target):
-      return target.label
-    else:
-      return ""
-
-  $ bazel cquery //baz --output=starlark --starlark:file=example.cquery
-```
-
-Print the label of each target which is strictly Python 3. This example uses
-Starlark functions defined in a file.
-
-```
-  $ cat example.cquery
-
-  def format(target):
-    p = providers(target)
-    py_info = p.get("PyInfo")
-    if py_info and py_info.has_py3_only_sources:
-      return target.label
-    else:
-      return ""
-
-  $ bazel cquery //baz --output=starlark --starlark:file=example.cquery
-```
-
-Extract a value from a user-defined provider.
-
-```
-  $ cat some_package/my_rule.bzl
-
-  MyRuleInfo = provider(fields={"color": "the name of a color"})
-
-  def _my_rule_impl(ctx):
-      ...
-      return [MyRuleInfo(color="red")]
-
-  my_rule = rule(
-      implementation = _my_rule_impl,
-      attrs = {...},
-  )
-
-  $ cat example.cquery
-
-  def format(target):
-    p = providers(target)
-    my_rule_info = p.get("//some_package:my_rule.bzl%MyRuleInfo")
-    if my_rule_info:
-      return my_rule_info.color
-    return ""
-
-  $ bazel cquery //baz --output=starlark --starlark:file=example.cquery
-```
-
-## cquery vs. query
-
-`cquery` and `query` complement each other and excel in
-different niches. Consider the following to decide which is right for you:
-
-* `cquery` follows specific `select()` branches to
-  model the exact graph you build. `query` doesn't know which
-  branch the build chooses, so it overapproximates by including all branches.
-
-* `cquery`'s precision requires building more of the graph than
-  `query` does. Specifically, `cquery`
-  evaluates _configured targets_ while `query` only
-  evaluates _targets_. This takes more time and uses more memory.
-
-* `cquery`'s interpretation of
-  the [query language](/query/language) introduces ambiguity
-  that `query` avoids. For example,
-  if `"//foo"` exists in two configurations, which one
-  should `cquery "deps(//foo)"` use?
-  The [`config`](#config) function can help with this.
-
-* As a newer tool, `cquery` lacks support for certain use
-  cases. See [Known issues](#known-issues) for details.
-
-## Known issues
-
-**All targets that `cquery` "builds" must have the same configuration.**
-
-Before evaluating queries, `cquery` triggers a build up to just
-before the point where build actions would execute. The targets it
-"builds" are by default selected from all labels that appear in the query
-expression (this can be overridden
-with [`--universe_scope`](#universe-scope)). These
-must have the same configuration.
-
-While these generally share the top-level "target" configuration,
-rules can change their own configuration with
-[incoming edge transitions](/extending/config#incoming-edge-transitions).
-This is where `cquery` falls short.
-
-Workaround: If possible, set `--universe_scope` to a stricter
-scope. For example:
-
-```
-# This command attempts to build the transitive closures of both //foo and
-# //bar. //bar uses an incoming edge transition to change its --cpu flag.
-$ bazel cquery 'somepath(//foo, //bar)'
-ERROR: Error doing post analysis query: Top-level targets //foo and //bar
-have different configurations (top-level targets with different
-configurations is not supported)
-
-# This command only builds the transitive closure of //foo, under which
-# //bar should exist in the correct configuration.
-$ bazel cquery 'somepath(//foo, //bar)' --universe_scope=//foo
-```
-
-**No support for [`--output=xml`](/query/language#xml).**
-
-**Non-deterministic output.**
-
-`cquery` does not automatically wipe the build graph from
-previous commands and is therefore prone to picking up results from past
-queries. For example, `genrule` exerts an exec transition on
-its `tools` attribute - that is, it configures its tools in the
-[exec configuration](/extending/rules#configurations).
-
-You can see the lingering effects of that transition below.
-
-```
-$ cat > foo/BUILD <<EOF
-genrule(
-    name = "my_gen",
-    srcs = ["x.in"],
-    outs = ["x.cc"],
-    cmd = "$(locations :tool) $< >$@",
-    tools = [":tool"],
-)
-cc_library(
-    name = "tool",
-)
-EOF
-
-$ bazel cquery "//foo:tool"
-tool(target_config)
-
-$ bazel cquery "deps(//foo:my_gen)"
-my_gen (target_config)
-tool (exec_config)
-...
-
-$ bazel cquery "//foo:tool"
-tool(exec_config)
-```
-
-Workaround: change any startup option to force re-analysis of configured
-targets. For example, add `--test_arg=<whatever>` to your build command.
-
-## Troubleshooting
-
-### Recursive target patterns (`/...`)
-
-If you encounter:
-
-```
-$ bazel cquery --universe_scope=//foo:app "somepath(//foo:app, //foo/...)"
-ERROR: Error doing post analysis query: Evaluation failed: Unable to load package '[foo]'
-because package is not in scope. Check that all target patterns in query expression are within the
---universe_scope of this query.
-```
-
-this incorrectly suggests package `//foo` isn't in scope even though
-`--universe_scope=//foo:app` includes it. This is due to design limitations in
-`cquery`.
As a workaround, explicitly include `//foo/...` in the universe -scope: - -``` -$ bazel cquery --universe_scope=//foo:app,//foo/... "somepath(//foo:app, //foo/...)" -``` - -If that doesn't work (for example, because some target in `//foo/...` can't -build with the chosen build flags), manually unwrap the pattern into its -constituent packages with a pre-processing query: - -``` -# Replace "//foo/..." with a subshell query call (not cquery!) outputting each package, piped into -# a sed call converting "<pkg>" to "//<pkg>:*", piped into a "+"-delimited line merge. -# Output looks like "//foo:*+//foo/bar:*+//foo/baz". -# -$ bazel cquery --universe_scope=//foo:app "somepath(//foo:app, $(bazel query //foo/... ---output=package | sed -e 's/^/\/\//' -e 's/$/:*/' | paste -sd "+" -))" -``` diff --git a/8.3.1/reference/glossary.mdx b/8.3.1/reference/glossary.mdx deleted file mode 100644 index 3b0b497..0000000 --- a/8.3.1/reference/glossary.mdx +++ /dev/null @@ -1,715 +0,0 @@ ---- -title: 'Bazel Glossary' ---- - - - -### Action - -A command to run during the build, for example, a call to a compiler that takes -[artifacts](#artifact) as inputs and produces other artifacts as outputs. -Includes metadata like the command line arguments, action key, environment -variables, and declared input/output artifacts. - -**See also:** [Rules documentation](/extending/rules#actions) - -### Action cache - -An on-disk cache that stores a mapping of executed [actions](#action) to the -outputs they created. The cache key is known as the [action key](#action-key). A -core component for Bazel's incrementality model. The cache is stored in the -output base directory and thus survives Bazel server restarts. - -### Action graph - -An in-memory graph of [actions](#action) and the [artifacts](#artifact) that -these actions read and generate. The graph might include artifacts that exist as -source files (for example, in the file system) as well as generated -intermediate/final artifacts that are not mentioned in `BUILD` files. Produced -during the [analysis phase](#analysis-phase) and used during the [execution -phase](#execution-phase). - -### Action graph query (aquery) - -A [query](#query-concept) tool that can query over build [actions](#action). -This provides the ability to analyze how [build rules](#rule) translate into the -actual work builds do. - -### Action key - -The cache key of an [action](#action). Computed based on action metadata, which -might include the command to be executed in the action, compiler flags, library -locations, or system headers, depending on the action. Enables Bazel to cache or -invalidate individual actions deterministically. - -### Analysis phase - -The second phase of a build. Processes the [target graph](#target-graph) -specified in [`BUILD` files](#build-file) to produce an in-memory [action -graph](#action-graph) that determines the order of actions to run during the -[execution phase](#execution-phase). This is the phase in which rule -implementations are evaluated. - -### Artifact - -A source file or a generated file. Can also be a directory of files, known as -[tree artifacts](#tree-artifact). - -An artifact may be an input to multiple actions, but must only be generated by -at most one action. - -An artifact that corresponds to a [file target](#target) can be addressed by a -label. - -### Aspect - -A mechanism for rules to create additional [actions](#action) in their -dependencies. 
For example, if target A depends on B, one can apply an aspect on -A that traverses *up* a dependency edge to B, and runs additional actions in B -to generate and collect additional output files. These additional actions are -cached and reused between targets requiring the same aspect. Created with the -`aspect()` Starlark Build API function. Can be used, for example, to generate -metadata for IDEs, and create actions for linting. - -**See also:** [Aspects documentation](/extending/aspects) - -### Aspect-on-aspect - -A composition mechanism whereby aspects can be applied to the results -of other aspects. For example, an aspect that generates information for use by -IDEs can be applied on top of an aspect that generates `.java` files from a -proto. - -For an aspect `A` to apply on top of aspect `B`, the [providers](#provider) that -`B` advertises in its [`provides`](/rules/lib/globals#aspect.provides) attribute -must match what `A` declares it wants in its [`required_aspect_providers`](/rules/lib/globals#aspect.required_aspect_providers) -attribute. - -### Attribute - -A parameter to a [rule](#rule), used to express per-target build information. -Examples include `srcs`, `deps`, and `copts`, which respectively declare a -target's source files, dependencies, and custom compiler options. The particular -attributes available for a given target depend on its rule type. - -### .bazelrc - -Bazel’s configuration file used to change the default values for [startup -flags](#startup-flags) and [command flags](#command-flags), and to define common -groups of options that can then be set together on the Bazel command line using -a `--config` flag. Bazel can combine settings from multiple bazelrc files -(systemwide, per-workspace, per-user, or from a custom location), and a -`bazelrc` file may also import settings from other `bazelrc` files. - -### Blaze - -The Google-internal version of Bazel. Google’s main build system for its -mono-repository. - -### BUILD File - -A `BUILD` file is the main configuration file that tells Bazel what software -outputs to build, what their dependencies are, and how to build them. Bazel -takes a `BUILD` file as input and uses the file to create a graph of dependencies -and to derive the actions that must be completed to build intermediate and final -software outputs. A `BUILD` file marks a directory and any sub-directories not -containing a `BUILD` file as a [package](#package), and can contain -[targets](#target) created by [rules](#rule). The file can also be named -`BUILD.bazel`. - -### BUILD.bazel File - -See [`BUILD` File](#build-file). Takes precedence over a `BUILD` file in the same -directory. - -### .bzl File - -A file that defines rules, [macros](#macro), and constants written in -[Starlark](#starlark). These can then be imported into [`BUILD` -files](#build-file) using the `load()` function. - -// TODO: ### Build event protocol - -// TODO: ### Build flag - -### Build graph - -The dependency graph that Bazel constructs and traverses to perform a build. -Includes nodes like [targets](#target), [configured -targets](#configured-target), [actions](#action), and [artifacts](#artifact). A -build is considered complete when all [artifacts](#artifact) on which a set of -requested targets depend are verified as up-to-date. - -### Build setting - -A Starlark-defined piece of [configuration](#configuration). -[Transitions](#transition) can set build settings to change a subgraph's -configuration. 
If exposed to the user as a [command-line flag](#command-flags),
-also known as a build flag.
-
-### Clean build
-
-A build that doesn't use the results of earlier builds. This is generally slower
-than an [incremental build](#incremental-build) but commonly considered to be
-more [correct](#correctness). Bazel guarantees both clean and incremental builds
-are always correct.
-
-### Client-server model
-
-The `bazel` command-line client automatically starts a background server on the
-local machine to execute Bazel [commands](#command). The server persists across
-commands but automatically stops after a period of inactivity (or explicitly via
-`bazel shutdown`). Splitting Bazel into a server and client helps amortize JVM
-startup time and supports faster [incremental builds](#incremental-build)
-because the [action graph](#action-graph) remains in memory across commands.
-
-### Command
-
-Used on the command line to invoke different Bazel functions, like `bazel
-build`, `bazel test`, `bazel run`, and `bazel query`.
-
-### Command flags
-
-A set of flags specific to a [command](#command). Command flags are specified
-*after* the command (`bazel build <command flags>`). Flags can be applicable to
-one or more commands. For example, `--configure` is a flag exclusively for the
-`bazel sync` command, but `--keep_going` is applicable to `sync`, `build`,
-`test` and more. Flags are often used for [configuration](#configuration)
-purposes, so changes in flag values can cause Bazel to invalidate in-memory
-graphs and restart the [analysis phase](#analysis-phase).
-
-### Configuration
-
-Information outside of [rule](#rule) definitions that impacts how rules generate
-[actions](#action). Every build has at least one configuration specifying the
-target platform, action environment variables, and command-line [build
-flags](#command-flags). [Transitions](#transition) may create additional
-configurations, such as for host tools or cross-compilation.
-
-**See also:** [Configurations](/extending/rules#configurations)
-
-// TODO: ### Configuration fragment
-
-### Configuration trimming
-
-The process of only including the pieces of [configuration](#configuration) a
-target actually needs. For example, if you build Java binary `//:j` with C++
-dependency `//:c`, it's wasteful to include the value of `--javacopt` in the
-configuration of `//:c` because changing `--javacopt` unnecessarily breaks C++
-build cacheability.
-
-### Configured query (cquery)
-
-A [query](#query-concept) tool that queries over [configured
-targets](#configured-target) (after the [analysis phase](#analysis-phase)
-completes). This means `select()` and [build flags](#command-flags) (such as
-`--platforms`) are accurately reflected in the results.
-
-**See also:** [cquery documentation](/query/cquery)
-
-### Configured target
-
-The result of evaluating a [target](#target) with a
-[configuration](#configuration). The [analysis phase](#analysis-phase) produces
-this by combining the build's options with the targets that need to be built.
-For example, if `//:foo` builds for two different architectures in the same
-build, it has two configured targets: `<//:foo, x86>` and `<//:foo, arm>`.
-
-### Correctness
-
-A build is correct when its output faithfully reflects the state of its
-transitive inputs. To achieve correct builds, Bazel strives to be
-[hermetic](#hermeticity) and reproducible, and to make [build
-analysis](#analysis-phase) and [action execution](#execution-phase)
-deterministic.
-
-### Dependency
-
-A directed edge between two [targets](#target).
A target `//:foo` has a *target -dependency* on target `//:bar` if `//:foo`'s attribute values contain a -reference to `//:bar`. `//:foo` has an *action dependency* on `//:bar` if an -action in `//:foo` depends on an input [artifact](#artifact) created by an -action in `//:bar`. - -In certain contexts, it could also refer to an _external dependency_; see -[modules](#module). - -### Depset - -A data structure for collecting data on transitive dependencies. Optimized so -that merging depsets is time and space efficient, because it’s common to have -very large depsets (hundreds of thousands of files). Implemented to -recursively refer to other depsets for space efficiency reasons. [Rule](#rule) -implementations should not "flatten" depsets by converting them to lists unless -the rule is at the top level of the build graph. Flattening large depsets incurs -huge memory consumption. Also known as *nested sets* in Bazel's internal -implementation. - -**See also:** [Depset documentation](/extending/depsets) - -### Disk cache - -A local on-disk blob store for the remote caching feature. Can be used in -conjunction with an actual remote blob store. - -### Distdir - -A read-only directory containing files that Bazel would otherwise fetch from the -internet using repository rules. Enables builds to run fully offline. - -### Dynamic execution - -An execution strategy that selects between local and remote execution based on -various heuristics, and uses the execution results of the faster successful -method. Certain [actions](#action) are executed faster locally (for example, -linking) and others are faster remotely (for example, highly parallelizable -compilation). A dynamic execution strategy can provide the best possible -incremental and clean build times. - -### Execution phase - -The third phase of a build. Executes the [actions](#action) in the [action -graph](#action-graph) created during the [analysis phase](#analysis-phase). -These actions invoke executables (compilers, scripts) to read and write -[artifacts](#artifact). *Spawn strategies* control how these actions are -executed: locally, remotely, dynamically, sandboxed, docker, and so on. - -### Execution root - -A directory in the [workspace](#workspace)’s [output base](#output-base) -directory where local [actions](#action) are executed in -non-[sandboxed](#sandboxing) builds. The directory contents are mostly symlinks -of input [artifacts](#artifact) from the workspace. The execution root also -contains symlinks to external repositories as other inputs and the `bazel-out` -directory to store outputs. Prepared during the [loading phase](#loading-phase) -by creating a *symlink forest* of the directories that represent the transitive -closure of packages on which a build depends. Accessible with `bazel info -execution_root` on the command line. - -### File - -See [Artifact](#artifact). - -### Hermeticity - -A build is hermetic if there are no external influences on its build and test -operations, which helps to make sure that results are deterministic and -[correct](#correctness). For example, hermetic builds typically disallow network -access to actions, restrict access to declared inputs, use fixed timestamps and -timezones, restrict access to environment variables, and use fixed seeds for -random number generators - -### Incremental build - -An incremental build reuses the results of earlier builds to reduce build time -and resource usage. Dependency checking and caching aim to produce correct -results for this type of build. 
An incremental build is the opposite of a clean -build. - -// TODO: ### Install base - -### Label - -An identifier for a [target](#target). Generally has the form -`@repo//path/to/package:target`, where `repo` is the (apparent) name of the -[repository](#repository) containing the target, `path/to/package` is the path -to the directory that contains the [`BUILD` file](#build-file) declaring the -target (this directory is also known as the [package](#package)), and `target` -is the name of the target itself. Depending on the situation, parts of this -syntax may be omitted. - -**See also**: [Labels](/concepts/labels) - -### Loading phase - -The first phase of a build where Bazel executes [`BUILD` files](#build-file) to -create [packages](#package). [Macros](#macro) and certain functions like -`glob()` are evaluated in this phase. Interleaved with the second phase of the -build, the [analysis phase](#analysis-phase), to build up a [target -graph](#target-graph). - -### Legacy macro - -A flavor of [macro](#macro) which is declared as an ordinary -[Starlark](#starlark) function, and which runs as a side effect of executing a -`BUILD` file. - -Legacy macros can do anything a function can. This means they can be convenient, -but they can also be harder to read, write, and use. A legacy macro might -unexpectedly mutate its arguments or fail when given a `select()` or ill-typed -argument. - -Contrast with [symbolic macros](#symbolic-macro). - -**See also:** [Legacy macro documentation](/extending/legacy-macros) - -### Macro - -A mechanism to compose multiple [rule](#rule) target declarations together under -a single [Starlark](#starlark) callable. Enables reusing common rule declaration -patterns across `BUILD` files. Expanded to the underlying rule target -declarations during the [loading phase](#loading-phase). - -Comes in two flavors: [symbolic macros](#symbolic-macro) (since Bazel 8) and -[legacy macros](#legacy-macro). - -### Mnemonic - -A short, human-readable string selected by a rule author to quickly understand -what an [action](#action) in the rule is doing. Mnemonics can be used as -identifiers for *spawn strategy* selections. Some examples of action mnemonics -are `Javac` from Java rules, `CppCompile` from C++ rules, and -`AndroidManifestMerger` from Android rules. - -### Module - -A Bazel project that can have multiple versions, each of which can have -dependencies on other modules. This is analogous to familiar concepts in other -dependency management systems, such as a Maven _artifact_, an npm _package_, a -Go _module_, or a Cargo _crate_. Modules form the backbone of Bazel's external -dependency management system. - -Each module is backed by a [repo](#repository) with a `MODULE.bazel` file at its -root. This file contains metadata about the module itself (such as its name and -version), its direct dependencies, and various other data including toolchain -registrations and [module extension](#module-extension) input. - -Module metadata is hosted in Bazel registries. - -**See also:** [Bazel modules](/external/module) - -### Module Extension - -A piece of logic that can be run to generate [repos](#repository) by reading -inputs from across the [module](#module) dependency graph and invoking [repo -rules](#repository-rule). Module extensions have capabilities similar to repo -rules, allowing them to access the internet, perform file I/O, and so on. 
- -**See also:** [Module extensions](/external/extension) - -### Native rules - -[Rules](#rule) that are built into Bazel and implemented in Java. Such rules -appear in [`.bzl` files](#bzl-file) as functions in the native module (for -example, `native.cc_library` or `native.java_library`). User-defined rules -(non-native) are created using [Starlark](#starlark). - -### Output base - -A [workspace](#workspace)-specific directory to store Bazel output files. Used -to separate outputs from the *workspace*'s source tree (the [main -repo](#repository)). Located in the [output user root](#output-user-root). - -### Output groups - -A group of files that is expected to be built when Bazel finishes building a -target. [Rules](#rule) put their usual outputs in the "default output group" -(e.g the `.jar` file of a `java_library`, `.a` and `.so` for `cc_library` -targets). The default output group is the output group whose -[artifacts](#artifact) are built when a target is requested on the command line. -Rules can define more named output groups that can be explicitly specified in -[`BUILD` files](#build-file) (`filegroup` rule) or the command line -(`--output_groups` flag). - -### Output user root - -A user-specific directory to store Bazel's outputs. The directory name is -derived from the user's system username. Prevents output file collisions if -multiple users are building the same project on the system at the same time. -Contains subdirectories corresponding to build outputs of individual workspaces, -also known as [output bases](#output-base). - -### Package - -The set of [targets](#target) defined by a [`BUILD` file](#build-file). A -package's name is the `BUILD` file's path relative to the [repo](#repository) -root. A package can contain subpackages, or subdirectories containing `BUILD` -files, thus forming a package hierarchy. - -### Package group - -A [target](#target) representing a set of packages. Often used in `visibility` -attribute values. - -### Platform - -A "machine type" involved in a build. This includes the machine Bazel runs on -(the "host" platform), the machines build tools execute on ("exec" platforms), -and the machines targets are built for ("target platforms"). - -### Provider - -A schema describing a unit of information to pass between -[rule targets](#rule-target) along dependency relationships. Typically this -contains information like compiler options, transitive source or output files, -and build metadata. Frequently used in conjunction with [depsets](#depset) to -efficiently store accumulated transitive data. An example of a built-in provider -is `DefaultInfo`. - -Note: The object holding specific data for a given rule target is -referred to as a "provider instance", although sometimes this is conflated with -"provider". - -**See also:** [Provider documentation](/extending/rules#providers) - -### Query (concept) - -The process of analyzing a [build graph](#build-graph) to understand -[target](#target) properties and dependency structures. Bazel supports three -query variants: [query](#query-command), [cquery](#configured-query), and -[aquery](#action-graph-query). - -### query (command) - -A [query](#query-concept) tool that operates over the build's post-[loading -phase](#loading-phase) [target graph](#target-graph). This is relatively fast, -but can't analyze the effects of `select()`, [build flags](#command-flags), -[artifacts](#artifact), or build [actions](#action). 
- -**See also:** [Query how-to](/query/guide), [Query reference](/query/language) - -### Repository - -A directory tree with a boundary marker file at its root, containing source -files that can be used in a Bazel build. Often shortened to just **repo**. - -A repo boundary marker file can be `MODULE.bazel` (signaling that this repo -represents a Bazel module), `REPO.bazel`, or in legacy contexts, `WORKSPACE` or -`WORKSPACE.bazel`. Any repo boundary marker file will signify the boundary of a -repo; multiple such files can coexist in a directory. - -The *main repo* is the repo in which the current Bazel command is being run. - -*External repos* are defined by specifying [modules](#module) in `MODULE.bazel` -files, or invoking [repo rules](#repository-rule) in [module -extensions](#module-extension). They can be fetched on demand to a predetermined -"magical" location on disk. - -Each repo has a unique, constant *canonical* name, and potentially different -*apparent* names when viewed from other repos. - -**See also**: [External dependencies overview](/external/overview) - -### Repository cache - -A shared content-addressable cache of files downloaded by Bazel for builds, -shareable across [workspaces](#workspace). Enables offline builds after the -initial download. Commonly used to cache files downloaded through [repository -rules](#repository-rule) like `http_archive` and repository rule APIs like -`repository_ctx.download`. Files are cached only if their SHA-256 checksums are -specified for the download. - -### Repository rule - -A schema for repository definitions that tells Bazel how to materialize (or -"fetch") a [repository](#repository). Often shortened to just **repo rule**. -Repo rules are invoked by Bazel internally to define repos backed by -[modules](#module), or can be invoked by [module extensions](#module-extension). -Repo rules can access the internet or perform file I/O; the most common repo -rule is `http_archive` to download an archive containing source files from the -internet. - -**See also:** [Repo rule documentation](/extending/repo) - -### Reproducibility - -The property of a build or test that a set of inputs to the build or test will -always produce the same set of outputs every time, regardless of time, method, -or environment. Note that this does not necessarily imply that the outputs are -[correct](#correctness) or the desired outputs. - -### Rule - -A schema for defining [rule targets](#rule-target) in a `BUILD` file, such as -`cc_library`. From the perspective of a `BUILD` file author, a rule consists of -a set of [attributes](#attributes) and black box logic. The logic tells the -rule target how to produce output [artifacts](#artifact) and pass information to -other rule targets. From the perspective of `.bzl` authors, rules are the -primary way to extend Bazel to support new programming languages and -environments. - -Rules are instantiated to produce rule targets in the -[loading phase](#loading-phase). In the [analysis phase](#analysis-phase) rule -targets communicate information to their downstream dependencies in the form of -[providers](#provider), and register [actions](#action) describing how to -generate their output artifacts. These actions are run in the [execution -phase](#execution-phase). - -Note: Historically the term "rule" has been used to refer to a rule target. -This usage was inherited from tools like Make, but causes confusion and should -be avoided for Bazel. 
- -**See also:** [Rules documentation](/extending/rules) - -### Rule target - -A [target](#target) that is an instance of a rule. Contrasts with file targets -and package groups. Not to be confused with [rule](#rule). - -### Runfiles - -The runtime dependencies of an executable [target](#target). Most commonly, the -executable is the executable output of a test rule, and the runfiles are runtime -data dependencies of the test. Before the invocation of the executable (during -bazel test), Bazel prepares the tree of runfiles alongside the test executable -according to their source directory structure. - -**See also:** [Runfiles documentation](/extending/rules#runfiles) - -### Sandboxing - -A technique to isolate a running [action](#action) inside a restricted and -temporary [execution root](#execution-root), helping to ensure that it doesn’t -read undeclared inputs or write undeclared outputs. Sandboxing greatly improves -[hermeticity](#hermeticity), but usually has a performance cost, and requires -support from the operating system. The performance cost depends on the platform. -On Linux, it's not significant, but on macOS it can make sandboxing unusable. - -### Skyframe - -[Skyframe](/reference/skyframe) is the core parallel, functional, and incremental evaluation framework of Bazel. - -// TODO: ### Spawn strategy - -### Stamping - -A feature to embed additional information into Bazel-built -[artifacts](#artifact). For example, this can be used for source control, build -time and other workspace or environment-related information for release builds. -Enable through the `--workspace_status_command` flag and [rules](/extending/rules) that -support the stamp attribute. - -### Starlark - -The extension language for writing [rules](/extending/rules) and [macros](#macro). A -restricted subset of Python (syntactically and grammatically) aimed for the -purpose of configuration, and for better performance. Uses the [`.bzl` -file](#bzl-file) extension. [`BUILD` files](#build-file) use an even more -restricted version of Starlark (such as no `def` function definitions), formerly -known as Skylark. - -**See also:** [Starlark language documentation](/rules/language) - -// TODO: ### Starlark rules - -// TODO: ### Starlark rule sandwich - -### Startup flags - -The set of flags specified between `bazel` and the [command](#query-command), -for example, bazel `--host_jvm_debug` build. These flags modify the -[configuration](#configuration) of the Bazel server, so any modification to -startup flags causes a server restart. Startup flags are not specific to any -command. - -### Symbolic macro - -A flavor of [macro](#macro) which is declared with a [rule](#rule)-like -[attribute](#attribute) schema, allows hiding internal declared -[targets](#target) from their own package, and enforces a predictable naming -pattern on the targets that the macro declares. Designed to avoid some of the -problems seen in large [legacy macro](#legacy-macro) codebases. - -**See also:** [Symbolic macro documentation](/extending/macros) - -### Target - -An object that is defined in a [`BUILD` file](#build-file) and identified by a -[label](#label). Targets represent the buildable units of a workspace from -the perspective of the end user. - -A target that is declared by instantiating a [rule](#rule) is called a [rule -target](#rule-target). Depending on the rule, these may be runnable (like -`cc_binary`) or testable (like `cc_test`). 
-Rule targets typically depend on
-other targets via their [attributes](#attribute) (such as `deps`); these
-dependencies form the basis of the [target graph](#target-graph).
-
-Aside from rule targets, there are also file targets and [package group](#package-group)
-targets. File targets correspond to [artifacts](#artifact) that are referenced
-within a `BUILD` file. As a special case, the `BUILD` file of any package is
-always considered a source file target in that package.
-
-Targets are discovered during the [loading phase](#loading-phase). During the
-[analysis phase](#analysis-phase), targets are associated with [build
-configurations](#configuration) to form [configured
-targets](#configured-target).
-
-### Target graph
-
-An in-memory graph of [targets](#target) and their dependencies. Produced during
-the [loading phase](#loading-phase) and used as an input to the [analysis
-phase](#analysis-phase).
-
-### Target pattern
-
-A way to specify a group of [targets](#target) on the command line. Commonly
-used patterns are `:all` (all rule targets), `:*` (all rule + file targets),
-`...` (current [package](#package) and all subpackages recursively). Can be used
-in combination, for example, `//...:*` means all rule and file targets in all
-packages recursively from the root of the [workspace](#workspace).
-
-### Tests
-
-Rule [targets](#target) instantiated from test rules, and therefore containing a
-test executable. A return code of zero from the completion of the executable
-indicates test success. The exact contract between Bazel and tests (such as test
-environment variables, test result collection methods) is specified in the [Test
-Encyclopedia](/reference/test-encyclopedia).
-
-### Toolchain
-
-A set of tools to build outputs for a language. Typically, a toolchain includes
-compilers, linkers, interpreters, and/or linters. A toolchain can also vary by
-platform, that is, a Unix compiler toolchain's components may differ for the
-Windows variant, even though the toolchain is for the same language. Selecting
-the right toolchain for the platform is known as toolchain resolution.
-
-### Top-level target
-
-A build [target](#target) is top-level if it’s requested on the Bazel command
-line. For example, if `//:foo` depends on `//:bar`, and `bazel build //:foo` is
-called, then for this build, `//:foo` is top-level, and `//:bar` isn’t
-top-level, although both targets will need to be built. An important difference
-between top-level and non-top-level targets is that [command
-flags](#command-flags) set on the Bazel command line (or via
-[.bazelrc](#bazelrc)) will set the [configuration](#configuration) for top-level
-targets, but might be modified by a [transition](#transition) for non-top-level
-targets.
-
-### Transition
-
-A mapping of [configuration](#configuration) state from one value to another.
-Enables [targets](#target) in the [build graph](#build-graph) to have different
-configurations, even if they were instantiated from the same [rule](#rule). A
-common usage of transitions is with *split* transitions, where certain parts of
-the [target graph](#target-graph) are forked with distinct configurations for
-each fork. For example, one can build an Android APK with native binaries
-compiled for ARM and x86 using split transitions in a single build.
-
-**See also:** [User-defined transitions](/extending/config#user-defined-transitions)
-
-### Tree artifact
-
-An [artifact](#artifact) that represents a collection of files.
Since these -files are not themselves artifacts, an [action](#action) operating on them must -instead register the tree artifact as its input or output. - -### Visibility - -One of two mechanisms for preventing unwanted dependencies in the build system: -*target visibility* for controlling whether a [target](#target) can be depended -upon by other targets; and *load visibility* for controlling whether a `BUILD` -or `.bzl` file may load a given `.bzl` file. Without context, usually -"visibility" refers to target visibility. - -**See also:** [Visibility documentation](/concepts/visibility) - -### Workspace - -The environment shared by all Bazel commands run from the same [main -repository](#repository). - -Note that historically the concepts of "repository" and "workspace" have been -conflated; the term "workspace" has often been used to refer to the main -repository, and sometimes even used as a synonym of "repository". Such usage -should be avoided for clarity. diff --git a/8.3.1/reference/skyframe.mdx b/8.3.1/reference/skyframe.mdx deleted file mode 100644 index ba9149f..0000000 --- a/8.3.1/reference/skyframe.mdx +++ /dev/null @@ -1,198 +0,0 @@ ---- -title: 'Skyframe' ---- - - - -The parallel evaluation and incrementality model of Bazel. - -## Data model - -The data model consists of the following items: - -* `SkyValue`. Also called nodes. `SkyValues` are immutable objects that - contain all the data built over the course of the build and the inputs of - the build. Examples are: input files, output files, targets and configured - targets. -* `SkyKey`. A short immutable name to reference a `SkyValue`, for example, - `FILECONTENTS:/tmp/foo` or `PACKAGE://foo`. -* `SkyFunction`. Builds nodes based on their keys and dependent nodes. -* Node graph. A data structure containing the dependency relationship between - nodes. -* `Skyframe`. Code name for the incremental evaluation framework Bazel is - based on. - -## Evaluation - -A build is achieved by evaluating the node that represents the build request. - -First, Bazel finds the `SkyFunction` corresponding to the key of the top-level -`SkyKey`. The function then requests the evaluation of the nodes it needs to -evaluate the top-level node, which in turn result in other `SkyFunction` calls, -until the leaf nodes are reached. Leaf nodes are usually ones that represent -input files in the file system. Finally, Bazel ends up with the value of the -top-level `SkyValue`, some side effects (such as output files in the file -system) and a directed acyclic graph of the dependencies between the nodes -involved in the build. - -A `SkyFunction` can request `SkyKeys` in multiple passes if it cannot tell in -advance all of the nodes it needs to do its job. A simple example is evaluating -an input file node that turns out to be a symlink: the function tries to read -the file, realizes that it is a symlink, and thus fetches the file system node -representing the target of the symlink. But that itself can be a symlink, in -which case the original function will need to fetch its target, too. - -The functions are represented in the code by the interface `SkyFunction` and the -services provided to it by an interface called `SkyFunction.Environment`. These -are the things functions can do: - -* Request the evaluation of another node by way of calling `env.getValue`. If - the node is available, its value is returned, otherwise, `null` is returned - and the function itself is expected to return `null`. 
In the latter case,
-  the dependent node is evaluated, and then the original node builder is
-  invoked again, but this time the same `env.getValue` call will return a
-  non-`null` value.
-* Request the evaluation of multiple other nodes by calling `env.getValues()`.
-  This does essentially the same thing, except that the dependent nodes are
-  evaluated in parallel.
-* Do computation during their invocation.
-* Have side effects, for example, writing files to the file system. Care needs
-  to be taken that two different functions avoid stepping on each other's
-  toes. In general, write side effects (where data flows outwards from Bazel)
-  are okay, read side effects (where data flows inwards into Bazel without a
-  registered dependency) are not, because they are an unregistered dependency
-  and as such, can cause incorrect incremental builds.
-
-Well-behaved `SkyFunction` implementations avoid accessing data in any other way
-than requesting dependencies (such as by directly reading the file system),
-because that results in Bazel not registering the data dependency on the file
-that was read, thus resulting in incorrect incremental builds.
-
-Once a function has enough data to do its job, it should return a non-`null`
-value indicating completion.
-
-This evaluation strategy has a number of benefits:
-
-* Hermeticity. If functions only request input data by way of depending on
-  other nodes, Bazel can guarantee that if the input state is the same, the
-  same data is returned. If all sky functions are deterministic, this means
-  that the whole build will also be deterministic.
-* Correct and perfect incrementality. If all the input data of all functions
-  is recorded, Bazel can invalidate only the exact set of nodes that need to
-  be invalidated when the input data changes.
-* Parallelism. Since functions can only interact with each other by way of
-  requesting dependencies, functions that don't depend on each other can be
-  run in parallel and Bazel can guarantee that the result is the same as if
-  they were run sequentially.
-
-## Incrementality
-
-Since functions can only access input data by depending on other nodes, Bazel
-can build up a complete data flow graph from the input files to the output
-files, and use this information to only rebuild those nodes that actually need
-to be rebuilt: the reverse transitive closure of the set of changed input files.
-
-In particular, two possible incrementality strategies exist: the bottom-up one
-and the top-down one. Which one is optimal depends on what the dependency graph
-looks like.
-
-* During bottom-up invalidation, after a graph is built and the set of changed
-  inputs is known, all the nodes that transitively depend on changed files are
-  invalidated. This is optimal if the same top-level node will be built
-  again. Note that bottom-up invalidation requires running `stat()` on all
-  input files of the previous build to determine if they were changed. This
-  can be improved by using `inotify` or a similar mechanism to learn about
-  changed files.
-
-* During top-down invalidation, the transitive closure of the top-level node
-  is checked and only those nodes are kept whose transitive closure is clean.
-  This is better if the node graph is large, but the next build only needs a
-  small subset of it: bottom-up invalidation would invalidate the larger graph
-  of the first build, unlike top-down invalidation, which just walks the small
-  graph of the second build.
-
-Bazel only does bottom-up invalidation.
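-
-As a toy illustration (not Bazel's actual implementation), bottom-up
-invalidation amounts to computing the reverse transitive closure of the changed
-inputs over the node graph; the graph below is made up for the example:
-
-```python
-# Map each node to its direct dependencies.
-deps = {
-    "top": ["lib", "bin"],
-    "lib": ["a.cc", "b.cc"],
-    "bin": ["b.cc", "main.cc"],
-}
-
-# Invert the graph: map each node to the nodes that depend on it.
-rdeps = {}
-for node, ds in deps.items():
-    for d in ds:
-        rdeps.setdefault(d, []).append(node)
-
-def invalidate(changed):
-    """Returns every node that transitively depends on a changed input."""
-    dirty = set()
-    frontier = list(changed)
-    while frontier:
-        node = frontier.pop()
-        for parent in rdeps.get(node, []):
-            if parent not in dirty:
-                dirty.add(parent)
-                frontier.append(parent)
-    return dirty
-
-print(invalidate({"b.cc"}))  # the set {'lib', 'bin', 'top'}, in some order
-```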
-
-To get further incrementality, Bazel uses _change pruning_: if a node is
-invalidated, but upon rebuild, it is discovered that its new value is the same
-as its old value, the nodes that were invalidated due to a change in this node
-are "resurrected".
-
-This is useful, for example, if one changes a comment in a C++ file: then the
-`.o` file generated from it will be the same, thus, it is unnecessary to call
-the linker again.
-
-## Incremental Linking / Compilation
-
-The main limitation of this model is that the invalidation of a node is an
-all-or-nothing affair: when a dependency changes, the dependent node is always
-rebuilt from scratch, even if a better algorithm would exist that would mutate
-the old value of the node based on the changes. A few examples where this would
-be useful:
-
-* Incremental linking
-* When a single class file changes in a JAR file, it is possible to
-  modify the JAR file in-place instead of building it from scratch again.
-
-The reason why Bazel does not support these things in a principled way
-is twofold:
-
-* There were limited performance gains.
-* It is difficult to validate that the result of the mutation is the same as
-  that of a clean rebuild, and Google values builds that are bit-for-bit
-  repeatable.
-
-Until now, it was possible to achieve good enough performance by decomposing an
-expensive build step and achieving partial re-evaluation that way. For example,
-in an Android app, you can split all the classes into multiple groups and dex
-them separately. This way, if classes in a group are unchanged, the dexing does
-not have to be redone.
-
-## Mapping to Bazel concepts
-
-This is a high-level summary of the key `SkyFunction` and `SkyValue`
-implementations Bazel uses to perform a build:
-
-* **FileStateValue**. The result of an `lstat()`. For existent files, the
-  function also computes additional information in order to detect changes to
-  the file. This is the lowest level node in the Skyframe graph and has no
-  dependencies.
-* **FileValue**. Used by anything that cares about the actual contents or
-  resolved path of a file. Depends on the corresponding `FileStateValue` and
-  any symlinks that need to be resolved (such as the `FileValue` for `a/b`
-  needs the resolved path of `a` and the resolved path of `a/b`). The
-  distinction between `FileValue` and `FileStateValue` is important because
-  the latter can be used in cases where the contents of the file are not
-  actually needed. For example, the file contents are irrelevant when
-  evaluating file system globs (such as `srcs=glob(["*/*.java"])`).
-* **DirectoryListingStateValue**. The result of `readdir()`. Like
-  `FileStateValue`, this is the lowest level node and has no dependencies.
-* **DirectoryListingValue**. Used by anything that cares about the entries of
-  a directory. Depends on the corresponding `DirectoryListingStateValue`, as
-  well as the associated `FileValue` of the directory.
-* **PackageValue**. Represents the parsed version of a BUILD file. Depends on
-  the `FileValue` of the associated `BUILD` file, and also transitively on any
-  `DirectoryListingValue` that is used to resolve the globs in the package
-  (the data structure representing the contents of a `BUILD` file internally).
-* **ConfiguredTargetValue**. Represents a configured target, which is a tuple
-  of the set of actions generated during the analysis of a target and
-  information provided to dependent configured targets.
-  Depends on the
-  `PackageValue` the corresponding target is in, the `ConfiguredTargetValues`
-  of direct dependencies, and a special node representing the build
-  configuration.
-* **ArtifactValue**. Represents a file in the build, be it a source or an
-  output artifact. Artifacts are almost equivalent to files, and are used to
-  refer to files during the actual execution of build steps. Source files
-  depend on the `FileValue` of the associated node, and output artifacts
-  depend on the `ActionExecutionValue` of whatever action generates the
-  artifact.
-* **ActionExecutionValue**. Represents the execution of an action. Depends on
-  the `ArtifactValues` of its input files. The action it executes is contained
-  within its SkyKey, which is contrary to the concept that SkyKeys should be
-  small. Note that `ActionExecutionValue` and `ArtifactValue` are unused if
-  the execution phase does not run.
-
-As a visual aid, this diagram shows the relationships between
-SkyFunction implementations after a build of Bazel itself:
-
-![A graph of SkyFunction implementation relationships](/reference/skyframe.png)
diff --git a/8.3.1/release/backward-compatibility.mdx b/8.3.1/release/backward-compatibility.mdx
deleted file mode 100644
index af653cc..0000000
--- a/8.3.1/release/backward-compatibility.mdx
+++ /dev/null
@@ -1,76 +0,0 @@
----
-title: 'Backward Compatibility'
----
-
-
-This page provides information about how to handle backward compatibility,
-including migrating from one release to another and how to communicate
-incompatible changes.
-
-Bazel is evolving. Minor versions released as part of an [LTS major
-version](/release#bazel-versioning) are fully backward-compatible. New major LTS
-releases may contain incompatible changes that require some migration effort.
-For more information about Bazel's release model, please check out the [Release
-Model](/release) page.
-
-## Summary
-
-1. It is recommended to use `--incompatible_*` flags for breaking changes.
-1. For every `--incompatible_*` flag, a GitHub issue explains the change in
-   behavior and aims to provide a migration recipe.
-1. Incompatible flags are recommended to be back-ported to the latest LTS
-   release without enabling the flag by default.
-1. APIs and behavior guarded by an `--experimental_*` flag can change at any
-   time.
-1. Never run production builds with `--experimental_*` or `--incompatible_*`
-   flags.
-
-## How to follow this policy
-
-* [For Bazel users - how to update Bazel](/install/bazelisk)
-* [For contributors - best practices for incompatible changes](/contribute/breaking-changes)
-* [For release managers - how to update issue labels and release](https://github.com/bazelbuild/continuous-integration/tree/master/docs/release-playbook.md)
-
-## What is stable functionality?
-
-In general, APIs or behaviors without `--experimental_...` flags are considered
-stable, supported features in Bazel.
-
-This includes:
-
-* Starlark language and APIs
-* Rules bundled with Bazel
-* Bazel APIs such as Remote Execution APIs or Build Event Protocol
-* Flags and their semantics
-
-## Incompatible changes and migration recipes
-
-For every incompatible change in a new release, the Bazel team aims to provide a
-_migration recipe_ that helps you update your code (`BUILD` and `.bzl` files, as
-well as any Bazel usage in scripts, usage of Bazel API, and so on).
-
-Incompatible changes should have an associated `--incompatible_*` flag and a
-corresponding GitHub issue.
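-
-For example, you can check whether a project is ready for a given incompatible
-change before the flag is flipped by building with the flag enabled (the flag
-below is only an illustration):
-
-```bash
-bazel build --incompatible_disallow_empty_glob //...
-```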
-
-The incompatible flag and relevant changes are recommended to be back-ported to
-the latest LTS release without enabling the flag by default. This allows users
-to migrate for the incompatible changes before the next LTS release is
-available.
-
-## Communicating incompatible changes
-
-The primary sources of information about incompatible changes are the GitHub
-issues marked with an ["incompatible-change"
-label](https://github.com/bazelbuild/bazel/issues?q=label%3Aincompatible-change).
-
-For every incompatible change, the issue specifies the following:
-
-* Name of the flag controlling the incompatible change
-* Description of the changed functionality
-* Migration recipe
-
-When an incompatible change is ready for migration with Bazel at HEAD
-(therefore, also with the next Bazel rolling release), it should be marked with
-the `migration-ready` label. The incompatible change issue is closed when the
-incompatible flag is flipped at HEAD.
diff --git a/8.3.1/release/index.mdx b/8.3.1/release/index.mdx
deleted file mode 100644
index a3cc526..0000000
--- a/8.3.1/release/index.mdx
+++ /dev/null
@@ -1,216 +0,0 @@
----
-title: 'Release Model'
----
-
-
-As announced in [the original blog
-post](https://blog.bazel.build/2020/11/10/long-term-support-release.html), Bazel
-4.0 and higher versions provide support for two release tracks: rolling
-releases and long term support (LTS) releases. This page covers the latest
-information about Bazel's release model.
-
-## Support matrix
-
-| LTS release | Support stage | Latest version | End of support |
-| ----------- | ------------- | -------------- | -------------- |
-| Bazel 9 | Rolling | [Check rolling release page](/release/rolling) | N/A |
-| Bazel 8 | Active | [8.0.0](https://github.com/bazelbuild/bazel/releases/tag/8.0.0) | December 2027 |
-| Bazel 7 | Maintenance | [7.4.1](https://github.com/bazelbuild/bazel/releases/tag/7.4.1) | Dec 2026 |
-| Bazel 6 | Maintenance | [6.5.0](https://github.com/bazelbuild/bazel/releases/tag/6.5.0) | Dec 2025 |
-| Bazel 5 | Maintenance | [5.4.1](https://github.com/bazelbuild/bazel/releases/tag/5.4.1) | Jan 2025 |
-| Bazel 4 | Deprecated | [4.2.4](https://github.com/bazelbuild/bazel/releases/tag/4.2.4) | Jan 2024 |
-
-All Bazel LTS releases can be found on the [release
-page](https://github.com/bazelbuild/bazel/releases) on GitHub.
-
-Note: Bazel versions older than Bazel 5 are no longer supported; Bazel users are
-recommended to upgrade to the latest LTS release, or to use rolling releases if
-they want to keep up with the latest changes at HEAD.
-
-## Release versioning
-
-Bazel uses a _major.minor.patch_ [Semantic
-Versioning](https://semver.org/) scheme.
-
-* A _major release_ contains features that are not backward compatible with
-  the previous release. Each major Bazel version is an LTS release.
-* A _minor release_ contains backward-compatible bug fixes and features
-  back-ported from the main branch.
-* A _patch release_ contains critical bug fixes.
-
-Additionally, pre-release versions are indicated by appending a hyphen and a
-date suffix to the next major version number.
-
-For example, a new release of each type would result in these version numbers:
-
-* Major: 6.0.0
-* Minor: 6.1.0
-* Patch: 6.1.2
-* Pre-release: 7.0.0-pre.20230502.1
-
-## Support stages
-
-For each major Bazel version, there are four support stages:
-
-* **Rolling**: This major version is still in pre-release; the Bazel team
-  publishes rolling releases from HEAD.
-* **Active**: This major version is the current active LTS release.
-  The Bazel team backports important features and bug fixes into its minor
-  releases.
-* **Maintenance**: This major version is an old LTS release in maintenance
-  mode. The Bazel team only promises to backport critical bug fixes for
-  security issues and OS-compatibility issues into this LTS release.
-* **Deprecated**: The Bazel team no longer provides support for this major
-  version; all users should migrate to newer Bazel LTS releases.
-
-## Release cadence
-
-Bazel regularly publishes releases for two release tracks.
-
-### Rolling releases
-
-* Rolling releases are coordinated with the Google Blaze release and are
-  released from HEAD around every two weeks. They are a preview of the next
-  Bazel LTS release.
-* Rolling releases can ship incompatible changes. Incompatible flags are
-  recommended for major breaking changes; rolling out incompatible changes
-  should follow our [backward compatibility
-  policy](/release/backward-compatibility).
-
-### LTS releases
-
-* _Major release_: A new LTS release is expected to be cut from HEAD roughly
-  every 12 months. Once a new LTS release is out, it immediately enters the
-  Active stage, and the previous LTS release enters the Maintenance stage.
-* _Minor release_: New minor versions on the Active LTS track are expected to
-  be released once every 2 months.
-* _Patch release_: New patch versions for LTS releases in Active and
-  Maintenance stages are expected to be released on demand for critical bug
-  fixes.
-* A Bazel LTS release enters the Deprecated stage after being in the
-  Maintenance stage for 2 years.
-
-For planned releases, please check our [release
-issues](https://github.com/bazelbuild/bazel/issues?q=is%3Aopen+is%3Aissue+label%3Arelease)
-on GitHub.
-
-## Release procedure & policies
-
-For rolling releases, the process is straightforward: about every two weeks, a
-new release is created, aligning with the same baseline as the Google internal
-Blaze release. Due to the rapid release schedule, we don't backport any changes
-to rolling releases.
-
-For LTS releases, the procedure and policies below are followed:
-
-1. Determine a baseline commit for the release.
-   * For a new major LTS release, the baseline commit is the HEAD of the main
-     branch.
-   * For a minor or patch release, the baseline commit is the HEAD of the
-     current latest version of the same LTS release.
-1. Create a release branch in the name of `release-` from the baseline
-   commit.
-1. Backport changes via PRs to the release branch.
-   * The community can suggest certain commits to be back-ported by replying
-     "`@bazel-io flag`" on relevant GitHub issues or PRs to mark them as
-     potential release blockers; the Bazel team triages them and decides
-     whether to back-port the commits.
-   * Only backward-compatible commits on the main branch can be back-ported;
-     additional minor changes to resolve merge conflicts are acceptable.
-1. Backport changes using a Cherry-Pick Request Issue for Bazel maintainers.
-   * Bazel maintainers can request to cherry-pick specific commit(s)
-     to a release branch. This process is initiated by creating a
-     cherry-pick request on GitHub. Here's how to do it:
-     1. Open the [cherry-pick request](https://github.com/bazelbuild/bazel/issues/new?assignees=&labels=&projects=&template=cherry_pick_request.yml)
-     2. Fill in the request details
-        * Title: Provide a concise and descriptive title for the request.
-        * Commit ID(s): Enter the ID(s) of the commit(s) you want to
-          cherry-pick. If there are multiple commits, then separate
-          them with commas.
-        * Category: Specify the category of the request.
-        * Reviewer(s): For multiple reviewers, separate their GitHub
-          IDs with commas.
-     3. Set the milestone
-        * Find the "Milestone" section and click the setting.
-        * Select the appropriate X.Y.Z release blockers. This action
-          triggers the cherry-pick bot to process your request
-          for the "release-X.Y.Z" branch.
-     4. Submit the issue
-        * Once all details are filled in and the milestone is set,
-          submit the issue.
-
-   * The cherry-pick bot will process the request and notify you
-     if the commit(s) are eligible for cherry-picking. If
-     the commits are cherry-pickable, meaning there is no
-     merge conflict while cherry-picking the commit, then
-     the bot will create a new pull request. When the pull
-     request is approved by a member of the Bazel team, the
-     commits are cherry-picked and merged to the release branch.
-     For a visual example of a completed cherry-pick request,
-     refer to this
-     [example](https://github.com/bazelbuild/bazel/issues/20230).
-
-1. Identify release blockers and fix issues found on the release branch.
-   * The release branch is tested with the same test suite in
-     [postsubmit](https://buildkite.com/bazel/bazel-bazel) and the
-     [downstream test pipeline](https://buildkite.com/bazel/bazel-at-head-plus-downstream)
-     on Bazel CI. The Bazel team monitors testing results of the release
-     branch and fixes any regressions found.
-1. Create a new release candidate from the release branch when all known
-   release blockers are resolved.
-   * The release candidate is announced on
-     [bazel-discuss](https://groups.google.com/g/bazel-discuss);
-     the Bazel team monitors community bug reports for the candidate.
-   * If new release blockers are identified, go back to the last step and
-     create a new release candidate after resolving all the issues.
-   * New features are not allowed to be added to the release branch after the
-     first release candidate is created; cherry-picks are limited to critical
-     fixes only. If a cherry-pick is needed, the requester must answer the
-     following questions: Why is this change critical, and what benefits does
-     it provide? What is the likelihood of this change introducing a
-     regression?
-1. Push the release candidate as the official release if no further release
-   blockers are found.
-   * For patch releases, push the release at least two business days after
-     the last release candidate is out.
-   * For major and minor releases, push the release two business days after
-     the last release candidate is out, but not earlier than one week after
-     the first release candidate is out.
-   * The release is only pushed on a day when the next day is a business
-     day.
-   * The release is announced on
-     [bazel-discuss](https://groups.google.com/g/bazel-discuss);
-     the Bazel team monitors and addresses community bug reports for the new
-     release.
-
-## Report regressions
-
-If a user finds a regression in a new Bazel release, release candidate or even
-Bazel at HEAD, please file a bug on
-[GitHub](https://github.com/bazelbuild/bazel/issues). You can use
-Bazelisk to bisect the culprit commit and include this information in the bug
-report.
-
-For example, if your build succeeds with Bazel 6.1.0 but fails with the second
-release candidate of 6.2.0, you can bisect via
-
-```bash
-bazelisk --bisect=6.1.0..release-6.2.0rc2 build //foo:bar
-```
-
-You can set the `BAZELISK_SHUTDOWN` or `BAZELISK_CLEAN` environment variable to
-run the corresponding bazel commands to reset the build state if it's needed to
-reproduce the issue.
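-
-For instance, assuming Bazelisk honors a non-empty `BAZELISK_CLEAN` value (see
-the bisect documentation linked below), a bisect run that cleans the build
-state between steps might look like this:
-
-```bash
-BAZELISK_CLEAN=1 bazelisk --bisect=6.1.0..release-6.2.0rc2 build //foo:bar
-```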
-
-For more details, check out the documentation about the Bazelisk
-[bisect feature](https://github.com/bazelbuild/bazelisk#--bisect).
-
-Remember to upgrade Bazelisk to the latest version to use the bisect
-feature.
-
-## Rule compatibility
-
-If you are a rule author and want to maintain compatibility with different
-Bazel versions, please check out the [Rule
-Compatibility](/release/rule-compatibility) page.
diff --git a/8.3.1/release/rule-compatibility.mdx b/8.3.1/release/rule-compatibility.mdx
deleted file mode 100644
index 05a8a95..0000000
--- a/8.3.1/release/rule-compatibility.mdx
+++ /dev/null
@@ -1,90 +0,0 @@
----
-title: 'Rule Compatibility'
----
-
-
-Bazel Starlark rules can break compatibility with Bazel LTS releases in the
-following two scenarios:
-
-1. The rule breaks compatibility with future LTS releases because a feature it
-   depends on is removed from Bazel at HEAD.
-1. The rule breaks compatibility with the current or older LTS releases because
-   a feature it depends on is only available in newer Bazel LTS releases.
-
-Meanwhile, the rule itself can ship incompatible changes for its users as
-well. When combined with breaking changes in Bazel, upgrading the rule version
-and Bazel version can often be a source of frustration for Bazel users. This
-page covers how rule authors should maintain rule compatibility with Bazel to
-make it easier for users to upgrade Bazel and rules.
-
-## Manageable migration process
-
-While it's obviously not feasible to guarantee compatibility between every
-version of Bazel and every version of the rule, our aim is to ensure that the
-migration process remains manageable for Bazel users. A manageable migration
-process is defined as a process where **users are not forced to upgrade the
-rule's major version and Bazel's major version simultaneously**, thereby
-allowing users to handle incompatible changes from one source at a time.
-
-For example, with the following compatibility matrix:
-
-* Migrating from rules_foo 1.x + Bazel 4.x to rules_foo 2.x + Bazel 5.x is not
-  considered manageable, as the users need to upgrade the major version of
-  rules_foo and Bazel at the same time.
-* Migrating from rules_foo 2.x + Bazel 5.x to rules_foo 3.x + Bazel 6.x is
-  considered manageable, as the users can first upgrade rules_foo from 2.x to
-  3.x without changing the major Bazel version, then upgrade Bazel from 5.x to
-  6.x.
-
-| | rules_foo 1.x | rules_foo 2.x | rules_foo 3.x | HEAD |
-| --- | --- | --- | --- | --- |
-| Bazel 4.x | ✅ | ❌ | ❌ | ❌ |
-| Bazel 5.x | ❌ | ✅ | ✅ | ❌ |
-| Bazel 6.x | ❌ | ❌ | ✅ | ✅ |
-| HEAD | ❌ | ❌ | ❌ | ✅ |
-
-❌: No version of the rule's major version is compatible with the Bazel LTS
-release.
-
-✅: At least one version of the rule is compatible with the latest version of the
-Bazel LTS release.
-
-## Best practices
-
-As a Bazel rule author, you can ensure a manageable migration process for users
-by following these best practices:
-
-1. The rule should follow [Semantic
-   Versioning](https://semver.org/): minor versions of the same
-   major version are backward compatible.
-1. The rule at HEAD should be compatible with the latest Bazel LTS release.
-1. The rule at HEAD should be compatible with Bazel at HEAD.
-   To achieve this, you can:
-   * Set up your own CI testing with Bazel at HEAD
-   * Add your project to [Bazel downstream
-     testing](https://github.com/bazelbuild/continuous-integration/blob/master/docs/downstream-testing.md);
-     the Bazel team files issues against your project if breaking changes in
-     Bazel affect your project, and you must follow our [downstream project
-     policies](https://github.com/bazelbuild/continuous-integration/blob/master/docs/downstream-testing.md#downstream-project-policies)
-     to address issues in a timely manner.
-1. The latest major version of the rule must be compatible with the latest
-   Bazel LTS release.
-1. A new major version of the rule should be compatible with the last Bazel LTS
-   release supported by the previous major version of the rule.
-
-Achieving 2. and 3. is the most important task since it allows achieving 4. and
-5. naturally.
-
-To make it easier to keep compatibility with both Bazel at HEAD and the latest
-Bazel LTS release, rule authors can:
-
-* Request backward-compatible features to be back-ported to the latest LTS
-  release; check out the [release process](/release#release-procedure-policies)
-  for more details.
-* Use [bazel_features](https://github.com/bazel-contrib/bazel_features)
-  to do Bazel feature detection.
-
-In general, with the recommended approaches, rules should be able to migrate for
-Bazel incompatible changes and make use of new Bazel features at HEAD without
-dropping compatibility with the latest Bazel LTS release.
diff --git a/8.3.1/remote/bep-examples.mdx b/8.3.1/remote/bep-examples.mdx
deleted file mode 100644
index faf11bf..0000000
--- a/8.3.1/remote/bep-examples.mdx
+++ /dev/null
@@ -1,200 +0,0 @@
----
-title: 'Build Event Protocol Examples'
----
-
-
-The full specification of the Build Event Protocol can be found in its protocol
-buffer definition. However, it might be helpful to build up some intuition
-before looking at the specification.
-
-Consider a simple Bazel workspace that consists of two empty shell scripts
-`foo.sh` and `foo_test.sh` and the following `BUILD` file:
-
-```python
-sh_library(
-    name = "foo_lib",
-    srcs = ["foo.sh"],
-)
-
-sh_test(
-    name = "foo_test",
-    srcs = ["foo_test.sh"],
-    deps = [":foo_lib"],
-)
-```
-
-When running `bazel test ...` on this project the graph of the generated
-build events will resemble the graph below. The arrows indicate the
-aforementioned parent and child relationship. Note that some build events and
-most fields have been omitted for brevity.
-
-![bep-graph](/docs/images/bep-graph.png "BEP graph")
-
-**Figure 1.** BEP graph.
-
-Initially, a `BuildStarted` event is published. The event informs us that the
-build was invoked through the `bazel test` command and announces child events:
-
-* `OptionsParsed`
-* `WorkspaceStatus`
-* `CommandLine`
-* `UnstructuredCommandLine`
-* `BuildMetadata`
-* `BuildFinished`
-* `PatternExpanded`
-* `Progress`
-
-The first three events provide information about how Bazel was invoked.
-
-The `PatternExpanded` build event provides insight
-into which specific targets the `...` pattern expanded to:
-`//foo:foo_lib` and `//foo:foo_test`. It does so by declaring two
-`TargetConfigured` events as children. Note that the `TargetConfigured` event
-declares the `Configuration` event as a child event, even though `Configuration`
-has been posted before the `TargetConfigured` event.
-
-Besides the parent and child relationship, events may also refer to each other
-using their build event identifiers.
-For example, in the above graph the
-`TargetComplete` event refers to the `NamedSetOfFiles` event in its `fileSets`
-field.
-
-Build events that refer to files don’t usually embed the file
-names and paths in the event. Instead, they contain the build event identifier
-of a `NamedSetOfFiles` event, which will then contain the actual file names and
-paths. The `NamedSetOfFiles` event allows a set of files to be reported once and
-referred to by many targets. This structure is necessary because otherwise in
-some cases the Build Event Protocol output size would grow quadratically with
-the number of files. A `NamedSetOfFiles` event may also not have all its files
-embedded, but instead refer to other `NamedSetOfFiles` events through their
-build event identifiers.
-
-Below is an instance of the `TargetComplete` event for the `//foo:foo_lib`
-target from the above graph, printed in protocol buffer’s JSON representation.
-The build event identifier contains the target as an opaque string and refers to
-the `Configuration` event using its build event identifier. The event does not
-announce any child events. The payload contains information about whether the
-target was built successfully, the set of output files, and the kind of target
-built.
-
-```json
-{
-  "id": {
-    "targetCompleted": {
-      "label": "//foo:foo_lib",
-      "configuration": {
-        "id": "544e39a7f0abdb3efdd29d675a48bc6a"
-      }
-    }
-  },
-  "completed": {
-    "success": true,
-    "outputGroup": [{
-      "name": "default",
-      "fileSets": [{
-        "id": "0"
-      }]
-    }],
-    "targetKind": "sh_library rule"
-  }
-}
-```
-
-## Aspect Results in BEP
-
-Ordinary builds evaluate actions associated with `(target, configuration)`
-pairs. When building with [aspects](/extending/aspects) enabled, Bazel
-additionally evaluates targets associated with `(target, configuration,
-aspect)` triples, for each target affected by a given enabled aspect.
-
-Evaluation results for aspects are available in BEP despite the absence of
-aspect-specific event types. For each `(target, configuration)` pair with an
-applicable aspect, Bazel publishes an additional `TargetConfigured` and
-`TargetComplete` event bearing the result from applying the aspect to the
-target. For example, if `//:foo_lib` is built with
-`--aspects=aspects/myaspect.bzl%custom_aspect`, this event would also appear in
-the BEP:
-
-```json
-{
-  "id": {
-    "targetCompleted": {
-      "label": "//foo:foo_lib",
-      "configuration": {
-        "id": "544e39a7f0abdb3efdd29d675a48bc6a"
-      },
-      "aspect": "aspects/myaspect.bzl%custom_aspect"
-    }
-  },
-  "completed": {
-    "success": true,
-    "outputGroup": [{
-      "name": "default",
-      "fileSets": [{
-        "id": "1"
-      }]
-    }]
-  }
-}
-```
-
-Note: The only difference between the IDs is the presence of the `aspect`
-field. A tool that does not check the `aspect` ID field and accumulates output
-files by target may conflate target outputs with aspect outputs.
-
-## Consuming `NamedSetOfFiles`
-
-Determining the artifacts produced by a given target (or aspect) is a common
-BEP use-case that can be done efficiently with some preparation. This section
-discusses the recursive, shared structure offered by the `NamedSetOfFiles`
-event, which matches the structure of a Starlark [depset](/extending/depsets).
-
-Consumers must take care to avoid quadratic algorithms when processing
-`NamedSetOfFiles` events because large builds can contain tens of thousands of
-such events, requiring hundreds of millions of operations in a traversal with
-quadratic complexity.
- -![namedsetoffiles-bep-graph](/docs/images/namedsetoffiles-bep-graph.png "NamedSetOfFiles BEP graph") - -**Figure 2.** `NamedSetOfFiles` BEP graph. - -A `NamedSetOfFiles` event always appears in the BEP stream *before* a -`TargetComplete` or `NamedSetOfFiles` event that references it. This is the -inverse of the "parent-child" event relationship, where all but the first event -appears after at least one event announcing it. A `NamedSetOfFiles` event is -announced by a `Progress` event with no semantics. - -Given these ordering and sharing constraints, a typical consumer must buffer all -`NamedSetOfFiles` events until the BEP stream is exhausted. The following JSON -event stream and Python code demonstrate how to populate a map from -target/aspect to built artifacts in the "default" output group, and how to -process the outputs for a subset of built targets/aspects: - -```python -named_sets = {} # type: dict[str, NamedSetOfFiles] -outputs = {} # type: dict[str, dict[str, set[str]]] - -for event in stream: - kind = event.id.WhichOneof("id") - if kind == "named_set": - named_sets[event.id.named_set.id] = event.named_set_of_files - elif kind == "target_completed": - tc = event.id.target_completed - target_id = (tc.label, tc.configuration.id, tc.aspect) - outputs[target_id] = {} - for group in event.completed.output_group: - outputs[target_id][group.name] = {fs.id for fs in group.file_sets} - -for result_id in relevant_subset(outputs.keys()): - visit = outputs[result_id].get("default", []) - seen_sets = set(visit) - while visit: - set_name = visit.pop() - s = named_sets[set_name] - for f in s.files: - process_file(result_id, f) - for fs in s.file_sets: - if fs.id not in seen_sets: - visit.add(fs.id) - seen_sets.add(fs.id) -``` diff --git a/8.3.1/remote/bep-glossary.mdx b/8.3.1/remote/bep-glossary.mdx deleted file mode 100644 index 3bd11ee..0000000 --- a/8.3.1/remote/bep-glossary.mdx +++ /dev/null @@ -1,416 +0,0 @@ ---- -title: 'Build Event Protocol Glossary' ---- - - - -Each BEP event type has its own semantics, minimally documented in -[build\_event\_stream.proto](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto). -The following glossary describes each event type. - -## Aborted - -Unlike other events, `Aborted` does not have a corresponding ID type, because -the `Aborted` event *replaces* events of other types. This event indicates that -the build terminated early and the event ID it appears under was not produced -normally. `Aborted` contains an enum and human-friendly description to explain -why the build did not complete. - -For example, if a build is evaluating a target when the user interrupts Bazel, -BEP contains an event like the following: - -```json -{ - "id": { - "targetCompleted": { - "label": "//:foo", - "configuration": { - "id": "544e39a7f0abdb3efdd29d675a48bc6a" - } - } - }, - "aborted": { - "reason": "USER_INTERRUPTED" - } -} -``` - -## ActionExecuted - -Provides details about the execution of a specific -[Action](/rules/lib/actions) in a build. By default, this event is -included in the BEP only for failed actions, to support identifying the root cause -of build failures. Users may set the `--build_event_publish_all_actions` flag -to include all `ActionExecuted` events. - -## BuildFinished - -A single `BuildFinished` event is sent after the command is complete and -includes the exit code for the command. This event provides authoritative -success/failure information. 
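-
-As an illustration (field values here are invented; the exact fields are
-defined in build_event_stream.proto), a successful command might end with an
-event like:
-
-```json
-{
-  "id": {
-    "buildFinished": {}
-  },
-  "finished": {
-    "overallSuccess": true,
-    "finishTimeMillis": "1716321600000",
-    "exitCode": {
-      "name": "SUCCESS"
-    }
-  }
-}
-```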
-
-## BuildMetadata
-
-Contains the parsed contents of the `--build_metadata` flag. This event exists
-to support Bazel integration with other tooling by plumbing external data (such as
-identifiers).
-
-## BuildMetrics
-
-A single `BuildMetrics` event is sent at the end of every command and includes
-counters/gauges useful for quantifying the build tool's behavior during the
-command. These metrics indicate work actually done and do not count cached
-work that is reused.
-
-Note that `memory_metrics` may not be populated if there was no Java garbage
-collection during the command's execution. Users may set the
-`--memory_profile=/dev/null` option which forces the garbage
-collector to run at the end of the command to populate `memory_metrics`.
-
-```json
-{
-  "id": {
-    "buildMetrics": {}
-  },
-  "buildMetrics": {
-    "actionSummary": {
-      "actionsExecuted": "1"
-    },
-    "memoryMetrics": {},
-    "targetMetrics": {
-      "targetsLoaded": "9",
-      "targetsConfigured": "19"
-    },
-    "packageMetrics": {
-      "packagesLoaded": "5"
-    },
-    "timingMetrics": {
-      "cpuTimeInMs": "1590",
-      "wallTimeInMs": "359"
-    }
-  }
-}
-```
-
-## BuildStarted
-
-The first event in a BEP stream, `BuildStarted` includes metadata describing the
-command before any meaningful work begins.
-
-## BuildToolLogs
-
-A single `BuildToolLogs` event is sent at the end of a command, including URIs
-of files generated by the build tool that may aid in understanding or debugging
-build tool behavior. Some information may be included inline.
-
-```json
-{
-  "id": {
-    "buildToolLogs": {}
-  },
-  "lastMessage": true,
-  "buildToolLogs": {
-    "log": [
-      {
-        "name": "elapsed time",
-        "contents": "MC4xMjEwMDA="
-      },
-      {
-        "name": "process stats",
-        "contents": "MSBwcm9jZXNzOiAxIGludGVybmFsLg=="
-      },
-      {
-        "name": "command.profile.gz",
-        "uri": "file:///tmp/.cache/bazel/_bazel_foo/cde87985ad0bfef34eacae575224b8d1/command.profile.gz"
-      }
-    ]
-  }
-}
-```
-
-## CommandLine
-
-The BEP contains multiple `CommandLine` events containing representations of all
-command-line arguments (including options and uninterpreted arguments).
-Each `CommandLine` event has a label in its `StructuredCommandLineId` that
-indicates which representation it conveys; three such events appear in the BEP:
-
-* `"original"`: Reconstructed commandline as Bazel received it from the Bazel
-  client, without startup options sourced from .rc files.
-* `"canonical"`: The effective commandline with .rc files expanded and
-  invocation policy applied.
-* `"tool"`: Populated from the `--experimental_tool_command_line` option. This
-  is useful to convey the command-line of a tool wrapping Bazel through the BEP.
-  This could be a base64-encoded `CommandLine` binary protocol buffer message
-  which is used directly, or a string which is parsed but not interpreted (as
-  the tool's options may differ from Bazel's).
-
-## Configuration
-
-A `Configuration` event is sent for every [`configuration`](/extending/config)
-used in the top-level targets in a build. At least one configuration event is
-always present. The `id` is reused by the `TargetConfigured` and
-`TargetComplete` event IDs and is necessary to disambiguate those events in
-multi-configuration builds.
-
-```json
-{
-  "id": {
-    "configuration": {
-      "id": "a5d130b0966b4a9ca2d32725aa5baf40e215bcfc4d5cdcdc60f5cc5b4918903b"
-    }
-  },
-  "configuration": {
-    "mnemonic": "k8-fastbuild",
-    "platformName": "k8",
-    "cpu": "k8",
-    "makeVariable": {
-      "COMPILATION_MODE": "fastbuild",
-      "TARGET_CPU": "k8",
-      "GENDIR": "bazel-out/k8-fastbuild/bin",
-      "BINDIR": "bazel-out/k8-fastbuild/bin"
-    }
-  }
-}
-```
-
-## ConvenienceSymlinksIdentified
-
-**Experimental.** If the `--experimental_convenience_symlinks_bep_event`
-option is set, a single `ConvenienceSymlinksIdentified` event is produced by
-`build` commands to indicate how symlinks in the workspace should be managed.
-This enables building tools that invoke Bazel remotely then arrange the local
-workspace as if Bazel had been run locally.
-
-```json
-{
-  "id": {
-    "convenienceSymlinksIdentified":{}
-  },
-  "convenienceSymlinksIdentified": {
-    "convenienceSymlinks": [
-      {
-        "path": "bazel-bin",
-        "action": "CREATE",
-        "target": "execroot/google3/bazel-out/k8-fastbuild/bin"
-      },
-      {
-        "path": "bazel-genfiles",
-        "action": "CREATE",
-        "target": "execroot/google3/bazel-out/k8-fastbuild/genfiles"
-      },
-      {
-        "path": "bazel-out",
-        "action": "CREATE",
-        "target": "execroot/google3/bazel-out"
-      }
-    ]
-  }
-}
-```
-
-## Fetch
-
-Indicates that a Fetch operation occurred as a part of the command execution.
-Unlike other events, if a cached fetch result is re-used, this event does not
-appear in the BEP stream.
-
-## NamedSetOfFiles
-
-`NamedSetOfFiles` events report a structure matching a
-[`depset`](/extending/depsets) of files produced during command evaluation.
-Transitively included depsets are identified by `NamedSetOfFilesId`.
-
-For more information on interpreting a stream's `NamedSetOfFiles` events, see the
-[BEP examples page](/remote/bep-examples#consuming-namedsetoffiles).
-
-## OptionsParsed
-
-A single `OptionsParsed` event lists all options applied to the command,
-separating startup options from command options. It also includes the
-[InvocationPolicy](/reference/command-line-reference#flag--invocation_policy), if any.
-
-```json
-{
-  "id": {
-    "optionsParsed": {}
-  },
-  "optionsParsed": {
-    "startupOptions": [
-      "--max_idle_secs=10800",
-      "--noshutdown_on_low_sys_mem",
-      "--connect_timeout_secs=30",
-      "--output_user_root=/tmp/.cache/bazel/_bazel_foo",
-      "--output_base=/tmp/.cache/bazel/_bazel_foo/a61fd0fbee3f9d6c1e30d54b68655d35",
-      "--deep_execroot",
-      "--idle_server_tasks",
-      "--write_command_log",
-      "--nowatchfs",
-      "--nofatal_event_bus_exceptions",
-      "--nowindows_enable_symlinks",
-      "--noclient_debug"
-    ],
-    "cmdLine": [
-      "--enable_platform_specific_config",
-      "--build_event_json_file=/tmp/bep.json"
-    ],
-    "explicitCmdLine": [
-      "--build_event_json_file=/tmp/bep.json"
-    ],
-    "invocationPolicy": {}
-  }
-}
-```
-
-## PatternExpanded
-
-`PatternExpanded` events indicate the set of all targets that match the patterns
-supplied on the commandline. For successful commands, a single event is present
-with all patterns in the `PatternExpandedId` and all targets in the
-`PatternExpanded` event's *children*. If the pattern expands to any
-`test_suite`s, the children also include the test targets contained in the
-`test_suite`. For each
-pattern that fails to resolve, BEP contains an additional [`Aborted`](#aborted)
-event with a `PatternExpandedId` identifying the pattern.
- -```json -{ - "id": { - "pattern": { - "pattern":["//base:all"] - } - }, - "children": [ - {"targetConfigured":{"label":"//base:foo"}}, - {"targetConfigured":{"label":"//base:foobar"}} - ], - "expanded": { - "testSuiteExpansions": { - "suiteLabel": "//base:suite", - "testLabels": "//base:foo_test" - } - } -} -``` - -## Progress - -Progress events contain the standard output and standard error produced by Bazel -during command execution. These events are also auto-generated as needed to -announce events that have not been announced by a logical "parent" event (in -particular, [NamedSetOfFiles](#namedsetoffiles).) - -## TargetComplete - -For each `(target, configuration, aspect)` combination that completes the -execution phase, a `TargetComplete` event is included in BEP. The event contains -the target's success/failure and the target's requested output groups. - -```json -{ - "id": { - "targetCompleted": { - "label": "//examples/py:bep", - "configuration": { - "id": "a5d130b0966b4a9ca2d32725aa5baf40e215bcfc4d5cdcdc60f5cc5b4918903b" - } - } - }, - "completed": { - "success": true, - "outputGroup": [ - { - "name": "default", - "fileSets": [ - { - "id": "0" - } - ] - } - ] - } -} -``` - -## TargetConfigured - -For each Target that completes the analysis phase, a `TargetConfigured` event is -included in BEP. This is the authoritative source for a target's "rule kind" -attribute. The configuration(s) applied to the target appear in the announced -*children* of the event. - -For example, building with the `--experimental_multi_cpu` options may produce -the following `TargetConfigured` event for a single target with two -configurations: - -```json -{ - "id": { - "targetConfigured": { - "label": "//starlark_configurations/multi_arch_binary:foo" - } - }, - "children": [ - { - "targetCompleted": { - "label": "//starlark_configurations/multi_arch_binary:foo", - "configuration": { - "id": "c62b30c8ab7b9fc51a05848af9276529842a11a7655c71327ade26d7c894c818" - } - } - }, - { - "targetCompleted": { - "label": "//starlark_configurations/multi_arch_binary:foo", - "configuration": { - "id": "eae0379b65abce68d54e0924c0ebcbf3d3df26c6e84ef7b2be51e8dc5b513c99" - } - } - } - ], - "configured": { - "targetKind": "foo_binary rule" - } -} -``` - -## TargetSummary - -For each `(target, configuration)` pair that is executed, a `TargetSummary` -event is included with an aggregate success result encompassing the configured -target's execution and all aspects applied to that configured target. - -## TestResult - -If testing is requested, a `TestResult` event is sent for each test attempt, -shard, and run per test. This allows BEP consumers to identify precisely which -test actions failed their tests and identify the test outputs (such as logs, -test.xml files) for each test action. - -## TestSummary - -If testing is requested, a `TestSummary` event is sent for each test `(target, -configuration)`, containing information necessary to interpret the test's -results. The number of attempts, shards and runs per test are included to enable -BEP consumers to differentiate artifacts across these dimensions. The attempts -and runs per test are considered while producing the aggregate `TestStatus` to -differentiate `FLAKY` tests from `FAILED` tests. - -## UnstructuredCommandLine - -Unlike [CommandLine](#commandline), this event carries the unparsed commandline -flags in string form as encountered by the build tool after expanding all -[`.bazelrc`](/run/bazelrc) files and -considering the `--config` flag. 
- -The `UnstructuredCommandLine` event may be relied upon to precisely reproduce a -given command execution. - -## WorkspaceConfig - -A single `WorkspaceConfig` event contains configuration information regarding the -workspace, such as the execution root. - -## WorkspaceStatus - -A single `WorkspaceStatus` event contains the result of the [workspace status -command](/docs/user-manual#workspace-status). diff --git a/8.3.1/remote/bep.mdx b/8.3.1/remote/bep.mdx deleted file mode 100644 index bafdaa9..0000000 --- a/8.3.1/remote/bep.mdx +++ /dev/null @@ -1,148 +0,0 @@ ---- -title: 'Build Event Protocol' ---- - - - -The [Build Event -Protocol](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto) -(BEP) allows third-party programs to gain insight into a Bazel invocation. For -example, you could use the BEP to gather information for an IDE -plugin or a dashboard that displays build results. - -The protocol is a set of [protocol -buffer](https://developers.google.com/protocol-buffers/) messages with some -semantics defined on top of it. It includes information about build and test -results, build progress, the build configuration and much more. The BEP is -intended to be consumed programmatically and makes parsing Bazel’s -command line output a thing of the past. - -The Build Event Protocol represents information about a build as events. A -build event is a protocol buffer message consisting of a build event identifier, -a set of child event identifiers, and a payload. - -* __Build Event Identifier:__ Depending on the kind of build event, it might be -an [opaque -string](https://github.com/bazelbuild/bazel/blob/7.1.0/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto#L131-L140) -or [structured -information](https://github.com/bazelbuild/bazel/blob/7.1.0/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto#L194-L205) -revealing more about the build event. A build event identifier is unique within -a build. - -* __Children:__ A build event may announce other build events, by including -their build event identifiers in its [children -field](https://github.com/bazelbuild/bazel/blob/7.1.0/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto#L1276). -For example, the `PatternExpanded` build event announces the targets it expands -to as children. The protocol guarantees that all events, except for the first -event, are announced by a previous event. - -* __Payload:__ The payload contains structured information about a build event, -encoded as a protocol buffer message specific to that event. Note that the -payload might not be the expected type, but could be an `Aborted` message -if the build aborted prematurely. - -### Build event graph - -All build events form a directed acyclic graph through their parent and child -relationship. Every build event except for the initial build event has one or -more parent events. Please note that not all parent events of a child event must -necessarily be posted before it. When a build is complete (succeeded or failed) -all announced events will have been posted. In case of a Bazel crash or a failed -network transport, some announced build events may never be posted. - -The event graph's structure reflects the lifecycle of a command. Every BEP -graph has the following characteristic shape: - -1. 
The root event is always a [`BuildStarted`](/remote/bep-glossary#buildstarted) - event. All other events are its descendants. -1. Immediate children of the BuildStarted event contain metadata about the - command. -1. Events containing data produced by the command, such as files built and test - results, appear before the [`BuildFinished`](/remote/bep-glossary#buildfinished) - event. -1. The [`BuildFinished`](/remote/bep-glossary#buildfinished) event *may* be followed - by events containing summary information about the build (for example, metric - or profiling data). - -## Consuming Build Event Protocol - -### Consume in binary format - -To consume the BEP in a binary format: - -1. Have Bazel serialize the protocol buffer messages to a file by specifying the - option `--build_event_binary_file=/path/to/file`. The file will contain - serialized protocol buffer messages with each message being length delimited. - Each message is prefixed with its length encoded as a variable length integer. - This format can be read using the protocol buffer library’s - [`parseDelimitedFrom(InputStream)`](https://developers.google.com/protocol-buffers/docs/reference/java/com/google/protobuf/AbstractParser#parseDelimitedFrom-java.io.InputStream-) - method. - -2. Then, write a program that extracts the relevant information from the - serialized protocol buffer message. - -### Consume in text or JSON formats - -The following Bazel command line flags will output the BEP in -human-readable formats, such as text and JSON: - -``` ---build_event_text_file ---build_event_json_file -``` - -## Build Event Service - -The [Build Event -Service](https://github.com/googleapis/googleapis/blob/master/google/devtools/build/v1/publish_build_event.proto) -Protocol is a generic [gRPC](https://www.grpc.io) service for publishing build events. The Build Event -Service protocol is independent of the BEP and treats BEP events as opaque bytes. -Bazel ships with a gRPC client implementation of the Build Event Service protocol that -publishes Build Event Protocol events. One can specify the endpoint to send the -events to using the `--bes_backend=HOST:PORT` flag. If your backend uses gRPC, -you must prefix the address with the appropriate scheme: `grpc://` for plaintext -gRPC and `grpcs://` for gRPC with TLS enabled. - -### Build Event Service flags - -Bazel has several flags related to the Build Event Service protocol, including: - -* `--bes_backend` -* `--[no]bes_lifecycle_events` -* `--bes_results_url` -* `--bes_timeout` -* `--bes_instance_name` - -For a description of each of these flags, see the -[Command-Line Reference](/reference/command-line-reference). - -### Authentication and security - -Bazel’s Build Event Service implementation also supports authentication and TLS. -These settings can be controlled using the below flags. Please note that these -flags are also used for Bazel’s Remote Execution. This implies that the Build -Event Service and Remote Execution Endpoints need to share the same -authentication and TLS infrastructure. - -* `--[no]google_default_credentials` -* `--google_credentials` -* `--google_auth_scopes` -* `--tls_certificate` -* `--[no]tls_enabled` - -For a description of each of these flags, see the -[Command-Line Reference](/reference/command-line-reference). - -### Build Event Service and remote caching - -The BEP typically contains many references to log files (test.log, test.xml, -etc. ) stored on the machine where Bazel is running. 
A remote BES server -typically can't access these files as they are on different machines. A way to -work around this issue is to use Bazel with [remote -caching](/remote/caching). -Bazel will upload all output files to the remote cache (including files -referenced in the BEP) and the BES server can then fetch the referenced files -from the cache. - -See [GitHub issue 3689](https://github.com/bazelbuild/bazel/issues/3689) for -more details. diff --git a/8.3.1/remote/cache-local.mdx b/8.3.1/remote/cache-local.mdx deleted file mode 100644 index e6dc0c0..0000000 --- a/8.3.1/remote/cache-local.mdx +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: 'Debugging Remote Cache Hits for Local Execution' ---- - - - -This page describes how to investigate cache misses in the context of local -execution. - -This page assumes that you have a build and/or test that successfully builds -locally and is set up to utilize remote caching, and that you want to ensure -that the remote cache is being effectively utilized. - -For tips on how to check your cache hit rate and how to compare the execution -logs between two Bazel invocations, see -[Debugging Remote Cache Hits for Remote Execution](/remote/cache-remote). -Everything presented in that guide also applies to remote caching with local -execution. However, local execution presents some additional challenges. - -## Checking your cache hit rate - -Successful remote cache hits will show up in the status line, similar to -[Cache Hits rate with Remote -Execution](/remote/cache-remote#check-cache-hits). - -In the standard output of your Bazel run, you will see something like the -following: - -```none {:.devsite-disable-click-to-copy} - INFO: 7 processes: 3 remote cache hit, 4 linux-sandbox. -``` - -This means that out of 7 attempted actions, 3 got a remote cache hit and 4 -actions did not have cache hits and were executed locally using `linux-sandbox` -strategy. Local cache hits are not included in this summary. If you are getting -0 processes (or a number lower than expected), run `bazel clean` followed by -your build/test command. - -## Troubleshooting cache hits - -If you are not getting the cache hit rate you are expecting, do the following: - -### Ensure successful communication with the remote endpoint - -To ensure your build is successfully communicating with the remote cache, follow -the steps in this section. - -1. Check your output for warnings - - With remote execution, a failure to talk to the remote endpoint would fail - your build. On the other hand, a cacheable local build would not fail if it - cannot cache. Check the output of your Bazel invocation for warnings, such - as: - - ```none {:.devsite-disable-click-to-copy} - WARNING: Error reading from the remote cache: - ``` - - - or - - ```none {:.devsite-disable-click-to-copy} - WARNING: Error writing to the remote cache: - ``` - - - Such warnings will be followed by the error message detailing the connection - problem that should help you debug: for example, mistyped endpoint name or - incorrectly set credentials. Find and address any such errors. If the error - message you see does not give you enough information, try adding - `--verbose_failures`. - -2. Follow the steps from [Troubleshooting cache hits for remote - execution](/remote/cache-remote#troubleshooting_cache_hits) to - ensure that your cache-writing Bazel invocations are able to get cache hits - on the same machine and across machines. - -3. Ensure your cache-reading Bazel invocations can get cache hits. - - a. 
Since cache-reading Bazel invocations will have a different command-line set
-      up, take additional care to ensure that they are properly set up to
-      communicate with the remote cache. Ensure the `--remote_cache` flag is
-      set and there are no warnings in the output.
-
-   b. Ensure your cache-reading Bazel invocations build the same targets as
-      the cache-writing Bazel invocations.
-
-   c. Follow the same steps as to [ensure caching across
-      machines](/remote/cache-remote#caching-across-machines), to ensure
-      caching from your cache-writing Bazel invocation to your cache-reading
-      Bazel invocation.
diff --git a/8.3.1/remote/cache-remote.mdx b/8.3.1/remote/cache-remote.mdx
deleted file mode 100644
index a614f4f..0000000
--- a/8.3.1/remote/cache-remote.mdx
+++ /dev/null
@@ -1,179 +0,0 @@
----
-title: 'Debugging Remote Cache Hits for Remote Execution'
----
-
-This page describes how to check your cache hit rate and how to investigate
-cache misses in the context of remote execution.
-
-This page assumes that you have a build and/or test that successfully
-utilizes remote execution, and you want to ensure that you are effectively
-utilizing the remote cache.
-
-## Checking your cache hit rate
-
-In the standard output of your Bazel run, look at the `INFO` line that lists
-processes, which roughly correspond to Bazel actions. That line details
-where the action was run. Look for the `remote` label, which indicates an
-action executed remotely, `linux-sandbox` for actions executed in a local
-sandbox, and other values for other execution strategies. An action whose
-result came from a remote cache is displayed as `remote cache hit`.
-
-For example:
-
-```none
-INFO: 11 processes: 6 remote cache hit, 3 internal, 2 remote.
-```
-
-In this example there were 6 remote cache hits, and 2 actions did not have
-cache hits and were executed remotely. The 3 internal actions can be
-ignored; they are typically tiny internal actions, such as creating
-symbolic links. Local cache hits are not included in this summary. If you
-are getting 0 processes (or a number lower than expected), run `bazel clean`
-followed by your build/test command.
-
-## Troubleshooting cache hits
-
-If you are not getting the cache hit rate you are expecting, do the following:
-
-### Ensure re-running the same build/test command produces cache hits
-
-1. Run the build(s) and/or test(s) that you expect to populate the cache. The
-   first time a new build is run on a particular stack, you can expect no
-   remote cache hits. As part of remote execution, action results are stored
-   in the cache and a subsequent run should pick them up.
-
-2. Run `bazel clean`. This command cleans your local cache, which allows
-   you to investigate remote cache hits without the results being masked by
-   local cache hits.
-
-3. Run the build(s) and test(s) that you are investigating again (on the same
-   machine).
-
-4. Check the `INFO` line for cache hit rate. If you see no processes except
-   `remote cache hit` and `internal`, then your cache is being correctly
-   populated and accessed. In that case, skip to the next section.
-
-5. A likely source of discrepancy is something non-hermetic in the build
-   causing the actions to receive different action keys across the two runs.
-   To find those actions, do the following:
-
-   a. Re-run the build(s) or test(s) in question to obtain execution logs:
-
-      ```posix-terminal
-      bazel clean
-
-      bazel --optional-flags build //your:target --execution_log_compact_file=/tmp/exec1.log
-      ```
-
-   b. [Compare the execution logs](#compare-logs) between the two runs.
-      Ensure that the actions are identical across the two log files.
-      Discrepancies provide a clue about the changes that occurred between
-      the runs. Update your build to eliminate those discrepancies.
-
-   If you are able to resolve the caching problems and now the repeated run
-   produces all cache hits, skip to the next section.
-
-   If your action IDs are identical but there are no cache hits, then
-   something in your configuration is preventing caching. Continue with this
-   section to check for common problems.
-
-6. Check that all actions in the execution log have `cacheable` set to true.
-   If `cacheable` does not appear in the execution log for a given action,
-   that means that the corresponding rule may have a `no-cache` tag in its
-   definition in the `BUILD` file. Look at the `mnemonic` and `target_label`
-   fields in the execution log to help determine where the action is coming
-   from.
-
-7. If the actions are identical and `cacheable` but there are no cache hits,
-   it is possible that your command line includes `--noremote_accept_cached`,
-   which would disable cache lookups for a build.
-
-   If figuring out the actual command line is difficult, use the canonical
-   command line from the
-   [Build Event Protocol](/remote/bep)
-   as follows:
-
-   a. Add `--build_event_text_file=/tmp/bep.txt` to your Bazel command to get
-      the text version of the log.
-
-   b. Open the text version of the log and search for the
-      `structured_command_line` message with `command_line_label: "canonical"`.
-      It will list all the options after expansion.
-
-   c. Search for `remote_accept_cached` and check whether it's set to `false`.
-
-   d. If `remote_accept_cached` is `false`, determine where it is being
-      set to `false`: either at the command line or in a
-      [bazelrc](/run/bazelrc#bazelrc-file-locations) file.
-
-### Ensure caching across machines
-
-After cache hits are happening as expected on the same machine, run the
-same build(s)/test(s) on a different machine. If you suspect that caching is
-not happening across machines, do the following:
-
-1. Make a small modification to your build to avoid hitting existing caches.
-
-2. Run the build on the first machine:
-
-   ```posix-terminal
-   bazel clean
-
-   bazel ... build ... --execution_log_compact_file=/tmp/exec1.log
-   ```
-
-3. Run the build on the second machine, ensuring the modification from step 1
-   is included:
-
-   ```posix-terminal
-   bazel clean
-
-   bazel ... build ... --execution_log_compact_file=/tmp/exec2.log
-   ```
-
-4. [Compare the execution logs](#compare-logs) for the two runs. If the logs
-   are not identical, investigate your build configurations for
-   discrepancies as well as properties from the host environment leaking
-   into either of the builds.
-
-## Comparing the execution logs
-
-The execution log contains records of actions executed during the build.
-Each record describes both the inputs (not only files, but also command line
-arguments, environment variables, etc) and the outputs of the action. Thus,
-examination of the log can reveal why an action was reexecuted.
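-
-If you only need a quick look at where two runs diverge, the text logs
-produced by the parser steps below can be compared with a few lines of
-Python. A minimal sketch, assuming the logs have already been converted to
-`/tmp/exec1.log.txt` and `/tmp/exec2.log.txt` as described in the rest of
-this section:
-
-```python
-import difflib
-
-# Text logs produced by //src/tools/execlog:parser (see the steps below).
-with open("/tmp/exec1.log.txt") as f1, open("/tmp/exec2.log.txt") as f2:
-    diff = difflib.unified_diff(
-        f1.readlines(), f2.readlines(), fromfile="exec1", tofile="exec2", n=1)
-
-# Print only the first differences; the surrounding lines name the action
-# (mnemonic, target label) whose key changed between the runs.
-for i, line in enumerate(diff):
-    if i > 40:
-        break
-    print(line, end="")
-```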
- -The execution log can be produced in one of three formats: -compact (`--execution_log_compact_file`), -binary (`--execution_log_binary_file`) or JSON (`--execution_log_json_file`). -The compact format is recommended, as it produces much smaller files with very -little runtime overhead. The following instructions work for any format. You -can also convert between them using the `//src/tools/execlog:converter` tool. - -To compare logs for two builds that are not sharing cache hits as expected, -do the following: - -1. Get the execution logs from each build and store them as `/tmp/exec1.log` and - `/tmp/exec2.log`. - -2. Download the Bazel source code and build the `//src/tools/execlog:parser` - tool: - - git clone https://github.com/bazelbuild/bazel.git - cd bazel - bazel build //src/tools/execlog:parser - -3. Use the `//src/tools/execlog:parser` tool to convert the logs into a - human-readable text format. In this format, the actions in the second log are - sorted to match the order in the first log, making a comparison easier. - - bazel-bin/src/tools/execlog/parser \ - --log_path=/tmp/exec1.log \ - --log_path=/tmp/exec2.log \ - --output_path=/tmp/exec1.log.txt \ - --output_path=/tmp/exec2.log.txt - -4. Use your favourite text differ to diff `/tmp/exec1.log.txt` and - `/tmp/exec2.log.txt`. diff --git a/8.3.1/remote/caching.mdx b/8.3.1/remote/caching.mdx deleted file mode 100644 index 8fd6adc..0000000 --- a/8.3.1/remote/caching.mdx +++ /dev/null @@ -1,380 +0,0 @@ ---- -title: 'Remote Caching' ---- - - - -This page covers remote caching, setting up a server to host the cache, and -running builds using the remote cache. - -A remote cache is used by a team of developers and/or a continuous integration -(CI) system to share build outputs. If your build is reproducible, the -outputs from one machine can be safely reused on another machine, which can -make builds significantly faster. - -## Overview - -Bazel breaks a build into discrete steps, which are called actions. Each action -has inputs, output names, a command line, and environment variables. Required -inputs and expected outputs are declared explicitly for each action. - -You can set up a server to be a remote cache for build outputs, which are these -action outputs. These outputs consist of a list of output file names and the -hashes of their contents. With a remote cache, you can reuse build outputs -from another user's build rather than building each new output locally. - -To use remote caching: - -* Set up a server as the cache's backend -* Configure the Bazel build to use the remote cache -* Use Bazel version 0.10.0 or later - -The remote cache stores two types of data: - -* The action cache, which is a map of action hashes to action result metadata. -* A content-addressable store (CAS) of output files. - -Note that the remote cache additionally stores the stdout and stderr for every -action. Inspecting the stdout/stderr of Bazel thus is not a good signal for -[estimating cache hits](/remote/cache-local). - -### How a build uses remote caching - -Once a server is set up as the remote cache, you use the cache in multiple -ways: - -* Read and write to the remote cache -* Read and/or write to the remote cache except for specific targets -* Only read from the remote cache -* Not use the remote cache at all - -When you run a Bazel build that can read and write to the remote cache, -the build follows these steps: - -1. Bazel creates the graph of targets that need to be built, and then creates -a list of required actions. 
Each of these actions has declared inputs
-and output filenames.
-2. Bazel checks your local machine for existing build outputs and reuses any
-that it finds.
-3. Bazel checks the cache for existing build outputs. If the output is found,
-Bazel retrieves the output. This is a cache hit.
-4. For required actions where the outputs were not found, Bazel executes the
-actions locally and creates the required build outputs.
-5. New build outputs are uploaded to the remote cache.
-
-## Setting up a server as the cache's backend
-
-You need to set up a server to act as the cache's backend. An HTTP/1.1
-server can treat Bazel's data as opaque bytes, so many existing servers
-can be used as a remote caching backend. Remote caching relies on Bazel's
-[HTTP Caching Protocol](#http-caching).
-
-You are responsible for choosing, setting up, and maintaining the backend
-server that will store the cached outputs. When choosing a server, consider:
-
-* Networking speed. For example, if your team is in the same office, you may
-want to run your own local server.
-* Security. The remote cache will have your binaries and so needs to be
-secure.
-* Ease of management. For example, Google Cloud Storage is a fully managed
-service.
-
-There are many backends that can be used for a remote cache. Some options
-include:
-
-* [nginx](#nginx)
-* [bazel-remote](#bazel-remote)
-* [Google Cloud Storage](#cloud-storage)
-
-### nginx
-
-nginx is an open source web server. With its
-[WebDAV module](https://nginx.org/en/docs/http/ngx_http_dav_module.html), it
-can be used as a remote cache for Bazel. On Debian and Ubuntu you can
-install the `nginx-extras` package. On macOS nginx is available via
-Homebrew:
-
-```posix-terminal
-brew tap denji/nginx
-
-brew install nginx-full --with-webdav
-```
-
-Below is an example configuration for nginx. Note that you will need to
-change `/path/to/cache/dir` to a valid directory where nginx has permission
-to write and read. You may need to change the `client_max_body_size` option
-to a larger value if you have larger output files. The server will require
-other configuration such as authentication.
-
-Example configuration for the `server` section in `nginx.conf`:
-
-```nginx
-location /cache/ {
-  # The path to the directory where nginx should store the cache contents.
-  root /path/to/cache/dir;
-  # Allow PUT
-  dav_methods PUT;
-  # Allow nginx to create the /ac and /cas subdirectories.
-  create_full_put_path on;
-  # The maximum size of a single file.
-  client_max_body_size 1G;
-  allow all;
-}
-```
-
-### bazel-remote
-
-bazel-remote is an open source remote build cache that you can use on
-your infrastructure. It has been successfully used in production at
-several companies since early 2018. Note that the Bazel project does
-not provide technical support for bazel-remote.
-
-This cache stores contents on disk and also provides garbage collection
-to enforce an upper storage limit and clean unused artifacts. The cache is
-available as a
-[docker image](https://hub.docker.com/r/buchgr/bazel-remote-cache/) and its
-code is available on [GitHub](https://github.com/buchgr/bazel-remote/).
-Both the REST and gRPC remote cache APIs are supported.
-
-Refer to the [GitHub](https://github.com/buchgr/bazel-remote/)
-page for instructions on how to use it.
-
-### Google Cloud Storage
-
-[Google Cloud Storage](https://cloud.google.com/storage) is a fully managed
-object store which provides an HTTP API that is compatible with Bazel's
-remote caching protocol. It requires that you have a Google Cloud account
-with billing enabled.
-
-To use Cloud Storage as the cache:
-
-1. [Create a storage bucket](https://cloud.google.com/storage/docs/creating-buckets).
-Ensure that you select a bucket location that's closest to you, as network
-bandwidth is important for the remote cache.
-
-2. Create a service account for Bazel to authenticate to Cloud Storage. See
-[Creating a service account](https://cloud.google.com/iam/docs/creating-managing-service-accounts#creating_a_service_account).
-
-3. Generate a secret JSON key and then pass it to Bazel for authentication.
-Store the key securely, as anyone with the key can read and write arbitrary
-data to/from your GCS bucket.
-
-4. Connect to Cloud Storage by adding the following flags to your Bazel
-command:
-   * Pass the following URL to Bazel by using the flag:
-     `--remote_cache=https://storage.googleapis.com/bucket-name` where
-     `bucket-name` is the name of your storage bucket.
-   * Pass the authentication key using the flag:
-     `--google_credentials=/path/to/your/secret-key.json`, or
-     `--google_default_credentials` to use
-     [Application Authentication](https://cloud.google.com/docs/authentication/production).
-
-5. You can configure Cloud Storage to automatically delete old files. To do
-so, see
-[Managing Object Lifecycles](https://cloud.google.com/storage/docs/managing-lifecycles).
-
-### Other servers
-
-You can set up any HTTP/1.1 server that supports PUT and GET as the cache's
-backend. Users have reported success with caching backends such as
-[Hazelcast](https://hazelcast.com),
-[Apache httpd](http://httpd.apache.org), and
-[AWS S3](https://aws.amazon.com/s3).
-
-## Authentication
-
-Support for HTTP Basic Authentication was added to Bazel in version 0.11.0.
-You can pass a username and password to Bazel via the remote cache URL. The
-syntax is `https://username:password@hostname.com:port/path`. Note that
-HTTP Basic Authentication transmits the username and password in plaintext
-over the network, so it's critical to always use it with HTTPS.
-
-## HTTP caching protocol
-
-Bazel supports remote caching via HTTP/1.1. The protocol is conceptually
-simple: binary data (BLOB) is uploaded via PUT requests and downloaded via
-GET requests. Action result metadata is stored under the path `/ac/` and
-output files are stored under the path `/cas/`.
-
-For example, consider a remote cache running under
-`http://localhost:8080/cache`. A Bazel request to download action result
-metadata for an action with the SHA256 hash `01ba4719...` will look as
-follows:
-
-```http
-GET /cache/ac/01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b HTTP/1.1
-Host: localhost:8080
-Accept: */*
-Connection: Keep-Alive
-```
-
-A Bazel request to upload an output file with the SHA256 hash `15e2b0d3...`
-to the CAS will look as follows:
-
-```http
-PUT /cache/cas/15e2b0d3c33891ebb0f1ef609ec419420c20e320ce94c65fbc8c3312448eb225 HTTP/1.1
-Host: localhost:8080
-Accept: */*
-Content-Length: 9
-Connection: Keep-Alive
-
-0x310x320x330x340x350x360x370x380x39
-```
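-
-To make those request shapes concrete, here is a minimal, hypothetical
-Python sketch of a client for this protocol, using only the standard
-library. The endpoint matches the example above; the helper names and
-hashes are placeholders, not part of Bazel or of any particular cache
-server:
-
-```python
-import hashlib
-import urllib.error
-import urllib.request
-
-CACHE = "http://localhost:8080/cache"  # example endpoint from above
-
-
-def get_action_result(action_hash):
-    # Action result metadata is addressed by the action's SHA256 hash
-    # under /ac/.
-    try:
-        with urllib.request.urlopen("%s/ac/%s" % (CACHE, action_hash)) as resp:
-            return resp.read()
-    except urllib.error.HTTPError as err:
-        if err.code == 404:  # cache miss
-            return None
-        raise
-
-
-def put_cas_blob(data):
-    # Output files are content-addressed by the SHA256 of their bytes
-    # under /cas/.
-    digest = hashlib.sha256(data).hexdigest()
-    req = urllib.request.Request(
-        "%s/cas/%s" % (CACHE, digest), data=data, method="PUT")
-    urllib.request.urlopen(req).close()
-    return digest
-```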
-
-## Run Bazel using the remote cache
-
-Once a server is set up as the remote cache, add flags to your Bazel command
-to use it. See the list of configurations and their flags below.
-
-You may also need to configure authentication, which is specific to your
-chosen server.
-
-You may want to add these flags in a `.bazelrc` file so that you don't
-need to specify them every time you run Bazel. Depending on your project and
-team dynamics, you can add flags to a `.bazelrc` file that is:
-
-* On your local machine
-* In your project's workspace, shared with the team
-* On the CI system
-
-### Read from and write to the remote cache
-
-Take care in deciding who has the ability to write to the remote cache. You
-may want only your CI system to be able to write to the remote cache.
-
-Use the following flag to read from and write to the remote cache:
-
-```posix-terminal
-build --remote_cache=http://your.host:port
-```
-
-Besides `HTTP`, the following protocols are also supported: `HTTPS`, `grpc`,
-`grpcs`.
-
-Use the following flag in addition to the one above to only read from the
-remote cache:
-
-```posix-terminal
-build --remote_upload_local_results=false
-```
-
-### Exclude specific targets from using the remote cache
-
-To exclude specific targets from using the remote cache, tag the target with
-`no-remote-cache`. For example:
-
-```starlark
-java_library(
-    name = "target",
-    tags = ["no-remote-cache"],
-)
-```
-
-### Delete content from the remote cache
-
-Deleting content from the remote cache is part of managing your server.
-How you delete content from the remote cache depends on the server you have
-set up as the cache. When deleting outputs, either delete the entire cache,
-or delete old outputs.
-
-The cached outputs are stored as a set of names and hashes. When deleting
-content, there's no way to distinguish which output belongs to a specific
-build.
-
-You may want to delete content from the cache to:
-
-* Create a clean cache after a cache was poisoned
-* Reduce the amount of storage used by deleting old outputs
-
-### Unix sockets
-
-The remote HTTP cache supports connecting over unix domain sockets. The
-behavior is similar to curl's `--unix-socket` flag. Use the following to
-configure a unix domain socket:
-
-```posix-terminal
-   build --remote_cache=http://your.host:port
-   build --remote_proxy=unix:/path/to/socket
-```
-
-This feature is unsupported on Windows.
-
-## Disk cache
-
-Bazel can use a directory on the file system as a remote cache. This is
-useful for sharing build artifacts when switching branches and/or working
-on multiple workspaces of the same project, such as multiple checkouts.
-Enable the disk cache as follows:
-
-```posix-terminal
-build --disk_cache=path/to/build/cache
-```
-
-You can pass a user-specific path to the `--disk_cache` flag using the `~`
-alias (Bazel will substitute the current user's home directory). This comes
-in handy when enabling the disk cache for all developers of a project via
-the project's checked-in `.bazelrc` file.
-
-### Garbage collection
-
-Starting with Bazel 7.4, you can use `--experimental_disk_cache_gc_max_size`
-and `--experimental_disk_cache_gc_max_age` to set a maximum size for the
-disk cache or for the age of individual cache entries. Bazel will
-automatically garbage collect the disk cache while idling between builds;
-the idle timer can be set with `--experimental_disk_cache_gc_idle_delay`
-(defaulting to 5 minutes).
-
-As an alternative to automatic garbage collection, we also provide a
-[tool](https://github.com/bazelbuild/bazel/tree/master/src/tools/diskcache)
-to run a garbage collection on demand.
-
-## Known issues
-
-**Input file modification during a build**
-
-When an input file is modified during a build, Bazel might upload invalid
-results to the remote cache.
-You can enable change detection with the
-`--experimental_guard_against_concurrent_changes` flag. There are no known
-issues with it, and it will be enabled by default in a future release. See
-[issue #3360](https://github.com/bazelbuild/bazel/issues/3360) for updates.
-Generally, avoid modifying source files during a build.
-
-**Environment variables leaking into an action**
-
-An action definition contains environment variables. This can be a problem
-for sharing remote cache hits across machines. For example, environments
-with different `$PATH` variables won't share cache hits. Only environment
-variables explicitly whitelisted via `--action_env` are included in an
-action definition. Bazel's Debian/Ubuntu package used to install
-`/etc/bazel.bazelrc` with a whitelist of environment variables including
-`$PATH`. If you are getting fewer cache hits than expected, check that your
-environment doesn't have an old `/etc/bazel.bazelrc` file.
-
-**Bazel does not track tools outside a workspace**
-
-Bazel currently does not track tools outside a workspace. This can be a
-problem if, for example, an action uses a compiler from `/usr/bin/`. Then,
-two users with different compilers installed will wrongly share cache hits
-because the outputs are different but they have the same action hash. See
-[issue #4558](https://github.com/bazelbuild/bazel/issues/4558) for updates.
-
-**Incremental in-memory state is lost when running builds inside docker
-containers**
-
-Bazel uses a server/client architecture even when running in a single
-docker container. On the server side, Bazel maintains an in-memory state
-which speeds up builds. When running builds inside docker containers, such
-as in CI, the in-memory state is lost and Bazel must rebuild it before
-using the remote cache.
-
-## External links
-
-* **Your Build in a Datacenter:** The Bazel team gave a
-  [talk](https://fosdem.org/2018/schedule/event/datacenter_build/) about
-  remote caching and execution at FOSDEM 2018.
-
-* **Faster Bazel builds with remote caching: a benchmark:** Nicolò Valigi
-  wrote a
-  [blog post](https://nicolovaligi.com/faster-bazel-remote-caching-benchmark.html)
-  in which he benchmarks remote caching in Bazel.
- -* [Adapting Rules for Remote Execution](/remote/rules) -* [Troubleshooting Remote Execution](/remote/sandbox) -* [WebDAV module](https://nginx.org/en/docs/http/ngx_http_dav_module.html) -* [Docker image](https://hub.docker.com/r/buchgr/bazel-remote-cache/) -* [bazel-remote](https://github.com/buchgr/bazel-remote/) -* [Google Cloud Storage](https://cloud.google.com/storage) -* [Google Cloud Console](https://cloud.google.com/console) -* [Bucket locations](https://cloud.google.com/storage/docs/bucket-locations) -* [Hazelcast](https://hazelcast.com) -* [Apache httpd](http://httpd.apache.org) -* [AWS S3](https://aws.amazon.com/s3) -* [issue #3360](https://github.com/bazelbuild/bazel/issues/3360) -* [gRPC](https://grpc.io/) -* [gRPC protocol](https://github.com/bazelbuild/remote-apis/blob/main/build/bazel/remote/execution/v2/remote_execution.proto) -* [Buildbarn](https://github.com/buildbarn) -* [Buildfarm](https://github.com/bazelbuild/bazel-buildfarm) -* [BuildGrid](https://gitlab.com/BuildGrid/buildgrid) -* [issue #4558](https://github.com/bazelbuild/bazel/issues/4558) -* [Application Authentication](https://cloud.google.com/docs/authentication/production) -* [NativeLink](https://github.com/TraceMachina/nativelink) diff --git a/8.3.1/remote/creating.mdx b/8.3.1/remote/creating.mdx deleted file mode 100644 index 0e46a07..0000000 --- a/8.3.1/remote/creating.mdx +++ /dev/null @@ -1,261 +0,0 @@ ---- -title: 'Creating Persistent Workers' ---- - - - -[Persistent workers](/remote/persistent) can make your build faster. If -you have repeated actions in your build that have a high startup cost or would -benefit from cross-action caching, you may want to implement your own persistent -worker to perform these actions. - -The Bazel server communicates with the worker using `stdin`/`stdout`. It -supports the use of protocol buffers or JSON strings. - -The worker implementation has two parts: - -* The [worker](#making-worker). -* The [rule that uses the worker](#rule-uses-worker). - -## Making the worker - -A persistent worker upholds a few requirements: - -* It reads - [WorkRequests](https://github.com/bazelbuild/bazel/blob/54a547f30fd582933889b961df1d6e37a3e33d85/src/main/protobuf/worker_protocol.proto#L36) - from its `stdin`. -* It writes - [WorkResponses](https://github.com/bazelbuild/bazel/blob/54a547f30fd582933889b961df1d6e37a3e33d85/src/main/protobuf/worker_protocol.proto#L77) - (and only `WorkResponse`s) to its `stdout`. -* It accepts the `--persistent_worker` flag. The wrapper must recognize the - `--persistent_worker` command-line flag and only make itself persistent if - that flag is passed, otherwise it must do a one-shot compilation and exit. - -If your program upholds these requirements, it can be used as a persistent -worker! - -### Work requests - -A `WorkRequest` contains a list of arguments to the worker, a list of -path-digest pairs representing the inputs the worker can access (this isn’t -enforced, but you can use this info for caching), and a request id, which is 0 -for singleplex workers. - -NOTE: While the protocol buffer specification uses "snake case" (`request_id`), -the JSON protocol uses "camel case" (`requestId`). This document uses camel case -in the JSON examples, but snake case when talking about the field regardless of -protocol. 
-
-```json
-{
-  "arguments" : ["--some_argument"],
-  "inputs" : [
-    { "path": "/path/to/my/file/1", "digest": "fdk3e2ml23d" },
-    { "path": "/path/to/my/file/2", "digest": "1fwqd4qdd" }
-  ],
-  "requestId" : 12
-}
-```
-
-The optional `verbosity` field can be used to request extra debugging output
-from the worker. It is entirely up to the worker what to output and how.
-Higher values indicate more verbose output. Passing the `--worker_verbose`
-flag to Bazel sets the `verbosity` field to 10, but smaller or larger values
-can be used manually for different amounts of output.
-
-The optional `sandbox_dir` field is used only by workers that support
-[multiplex sandboxing](/remote/multiplex).
-
-### Work responses
-
-A `WorkResponse` contains a request id, a zero or nonzero exit code, and an
-output message describing any errors encountered in processing or executing
-the request. A worker should capture the `stdout` and `stderr` of any tool
-it calls and report them through the `WorkResponse`. Writing it to the
-`stdout` of the worker process is unsafe, as it will interfere with the
-worker protocol. Writing it to the `stderr` of the worker process is safe,
-but the result is collected in a per-worker log file instead of ascribed to
-individual actions.
-
-```json
-{
-  "exitCode" : 1,
-  "output" : "Action failed with the following message:\nCould not find input file \"/path/to/my/file/1\"",
-  "requestId" : 12
-}
-```
-
-As per the norm for protobufs, all fields are optional. However, Bazel
-requires the `WorkRequest` and the corresponding `WorkResponse` to have the
-same request id, so the request id must be specified if it is nonzero. This
-is a valid `WorkResponse`:
-
-```json
-{
-  "requestId" : 12
-}
-```
-
-A `request_id` of 0 indicates a "singleplex" request, used when this request
-cannot be processed in parallel with other requests. The server guarantees
-that a given worker receives requests with either only `request_id` 0 or
-only `request_id` greater than zero. Singleplex requests are sent serially:
-the server doesn't send another request until it has received a response
-(except for cancel requests, see below).
-
-**Notes**
-
-* Each protocol buffer is preceded by its length in `varint` format (see
-  [`MessageLite.writeDelimitedTo()`](https://developers.google.com/protocol-buffers/docs/reference/java/com/google/protobuf/MessageLite.html#writeDelimitedTo-java.io.OutputStream-)).
-* JSON requests and responses are not preceded by a size indicator.
-* JSON requests uphold the same structure as the protobuf, but use standard
-  JSON and use camel case for all field names.
-* In order to maintain the same backward and forward compatibility
-  properties as protobuf, JSON workers must tolerate unknown fields in these
-  messages, and use the protobuf defaults for missing values.
-* Bazel stores requests as protobufs and converts them to JSON using
-  [protobuf's JSON format](https://cs.opensource.google/protobuf/protobuf/+/master:java/util/src/main/java/com/google/protobuf/util/JsonFormat.java).
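-
-For the protobuf transport, that varint length prefix can also be read by
-hand if your language has no delimited-read helper. A minimal Python
-sketch; `worker_protocol_pb2` (not shown) is an assumed module name
-standing for whatever code you generate from `worker_protocol.proto`:
-
-```python
-import sys
-
-
-def read_varint(stream):
-    """Decodes one base-128 varint, the length prefix described above."""
-    shift, result = 0, 0
-    while True:
-        byte = stream.read(1)
-        if not byte:
-            return None  # clean EOF: Bazel closed the stream
-        result |= (byte[0] & 0x7F) << shift
-        if not byte[0] & 0x80:
-            return result
-        shift += 7
-
-
-def read_delimited(message_class, stream=sys.stdin.buffer):
-    """Reads one length-delimited proto message, e.g. a WorkRequest."""
-    size = read_varint(stream)
-    if size is None:
-        return None
-    return message_class.FromString(stream.read(size))
-```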
-
-### Cancellation
-
-Workers can optionally allow work requests to be cancelled before they
-finish. This is particularly useful in connection with dynamic execution,
-where local execution can regularly be interrupted by a faster remote
-execution. To allow cancellation, add `supports-worker-cancellation: 1` to
-the `execution-requirements` field (see below) and set the
-`--experimental_worker_cancellation` flag.
-
-A **cancel request** is a `WorkRequest` with the `cancel` field set (and
-similarly a **cancel response** is a `WorkResponse` with the `was_cancelled`
-field set). The only other field that must be in a cancel request or cancel
-response is `request_id`, indicating which request to cancel. The
-`request_id` field will be 0 for singleplex workers or the non-0
-`request_id` of a previously sent `WorkRequest` for multiplex workers. The
-server may send cancel requests for requests that the worker has already
-responded to, in which case the cancel request must be ignored.
-
-Each non-cancel `WorkRequest` message must be answered exactly once, whether
-or not it was cancelled. Once the server has sent a cancel request, the
-worker may respond with a `WorkResponse` with the `request_id` set and the
-`was_cancelled` field set to true. Sending a regular `WorkResponse` is also
-accepted, but the `output` and `exit_code` fields will be ignored.
-
-Once a response has been sent for a `WorkRequest`, the worker must not touch
-the files in its working directory. The server is free to clean up the
-files, including temporary files.
-
-## Making the rule that uses the worker
-
-You'll also need to create a rule that generates actions to be performed by
-the worker. Making a Starlark rule that uses a worker is just like
-[creating any other rule](https://github.com/bazelbuild/examples/tree/master/rules).
-
-In addition, the rule needs to contain a reference to the worker itself, and
-there are some requirements for the actions it produces.
-
-### Referring to the worker
-
-The rule that uses the worker needs to contain a field that refers to the
-worker itself, so you'll need to create an instance of a `*_binary` rule to
-define your worker. If your worker is called `MyWorker.java`, this might be
-the associated rule:
-
-```python
-java_binary(
-    name = "worker",
-    srcs = ["MyWorker.java"],
-)
-```
-
-This creates the "worker" label, which refers to the worker binary. You'll
-then define a rule that *uses* the worker. This rule should define an
-attribute that refers to the worker binary.
-
-If the worker binary you built is in a package named "work", which is at the
-top level of the build, this might be the attribute definition:
-
-```python
-"worker": attr.label(
-    default = Label("//work:worker"),
-    executable = True,
-    cfg = "exec",
-)
-```
-
-`cfg = "exec"` indicates that the worker should be built to run on your
-execution platform rather than on the target platform (i.e., the worker is
-used as a tool during the build).
-
-### Work action requirements
-
-The rule that uses the worker creates actions for the worker to perform.
-These actions have a couple of requirements.
-
-* The *"arguments"* field. This takes a list of strings, all but the last of
-  which are arguments passed to the worker upon startup. The last element in
-  the "arguments" list is a `flag-file` (@-preceded) argument. Workers read
-  the arguments from the specified flagfile on a per-WorkRequest basis. Your
-  rule can write non-startup arguments for the worker to this flagfile.
-
-* The *"execution-requirements"* field, which takes a dictionary containing
-  `"supports-workers" : "1"`, `"supports-multiplex-workers" : "1"`, or both.
-
-  The "arguments" and "execution-requirements" fields are required for all
-  actions sent to workers. Additionally, actions that should be executed by
-  JSON workers need to include `"requires-worker-protocol" : "json"` in the
-  execution requirements field.
`"requires-worker-protocol" : "proto"` is also - a valid execution requirement, though it’s not required for proto workers, - since they are the default. - - You can also set a `worker-key-mnemonic` in the execution requirements. This - may be useful if you're reusing the executable for multiple action types and - want to distinguish actions by this worker. - -* Temporary files generated in the course of the action should be saved to the - worker's directory. This enables sandboxing. - -Note: To pass an argument starting with a literal `@`, start the argument with -`@@` instead. If an argument is also an external repository label, it will not -be considered a flagfile argument. - -Assuming a rule definition with "worker" attribute described above, in addition -to a "srcs" attribute representing the inputs, an "output" attribute -representing the outputs, and an "args" attribute representing the worker -startup args, the call to `ctx.actions.run` might be: - -```python -ctx.actions.run( - inputs=ctx.files.srcs, - outputs=[ctx.outputs.output], - executable=ctx.executable.worker, - mnemonic="someMnemonic", - execution_requirements={ - "supports-workers" : "1", - "requires-worker-protocol" : "json"}, - arguments=ctx.attr.args + ["@flagfile"] - ) -``` - -For another example, see -[Implementing persistent workers](/remote/persistent#implementation). - -## Examples - -The Bazel code base uses -[Java compiler workers](https://github.com/bazelbuild/bazel/blob/a4251eab6988d6cf4f5e35681fbe2c1b0abe48ef/src/java_tools/buildjar/java/com/google/devtools/build/buildjar/BazelJavaBuilder.java), -in addition to an -[example JSON worker](https://github.com/bazelbuild/bazel/blob/c65f768fec9889bbf1ee934c61d0dc061ea54ca2/src/test/java/com/google/devtools/build/lib/worker/ExampleWorker.java) -that is used in our integration tests. - -You can use their -[scaffolding](https://github.com/bazelbuild/bazel/blob/a4251eab6988d6cf4f5e35681fbe2c1b0abe48ef/src/main/java/com/google/devtools/build/lib/worker/WorkRequestHandler.java) -to make any Java-based tool into a worker by passing in the correct callback. - -For an example of a rule that uses a worker, take a look at Bazel's -[worker integration test](https://github.com/bazelbuild/bazel/blob/22b4dbcaf05756d506de346728db3846da56b775/src/test/shell/integration/bazel_worker_test.sh#L106). - -External contributors have implemented workers in a variety of languages; take a -look at -[Polyglot implementations of Bazel persistent workers](https://github.com/Ubehebe/bazel-worker-examples). -You can -[find many more examples on GitHub](https://github.com/search?q=bazel+workrequest&type=Code)! diff --git a/8.3.1/remote/multiplex.mdx b/8.3.1/remote/multiplex.mdx deleted file mode 100644 index b4b0a0d..0000000 --- a/8.3.1/remote/multiplex.mdx +++ /dev/null @@ -1,113 +0,0 @@ ---- -title: 'Multiplex Workers (Experimental Feature)' ---- - - - -This page describes multiplex workers, how to write multiplex-compatible -rules, and workarounds for certain limitations. - -Caution: Experimental features are subject to change at any time. - -_Multiplex workers_ allow Bazel to handle multiple requests with a single worker -process. For multi-threaded workers, Bazel can use fewer resources to -achieve the same, or better performance. For example, instead of having one -worker process per worker, Bazel can have four multiplexed workers talking to -the same worker process, which can then handle requests in parallel. 
For languages like Java and Scala, this saves JVM warm-up time and JIT
-compilation time, and in general it allows using one shared cache between
-all workers of the same type.
-
-## Overview
-
-There are two layers between the Bazel server and the worker process. For
-certain mnemonics that can run processes in parallel, Bazel gets a
-`WorkerProxy` from the worker pool. The `WorkerProxy` forwards requests to
-the worker process sequentially along with a `request_id`; the worker
-process processes the request and sends responses to the
-`WorkerMultiplexer`. When the `WorkerMultiplexer` receives a response, it
-parses the `request_id` and then forwards the responses back to the correct
-`WorkerProxy`. Just as with non-multiplexed workers, all communication is
-done over standard in/out, but the tool cannot just use `stderr` for
-user-visible output ([see below](#output)).
-
-Each worker has a key. Bazel uses the key's hash code (composed of
-environment variables, the execution root, and the mnemonic) to determine
-which `WorkerMultiplexer` to use. `WorkerProxy`s communicate with the same
-`WorkerMultiplexer` if they have the same hash code. Therefore, assuming
-environment variables and the execution root are the same in a single Bazel
-invocation, each unique mnemonic can only have one `WorkerMultiplexer` and
-one worker process. The total number of workers, including regular workers
-and `WorkerProxy`s, is still limited by `--worker_max_instances`.
-
-## Writing multiplex-compatible rules
-
-The rule's worker process should be multi-threaded to take advantage of
-multiplex workers. Protobuf allows a ruleset to parse a single request even
-though there might be multiple requests piling up in the stream. Whenever
-the worker process parses a request from the stream, it should handle the
-request in a new thread. Because different threads could complete and write
-to the stream at the same time, the worker process needs to make sure the
-responses are written atomically (messages don't overlap). Responses must
-contain the `request_id` of the request they're handling.
-
-### Handling multiplex output
-
-Multiplex workers need to be more careful about handling their output than
-singleplex workers. Anything sent to `stderr` will go into a single log file
-shared among all `WorkerProxy`s of the same type, randomly interleaved
-between concurrent requests. While redirecting `stdout` into `stderr` is a
-good idea, do not collect that output into the `output` field of
-`WorkResponse`, as that could show the user mangled pieces of output. If
-your tool only sends user-oriented output to `stdout` or `stderr`, you will
-need to change that behaviour before you can enable multiplex workers.
-
-## Enabling multiplex workers
-
-Multiplex workers are not enabled by default. A ruleset can turn on
-multiplex workers by using the `supports-multiplex-workers` tag in the
-`execution_requirements` of an action (just like the `supports-workers` tag
-enables regular workers). As is the case when using regular workers, a
-worker strategy needs to be specified, either at the ruleset level (for
-example, `--strategy=[some_mnemonic]=worker`) or generally at the strategy
-level (for example, `--dynamic_local_strategy=worker,standalone`). No
-additional flags are necessary, and `supports-multiplex-workers` takes
-precedence over `supports-workers`, if both are set. You can turn off
-multiplex workers globally by passing `--noworker_multiplex`.
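-
-To illustrate the atomic-write requirement above, here is a minimal,
-hypothetical Python sketch of a multiplex worker's main loop using the JSON
-dialect: each request is handled on its own thread, and a lock keeps each
-response a single uninterrupted line on `stdout`. The `handle_request` body
-is a placeholder for the real tool invocation:
-
-```python
-import json
-import sys
-import threading
-
-write_lock = threading.Lock()  # responses must not interleave
-
-
-def respond(request_id, exit_code, output):
-    response = {"requestId": request_id, "exitCode": exit_code,
-                "output": output}
-    with write_lock:
-        # One atomic, newline-delimited JSON message per response.
-        sys.stdout.write(json.dumps(response) + "\n")
-        sys.stdout.flush()
-
-
-def handle_request(request):
-    # Placeholder: run the tool on request["arguments"] here.
-    respond(request.get("requestId", 0), 0, "")
-
-
-def main():
-    for line in sys.stdin:  # one JSON WorkRequest per line
-        request = json.loads(line)
-        threading.Thread(target=handle_request, args=(request,)).start()
-
-
-if __name__ == "__main__":
-    main()
-```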
- -A ruleset is encouraged to use multiplex workers if possible, to reduce memory -pressure and improve performance. However, multiplex workers are not currently -compatible with [dynamic execution](/remote/dynamic) unless they -implement multiplex sandboxing. Attempting to run non-sandboxed multiplex -workers with dynamic execution will silently use sandboxed -singleplex workers instead. - -## Multiplex sandboxing - -Multiplex workers can be sandboxed by adding explicit support for it in the -worker implementations. While singleplex worker sandboxing can be done by -running each worker process in its own sandbox, multiplex workers share the -process working directory between multiple parallel requests. To allow -sandboxing of multiplex workers, the worker must support reading from and -writing to a subdirectory specified in each request, instead of directly in -its working directory. - -To support multiplex sandboxing, the worker must use the `sandbox_dir` field -from the `WorkRequest` and use that as a prefix for all file reads and writes. -While the `arguments` and `inputs` fields remain unchanged from an unsandboxed -request, the actual inputs are relative to the `sandbox_dir`. The worker must -translate file paths found in `arguments` and `inputs` to read from this -modified path, and must also write all outputs relative to the `sandbox_dir`. -This includes paths such as '.', as well as paths found in files specified -in the arguments (such as ["argfile"](https://docs.oracle.com/javase/7/docs/technotes/tools/windows/javac.html#commandlineargfile) arguments). - -Once a worker supports multiplex sandboxing, the ruleset can declare this -support by adding `supports-multiplex-sandboxing` to the -`execution_requirements` of an action. Bazel will then use multiplex sandboxing -if the `--experimental_worker_multiplex_sandboxing` flag is passed, or if -the worker is used with dynamic execution. - -The worker files of a sandboxed multiplex worker are still relative to the -working directory of the worker process. Thus, if a file is -used both for running the worker and as an input, it must be specified both as -an input in the flagfile argument as well as in `tools`, `executable`, or -`runfiles`. diff --git a/8.3.1/remote/output-directories.mdx b/8.3.1/remote/output-directories.mdx deleted file mode 100644 index bdbe029..0000000 --- a/8.3.1/remote/output-directories.mdx +++ /dev/null @@ -1,144 +0,0 @@ ---- -title: 'Output Directory Layout' ---- - - - -This page covers requirements and layout for output directories. - -## Requirements - -Requirements for an output directory layout: - -* Doesn't collide if multiple users are building on the same box. -* Supports building in multiple workspaces at the same time. -* Supports building for multiple target configurations in the same workspace. -* Doesn't collide with any other tools. -* Is easy to access. -* Is easy to clean, even selectively. -* Is unambiguous, even if the user relies on symbolic links when changing into - their client directory. -* All the build state per user should be underneath one directory ("I'd like to - clean all the .o files from all my clients.") - -## Current layout - -The solution that's currently implemented: - -* Bazel must be invoked from a directory containing a repo boundary file, or a - subdirectory thereof. In other words, Bazel must be invoked from inside a - [repository](../external/overview#repository). Otherwise, an error is - reported. 
-* The _outputRoot_ directory defaults to `${XDG_CACHE_HOME}/bazel` (or - `~/.cache/bazel`, if the `XDG_CACHE_HOME` environment variable is not set) on - Linux, `/private/var/tmp` on macOS, and on Windows it defaults to `%HOME%` if - set, else `%USERPROFILE%` if set, else the result of calling - `SHGetKnownFolderPath()` with the `FOLDERID_Profile` flag set. If the - environment variable `$TEST_TMPDIR` is set, as in a test of Bazel itself, - then that value overrides the default. -* The Bazel user's build state is located beneath `outputRoot/_bazel_$USER`. - This is called the _outputUserRoot_ directory. -* Beneath the `outputUserRoot` directory there is an `install` directory, and in - it is an `installBase` directory whose name is the MD5 hash of the Bazel - installation manifest. -* Beneath the `outputUserRoot` directory, an `outputBase` directory - is also created whose name is the MD5 hash of the path name of the workspace - root. So, for example, if Bazel is running in the workspace root - `/home/user/src/my-project` (or in a directory symlinked to that one), then - an output base directory is created called: - `/home/user/.cache/bazel/_bazel_user/7ffd56a6e4cb724ea575aba15733d113`. You - can also run `echo -n $(pwd) | md5sum` in the workspace root to get the MD5. -* You can use Bazel's `--output_base` startup option to override the default - output base directory. For example, - `bazel --output_base=/tmp/bazel/output build x/y:z`. -* You can also use Bazel's `--output_user_root` startup option to override the - default install base and output base directories. For example: - `bazel --output_user_root=/tmp/bazel build x/y:z`. - -The symlinks for "bazel-<workspace-name>", "bazel-out", "bazel-testlogs", -and "bazel-bin" are put in the workspace directory; these symlinks point to some -directories inside a target-specific directory inside the output directory. -These symlinks are only for the user's convenience, as Bazel itself does not -use them. Also, this is done only if the workspace root is writable. - -## Layout diagram - -The directories are laid out as follows: - -``` -<workspace-name>/ <== The workspace root - bazel-my-project => <..._main> <== Symlink to execRoot - bazel-out => <...bazel-out> <== Convenience symlink to outputPath - bazel-bin => <...bin> <== Convenience symlink to most recent written bin dir $(BINDIR) - bazel-testlogs => <...testlogs> <== Convenience symlink to the test logs directory - -/home/user/.cache/bazel/ <== Root for all Bazel output on a machine: outputRoot - _bazel_$USER/ <== Top level directory for a given user depends on the user name: - outputUserRoot - install/ - fba9a2c87ee9589d72889caf082f1029/ <== Hash of the Bazel install manifest: installBase - _embedded_binaries/ <== Contains binaries and scripts unpacked from the data section of - the bazel executable on first run (such as helper scripts and the - main Java file BazelServer_deploy.jar) - 7ffd56a6e4cb724ea575aba15733d113/ <== Hash of the client's workspace root (such as - /home/user/src/my-project): outputBase - action_cache/ <== Action cache directory hierarchy - This contains the persistent record of the file - metadata (timestamps, and perhaps eventually also MD5 - sums) used by the FilesystemValueChecker. - command.log <== A copy of the stdout/stderr output from the most - recent bazel command. - external/ <== The directory that remote repositories are - downloaded/symlinked into. - server/ <== The Bazel server puts all server-related files (such - as socket file, logs, etc) here. 
- jvm.out <== The debugging output for the server. - execroot/ <== The working directory for all actions. For special - cases such as sandboxing and remote execution, the - actions run in a directory that mimics execroot. - Implementation details, such as where the directories - are created, are intentionally hidden from the action. - Every action can access its inputs and outputs relative - to the execroot directory. - _main/ <== Working tree for the Bazel build & root of symlink forest: execRoot - _bin/ <== Helper tools are linked from or copied to here. - - bazel-out/ <== All actual output of the build is under here: outputPath - _tmp/actions/ <== Action output directory. This contains a file with the - stdout/stderr for every action from the most recent - bazel run that produced output. - local_linux-fastbuild/ <== one subdirectory per unique target BuildConfiguration instance; - this is currently encoded - bin/ <== Bazel outputs binaries for target configuration here: $(BINDIR) - foo/bar/_objs/baz/ <== Object files for a cc_* rule named //foo/bar:baz - foo/bar/baz1.o <== Object files from source //foo/bar:baz1.cc - other_package/other.o <== Object files from source //other_package:other.cc - foo/bar/baz <== foo/bar/baz might be the artifact generated by a cc_binary named - //foo/bar:baz - foo/bar/baz.runfiles/ <== The runfiles symlink farm for the //foo/bar:baz executable. - MANIFEST - _main/ - ... - genfiles/ <== Bazel puts generated source for the target configuration here: - $(GENDIR) - foo/bar.h such as foo/bar.h might be a headerfile generated by //foo:bargen - testlogs/ <== Bazel internal test runner puts test log files here - foo/bartest.log such as foo/bar.log might be an output of the //foo:bartest test with - foo/bartest.status foo/bartest.status containing exit status of the test (such as - PASSED or FAILED (Exit 1), etc) - include/ <== a tree with include symlinks, generated as needed. The - bazel-include symlinks point to here. This is used for - linkstamp stuff, etc. - host/ <== BuildConfiguration for build host (user's workstation), for - building prerequisite tools, that will be used in later stages - of the build (ex: Protocol Compiler) - <packages>/ <== Packages referenced in the build appear as if under a regular workspace -``` - -The layout of the \*.runfiles directories is documented in more detail in the places pointed to by RunfilesSupport. - -## `bazel clean` - -`bazel clean` does an `rm -rf` on the `outputPath` and the `action_cache` -directory. It also removes the workspace symlinks. The `--expunge` option -will clean the entire outputBase. diff --git a/8.3.1/remote/persistent.mdx b/8.3.1/remote/persistent.mdx deleted file mode 100644 index 1a56946..0000000 --- a/8.3.1/remote/persistent.mdx +++ /dev/null @@ -1,265 +0,0 @@ ---- -title: 'Persistent Workers' ---- - - - -This page covers how to use persistent workers, the benefits, requirements, and -how workers affect sandboxing. - -A persistent worker is a long-running process started by the Bazel server, which -functions as a *wrapper* around the actual *tool* (typically a compiler), or is -the *tool* itself. In order to benefit from persistent workers, the tool must -support doing a sequence of compilations, and the wrapper needs to translate -between the tool's API and the request/response format described below. 
The same worker might be called with and without the `--persistent_worker`
-flag in the same build, and is responsible for appropriately starting and
-talking to the tool, as well as shutting down workers on exit. Each worker
-instance is assigned (but not chrooted to) a separate working directory
-under `/bazel-workers`.
-
-Using persistent workers is an
-[execution strategy](/docs/user-manual#execution-strategy) that decreases
-start-up overhead, allows more JIT compilation, and enables caching of, for
-example, the abstract syntax trees in the action execution. This strategy
-achieves these improvements by sending multiple requests to a long-running
-process.
-
-Persistent workers are implemented for multiple languages, including Java,
-[Scala](https://github.com/bazelbuild/rules_scala),
-[Kotlin](https://github.com/bazelbuild/rules_kotlin), and more.
-
-Programs using a NodeJS runtime can use the
-[@bazel/worker](https://www.npmjs.com/package/@bazel/worker) helper library
-to implement the worker protocol.
-
-## Using persistent workers
-
-[Bazel 0.27 and higher](https://blog.bazel.build/2019/06/19/list-strategy.html)
-uses persistent workers by default when executing builds, though remote
-execution takes precedence. For actions that do not support persistent
-workers, Bazel falls back to starting a tool instance for each action. You
-can explicitly set your build to use persistent workers by setting the
-`worker` [strategy](/docs/user-manual#execution-strategy) for the applicable
-tool mnemonics. As a best practice, this example includes specifying `local`
-as a fallback to the `worker` strategy:
-
-```posix-terminal
-bazel build //my:target --strategy=Javac=worker,local
-```
-
-Using the workers strategy instead of the local strategy can boost
-compilation speed significantly, depending on implementation. For Java,
-builds can be 2–4 times faster, sometimes more for incremental compilation.
-Compiling Bazel is about 2.5 times as fast with workers. For more details,
-see the "[Choosing number of workers](#number-of-workers)" section.
-
-If you also have a remote build environment that matches your local build
-environment, you can use the experimental
-[*dynamic* strategy](https://blog.bazel.build/2019/02/01/dynamic-spawn-scheduler.html),
-which races a remote execution and a worker execution. To enable the
-dynamic strategy, pass the
-[--experimental_spawn_scheduler](/reference/command-line-reference#flag--experimental_spawn_scheduler)
-flag. This strategy automatically enables workers, so there is no need to
-specify the `worker` strategy, but you can still use `local` or `sandboxed`
-as fallbacks.
-
-## Choosing number of workers
-
-The default number of worker instances per mnemonic is 4, but it can be
-adjusted with the
-[`worker_max_instances`](/reference/command-line-reference#flag--worker_max_instances)
-flag. There is a trade-off between making good use of the available CPUs and
-the amount of JIT compilation and cache hits you get. With more workers,
-more targets will pay start-up costs of running non-JITted code and hitting
-cold caches. If you have a small number of targets to build, a single worker
-may give the best trade-off between compilation speed and resource usage
-(for example, see
-[issue #8586](https://github.com/bazelbuild/bazel/issues/8586)).
-The `worker_max_instances` flag sets the maximum number of worker instances per
-mnemonic and flag set (see below), so in a mixed system you could end up using
-quite a lot of memory if you keep the default value. For incremental builds the
-benefit of multiple worker instances is even smaller.
-
-This graph shows the from-scratch compilation times for Bazel (target
-`//src:bazel`) on a 6-core hyper-threaded Intel Xeon 3.5 GHz Linux workstation
-with 64 GB of RAM. For each worker configuration, five clean builds are run and
-the average of the last four is taken.
-
-![Graph of performance improvements of clean builds](/docs/images/workers-clean-chart.png "Performance improvements of clean builds")
-
-**Figure 1.** Graph of performance improvements of clean builds.
-
-For this configuration, two workers give the fastest compile, though at only 14%
-improvement compared to one worker. One worker is a good option if you want to
-use less memory.
-
-Incremental compilation typically benefits even more. Clean builds are
-relatively rare, but changing a single file between compiles is common, in
-particular in test-driven development. The above example also includes some
-non-Java packaging actions that can overshadow the incremental compile time.
-
-Recompiling the Java sources only
-(`//src/main/java/com/google/devtools/build/lib/bazel:BazelServer_deploy.jar`)
-after changing an internal string constant in
-[AbstractContainerizingSandboxedSpawn.java](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/sandbox/AbstractContainerizingSandboxedSpawn.java)
-gives a 3x speed-up (average of 20 incremental builds with one warmup build
-discarded):
-
-![Graph of performance improvements of incremental builds](/docs/images/workers-incremental-chart.png "Performance improvements of incremental builds")
-
-**Figure 2.** Graph of performance improvements of incremental builds.
-
-The speed-up depends on the change being made. A speed-up of a factor of 6 is
-measured in the above situation when a commonly used constant is changed.
-
-## Modifying persistent workers
-
-You can pass the
-[`--worker_extra_flag`](/reference/command-line-reference#flag--worker_extra_flag)
-flag to specify start-up flags to workers, keyed by mnemonic. For instance,
-passing `--worker_extra_flag=javac=--debug` turns on debugging for Javac only.
-Only one worker flag can be set per use of this flag, and only for one mnemonic.
-Workers are not just created separately for each mnemonic, but also for
-variations in their start-up flags. Each combination of mnemonic and start-up
-flags is combined into a `WorkerKey`, and for each `WorkerKey` up to
-`worker_max_instances` workers may be created. See the next section for how the
-action configuration can also specify set-up flags.
-
-Passing the
-[`--worker_sandboxing`](/reference/command-line-reference#flag--worker_sandboxing)
-flag makes each worker request use a separate sandbox directory for all its
-inputs. Setting up the [sandbox](/docs/sandboxing) takes some extra time,
-especially on macOS, but gives a better correctness guarantee.
-
-The
-[`--worker_quit_after_build`](/reference/command-line-reference#flag--worker_quit_after_build)
-flag is mainly useful for debugging and profiling. This flag forces all workers
-to quit once a build is done. You can also pass
-[`--worker_verbose`](/reference/command-line-reference#flag--worker_verbose) to
-get more output about what the workers are doing.
-This flag is reflected in the `verbosity` field in `WorkRequest`, allowing
-worker implementations to also be more verbose.
-
-Workers store their logs in the `<outputBase>/bazel-workers` directory, for
-example
-`/tmp/_bazel_larsrc/191013354bebe14fdddae77f2679c3ef/bazel-workers/worker-1-Javac.log`.
-The file name includes the worker id and the mnemonic. Since there can be more
-than one `WorkerKey` per mnemonic, you may see more than `worker_max_instances`
-log files for a given mnemonic.
-
-For Android builds, see details at the
-[Android Build Performance page](/docs/android-build-performance).
-
-## Implementing persistent workers
-
-See the [creating persistent workers](/remote/creating) page for more
-information on how to make a worker.
-
-This example shows a Starlark configuration for a worker that uses JSON:
-
-```python
-args_file = ctx.actions.declare_file(ctx.label.name + "_args_file")
-ctx.actions.write(
-    output = args_file,
-    # One argument per line; File objects must be converted to their paths.
-    content = "\n".join(["-g", "-source", "1.5"] + [f.path for f in ctx.files.srcs]),
-)
-ctx.actions.run(
-    mnemonic = "SomeCompiler",
-    executable = "bin/some_compiler_wrapper",
-    inputs = inputs,
-    outputs = outputs,
-    arguments = ["-max_mem=4G", "@%s" % args_file.path],
-    execution_requirements = {
-        "supports-workers": "1",
-        "requires-worker-protocol": "json",
-    },
-)
-```
-
-With this definition, the first use of this action would start with executing
-the command line `bin/some_compiler_wrapper -max_mem=4G --persistent_worker`.
-A request to compile `Foo.java` would then look like:
-
-NOTE: While the protocol buffer specification uses "snake case" (`request_id`),
-the JSON protocol uses "camel case" (`requestId`). In this document, we will use
-camel case in the JSON examples, but snake case when talking about the field
-regardless of protocol.
-
-```json
-{
-  "arguments": ["-g", "-source", "1.5", "Foo.java"],
-  "inputs": [
-    {"path": "symlinkfarm/input1", "digest": "d49a..."},
-    {"path": "symlinkfarm/input2", "digest": "093d..."}
-  ]
-}
-```
-
-The worker receives this on `stdin` in newline-delimited JSON format (because
-`requires-worker-protocol` is set to JSON). The worker then performs the action,
-and sends a JSON-formatted `WorkResponse` to Bazel on its stdout. Bazel then
-parses this response and manually converts it to a `WorkResponse` proto. To
-communicate with the associated worker using binary-encoded protobuf instead of
-JSON, `requires-worker-protocol` would be set to `proto`, like this:
-
-```
-  execution_requirements = {
-    "supports-workers": "1",
-    "requires-worker-protocol": "proto",
-  }
-```
-
-If you do not include `requires-worker-protocol` in the execution requirements,
-Bazel will default the worker communication to use protobuf.
-
-Bazel derives the `WorkerKey` from the mnemonic and the shared flags, so if this
-configuration allowed changing the `max_mem` parameter, a separate worker would
-be spawned for each value used. This can lead to excessive memory consumption if
-too many variations are used.
-
-Each worker can currently only process one request at a time. The experimental
-[multiplex workers](/remote/multiplex) feature allows using multiple
-threads, if the underlying tool is multithreaded and the wrapper is set up to
-understand this.
-
-In
-[this GitHub repo](https://github.com/Ubehebe/bazel-worker-examples),
-you can see example worker wrappers written in Java as well as in Python.
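-
-To make the request/response cycle concrete, here is a minimal sketch of a
-JSON worker loop in Python. It is an illustration only: the `run_tool`
-function is a hypothetical stand-in for invoking the wrapped tool, and a real
-wrapper would add error handling and honor the `verbosity` field:
-
-```python
-import json
-import sys
-
-
-def run_tool(arguments):
-    # Hypothetical: invoke the actual tool here; return (exit code, output).
-    return 0, "compiled: " + " ".join(arguments)
-
-
-def main(argv):
-    if "--persistent_worker" not in argv:
-        # Called without the flag: behave as an ordinary one-shot tool.
-        code, output = run_tool(argv[1:])
-        print(output)
-        return code
-    # Persistent mode: Bazel sends one JSON WorkRequest per line on stdin.
-    for line in sys.stdin:
-        if not line.strip():
-            continue
-        request = json.loads(line)
-        code, output = run_tool(request.get("arguments", []))
-        response = {
-            "exitCode": code,
-            "output": output,
-            # The JSON protocol uses camel case, as noted above.
-            "requestId": request.get("requestId", 0),
-        }
-        # Responses are newline-delimited; flush because the pipe stays open.
-        sys.stdout.write(json.dumps(response) + "\n")
-        sys.stdout.flush()
-    return 0
-
-
-if __name__ == "__main__":
-    sys.exit(main(sys.argv))
-```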
-If you are working in JavaScript or TypeScript, the
-[@bazel/worker package](https://www.npmjs.com/package/@bazel/worker)
-and
-[nodejs worker example](https://github.com/bazelbuild/rules_nodejs/tree/stable/examples/worker)
-might be helpful.
-
-## How do workers affect sandboxing?
-
-Using the `worker` strategy by default does not run the action in a
-[sandbox](/docs/sandboxing), similar to the `local` strategy. You can set the
-`--worker_sandboxing` flag to run all workers inside sandboxes, making sure each
-execution of the tool only sees the input files it's supposed to have. The tool
-may still leak information between requests internally, for instance through a
-cache. Using the `dynamic` strategy
-[requires workers to be sandboxed](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/exec/SpawnStrategyRegistry.java).
-
-To allow correct use of compiler caches with workers, a digest is passed along
-with each input file. Thus the compiler or the wrapper can check if the input is
-still valid without having to read the file.
-
-Even when using the input digests to guard against unwanted caching, sandboxed
-workers offer less strict sandboxing than a pure sandbox, because the tool may
-keep other internal state that has been affected by previous requests.
-
-Multiplex workers can only be sandboxed if the worker implementation supports
-it, and this sandboxing must be separately enabled with the
-`--experimental_worker_multiplex_sandboxing` flag. See more details in
-[the design doc](https://docs.google.com/document/d/1ncLW0hz6uDhNvci1dpzfEoifwTiNTqiBEm1vi-bIIRM/edit).
-
-## Further reading
-
-For more information on persistent workers, see:
-
-* [Original persistent workers blog post](https://blog.bazel.build/2015/12/10/java-workers.html)
-* [Haskell implementation description](https://www.tweag.io/blog/2019-09-25-bazel-ghc-persistent-worker-internship/)
-* [Blog post by Mike Morearty](https://medium.com/@mmorearty/how-to-create-a-persistent-worker-for-bazel-7738bba2cabb)
-* [Front End Development with Bazel: Angular/TypeScript and Persistent Workers
-  w/ Asana](https://www.youtube.com/watch?v=0pgERydGyqo)
-* [Bazel strategies explained](https://jmmv.dev/2019/12/bazel-strategies.html)
-* [Informative worker strategy discussion on the bazel-discuss mailing list](https://groups.google.com/forum/#!msg/bazel-discuss/oAEnuhYOPm8/ol7hf4KWJgAJ)
diff --git a/8.3.1/remote/rbe.mdx b/8.3.1/remote/rbe.mdx
deleted file mode 100644
index 75d4a15..0000000
--- a/8.3.1/remote/rbe.mdx
+++ /dev/null
@@ -1,33 +0,0 @@
----
-title: 'Remote Execution Overview'
----
-
-
-
-This page covers the benefits, requirements, and options for running Bazel
-with remote execution.
-
-By default, Bazel executes builds and tests on your local machine. Remote
-execution of a Bazel build allows you to distribute build and test actions
-across multiple machines, such as a datacenter.
-
-Remote execution provides the following benefits:
-
-* Faster build and test execution through scaling of nodes available
-  for parallel actions
-* A consistent execution environment for a development team
-* Reuse of build outputs across a development team
-
-Bazel uses an open-source
-[gRPC protocol](https://github.com/bazelbuild/remote-apis)
-to allow for remote execution and remote caching.
-
-For a list of commercially supported remote execution services as well as
-self-service tools, see
-[Remote Execution Services](https://www.bazel.build/remote-execution-services.html).
-
-## Requirements
-
-Remote execution of Bazel builds imposes a set of mandatory configuration
-constraints on the build. For more information, see
-[Adapting Bazel Rules for Remote Execution](/remote/rules).
diff --git a/8.3.1/remote/rules.mdx b/8.3.1/remote/rules.mdx
deleted file mode 100644
index 340ab02..0000000
--- a/8.3.1/remote/rules.mdx
+++ /dev/null
@@ -1,180 +0,0 @@
----
-title: 'Adapting Bazel Rules for Remote Execution'
----
-
-
-
-This page is intended for Bazel users writing custom build and test rules
-who want to understand the requirements for Bazel rules in the context of
-remote execution.
-
-Remote execution allows Bazel to execute actions on a separate platform, such as
-a datacenter. Bazel uses a
-[gRPC protocol](https://github.com/bazelbuild/remote-apis/blob/main/build/bazel/remote/execution/v2/remote_execution.proto)
-for its remote execution. You can try remote execution with
-[bazel-buildfarm](https://github.com/bazelbuild/bazel-buildfarm),
-an open-source project that aims to provide a distributed remote execution
-platform.
-
-This page uses the following terminology when referring to different
-environment types or *platforms*:
-
-* **Host platform** - where Bazel runs.
-* **Execution platform** - where Bazel actions run.
-* **Target platform** - where the build outputs (and some actions) run.
-
-## Overview
-
-When configuring a Bazel build for remote execution, you must follow the
-guidelines described in this page to ensure the build executes remotely
-error-free. This is due to the nature of remote execution, namely:
-
-* **Isolated build actions.** Build tools do not retain state and dependencies
-  cannot leak between them.
-
-* **Diverse execution environments.** Local build configuration is not always
-  suitable for remote execution environments.
-
-This page describes the issues that can arise when implementing custom Bazel
-build and test rules for remote execution and how to avoid them. It covers the
-following topics:
-
-* [Invoking build tools through toolchain rules](#toolchain-rules)
-* [Managing implicit dependencies](#manage-dependencies)
-* [Managing platform-dependent binaries](#manage-binaries)
-* [Managing configure-style WORKSPACE rules](#manage-workspace-rules)
-
-## Invoking build tools through toolchain rules
-
-A Bazel toolchain rule is a configuration provider that tells a build rule what
-build tools, such as compilers and linkers, to use and how to configure them
-using parameters defined by the rule's creator. A toolchain rule allows build
-and test rules to invoke build tools in a predictable, preconfigured manner
-that's compatible with remote execution. For example, use a toolchain rule
-instead of invoking build tools via the `PATH`, `JAVA_HOME`, or other local
-variables that may not be set to equivalent values (or at all) in the remote
-execution environment.
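-
-As a sketch of what this looks like in a rule implementation, the fragment
-below resolves the compiler through a registered toolchain instead of probing
-`PATH`. The `//my_lang:toolchain_type` label and the `compiler` field on the
-toolchain are hypothetical, not a real API:
-
-```python
-def _my_lang_binary_impl(ctx):
-    # Resolve the toolchain registered for this build; no environment lookup.
-    toolchain = ctx.toolchains["//my_lang:toolchain_type"]
-    out = ctx.actions.declare_file(ctx.label.name + ".out")
-    ctx.actions.run(
-        executable = toolchain.compiler,  # hypothetical toolchain field
-        arguments = [f.path for f in ctx.files.srcs] + ["-o", out.path],
-        inputs = ctx.files.srcs,
-        outputs = [out],
-        mnemonic = "MyLangCompile",
-    )
-    return [DefaultInfo(files = depset([out]))]
-
-my_lang_binary = rule(
-    implementation = _my_lang_binary_impl,
-    attrs = {"srcs": attr.label_list(allow_files = True)},
-    toolchains = ["//my_lang:toolchain_type"],
-)
-```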
-
-Toolchain rules currently exist for Bazel build and test rules for
-[Scala](https://github.com/bazelbuild/rules_scala/blob/master/scala/scala_toolchain.bzl),
-[Rust](https://github.com/bazelbuild/rules_rust/blob/main/rust/toolchain.bzl),
-and [Go](https://github.com/bazelbuild/rules_go/blob/master/go/toolchains.rst),
-and new toolchain rules are under way for other languages and tools such as
-[bash](https://docs.google.com/document/d/e/2PACX-1vRCSB_n3vctL6bKiPkIa_RN_ybzoAccSe0ic8mxdFNZGNBJ3QGhcKjsL7YKf-ngVyjRZwCmhi_5KhcX/pub).
-If a toolchain rule does not exist for the tool your rule uses, consider
-[creating a toolchain rule](/extending/toolchains#creating-a-toolchain-rule).
-
-## Managing implicit dependencies
-
-If a build tool can access dependencies across build actions, those actions will
-fail when remotely executed because each remote build action is executed
-separately from others. Some build tools retain state across build actions and
-access dependencies that have not been explicitly included in the tool
-invocation, which will cause remotely executed build actions to fail.
-
-For example, when Bazel instructs a stateful compiler to locally build _foo_,
-the compiler retains references to foo's build outputs. When Bazel then
-instructs the compiler to build _bar_, which depends on _foo_, without
-explicitly stating that dependency in the BUILD file for inclusion in the
-compiler invocation, the action executes successfully as long as the same
-compiler instance executes for both actions (as is typical for local execution).
-However, since in a remote execution scenario each build action executes a
-separate compiler instance, compiler state and _bar_'s implicit dependency on
-_foo_ will be lost and the build will fail.
-
-To help detect and eliminate these dependency problems, Bazel 0.14.1 offers the
-local Docker sandbox, which has the same restrictions for dependencies as remote
-execution. Use the sandbox to prepare your build for remote execution by
-identifying and resolving dependency-related build errors. See [Troubleshooting Bazel Remote Execution with Docker Sandbox](/remote/sandbox)
-for more information.
-
-## Managing platform-dependent binaries
-
-Typically, a binary built on the host platform cannot safely execute on an
-arbitrary remote execution platform due to potentially mismatched dependencies.
-For example, the SingleJar binary supplied with Bazel targets the host platform.
-However, for remote execution, SingleJar must be compiled as part of the process
-of building your code so that it targets the remote execution platform. (See the
-[target selection logic](https://github.com/bazelbuild/bazel/blob/130aeadfd660336572c3da397f1f107f0c89aa8d/tools/jdk/BUILD#L115).)
-
-Do not ship binaries of build tools required by your build with your source code
-unless you are sure they will safely run in your execution platform. Instead, do
-one of the following:
-
-* Ship or externally reference the source code for the tool so that it can be
-  built for the remote execution platform.
-
-* Pre-install the tool into the remote execution environment (for example, a
-  toolchain container) if it's stable enough and use toolchain rules to run it
-  in your build.
-
-## Managing configure-style WORKSPACE rules
-
-Bazel's `WORKSPACE` rules can be used for probing the host platform for tools
-and libraries required by the build, which, for local builds, is also Bazel's
-execution platform.
If the build explicitly depends on local build tools and -artifacts, it will fail during remote execution if the remote execution platform -is not identical to the host platform. - -The following actions performed by `WORKSPACE` rules are not compatible with -remote execution: - -* **Building binaries.** Executing compilation actions in `WORKSPACE` rules - results in binaries that are incompatible with the remote execution platform - if different from the host platform. - -* **Installing `pip` packages.** `pip` packages installed via `WORKSPACE` - rules require that their dependencies be pre-installed on the host platform. - Such packages, built specifically for the host platform, will be - incompatible with the remote execution platform if different from the host - platform. - -* **Symlinking to local tools or artifacts.** Symlinks to tools or libraries - installed on the host platform created via `WORKSPACE` rules will cause the - build to fail on the remote execution platform as Bazel will not be able to - locate them. Instead, create symlinks using standard build actions so that - the symlinked tools and libraries are accessible from Bazel's `runfiles` - tree. Do not use [`repository_ctx.symlink`](/rules/lib/builtins/repository_ctx#symlink) - to symlink target files outside of the external repo directory. - -* **Mutating the host platform.** Avoid creating files outside of the Bazel - `runfiles` tree, creating environment variables, and similar actions, as - they may behave unexpectedly on the remote execution platform. - -To help find potential non-hermetic behavior you can use [Workspace rules log](/remote/workspace). - -If an external dependency executes specific operations dependent on the host -platform, you should split those operations between `WORKSPACE` and build -rules as follows: - -* **Platform inspection and dependency enumeration.** These operations are - safe to execute locally via `WORKSPACE` rules, which can check which - libraries are installed, download packages that must be built, and prepare - required artifacts for compilation. For remote execution, these rules must - also support using pre-checked artifacts to provide the information that - would normally be obtained during host platform inspection. Pre-checked - artifacts allow Bazel to describe dependencies as if they were local. Use - conditional statements or the `--override_repository` flag for this. - -* **Generating or compiling target-specific artifacts and platform mutation**. - These operations must be executed via regular build rules. Actions that - produce target-specific artifacts for external dependencies must execute - during the build. - -To more easily generate pre-checked artifacts for remote execution, you can use -`WORKSPACE` rules to emit generated files. You can run those rules on each new -execution environment, such as inside each toolchain container, and check the -outputs of your remote execution build in to your source repo to reference. - -For example, for Tensorflow's rules for [`cuda`](https://github.com/tensorflow/tensorflow/blob/master/third_party/gpus/cuda_configure.bzl) -and [`python`](https://github.com/tensorflow/tensorflow/blob/master/third_party/py/python_configure.bzl), -the `WORKSPACE` rules produce the following [`BUILD files`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/third_party/toolchains/cpus/py). -For local execution, files produced by checking the host environment are used. 
-For remote execution, a [conditional statement](https://github.com/tensorflow/tensorflow/blob/master/third_party/py/python_configure.bzl#L304) -on an environment variable allows the rule to use files that are checked into -the repo. - -The `BUILD` files declare [`genrules`](https://github.com/tensorflow/tensorflow/blob/master/third_party/py/python_configure.bzl#L84) -that can run both locally and remotely, and perform the necessary processing -that was previously done via `repository_ctx.symlink` as shown [here](https://github.com/tensorflow/tensorflow/blob/d1ba01f81d8fa1d0171ba9ce871599063d5c7eb9/third_party/gpus/cuda_configure.bzl#L730). diff --git a/8.3.1/remote/sandbox.mdx b/8.3.1/remote/sandbox.mdx deleted file mode 100644 index 5e2e823..0000000 --- a/8.3.1/remote/sandbox.mdx +++ /dev/null @@ -1,259 +0,0 @@ ---- -title: 'Troubleshooting Bazel Remote Execution with Docker Sandbox' ---- - - - -Bazel builds that succeed locally may fail when executed remotely due to -restrictions and requirements that do not affect local builds. The most common -causes of such failures are described in [Adapting Bazel Rules for Remote Execution](/remote/rules). - -This page describes how to identify and resolve the most common issues that -arise with remote execution using the Docker sandbox feature, which imposes -restrictions upon the build equal to those of remote execution. This allows you -to troubleshoot your build without the need for a remote execution service. - -The Docker sandbox feature mimics the restrictions of remote execution as -follows: - -* **Build actions execute in toolchain containers.** You can use the same - toolchain containers to run your build locally and remotely via a service - supporting containerized remote execution. - -* **No extraneous data crosses the container boundary.** Only explicitly - declared inputs and outputs enter and leave the container, and only after - the associated build action successfully completes. - -* **Each action executes in a fresh container.** A new, unique container is - created for each spawned build action. - -Note: Builds take noticeably more time to complete when the Docker sandbox -feature is enabled. This is normal. - -You can troubleshoot these issues using one of the following methods: - -* **[Troubleshooting natively.](#troubleshooting-natively)** With this method, - Bazel and its build actions run natively on your local machine. The Docker - sandbox feature imposes restrictions upon the build equal to those of remote - execution. However, this method will not detect local tools, states, and - data leaking into your build, which will cause problems with remote execution. - -* **[Troubleshooting in a Docker container.](#troubleshooting-docker-container)** - With this method, Bazel and its build actions run inside a Docker container, - which allows you to detect tools, states, and data leaking from the local - machine into the build in addition to imposing restrictions - equal to those of remote execution. This method provides insight into your - build even if portions of the build are failing. This method is experimental - and not officially supported. - -## Prerequisites - -Before you begin troubleshooting, do the following if you have not already done so: - -* Install Docker and configure the permissions required to run it. -* Install Bazel 0.14.1 or later. Earlier versions do not support the Docker - sandbox feature. 
-* Add the [bazel-toolchains](https://releases.bazel.build/bazel-toolchains.html)
-  repo, pinned to the latest release version, to your build's `WORKSPACE` file
-  as described [here](https://releases.bazel.build/bazel-toolchains.html).
-* Add flags to your `.bazelrc` file to enable the feature. Create the file in
-  the root directory of your Bazel project if it does not exist. Flags below
-  are a reference sample. Please see the latest
-  [`.bazelrc`](https://github.com/bazelbuild/bazel-toolchains/tree/master/bazelrc)
-  file in the bazel-toolchains repo and copy the values of the flags defined
-  there for config `docker-sandbox`.
-
-```
-# Docker Sandbox Mode
-build:docker-sandbox --host_javabase=<...>
-build:docker-sandbox --javabase=<...>
-build:docker-sandbox --crosstool_top=<...>
-build:docker-sandbox --experimental_docker_image=<...>
-build:docker-sandbox --spawn_strategy=docker --strategy=Javac=docker --genrule_strategy=docker
-build:docker-sandbox --experimental_docker_verbose
-build:docker-sandbox --experimental_enable_docker_sandbox
-```
-
-Note: The flags referenced in the `.bazelrc` file shown above are configured
-to run within the [`rbe-ubuntu16-04`](https://console.cloud.google.com/launcher/details/google/rbe-ubuntu16-04)
-container.
-
-If your rules require additional tools, do the following:
-
-1. Create a custom Docker container by installing tools using a [Dockerfile](https://docs.docker.com/engine/reference/builder/)
-   and [building](https://docs.docker.com/engine/reference/commandline/build/)
-   the image locally.
-
-2. Replace the value of the `--experimental_docker_image` flag above with the
-   name of your custom container image.
-
-
-## Troubleshooting natively
-
-This method executes Bazel and all of its build actions directly on the local
-machine and is a reliable way to confirm whether your build will succeed when
-executed remotely.
-
-However, with this method, locally installed tools, binaries, and data may leak
-into your build, especially if it uses [configure-style WORKSPACE rules](/remote/rules#manage-workspace-rules).
-Such leaks will cause problems with remote execution; to detect them, [troubleshoot in a Docker container](#troubleshooting-docker-container)
-in addition to troubleshooting natively.
-
-### Step 1: Run the build
-
-1. Add the `--config=docker-sandbox` flag to the Bazel command that executes
-   your build. For example:
-
-   ```posix-terminal
-   bazel --bazelrc=.bazelrc build --config=docker-sandbox <target>
-   ```
-
-2. Run the build and wait for it to complete. The build will run up to four
-   times slower than normal due to the Docker sandbox feature.
-
-You may encounter the following error:
-
-```none {:.devsite-disable-click-to-copy}
-ERROR: 'docker' is an invalid value for docker spawn strategy.
-```
-
-If you do, run the build again with the `--experimental_docker_verbose` flag.
-This flag enables verbose error messages. This error is typically caused by a
-faulty Docker installation or lack of permissions to execute it under the
-current user account. See the [Docker documentation](https://docs.docker.com/install/linux/linux-postinstall/)
-for more information. If problems persist, skip ahead to [Troubleshooting in a Docker container](#troubleshooting-docker-container).
-
-### Step 2: Resolve detected issues
-
-The following are the most commonly encountered issues and their workarounds.
-
-* **A file, tool, binary, or resource referenced by the Bazel runfiles tree is
-  missing.**
-  Confirm that all dependencies of the affected targets have been
-  [explicitly declared](/concepts/dependencies). See
-  [Managing implicit dependencies](/remote/rules#manage-dependencies)
-  for more information.
-
-* **A file, tool, binary, or resource referenced by an absolute path or the `PATH`
-  variable is missing.** Confirm that all required tools are installed within
-  the toolchain container and use [toolchain rules](/extending/toolchains) to properly
-  declare dependencies pointing to the missing resource. See
-  [Invoking build tools through toolchain rules](/remote/rules#invoking-build-tools-through-toolchain-rules)
-  for more information.
-
-* **A binary execution fails.** One of the build rules is referencing a binary
-  incompatible with the execution environment (the Docker container). See
-  [Managing platform-dependent binaries](/remote/rules#manage-binaries)
-  for more information. If you cannot resolve the issue, contact [bazel-discuss@google.com](mailto:bazel-discuss@google.com)
-  for help.
-
-* **A file from `@local_jdk` is missing or causing errors.** The Java binaries
-  on your local machine are leaking into the build while being incompatible with
-  it. Use [`java_toolchain`](/reference/be/java#java_toolchain)
-  in your rules and targets instead of `@local_jdk`. Contact [bazel-discuss@google.com](mailto:bazel-discuss@google.com) if you need further help.
-
-* **Other errors.** Contact [bazel-discuss@google.com](mailto:bazel-discuss@google.com) for help.
-
-## Troubleshooting in a Docker container
-
-With this method, Bazel runs inside a host Docker container, and Bazel's build
-actions execute inside individual toolchain containers spawned by the Docker
-sandbox feature. The sandbox spawns a brand new toolchain container for each
-build action and only one action executes in each toolchain container.
-
-This method provides more granular control of tools installed in the host
-environment. By separating the execution of the build from the execution of its
-build actions and keeping the installed tooling to a minimum, you can verify
-whether your build has any dependencies on the local execution environment.
-
-### Step 1: Build the container
-
-Note: The commands below are tailored specifically for a `debian:stretch` base.
-For other bases, modify them as necessary.
-
-1. Create a `Dockerfile` that creates the Docker container and installs Bazel
-   with a minimal set of build tools:
-
-   ```
-   FROM debian:stretch
-
-   RUN apt-get update && apt-get install -y apt-transport-https curl software-properties-common git gcc gnupg2 g++ openjdk-8-jdk-headless python-dev zip wget vim
-
-   RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add -
-
-   RUN add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian $(lsb_release -cs) stable"
-
-   RUN apt-get update && apt-get install -y docker-ce
-
-   RUN wget https://releases.bazel.build/<version>/release/bazel-<version>-installer-linux-x86_64.sh -O ./bazel-installer.sh && chmod 755 ./bazel-installer.sh
-
-   RUN ./bazel-installer.sh
-   ```
-
-2. Build the container as `bazel_container`:
-
-   ```posix-terminal
-   docker build -t bazel_container - < Dockerfile
-   ```
-
-### Step 2: Start the container
-
-Start the Docker container using the command shown below. In the command,
-substitute the path to the source code on your host that you want to build.
-
-```posix-terminal
-docker run -it \
-  -v /var/run/docker.sock:/var/run/docker.sock \
-  -v /tmp:/tmp \
-  -v <your source code directory>:/src \
-  -w /src \
-  bazel_container \
-  /bin/bash
-```
-
-This command runs the container as root, mapping the Docker socket, and
-mounting the `/tmp` directory. This allows Bazel to spawn other Docker
-containers and to use directories under `/tmp` to share files with those
-containers. Your source code is available at `/src` inside the container.
-
-The command intentionally starts from a `debian:stretch` base container that
-includes binaries incompatible with the `rbe-ubuntu16-04` container used as a
-toolchain container. If binaries from the local environment are leaking into the
-toolchain container, they will cause build errors.
-
-### Step 3: Test the container
-
-Run the following commands from inside the Docker container to test it:
-
-```posix-terminal
-docker ps
-
-bazel version
-```
-
-### Step 4: Run the build
-
-Run the build as shown below. The output user is root so that it corresponds to
-a directory that is accessible with the same absolute path from inside the host
-container in which Bazel runs, from the toolchain containers spawned by the Docker
-sandbox feature in which Bazel's build actions are running, and from the local
-machine on which the host and action containers run.
-
-```posix-terminal
-bazel --output_user_root=/tmp/bazel_docker_root --bazelrc=.bazelrc \
-  build --config=docker-sandbox <target>
-```
-
-### Step 5: Resolve detected issues
-
-You can resolve build failures as follows:
-
-* If the build fails with an "out of disk space" error, you can increase this
-  limit by starting the host container with the flag `--memory=XX` where `XX`
-  is the allocated disk space in gigabytes. This is experimental and may
-  result in unpredictable behavior.
-
-* If the build fails during the analysis or loading phases, one or more of
-  your build rules declared in the WORKSPACE file are not compatible with
-  remote execution. See [Adapting Bazel Rules for Remote Execution](/remote/rules)
-  for possible causes and workarounds.
-
-* If the build fails for any other reason, see the troubleshooting steps in [Step 2: Resolve detected issues](#start-container).
diff --git a/8.3.1/remote/workspace.mdx b/8.3.1/remote/workspace.mdx
deleted file mode 100644
index ae0aea5..0000000
--- a/8.3.1/remote/workspace.mdx
+++ /dev/null
@@ -1,127 +0,0 @@
----
-title: 'Finding Non-Hermetic Behavior in WORKSPACE Rules'
----
-
-
-
-In the following, a host machine is the machine where Bazel runs.
-
-When using remote execution, the actual build and/or test steps are not
-happening on the host machine, but are instead sent off to the remote execution
-system. However, the steps involved in resolving workspace rules are happening
-on the host machine. If your workspace rules access information about the
-host machine for use during execution, your build is likely to break due to
-incompatibilities between the environments.
-
-As part of [adapting Bazel rules for remote
-execution](/remote/rules), you need to find such workspace rules
-and fix them. This page describes how to find potentially problematic workspace
-rules using the workspace log.
-
-
-## Finding non-hermetic rules
-
-[Workspace rules](/reference/be/workspace) allow the developer to add dependencies to
-external workspaces, but they are rich enough to allow arbitrary processing to
-happen in the process.
-All related commands are happening locally and can be a potential source of
-non-hermeticity. Usually non-hermetic behavior is introduced through
-[`repository_ctx`](/rules/lib/builtins/repository_ctx) which allows interacting
-with the host machine.
-
-Starting with Bazel 0.18, you can get a log of some potentially non-hermetic
-actions by adding the flag `--experimental_workspace_rules_log_file=[PATH]` to
-your Bazel command. Here `[PATH]` is a filename under which the log will be
-created.
-
-Things to note:
-
-* The log captures the events as they are executed. If some steps are
-  cached, they will not show up in the log, so to get a full result, don't
-  forget to run `bazel clean --expunge` beforehand.
-
-* Sometimes functions might be re-executed, in which case the related
-  events will show up in the log multiple times.
-
-* Workspace rules currently only log Starlark events.
-
-  Note: These particular rules do not cause hermeticity concerns as long
-  as a hash is specified.
-
-To find what was executed during workspace initialization:
-
-1. Run `bazel clean --expunge`. This command will clean your local cache and
-   any cached repositories, ensuring that all initialization will be re-run.
-
-2. Add `--experimental_workspace_rules_log_file=/tmp/workspacelog` to your
-   Bazel command and run the build.
-
-   This produces a binary proto file listing messages of type
-   [WorkspaceEvent](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/bazel/debug/workspace_log.proto?q=WorkspaceEvent)
-
-3. Download the Bazel source code and navigate to the Bazel folder by using
-   the command below. You need the source code to be able to parse the
-   workspace log with the
-   [workspacelog parser](https://source.bazel.build/bazel/+/master:src/tools/workspacelog/).
-
-   ```posix-terminal
-   git clone https://github.com/bazelbuild/bazel.git
-
-   cd bazel
-   ```
-
-4. In the Bazel source code repo, convert the whole workspace log to text.
-
-   ```posix-terminal
-   bazel build src/tools/workspacelog:parser
-
-   bazel-bin/src/tools/workspacelog/parser --log_path=/tmp/workspacelog > /tmp/workspacelog.txt
-   ```
-
-5. The output may be quite verbose and include output from built-in Bazel
-   rules.
-
-   To exclude specific rules from the output, use the `--exclude_rule` option.
-   For example:
-
-   ```posix-terminal
-   bazel build src/tools/workspacelog:parser
-
-   bazel-bin/src/tools/workspacelog/parser --log_path=/tmp/workspacelog \
-       --exclude_rule "//external:local_config_cc" \
-       --exclude_rule "//external:dep" > /tmp/workspacelog.txt
-   ```
-
-6. Open `/tmp/workspacelog.txt` and check for unsafe operations.
-
-The log consists of
-[WorkspaceEvent](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/bazel/debug/workspace_log.proto?q=WorkspaceEvent)
-messages outlining certain potentially non-hermetic actions performed on a
-[`repository_ctx`](/rules/lib/builtins/repository_ctx).
-
-The actions that have been highlighted as potentially non-hermetic are as follows:
-
-* `execute`: executes an arbitrary command on the host environment. Check if
-  these may introduce any dependencies on the host environment.
-
-* `download`, `download_and_extract`: to ensure hermetic builds, make sure
-  that sha256 is specified.
-
-* `file`, `template`: this is not non-hermetic in itself, but may be a mechanism
-  for introducing dependencies on the host environment into the repository.
- Ensure that you understand where the input comes from, and that it does not - depend on the host environment. - -* `os`: this is not non-hermetic in itself, but an easy way to get dependencies - on the host environment. A hermetic build would generally not call this. - In evaluating whether your usage is hermetic, keep in mind that this is - running on the host and not on the workers. Getting environment specifics - from the host is generally not a good idea for remote builds. - -* `symlink`: this is normally safe, but look for red flags. Any symlinks to - outside the repository or to an absolute path would cause problems on the - remote worker. If the symlink is created based on host machine properties - it would probably be problematic as well. - -* `which`: checking for programs installed on the host is usually problematic - since the workers may have different configurations. diff --git a/8.3.1/rules/bzl-style.mdx b/8.3.1/rules/bzl-style.mdx deleted file mode 100644 index 941028a..0000000 --- a/8.3.1/rules/bzl-style.mdx +++ /dev/null @@ -1,212 +0,0 @@ ---- -title: '.bzl style guide' ---- - - - -This page covers basic style guidelines for Starlark and also includes -information on macros and rules. - -[Starlark](/rules/language) is a -language that defines how software is built, and as such it is both a -programming and a configuration language. - -You will use Starlark to write `BUILD` files, macros, and build rules. Macros and -rules are essentially meta-languages - they define how `BUILD` files are written. -`BUILD` files are intended to be simple and repetitive. - -All software is read more often than it is written. This is especially true for -Starlark, as engineers read `BUILD` files to understand dependencies of their -targets and details of their builds. This reading will often happen in passing, -in a hurry, or in parallel to accomplishing some other task. Consequently, -simplicity and readability are very important so that users can parse and -comprehend `BUILD` files quickly. - -When a user opens a `BUILD` file, they quickly want to know the list of targets in -the file; or review the list of sources of that C++ library; or remove a -dependency from that Java binary. Each time you add a layer of abstraction, you -make it harder for a user to do these tasks. - -`BUILD` files are also analyzed and updated by many different tools. Tools may not -be able to edit your `BUILD` file if it uses abstractions. Keeping your `BUILD` -files simple will allow you to get better tooling. As a code base grows, it -becomes more and more frequent to do changes across many `BUILD` files in order to -update a library or do a cleanup. - -Important: Do not create a variable or macro just to avoid some amount of -repetition in `BUILD` files. Your `BUILD` file should be easily readable both by -developers and tools. The -[DRY](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself) principle doesn't -really apply here. - -## General advice - -* Use [Buildifier](https://github.com/bazelbuild/buildtools/tree/master/buildifier#linter) - as a formatter and linter. -* Follow [testing guidelines](/rules/testing). - -## Style - -### Python style - -When in doubt, follow the -[PEP 8 style guide](https://www.python.org/dev/peps/pep-0008/) where possible. -In particular, use four rather than two spaces for indentation to follow the -Python convention. - -Since -[Starlark is not Python](/rules/language#differences-with-python), -some aspects of Python style do not apply. 
-For example, PEP 8 advises that comparisons to singletons be done with `is`,
-which is not an operator in Starlark.
-
-
-### Docstring
-
-Document files and functions using [docstrings](https://github.com/bazelbuild/buildtools/blob/master/WARNINGS.md#function-docstring).
-Use a docstring at the top of each `.bzl` file, and a docstring for each public
-function.
-
-### Document rules and aspects
-
-Rules and aspects, along with their attributes, as well as providers and their
-fields, should be documented using the `doc` argument.
-
-### Naming convention
-
-* Variables and function names use lowercase with words separated by
-  underscores (`[a-z][a-z0-9_]*`), such as `cc_library`.
-* Top-level private values start with one underscore. Bazel enforces that
-  private values cannot be used from other files. Local variables should not
-  use the underscore prefix.
-
-### Line length
-
-As in `BUILD` files, there is no strict line length limit as labels can be long.
-When possible, try to use at most 79 characters per line (following Python's
-style guide, [PEP 8](https://www.python.org/dev/peps/pep-0008/)). This guideline
-should not be enforced strictly: editors should display more than 80 columns,
-automated changes will frequently introduce longer lines, and humans shouldn't
-spend time splitting lines that are already readable.
-
-### Keyword arguments
-
-In keyword arguments, spaces around the equal sign are preferred:
-
-```python
-def fct(name, srcs):
-    filtered_srcs = my_filter(source = srcs)
-    native.cc_library(
-        name = name,
-        srcs = filtered_srcs,
-        testonly = True,
-    )
-```
-
-### Boolean values
-
-Prefer values `True` and `False` (rather than `1` and `0`) for boolean values
-(such as when using a boolean attribute in a rule).
-
-### Use print only for debugging
-
-Do not use the `print()` function in production code; it is only intended for
-debugging, and will spam all direct and indirect users of your `.bzl` file. The
-only exception is that you may submit code that uses `print()` if it is disabled
-by default and can only be enabled by editing the source -- for example, if all
-uses of `print()` are guarded by `if DEBUG:` where `DEBUG` is hardcoded to
-`False`. Be mindful of whether these statements are useful enough to justify
-their impact on readability.
-
-## Macros
-
-A macro is a function that instantiates one or more rules during the loading
-phase. In general, use rules whenever possible instead of macros. The build
-graph seen by the user is not the same as the one used by Bazel during the
-build - macros are expanded *before Bazel does any build graph analysis.*
-
-Because of this, when something goes wrong, the user will need to understand
-your macro's implementation to troubleshoot build problems. Additionally, `bazel
-query` results can be hard to interpret because targets shown in the results
-come from macro expansion. Finally, aspects are not aware of macros, so tooling
-depending on aspects (IDEs and others) might fail.
-
-A safe use for macros is for defining additional targets intended to be
-referenced directly at the Bazel CLI or in BUILD files: In that case, only the
-*end users* of those targets need to know about them, and any build problems
-introduced by macros are never far from their usage.
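-
-For example, a macro along these lines (a sketch with made-up names) adds a
-user-facing `<name>_docs` target next to an ordinary library, so a user can
-run `bazel build //pkg:foo_docs` directly:
-
-```python
-def my_library_with_docs(name, srcs = [], deps = [], **kwargs):
-    """Hypothetical macro: a cc_library plus a user-facing docs target."""
-    # The main target keeps the macro's name.
-    native.cc_library(
-        name = name,
-        srcs = srcs,
-        deps = deps,
-        **kwargs
-    )
-    # Additional target intended to be referenced directly at the CLI.
-    native.genrule(
-        name = "%s_docs" % name,
-        srcs = srcs,
-        outs = ["%s_docs.txt" % name],
-        cmd = "cat $(SRCS) > $@",
-    )
-```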
-
-For macros that define generated targets (implementation details of the macro
-which are not supposed to be referred to at the CLI or depended on by targets
-not instantiated by that macro), follow these best practices:
-
-* A macro should take a `name` argument and define a target with that name.
-  That target becomes that macro's *main target*.
-* Generated targets, that is all other targets defined by a macro, should:
-    * Have their names prefixed by `<name>` or `_<name>`. For example, using
-      `name = '%s_bar' % (name)`.
-    * Have restricted visibility (`//visibility:private`), and
-    * Have a `manual` tag to avoid expansion in wildcard targets (`:all`,
-      `...`, `:*`, etc).
-* The `name` should only be used to derive names of targets defined by the
-  macro, and not for anything else. For example, don't use the name to derive
-  a dependency or input file that is not generated by the macro itself.
-* All the targets created in the macro should be coupled in some way to the
-  main target.
-* Conventionally, `name` should be the first argument when defining a macro.
-* Keep the parameter names in the macro consistent. If a parameter is passed
-  as an attribute value to the main target, keep its name the same. If a macro
-  parameter serves the same purpose as a common rule attribute, such as
-  `deps`, name it as you would the attribute (see below).
-* When calling a macro, use only keyword arguments. This is consistent with
-  rules, and greatly improves readability.
-
-Engineers often write macros when the Starlark API of relevant rules is
-insufficient for their specific use case, regardless of whether the rule is
-defined within Bazel in native code, or in Starlark. If you're facing this
-problem, ask the rule author if they can extend the API to accomplish your
-goals.
-
-As a rule of thumb, the more macros resemble the rules, the better.
-
-See also [macros](/extending/macros#conventions).
-
-## Rules
-
-* Rules, aspects, and their attributes should use lower_case names ("snake
-  case").
-* Rule names are nouns that describe the main kind of artifact produced by the
-  rule, from the point of view of its dependencies (or for leaf rules, the
-  user). This is not necessarily a file suffix. For instance, a rule that
-  produces C++ artifacts meant to be used as Python extensions might be called
-  `py_extension`. For most languages, typical rules include:
-    * `*_library` - a compilation unit or "module".
-    * `*_binary` - a target producing an executable or a deployment unit.
-    * `*_test` - a test target. This can include multiple tests. Expect all
-      tests in a `*_test` target to be variations on the same theme, for
-      example, testing a single library.
-    * `*_import`: a target encapsulating a pre-compiled artifact, such as a
-      `.jar`, or a `.dll` that is used during compilation.
-* Use consistent names and types for attributes. Some generally applicable
-  attributes include:
-    * `srcs`: `label_list`, allowing files: source files, typically
-      human-authored.
-    * `deps`: `label_list`, typically *not* allowing files: compilation
-      dependencies.
-    * `data`: `label_list`, allowing files: data files, such as test data, etc.
-    * `runtime_deps`: `label_list`: runtime dependencies that are not needed
-      for compilation.
-* For any attributes with non-obvious behavior (for example, string templates
-  with special substitutions, or tools that are invoked with specific
-  requirements), provide documentation using the `doc` keyword argument to the
-  attribute's declaration (`attr.label_list()` or similar).
-* Rule implementation functions should almost always be private functions - (named with a leading underscore). A common style is to give the - implementation function for `myrule` the name `_myrule_impl`. -* Pass information between your rules using a well-defined - [provider](/extending/rules#providers) interface. Declare and document provider - fields. -* Design your rule with extensibility in mind. Consider that other rules might - want to interact with your rule, access your providers, and reuse the - actions you create. -* Follow [performance guidelines](/rules/performance) in your rules. diff --git a/8.3.1/rules/challenges.mdx b/8.3.1/rules/challenges.mdx deleted file mode 100644 index 10ff737..0000000 --- a/8.3.1/rules/challenges.mdx +++ /dev/null @@ -1,223 +0,0 @@ ---- -title: 'Challenges of Writing Rules' ---- - - - -This page gives a high-level overview of the specific issues and challenges -of writing efficient Bazel rules. - -## Summary Requirements - -* Assumption: Aim for Correctness, Throughput, Ease of Use & Latency -* Assumption: Large Scale Repositories -* Assumption: BUILD-like Description Language -* Historic: Hard Separation between Loading, Analysis, and Execution is - Outdated, but still affects the API -* Intrinsic: Remote Execution and Caching are Hard -* Intrinsic: Using Change Information for Correct and Fast Incremental Builds - requires Unusual Coding Patterns -* Intrinsic: Avoiding Quadratic Time and Memory Consumption is Hard - -## Assumptions - -Here are some assumptions made about the build system, such as need for -correctness, ease of use, throughput, and large scale repositories. The -following sections address these assumptions and offer guidelines to ensure -rules are written in an effective manner. - -### Aim for correctness, throughput, ease of use & latency - -We assume that the build system needs to be first and foremost correct with -respect to incremental builds. For a given source tree, the output of the -same build should always be the same, regardless of what the output tree looks -like. In the first approximation, this means Bazel needs to know every single -input that goes into a given build step, such that it can rerun that step if any -of the inputs change. There are limits to how correct Bazel can get, as it leaks -some information such as date / time of the build, and ignores certain types of -changes such as changes to file attributes. [Sandboxing](/docs/sandboxing) -helps ensure correctness by preventing reads to undeclared input files. Besides -the intrinsic limits of the system, there are a few known correctness issues, -most of which are related to Fileset or the C++ rules, which are both hard -problems. We have long-term efforts to fix these. - -The second goal of the build system is to have high throughput; we are -permanently pushing the boundaries of what can be done within the current -machine allocation for a remote execution service. If the remote execution -service gets overloaded, nobody can get work done. - -Ease of use comes next. Of multiple correct approaches with the same (or -similar) footprint of the remote execution service, we choose the one that is -easier to use. - -Latency denotes the time it takes from starting a build to getting the intended -result, whether that is a test log from a passing or failing test, or an error -message that a `BUILD` file has a typo. 
- -Note that these goals often overlap; latency is as much a function of throughput -of the remote execution service as is correctness relevant for ease of use. - -### Large scale repositories - -The build system needs to operate at the scale of large repositories where large -scale means that it does not fit on a single hard drive, so it is impossible to -do a full checkout on virtually all developer machines. A medium-sized build -will need to read and parse tens of thousands of `BUILD` files, and evaluate -hundreds of thousands of globs. While it is theoretically possible to read all -`BUILD` files on a single machine, we have not yet been able to do so within a -reasonable amount of time and memory. As such, it is critical that `BUILD` files -can be loaded and parsed independently. - -### BUILD-like description language - -In this context, we assume a configuration language that is -roughly similar to `BUILD` files in declaration of library and binary rules -and their interdependencies. `BUILD` files can be read and parsed independently, -and we avoid even looking at source files whenever we can (except for -existence). - -## Historic - -There are differences between Bazel versions that cause challenges and some -of these are outlined in the following sections. - -### Hard separation between loading, analysis, and execution is outdated but still affects the API - -Technically, it is sufficient for a rule to know the input and output files of -an action just before the action is sent to remote execution. However, the -original Bazel code base had a strict separation of loading packages, then -analyzing rules using a configuration (command-line flags, essentially), and -only then running any actions. This distinction is still part of the rules API -today, even though the core of Bazel no longer requires it (more details below). - -That means that the rules API requires a declarative description of the rule -interface (what attributes it has, types of attributes). There are some -exceptions where the API allows custom code to run during the loading phase to -compute implicit names of output files and implicit values of attributes. For -example, a java_library rule named 'foo' implicitly generates an output named -'libfoo.jar', which can be referenced from other rules in the build graph. - -Furthermore, the analysis of a rule cannot read any source files or inspect the -output of an action; instead, it needs to generate a partial directed bipartite -graph of build steps and output file names that is only determined from the rule -itself and its dependencies. - -## Intrinsic - -There are some intrinsic properties that make writing rules challenging and -some of the most common ones are described in the following sections. - -### Remote execution and caching are hard - -Remote execution and caching improve build times in large repositories by -roughly two orders of magnitude compared to running the build on a single -machine. However, the scale at which it needs to perform is staggering: Google's -remote execution service is designed to handle a huge number of requests per -second, and the protocol carefully avoids unnecessary roundtrips as well as -unnecessary work on the service side. - -At this time, the protocol requires that the build system knows all inputs to a -given action ahead of time; the build system then computes a unique action -fingerprint, and asks the scheduler for a cache hit. 
-If a cache hit is found, the scheduler replies with the digests of the output
-files; the files themselves are addressed by digest later on. However, this
-imposes restrictions on the Bazel rules, which need to declare all input files
-ahead of time.
-
-### Using change information for correct and fast incremental builds requires unusual coding patterns
-
-Above, we argued that in order to be correct, Bazel needs to know all the input
-files that go into a build step in order to detect whether that build step is
-still up-to-date. The same is true for package loading and rule analysis, and we
-have designed [Skyframe](/reference/skyframe) to handle this
-in general. Skyframe is a graph library and evaluation framework that takes a
-goal node (such as 'build //foo with these options'), and breaks it down into
-its constituent parts, which are then evaluated and combined to yield this
-result. As part of this process, Skyframe reads packages, analyzes rules, and
-executes actions.
-
-At each node, Skyframe tracks exactly which nodes any given node used to compute
-its own output, all the way from the goal node down to the input files (which
-are also Skyframe nodes). Having this graph explicitly represented in memory
-allows the build system to identify exactly which nodes are affected by a given
-change to an input file (including creation or deletion of an input file), doing
-the minimal amount of work to restore the output tree to its intended state.
-
-As part of this, each node performs a dependency discovery process. Each
-node can declare dependencies, and then use the contents of those dependencies
-to declare even further dependencies. In principle, this maps well to a
-thread-per-node model. However, medium-sized builds contain hundreds of
-thousands of Skyframe nodes, which isn't easily possible with current Java
-technology (and for historical reasons, we're currently tied to using Java, so
-no lightweight threads and no continuations).
-
-Instead, Bazel uses a fixed-size thread pool. However, that means that if a node
-declares a dependency that isn't available yet, we may have to abort that
-evaluation and restart it (possibly in another thread), when the dependency is
-available. This, in turn, means that nodes should not do this excessively; a
-node that declares N dependencies serially can potentially be restarted N times,
-costing O(N^2) time. Instead, we aim for up-front bulk declaration of
-dependencies, which sometimes requires reorganizing the code, or even splitting
-a node into multiple nodes to limit the number of restarts.
-
-Note that this technology isn't currently available in the rules API; instead,
-the rules API is still defined using the legacy concepts of loading, analysis,
-and execution phases. However, a fundamental restriction is that all accesses to
-other nodes have to go through the framework so that it can track the
-corresponding dependencies. Regardless of the language in which the build system
-is implemented or in which the rules are written (they don't have to be the
-same), rule authors must not use standard libraries or patterns that bypass
-Skyframe. For Java, that means avoiding java.io.File as well as any form of
-reflection, and any library that does either. Libraries that support dependency
-injection of these low-level interfaces still need to be set up correctly for
-Skyframe.
-
-This strongly suggests avoiding exposing rule authors to a full language runtime
-in the first place.
The danger of accidental use of such APIs is just too big -
-several Bazel bugs in the past were caused by rules using unsafe APIs, even
-though the rules were written by the Bazel team or other domain experts.
-
-### Avoiding quadratic time and memory consumption is hard
-
-To make matters worse, apart from the requirements imposed by Skyframe, the
-historical constraints of using Java, and the outdatedness of the rules API,
-accidentally introducing quadratic time or memory consumption is a fundamental
-problem in any build system based on library and binary rules. There are two
-very common patterns that introduce quadratic memory consumption (and therefore
-quadratic time consumption).
-
-1. Chains of Library Rules
-
-Consider the case of a chain of library rules: A depends on B, which depends on
-C, and so on. Then, we want to compute some property over the transitive
-closure of these rules, such as the Java runtime classpath, or the C++ linker
-command for each library. Naively, we might take a standard list
-implementation; however, this already introduces quadratic memory consumption:
-the first library contains one entry on the classpath, the second two, the
-third three, and so on, for a total of 1+2+3+...+N = O(N^2) entries.
-
-2. Binary Rules Depending on the Same Library Rules
-
-Consider the case where a set of binaries depends on the same library
-rules — such as if you have a number of test rules that test the same
-library code. Let's say out of N rules, half the rules are binary rules, and
-the other half are library rules. Now consider that each binary makes a copy of
-some property computed over the transitive closure of library rules, such as
-the Java runtime classpath, or the C++ linker command line. For example, it
-could expand the command line string representation of the C++ link action. N/2
-copies of N/2 elements is O(N^2) memory.
-
-#### Custom collections classes to avoid quadratic complexity
-
-Bazel is heavily affected by both of these scenarios, so we introduced a set of
-custom collection classes that effectively compress the information in memory by
-avoiding the copy at each step. Almost all of these data structures have set
-semantics, so we called the result
-[depset](/rules/lib/depset)
-(also known as `NestedSet` in the internal implementation). The majority of
-changes to reduce Bazel's memory consumption over the past several years were
-changes to use depsets instead of whatever was previously used.
-
-Unfortunately, usage of depsets does not automatically solve all the issues;
-in particular, even just iterating over a depset in each rule re-introduces
-quadratic time consumption. Internally, `NestedSet` also has some helper
-methods to facilitate interoperability with normal collections classes;
-unfortunately, accidentally passing a `NestedSet` to one of these methods leads
-to copying behavior, and reintroduces quadratic memory consumption.
diff --git a/8.3.1/rules/deploying.mdx b/8.3.1/rules/deploying.mdx
deleted file mode 100644
index 3fe2c86..0000000
--- a/8.3.1/rules/deploying.mdx
+++ /dev/null
@@ -1,223 +0,0 @@
----
-title: 'Deploying Rules'
----
-
-
-
-This page is for rule writers who are planning to make their rules available
-to others.
-
-We recommend you start a new ruleset from the template repository:
-https://github.com/bazel-contrib/rules-template
-That template follows the recommendations below; it includes API documentation
-generation and sets up a CI/CD pipeline to make it trivial to distribute your
-ruleset.
-
-## Hosting and naming rules
-
-New rules should go into their own GitHub repository under your organization.
-Start a thread on [GitHub](https://github.com/bazelbuild/bazel/discussions)
-if you feel like your rules belong in the [bazelbuild](https://github.com/bazelbuild)
-organization.
-
-Repository names for Bazel rules are standardized on the following format:
-`$ORGANIZATION/rules_$NAME`.
-See [examples on GitHub](https://github.com/search?q=rules+bazel&type=Repositories).
-For consistency, you should follow this same format when publishing your Bazel
-rules.
-
-Make sure to use a descriptive GitHub repository description and `README.md`
-title. For example:
-
-* Repository name: `bazelbuild/rules_go`
-* Repository description: *Go rules for Bazel*
-* Repository tags: `golang`, `bazel`
-* `README.md` header: *Go rules for [Bazel](https://bazel.build)*
-  (note the link to https://bazel.build which will guide users who are
-  unfamiliar with Bazel to the right place)
-
-Rules can be grouped either by language (such as Scala), runtime platform
-(such as Android), or framework (such as Spring).
-
-## Repository content
-
-Every rule repository should have a certain layout so that users can quickly
-understand new rules.
-
-For example, when writing new rules for the (make-believe)
-`mockascript` language, the rule repository would have the following structure:
-
-```
-/
-  LICENSE
-  README
-  MODULE.bazel
-  mockascript/
-    constraints/
-      BUILD
-    runfiles/
-      BUILD
-      runfiles.mocs
-    BUILD
-    defs.bzl
-  tests/
-    BUILD
-    some_test.sh
-    another_test.py
-  examples/
-    BUILD
-    bin.mocs
-    lib.mocs
-    test.mocs
-```
-
-### MODULE.bazel
-
-In the project's `MODULE.bazel`, you should define the name that users will use
-to reference your rules. If your rules belong to the
-[bazelbuild](https://github.com/bazelbuild) organization, you must use
-`rules_<lang>` (such as `rules_mockascript`). Otherwise, you should name your
-repository `<org>_rules_<lang>` (such as `build_stack_rules_proto`). Please
-start a thread on [GitHub](https://github.com/bazelbuild/bazel/discussions)
-if you feel like your rules should follow the convention for rules in the
-[bazelbuild](https://github.com/bazelbuild) organization.
-
-In the following sections, assume the repository belongs to the
-[bazelbuild](https://github.com/bazelbuild) organization.
-
-```
-module(name = "rules_mockascript")
-```
-
-### README
-
-At the top level, there should be a `README` that contains a brief description
-of your ruleset and the API that users should expect.
-
-### Rules
-
-Often there will be multiple rules provided by your repository. Create a
-directory named after the language, and provide an entry point: a `defs.bzl`
-file exporting all rules (also include a `BUILD` file so the directory is a
-package). For `rules_mockascript` that means there will be a directory named
-`mockascript`, and a `BUILD` file and a `defs.bzl` file inside:
-
-```
-/
-  mockascript/
-    BUILD
-    defs.bzl
-```
-
-### Constraints
-
-If your rule defines
-[toolchain](/extending/toolchains) rules,
-it's possible that you'll need to define custom `constraint_setting`s and/or
-`constraint_value`s. Put these into a `//<lang>/constraints` package. Your
-directory structure will look like this:
-
-```
-/
-  mockascript/
-    constraints/
-      BUILD
-    BUILD
-    defs.bzl
-```
-
-Please read
-[github.com/bazelbuild/platforms](https://github.com/bazelbuild/platforms)
-for best practices and to see what constraints are already present, and
-consider contributing your constraints there if they are language-independent.
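-
-As a concrete illustration, a hypothetical `//mockascript/constraints/BUILD`
-for the make-believe language above might declare a custom setting with two
-values. This is a minimal sketch; the `mockascript_runtime` setting and its
-values are invented for this example:
-
-```
-constraint_setting(name = "mockascript_runtime")
-
-constraint_value(
-    name = "interpreted",
-    constraint_setting = ":mockascript_runtime",
-)
-
-constraint_value(
-    name = "compiled",
-    constraint_setting = ":mockascript_runtime",
-)
-```
-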
-Be mindful when introducing custom constraints: all users of your rules will
-use them to perform platform-specific logic in their `BUILD` files (for
-example, using [selects](/reference/be/functions#select)).
-With custom constraints, you define a language that the whole Bazel ecosystem
-will speak.
-
-### Runfiles library
-
-If your rule provides a standard library for accessing runfiles, it should be
-in the form of a library target located at `//<lang>/runfiles` (an abbreviation
-of `//<lang>/runfiles:runfiles`). User targets that need to access their data
-dependencies will typically add this target to their `deps` attribute.
-
-### Repository rules
-
-#### Dependencies
-
-Your rules might have external dependencies, which you'll need to specify in
-your MODULE.bazel file.
-
-#### Registering toolchains
-
-Your rules might also register toolchains, which you can also specify in the
-MODULE.bazel file.
-
-Note that in order to resolve toolchains in the analysis phase, Bazel needs to
-analyze all `toolchain` targets that are registered. Bazel will not need to
-analyze all targets referenced by the `toolchain.toolchain` attribute. If
-registering toolchains requires complex computation in the repository, consider
-splitting the repository with `toolchain` targets from the repository with
-`<lang>_toolchain` targets. The former will always be fetched, while the latter
-will only be fetched when the user actually needs to build `<lang>` code.
-
-
-#### Release snippet
-
-In your release announcement, provide a snippet that your users can copy-paste
-into their `MODULE.bazel` file. In general, this snippet will look as follows:
-
-```
-bazel_dep(name = "rules_<lang>", version = "<version>")
-```
-
-
-### Tests
-
-There should be tests that verify that the rules are working as expected. This
-can either be in the standard location for the language the rules are for, or
-in a `tests/` directory at the top level.
-
-### Examples (optional)
-
-It is useful to have an `examples/` directory that shows users a couple of
-basic ways that the rules can be used.
-
-## CI/CD
-
-Many rulesets use GitHub Actions. See the configuration used in the
-[rules-template](https://github.com/bazel-contrib/rules-template/tree/main/.github/workflows)
-repo, which is simplified using a "reusable workflow" hosted in the
-bazel-contrib org. `ci.yaml` runs tests on each PR and `main` commit, and
-`release.yaml` runs anytime you push a tag to the repository.
-See comments in the rules-template repo for more information.
-
-If your repository is under the [bazelbuild organization](https://github.com/bazelbuild),
-you can [ask to add](https://github.com/bazelbuild/continuous-integration/issues/new?template=adding-your-project-to-bazel-ci.md&title=Request+to+add+new+project+%5BPROJECT_NAME%5D&labels=new-project)
-it to [ci.bazel.build](http://ci.bazel.build).
-
-## Documentation
-
-See the [Stardoc documentation](https://github.com/bazelbuild/stardoc) for
-instructions on how to comment your rules so that documentation can be
-generated automatically.
-
-The [rules-template docs/ folder](https://github.com/bazel-contrib/rules-template/tree/main/docs)
-shows a simple way to ensure the Markdown content in the `docs/` folder is
-always up-to-date as Starlark files are updated.
-
-## FAQs
-
-### Why can't we add our rule to the main Bazel GitHub repository?
-
-We want to decouple rules from Bazel releases as much as possible. It's clearer
-who owns individual rules, reducing the load on Bazel developers.
For our users,
-decoupling makes it easier to modify, upgrade, downgrade, and replace rules.
-Contributing to rules can be lighter weight than contributing to Bazel
-(depending on the rules), and can include full submit access to the
-corresponding GitHub repository. Getting submit access to Bazel itself is a
-much more involved process.
-
-The downside is a more complicated one-time installation process for our users:
-they have to add a dependency on your ruleset in their `MODULE.bazel` file.
-
-We used to have all of the rules in the Bazel repository (under
-`//tools/build_rules` or `//tools/build_defs`). We still have a couple of rules
-there, but we are working on moving the remaining rules out.
diff --git a/8.3.1/rules/errors/read-only-variable.mdx b/8.3.1/rules/errors/read-only-variable.mdx
deleted file mode 100644
index 2bfde65..0000000
--- a/8.3.1/rules/errors/read-only-variable.mdx
+++ /dev/null
@@ -1,30 +0,0 @@
----
-title: 'Error: Variable x is read only'
----
-
-
-
-A global variable cannot be reassigned. It will always point to the same
-object. However, its content might change if the value is mutable (for example,
-the content of a list). Local variables don't have this restriction.
-
-```python
-a = [1, 2]
-
-a[1] = 3
-
-b = 3
-
-b = 4 # forbidden
-```
-
-`ERROR: /path/ext.bzl:7:1: Variable b is read only`
-
-You will get a similar error if you try to redefine a function (function
-overloading is not supported), for example:
-
-```python
-def foo(x): return x + 1
-
-def foo(x, y): return x + y # forbidden
-```
diff --git a/8.3.1/rules/faq.mdx b/8.3.1/rules/faq.mdx
deleted file mode 100644
index 5321f0b..0000000
--- a/8.3.1/rules/faq.mdx
+++ /dev/null
@@ -1,80 +0,0 @@
----
-title: 'Frequently Asked Questions'
----
-
-
-
-These are some common issues and questions that come up when writing
-extensions.
-
-## Why is my file not produced / my action never executed?
-
-Bazel only executes the actions needed to produce the *requested* output files.
-
-* If the file you want has a label, you can request it directly:
-  `bazel build //pkg:myfile.txt`
-
-* If the file is in an output group of the target, you may need to specify that
-  output group on the command line:
-  `bazel build //pkg:mytarget --output_groups=foo`
-
-* If you want the file to be built automatically whenever your target is
-  mentioned on the command line, add it to your rule's default outputs by
-  returning a [`DefaultInfo`](lib/globals#DefaultInfo) provider.
-
-See the [Rules page](/extending/rules#requesting-output-files) for more
-information.
-
-## Why is my implementation function not executed?
-
-Bazel analyzes only the targets that are requested for the build. You should
-either name the target on the command line, or name something that depends on
-the target.
-
-## A file is missing when my action or binary is executed
-
-Make sure that 1) the file has been registered as an input to the action or
-binary, and 2) the script or tool being executed is accessing the file using
-the correct path.
-
-For actions, you declare inputs by passing them to the `ctx.actions.*` function
-that creates the action. The proper path for the file can be obtained using
-[`File.path`](lib/File#path).
-
-For binaries (the executable outputs run by a `bazel run` or `bazel test`
-command), you declare inputs by including them in the
-[runfiles](/extending/rules#runfiles). Instead of using the `path` field, use
-[`File.short_path`](lib/File#short_path), which is the file's path relative to
-the runfiles directory in which the binary executes.
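-
-As an illustrative sketch of both cases, the rule implementation below declares
-a file as an action input and also places it in the runfiles of the rule's
-executable output. The single-file `data` attribute (declared with
-`allow_single_file = True`) and the private `_tool` attribute (declared with
-`executable = True, cfg = "exec"`) are hypothetical stand-ins for whatever
-attributes your rule defines:
-
-```python
-def _impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name + ".out")
-
-    # For actions: list the file in `inputs` and pass `File.path`.
-    ctx.actions.run(
-        executable = ctx.executable._tool,
-        inputs = [ctx.file.data],  # without this, the action cannot see the file
-        outputs = [out],
-        arguments = [ctx.file.data.path, out.path],
-    )
-
-    # For binaries run with `bazel run` or `bazel test`: put the file in the
-    # runfiles, and have the binary refer to it by `File.short_path`.
-    return [DefaultInfo(
-        files = depset([out]),
-        runfiles = ctx.runfiles(files = [ctx.file.data]),
-    )]
-```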
- -## How can I control which files are built by `bazel build //pkg:mytarget`? - -Use the [`DefaultInfo`](lib/globals#DefaultInfo) provider to -[set the default outputs](/extending/rules#requesting-output-files). - -## How can I run a program or do file I/O as part of my build? - -A tool can be declared as a target, just like any other part of your build, and -run during the execution phase to help build other targets. To create an action -that runs a tool, use [`ctx.actions.run`](lib/actions#run) and pass in the -tool as the `executable` parameter. - -During the loading and analysis phases, a tool *cannot* run, nor can you perform -file I/O. This means that tools and file contents (except the contents of BUILD -and .bzl files) cannot affect how the target and action graphs get created. - -## What if I need to access the same structured data both before and during the execution phase? - -You can format the structured data as a .bzl file. You can `load()` the file to -access it during the loading and analysis phases. You can pass it as an input or -runfile to actions and executables that need it during the execution phase. - -## How should I document Starlark code? - -For rules and rule attributes, you can pass a docstring literal (possibly -triple-quoted) to the `doc` parameter of `rule` or `attr.*()`. For helper -functions and macros, use a triple-quoted docstring literal following the format -given [here](https://github.com/bazelbuild/buildtools/blob/master/WARNINGS.md#function-docstring). -Rule implementation functions generally do not need their own docstring. - -Using string literals in the expected places makes it easier for automated -tooling to extract documentation. Feel free to use standard non-string comments -wherever it may help the reader of your code. diff --git a/8.3.1/rules/index.mdx b/8.3.1/rules/index.mdx deleted file mode 100644 index 2a6c3eb..0000000 --- a/8.3.1/rules/index.mdx +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: 'Rules' ---- - - - -The Bazel ecosystem has a growing and evolving set of rules to support popular -languages and packages. Much of Bazel's strength comes from the ability to -[define new rules](/extending/concepts) that can be used by others. - -This page describes the recommended, native, and non-native Bazel rules. - -## Recommended rules - -Here is a selection of recommended rules: - -* [Android](/docs/bazel-and-android) -* [C / C++](/docs/bazel-and-cpp) -* [Docker/OCI](https://github.com/bazel-contrib/rules_oci) -* [Go](https://github.com/bazelbuild/rules_go) -* [Haskell](https://github.com/tweag/rules_haskell) -* [Java](/docs/bazel-and-java) -* [JavaScript / NodeJS](https://github.com/bazelbuild/rules_nodejs) -* [Maven dependency management](https://github.com/bazelbuild/rules_jvm_external) -* [Objective-C](/docs/bazel-and-apple) -* [Package building](https://github.com/bazelbuild/rules_pkg) -* [Protocol Buffers](https://github.com/bazelbuild/rules_proto#protobuf-rules-for-bazel) -* [Python](https://github.com/bazelbuild/rules_python) -* [Rust](https://github.com/bazelbuild/rules_rust) -* [Scala](https://github.com/bazelbuild/rules_scala) -* [Shell](/reference/be/shell) -* [Webtesting](https://github.com/bazelbuild/rules_webtesting) (Webdriver) - -The repository [Skylib](https://github.com/bazelbuild/bazel-skylib) contains -additional functions that can be useful when writing new rules and new -macros. - -The rules above were reviewed and follow our -[requirements for recommended rules](/community/recommended-rules). 
-Contact the respective rule set's maintainers regarding issues and feature
-requests.
-
-To find more Bazel rules, use a search engine, take a look at
-[awesomebazel.com](https://awesomebazel.com/), or search on
-[GitHub](https://github.com/search?o=desc&q=bazel+rules&s=stars&type=Repositories).
-
-## Native rules that do not apply to a specific programming language
-
-Native rules are shipped with the Bazel binary; they are always available in
-`BUILD` files without a `load` statement.
-
-* Extra actions
-  - [`extra_action`](/reference/be/extra-actions#extra_action)
-  - [`action_listener`](/reference/be/extra-actions#action_listener)
-* General
-  - [`filegroup`](/reference/be/general#filegroup)
-  - [`genquery`](/reference/be/general#genquery)
-  - [`test_suite`](/reference/be/general#test_suite)
-  - [`alias`](/reference/be/general#alias)
-  - [`config_setting`](/reference/be/general#config_setting)
-  - [`genrule`](/reference/be/general#genrule)
-* Platform
-  - [`constraint_setting`](/reference/be/platforms-and-toolchains#constraint_setting)
-  - [`constraint_value`](/reference/be/platforms-and-toolchains#constraint_value)
-  - [`platform`](/reference/be/platforms-and-toolchains#platform)
-  - [`toolchain`](/reference/be/platforms-and-toolchains#toolchain)
-  - [`toolchain_type`](/reference/be/platforms-and-toolchains#toolchain_type)
-* Workspace
-  - [`bind`](/reference/be/workspace#bind)
-  - [`local_repository`](/reference/be/workspace#local_repository)
-  - [`new_local_repository`](/reference/be/workspace#new_local_repository)
-  - [`xcode_config`](/reference/be/objective-c#xcode_config)
-  - [`xcode_version`](/reference/be/objective-c#xcode_version)
-
-## Embedded non-native rules
-
-Bazel also embeds additional rules written in [Starlark](/rules/language).
-Those can be loaded from the `@bazel_tools` built-in external repository.
-
-* Repository rules
-  - [`git_repository`](/rules/lib/repo/git#git_repository)
-  - [`http_archive`](/rules/lib/repo/http#http_archive)
-  - [`http_file`](/rules/lib/repo/http#http_file)
-  - [`http_jar`](/rules/lib/repo/http#http_jar)
-  - [Utility functions on patching](/rules/lib/repo/utils)
diff --git a/8.3.1/rules/legacy-macro-tutorial.mdx b/8.3.1/rules/legacy-macro-tutorial.mdx
deleted file mode 100644
index 44cdcfb..0000000
--- a/8.3.1/rules/legacy-macro-tutorial.mdx
+++ /dev/null
@@ -1,98 +0,0 @@
----
-title: 'Creating a Legacy Macro'
----
-
-
-
-IMPORTANT: This tutorial is for [*legacy macros*](/extending/legacy-macros). If
-you only need to support Bazel 8 or newer, we recommend using [symbolic
-macros](/extending/macros) instead; take a look at [Creating a Symbolic
-Macro](../macro-tutorial).
-
-Imagine that you need to run a tool as part of your build. For example, you
-may want to generate or preprocess a source file, or compress a binary. In this
-tutorial, you are going to create a legacy macro that resizes an image.
-
-Macros are suitable for simple tasks. If you want to do anything more
-complicated, for example add support for a new programming language, consider
-creating a [rule](/extending/rules). Rules give you more control and
-flexibility.
-
-The easiest way to create a macro that resizes an image is to use a `genrule`:
-
-```starlark
-genrule(
-    name = "logo_miniature",
-    srcs = ["logo.png"],
-    outs = ["small_logo.png"],
-    cmd = "convert $< -resize 100x100 $@",
-)
-
-cc_binary(
-    name = "my_app",
-    srcs = ["my_app.cc"],
-    data = [":logo_miniature"],
-)
-```
-
-If you need to resize more images, you may want to reuse the code.
To do that,
-define a function in a separate `.bzl` file, and call the file `miniature.bzl`:
-
-```starlark
-def miniature(name, src, size = "100x100", **kwargs):
-    """Create a miniature of the src image.
-
-    The generated file is prefixed with 'small_'.
-    """
-    native.genrule(
-        name = name,
-        srcs = [src],
-        # Note that the line below will fail if `src` is not a filename string
-        outs = ["small_" + src],
-        cmd = "convert $< -resize " + size + " $@",
-        **kwargs
-    )
-```
-
-A few remarks:
-
-  * By convention, legacy macros have a `name` argument, just like rules.
-
-  * To document the behavior of a legacy macro, use a
-    [docstring](https://www.python.org/dev/peps/pep-0257/) as in Python.
-
-  * To call a `genrule`, or any other native rule, prefix it with `native.`.
-
-  * Use `**kwargs` to forward the extra arguments to the underlying `genrule`
-    (it works just like in
-    [Python](https://docs.python.org/3/tutorial/controlflow.html#keyword-arguments)).
-    This is useful so that a user can set standard attributes like
-    `visibility` or `tags`.
-
-Now, use the macro from the `BUILD` file:
-
-```starlark
-load("//path/to:miniature.bzl", "miniature")
-
-miniature(
-    name = "logo_miniature",
-    src = "image.png",
-)
-
-cc_binary(
-    name = "my_app",
-    srcs = ["my_app.cc"],
-    data = [":logo_miniature"],
-)
-```
-
-And finally, a **warning note**: the macro assumes that `src` is a filename
-string (otherwise, `outs = ["small_" + src]` will fail). So `src = "image.png"`
-works; but what happens if the `BUILD` file instead used `src =
-"//other/package:image.png"`, or even `src = select(...)`?
-
-You should make sure to declare such assumptions in your macro's documentation.
-Unfortunately, legacy macros, especially large ones, tend to be fragile because
-it can be hard to notice and document all such assumptions in your code – and,
-of course, some users of the macro won't read the documentation. We recommend,
-if possible, instead using [symbolic macros](/extending/macros), which have
-built-in checks on attribute types.
diff --git a/8.3.1/rules/macro-tutorial.mdx b/8.3.1/rules/macro-tutorial.mdx
deleted file mode 100644
index 93825aa..0000000
--- a/8.3.1/rules/macro-tutorial.mdx
+++ /dev/null
@@ -1,116 +0,0 @@
----
-title: 'Creating a Symbolic Macro'
----
-
-
-
-IMPORTANT: This tutorial is for [*symbolic macros*](/extending/macros) – the new
-macro system introduced in Bazel 8. If you need to support older Bazel versions,
-you will want to write a [legacy macro](/extending/legacy-macros) instead; take
-a look at [Creating a Legacy Macro](../legacy-macro-tutorial).
-
-Imagine that you need to run a tool as part of your build. For example, you
-may want to generate or preprocess a source file, or compress a binary. In this
-tutorial, you are going to create a symbolic macro that resizes an image.
-
-Macros are suitable for simple tasks. If you want to do anything more
-complicated, for example add support for a new programming language, consider
-creating a [rule](/extending/rules). Rules give you more control and
-flexibility.
-
-The easiest way to create a macro that resizes an image is to use a `genrule`:
-
-```starlark
-genrule(
-    name = "logo_miniature",
-    srcs = ["logo.png"],
-    outs = ["small_logo.png"],
-    cmd = "convert $< -resize 100x100 $@",
-)
-
-cc_binary(
-    name = "my_app",
-    srcs = ["my_app.cc"],
-    data = [":logo_miniature"],
-)
-```
-
-If you need to resize more images, you may want to reuse the code.
To do that,
-define an *implementation function* and a *macro declaration* in a separate
-`.bzl` file, and call the file `miniature.bzl`:
-
-```starlark
-# Implementation function
-def _miniature_impl(name, visibility, src, size, **kwargs):
-    native.genrule(
-        name = name,
-        visibility = visibility,
-        srcs = [src],
-        outs = [name + "_small_" + src.name],
-        cmd = "convert $< -resize " + size + " $@",
-        **kwargs,
-    )
-
-# Macro declaration
-miniature = macro(
-    doc = """Create a miniature of the src image.
-
-    The generated file name will be prefixed with `name + "_small_"`.
-    """,
-    implementation = _miniature_impl,
-    # Inherit most of genrule's attributes (such as tags and testonly)
-    inherit_attrs = native.genrule,
-    attrs = {
-        "src": attr.label(
-            doc = "Image file",
-            allow_single_file = True,
-            # Non-configurable because our genrule's output filename is
-            # suffixed with src's name. (We want to suffix the output file with
-            # src's name because some tools that operate on image files expect
-            # the files to have the right file extension.)
-            configurable = False,
-        ),
-        "size": attr.string(
-            doc = "Output size in WxH format",
-            default = "100x100",
-        ),
-        # Do not allow callers of miniature() to set srcs, cmd, or outs -
-        # _miniature_impl overrides their values when calling native.genrule()
-        "srcs": None,
-        "cmd": None,
-        "outs": None,
-    },
-)
-```
-
-A few remarks:
-
-  * Symbolic macro implementation functions must have `name` and `visibility`
-    parameters. They should be used for the macro's main target.
-
-  * To document the behavior of a symbolic macro, use `doc` parameters for
-    `macro()` and its attributes.
-
-  * To call a `genrule`, or any other native rule, use `native.`.
-
-  * Use `**kwargs` to forward the extra inherited arguments to the underlying
-    `genrule` (it works just like in
-    [Python](https://docs.python.org/3/tutorial/controlflow.html#keyword-arguments)).
-    This is useful so that a user can set standard attributes like `tags` or
-    `testonly`.
-
-Now, use the macro from the `BUILD` file:
-
-```starlark
-load("//path/to:miniature.bzl", "miniature")
-
-miniature(
-    name = "logo_miniature",
-    src = "image.png",
-)
-
-cc_binary(
-    name = "my_app",
-    srcs = ["my_app.cc"],
-    data = [":logo_miniature"],
-)
-```
diff --git a/8.3.1/rules/performance.mdx b/8.3.1/rules/performance.mdx
deleted file mode 100644
index 5870c0d..0000000
--- a/8.3.1/rules/performance.mdx
+++ /dev/null
@@ -1,302 +0,0 @@
----
-title: 'Optimizing Performance'
----
-
-
-
-When writing rules, the most common performance pitfall is to traverse or copy
-data that is accumulated from dependencies. When aggregated over the whole
-build, these operations can easily take O(N^2) time or space. To avoid this, it
-is crucial to understand how to use depsets effectively.
-
-This can be hard to get right, so Bazel also provides a memory profiler that
-assists you in finding spots where you might have made a mistake. Be warned:
-The cost of writing an inefficient rule may not be evident until it is in
-widespread use.
-
-## Use depsets
-
-Whenever you are rolling up information from rule dependencies you should use
-[depsets](lib/depset). Only use plain lists or dicts to publish information
-local to the current rule.
-
-A depset represents information as a nested graph, which enables sharing.
-
-Consider the following graph:
-
-```
-C -> B -> A
-D ---^
-```
-
-Each node publishes a single string.
With depsets the data looks like this: - -``` -a = depset(direct=['a']) -b = depset(direct=['b'], transitive=[a]) -c = depset(direct=['c'], transitive=[b]) -d = depset(direct=['d'], transitive=[b]) -``` - -Note that each item is only mentioned once. With lists you would get this: - -``` -a = ['a'] -b = ['b', 'a'] -c = ['c', 'b', 'a'] -d = ['d', 'b', 'a'] -``` - -Note that in this case `'a'` is mentioned four times! With larger graphs this -problem will only get worse. - -Here is an example of a rule implementation that uses depsets correctly to -publish transitive information. Note that it is OK to publish rule-local -information using lists if you want since this is not O(N^2). - -``` -MyProvider = provider() - -def _impl(ctx): - my_things = ctx.attr.things - all_things = depset( - direct=my_things, - transitive=[dep[MyProvider].all_things for dep in ctx.attr.deps] - ) - ... - return [MyProvider( - my_things=my_things, # OK, a flat list of rule-local things only - all_things=all_things, # OK, a depset containing dependencies - )] -``` - -See the [depset overview](/extending/depsets) page for more information. - -### Avoid calling `depset.to_list()` - -You can coerce a depset to a flat list using -[`to_list()`](lib/depset#to_list), but doing so usually results in O(N^2) -cost. If at all possible, avoid any flattening of depsets except for debugging -purposes. - -A common misconception is that you can freely flatten depsets if you only do it -at top-level targets, such as an `_binary` rule, since then the cost is not -accumulated over each level of the build graph. But this is *still* O(N^2) when -you build a set of targets with overlapping dependencies. This happens when -building your tests `//foo/tests/...`, or when importing an IDE project. - -### Reduce the number of calls to `depset` - -Calling `depset` inside a loop is often a mistake. It can lead to depsets with -very deep nesting, which perform poorly. For example: - -```python -x = depset() -for i in inputs: - # Do not do that. - x = depset(transitive = [x, i.deps]) -``` - -This code can be replaced easily. First, collect the transitive depsets and -merge them all at once: - -```python -transitive = [] - -for i in inputs: - transitive.append(i.deps) - -x = depset(transitive = transitive) -``` - -This can sometimes be reduced using a list comprehension: - -```python -x = depset(transitive = [i.deps for i in inputs]) -``` - -## Use ctx.actions.args() for command lines - -When building command lines you should use [ctx.actions.args()](lib/Args). -This defers expansion of any depsets to the execution phase. - -Apart from being strictly faster, this will reduce the memory consumption of -your rules -- sometimes by 90% or more. - -Here are some tricks: - -* Pass depsets and lists directly as arguments, instead of flattening them -yourself. They will get expanded by `ctx.actions.args()` for you. -If you need any transformations on the depset contents, look at -[ctx.actions.args#add](lib/Args#add) to see if anything fits the bill. - -* Are you passing `File#path` as arguments? No need. Any -[File](lib/File) is automatically turned into its -[path](lib/File#path), deferred to expansion time. - -* Avoid constructing strings by concatenating them together. -The best string argument is a constant as its memory will be shared between -all instances of your rule. 
-
-* If the args are too long for the command line, a `ctx.actions.args()` object
-  can be conditionally or unconditionally written to a param file using
-  [`ctx.actions.args#use_param_file`](lib/Args#use_param_file). This is
-  done behind the scenes when the action is executed. If you need to explicitly
-  control the params file, you can write it manually using
-  [`ctx.actions.write`](lib/actions#write).
-
-Example:
-
-```
-def _impl(ctx):
-  ...
-  args = ctx.actions.args()
-  file = ctx.actions.declare_file(...)
-  files = depset(...)
-
-  # Bad, constructs a full string "--foo=<file path>" for each rule instance
-  args.add("--foo=" + file.path)
-
-  # Good, shares "--foo" among all rule instances, and defers file.path to later
-  # It will however pass ["--foo", <file path>] to the action command line,
-  # instead of ["--foo=<file path>"]
-  args.add("--foo", file)
-
-  # Use format if you prefer ["--foo=<file path>"] to ["--foo", <file path>]
-  args.add(file, format = "--foo=%s")
-
-  # Bad, makes a giant string of a whole depset
-  args.add(" ".join(["-I%s" % file.short_path for file in files]))
-
-  # Good, only stores a reference to the depset
-  args.add_all(files, format_each = "-I%s", map_each = _to_short_path)
-
-# Function passed to map_each above
-def _to_short_path(f):
-  return f.short_path
-```
-
-## Transitive action inputs should be depsets
-
-When building an action using [ctx.actions.run](lib/actions#run), do not
-forget that the `inputs` field accepts a depset. Use this whenever inputs are
-collected from dependencies transitively.
-
-```
-inputs = depset(...)
-ctx.actions.run(
-  inputs = inputs,  # Do *not* turn inputs into a list
-  ...
-)
-```
-
-## Hanging
-
-If Bazel appears to be hung, you can hit Ctrl-\ or send
-Bazel a `SIGQUIT` signal (`kill -3 $(bazel info server_pid)`) to get a thread
-dump in the file `$(bazel info output_base)/server/jvm.out`.
-
-Since you may not be able to run `bazel info` if Bazel is hung, the
-`output_base` directory is usually the parent of the `bazel-<workspace>`
-symlink in your workspace directory.
-
-## Performance profiling
-
-The [JSON trace profile](/advanced/performance/json-trace-profile) can be very
-useful to quickly understand what Bazel spent time on during the invocation.
-
-The [`--experimental_command_profile`](https://bazel.build/reference/command-line-reference#flag--experimental_command_profile)
-flag may be used to capture Java Flight Recorder profiles of various kinds
-(cpu time, wall time, memory allocations, and lock contention).
-
-The [`--starlark_cpu_profile`](https://bazel.build/reference/command-line-reference#flag--starlark_cpu_profile)
-flag may be used to write a pprof profile of CPU usage by all Starlark threads.
-
-## Memory profiling
-
-Bazel comes with a built-in memory profiler that can help you check your rule’s
-memory use. If there is a problem, you can dump the heap to find the
-exact line of code that is causing the problem.
-
-### Enabling memory tracking
-
-You must pass these two startup flags to *every* Bazel invocation:
-
-```
-STARTUP_FLAGS=\
---host_jvm_args=-javaagent:<path to java-allocation-instrumenter.jar> \
---host_jvm_args=-DRULE_MEMORY_TRACKER=1
-```
-
-Note: You can download the allocation instrumenter jar file from [Maven Central
-Repository][allocation-instrumenter-link].
-
-[allocation-instrumenter-link]: https://repo1.maven.org/maven2/com/google/code/java-allocation-instrumenter/java-allocation-instrumenter/3.3.4
-
-These start the server in memory tracking mode. If you forget these for even
-one Bazel invocation, the server will restart and you will have to start over.
- -### Using the Memory Tracker - -As an example, look at the target `foo` and see what it does. To only -run the analysis and not run the build execution phase, add the -`--nobuild` flag. - -``` -$ bazel $(STARTUP_FLAGS) build --nobuild //foo:foo -``` - -Next, see how much memory the whole Bazel instance consumes: - -``` -$ bazel $(STARTUP_FLAGS) info used-heap-size-after-gc -> 2594MB -``` - -Break it down by rule class by using `bazel dump --rules`: - -``` -$ bazel $(STARTUP_FLAGS) dump --rules -> - -RULE COUNT ACTIONS BYTES EACH -genrule 33,762 33,801 291,538,824 8,635 -config_setting 25,374 0 24,897,336 981 -filegroup 25,369 25,369 97,496,272 3,843 -cc_library 5,372 73,235 182,214,456 33,919 -proto_library 4,140 110,409 186,776,864 45,115 -android_library 2,621 36,921 218,504,848 83,366 -java_library 2,371 12,459 38,841,000 16,381 -_gen_source 719 2,157 9,195,312 12,789 -_check_proto_library_deps 719 668 1,835,288 2,552 -... (more output) -``` - -Look at where the memory is going by producing a `pprof` file -using `bazel dump --skylark_memory`: - -``` -$ bazel $(STARTUP_FLAGS) dump --skylark_memory=$HOME/prof.gz -> Dumping Starlark heap to: /usr/local/google/home/$USER/prof.gz -``` - -Use the `pprof` tool to investigate the heap. A good starting point is -getting a flame graph by using `pprof -flame $HOME/prof.gz`. - -Get `pprof` from [https://github.com/google/pprof](https://github.com/google/pprof). - -Get a text dump of the hottest call sites annotated with lines: - -``` -$ pprof -text -lines $HOME/prof.gz -> - flat flat% sum% cum cum% - 146.11MB 19.64% 19.64% 146.11MB 19.64% android_library :-1 - 113.02MB 15.19% 34.83% 113.02MB 15.19% genrule :-1 - 74.11MB 9.96% 44.80% 74.11MB 9.96% glob :-1 - 55.98MB 7.53% 52.32% 55.98MB 7.53% filegroup :-1 - 53.44MB 7.18% 59.51% 53.44MB 7.18% sh_test :-1 - 26.55MB 3.57% 63.07% 26.55MB 3.57% _generate_foo_files /foo/tc/tc.bzl:491 - 26.01MB 3.50% 66.57% 26.01MB 3.50% _build_foo_impl /foo/build_test.bzl:78 - 22.01MB 2.96% 69.53% 22.01MB 2.96% _build_foo_impl /foo/build_test.bzl:73 - ... (more output) -``` diff --git a/8.3.1/rules/rules-tutorial.mdx b/8.3.1/rules/rules-tutorial.mdx deleted file mode 100644 index 4c6698e..0000000 --- a/8.3.1/rules/rules-tutorial.mdx +++ /dev/null @@ -1,367 +0,0 @@ ---- -title: 'Rules Tutorial' ---- - - - - -[Starlark](https://github.com/bazelbuild/starlark) is a Python-like -configuration language originally developed for use in Bazel and since adopted -by other tools. Bazel's `BUILD` and `.bzl` files are written in a dialect of -Starlark properly known as the "Build Language", though it is often simply -referred to as "Starlark", especially when emphasizing that a feature is -expressed in the Build Language as opposed to being a built-in or "native" part -of Bazel. Bazel augments the core language with numerous build-related functions -such as `glob`, `genrule`, `java_binary`, and so on. - -See the -[Bazel](/start/) and [Starlark](/extending/concepts) documentation for -more details, and the -[Rules SIG template](https://github.com/bazel-contrib/rules-template) as a -starting point for new rulesets. - -## The empty rule - -To create your first rule, create the file `foo.bzl`: - -```python -def _foo_binary_impl(ctx): - pass - -foo_binary = rule( - implementation = _foo_binary_impl, -) -``` - -When you call the [`rule`](lib/globals#rule) function, you -must define a callback function. The logic will go there, but you -can leave the function empty for now. 
The [`ctx`](lib/ctx) argument -provides information about the target. - -You can load the rule and use it from a `BUILD` file. - -Create a `BUILD` file in the same directory: - -```python -load(":foo.bzl", "foo_binary") - -foo_binary(name = "bin") -``` - -Now, the target can be built: - -``` -$ bazel build bin -INFO: Analyzed target //:bin (2 packages loaded, 17 targets configured). -INFO: Found 1 target... -Target //:bin up-to-date (nothing to build) -``` - -Even though the rule does nothing, it already behaves like other rules: it has a -mandatory name, it supports common attributes like `visibility`, `testonly`, and -`tags`. - -## Evaluation model - -Before going further, it's important to understand how the code is evaluated. - -Update `foo.bzl` with some print statements: - -```python -def _foo_binary_impl(ctx): - print("analyzing", ctx.label) - -foo_binary = rule( - implementation = _foo_binary_impl, -) - -print("bzl file evaluation") -``` - -and BUILD: - -```python -load(":foo.bzl", "foo_binary") - -print("BUILD file") -foo_binary(name = "bin1") -foo_binary(name = "bin2") -``` - -[`ctx.label`](lib/ctx#label) -corresponds to the label of the target being analyzed. The `ctx` object has -many useful fields and methods; you can find an exhaustive list in the -[API reference](lib/ctx). - -Query the code: - -``` -$ bazel query :all -DEBUG: /usr/home/bazel-codelab/foo.bzl:8:1: bzl file evaluation -DEBUG: /usr/home/bazel-codelab/BUILD:2:1: BUILD file -//:bin2 -//:bin1 -``` - -Make a few observations: - -* "bzl file evaluation" is printed first. Before evaluating the `BUILD` file, - Bazel evaluates all the files it loads. If multiple `BUILD` files are loading - foo.bzl, you would see only one occurrence of "bzl file evaluation" because - Bazel caches the result of the evaluation. -* The callback function `_foo_binary_impl` is not called. Bazel query loads - `BUILD` files, but doesn't analyze targets. - -To analyze the targets, use the [`cquery`](/query/cquery) ("configured -query") or the `build` command: - -``` -$ bazel build :all -DEBUG: /usr/home/bazel-codelab/foo.bzl:2:5: analyzing //:bin1 -DEBUG: /usr/home/bazel-codelab/foo.bzl:2:5: analyzing //:bin2 -INFO: Analyzed 2 targets (0 packages loaded, 0 targets configured). -INFO: Found 2 targets... -``` - -As you can see, `_foo_binary_impl` is now called twice - once for each target. - -Notice that neither "bzl file evaluation" nor "BUILD file" are printed again, -because the evaluation of `foo.bzl` is cached after the call to `bazel query`. -Bazel only emits `print` statements when they are actually executed. - -## Creating a file - -To make your rule more useful, update it to generate a file. First, declare the -file and give it a name. In this example, create a file with the same name as -the target: - -```python -ctx.actions.declare_file(ctx.label.name) -``` - -If you run `bazel build :all` now, you will get an error: - -``` -The following files have no generating action: -bin2 -``` - -Whenever you declare a file, you have to tell Bazel how to generate it by -creating an action. Use [`ctx.actions.write`](lib/actions#write), -to create a file with the given content. 
-
-```python
-def _foo_binary_impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name)
-    ctx.actions.write(
-        output = out,
-        content = "Hello\n",
-    )
-```
-
-The code is valid, but it won't do anything:
-
-```
-$ bazel build bin1
-Target //:bin1 up-to-date (nothing to build)
-```
-
-The `ctx.actions.write` function registered an action, which taught Bazel
-how to generate the file. But Bazel won't create the file until it is
-actually requested. So the last thing to do is tell Bazel that the file
-is an output of the rule, and not a temporary file used within the rule
-implementation.
-
-```python
-def _foo_binary_impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name)
-    ctx.actions.write(
-        output = out,
-        content = "Hello!\n",
-    )
-    return [DefaultInfo(files = depset([out]))]
-```
-
-You will look at the `DefaultInfo` and `depset` functions later. For now,
-assume that the last line is the way to choose the outputs of a rule.
-
-Now, run Bazel:
-
-```
-$ bazel build bin1
-INFO: Found 1 target...
-Target //:bin1 up-to-date:
-  bazel-bin/bin1
-
-$ cat bazel-bin/bin1
-Hello!
-```
-
-You have successfully generated a file!
-
-## Attributes
-
-To make the rule more useful, add new attributes using
-[the `attr` module](lib/attr) and update the rule definition.
-
-Add a string attribute called `username`:
-
-```python
-foo_binary = rule(
-    implementation = _foo_binary_impl,
-    attrs = {
-        "username": attr.string(),
-    },
-)
-```
-
-Next, set it in the `BUILD` file:
-
-```python
-foo_binary(
-    name = "bin",
-    username = "Alice",
-)
-```
-
-To access the value in the callback function, use `ctx.attr.username`. For
-example:
-
-```python
-def _foo_binary_impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name)
-    ctx.actions.write(
-        output = out,
-        content = "Hello {}!\n".format(ctx.attr.username),
-    )
-    return [DefaultInfo(files = depset([out]))]
-```
-
-Note that you can make the attribute mandatory or set a default value. Look at
-the documentation of [`attr.string`](lib/attr#string).
-You may also use other types of attributes, such as [boolean](lib/attr#bool)
-or [list of integers](lib/attr#int_list).
-
-## Dependencies
-
-Dependency attributes, such as [`attr.label`](lib/attr#label)
-and [`attr.label_list`](lib/attr#label_list),
-declare a dependency from the target that owns the attribute to the target whose
-label appears in the attribute's value. This kind of attribute forms the basis
-of the target graph.
-
-In the `BUILD` file, the target label appears as a string object, such as
-`//pkg:name`. In the implementation function, the target will be accessible as a
-[`Target`](lib/Target) object. For example, view the files returned
-by the target using [`Target.files`](lib/Target#modules.Target.files).
-
-### Multiple files
-
-By default, only targets created by rules may appear as dependencies (such as a
-`foo_library()` target). If you want the attribute to accept targets that are
-input files (such as source files in the repository), you can do it with
-`allow_files` and specify the list of accepted file extensions (or `True` to
-allow any file extension):
-
-```python
-"srcs": attr.label_list(allow_files = [".java"]),
-```
-
-The list of files can be accessed with `ctx.files.<attribute name>`.
For example,
-the list of files in the `srcs` attribute can be accessed through
-
-```python
-ctx.files.srcs
-```
-
-### Single file
-
-If you need only one file, use `allow_single_file`:
-
-```python
-"src": attr.label(allow_single_file = [".java"])
-```
-
-This file is then accessible under `ctx.file.<attribute name>`:
-
-```python
-ctx.file.src
-```
-
-## Create a file with a template
-
-You can create a rule that generates a `.cc` file based on a template. You
-could instead use `ctx.actions.write` to output a string constructed in the
-rule implementation function, but this has two problems. First, as the template
-gets bigger, it becomes more memory efficient to put it in a separate file and
-avoid constructing large strings during the analysis phase. Second, using a
-separate file is more convenient for the user. Instead, use
-[`ctx.actions.expand_template`](lib/actions#expand_template),
-which performs substitutions on a template file.
-
-Create a `template` attribute to declare a dependency on the template
-file:
-
-```python
-def _hello_world_impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name + ".cc")
-    ctx.actions.expand_template(
-        output = out,
-        template = ctx.file.template,
-        substitutions = {"{NAME}": ctx.attr.username},
-    )
-    return [DefaultInfo(files = depset([out]))]
-
-hello_world = rule(
-    implementation = _hello_world_impl,
-    attrs = {
-        "username": attr.string(default = "unknown person"),
-        "template": attr.label(
-            allow_single_file = [".cc.tpl"],
-            mandatory = True,
-        ),
-    },
-)
-```
-
-Users can use the rule like this:
-
-```python
-hello_world(
-    name = "hello",
-    username = "Alice",
-    template = "file.cc.tpl",
-)
-
-cc_binary(
-    name = "hello_bin",
-    srcs = [":hello"],
-)
-```
-
-If you don't want to expose the template to the end-user and always use the
-same one, you can set a default value and make the attribute private:
-
-```python
-    "_template": attr.label(
-        allow_single_file = True,
-        default = "file.cc.tpl",
-    ),
-```
-
-Attributes that start with an underscore are private and cannot be set in a
-`BUILD` file. The template is now an _implicit dependency_: Every `hello_world`
-target has a dependency on this file. Don't forget to make this file visible
-to other packages by updating the `BUILD` file and using
-[`exports_files`](/reference/be/functions#exports_files):
-
-```python
-exports_files(["file.cc.tpl"])
-```
-
-## Going further
-
-* Take a look at the [reference documentation for rules](/extending/rules#contents).
-* Get familiar with [depsets](/extending/depsets).
-* Check out the [examples repository](https://github.com/bazelbuild/examples/tree/master/rules)
-  which includes additional examples of rules.
diff --git a/8.3.1/rules/testing.mdx b/8.3.1/rules/testing.mdx
deleted file mode 100644
index 2996e08..0000000
--- a/8.3.1/rules/testing.mdx
+++ /dev/null
@@ -1,474 +0,0 @@
----
-title: 'Testing'
----
-
-
-
-There are several different approaches to testing Starlark code in Bazel. This
-page gathers the current best practices and frameworks by use case.
-
-## Testing rules
-
-[Skylib](https://github.com/bazelbuild/bazel-skylib) has a test framework called
-[`unittest.bzl`](https://github.com/bazelbuild/bazel-skylib/blob/main/lib/unittest.bzl)
-for checking the analysis-time behavior of rules, such as their actions and
-providers. Such tests are called "analysis tests" and are currently the best
-option for testing the inner workings of rules.
-
-Some caveats:
-
-* Test assertions occur within the build, not a separate test runner process.
- Targets that are created by the test must be named such that they do not - collide with targets from other tests or from the build. An error that - occurs during the test is seen by Bazel as a build breakage rather than a - test failure. - -* It requires a fair amount of boilerplate to set up the rules under test and - the rules containing test assertions. This boilerplate may seem daunting at - first. It helps to [keep in mind](/extending/concepts#evaluation-model) that macros - are evaluated and targets generated during the loading phase, while rule - implementation functions don't run until later, during the analysis phase. - -* Analysis tests are intended to be fairly small and lightweight. Certain - features of the analysis testing framework are restricted to verifying - targets with a maximum number of transitive dependencies (currently 500). - This is due to performance implications of using these features with larger - tests. - -The basic principle is to define a testing rule that depends on the -rule-under-test. This gives the testing rule access to the rule-under-test's -providers. - -The testing rule's implementation function carries out assertions. If there are -any failures, these are not raised immediately by calling `fail()` (which would -trigger an analysis-time build error), but rather by storing the errors in a -generated script that fails at test execution time. - -See below for a minimal toy example, followed by an example that checks actions. - -### Minimal example - -`//mypkg/myrules.bzl`: - -```python -MyInfo = provider(fields = { - "val": "string value", - "out": "output File", -}) - -def _myrule_impl(ctx): - """Rule that just generates a file and returns a provider.""" - out = ctx.actions.declare_file(ctx.label.name + ".out") - ctx.actions.write(out, "abc") - return [MyInfo(val="some value", out=out)] - -myrule = rule( - implementation = _myrule_impl, -) -``` - -`//mypkg/myrules_test.bzl`: - - -```python -load("@bazel_skylib//lib:unittest.bzl", "asserts", "analysistest") -load(":myrules.bzl", "myrule", "MyInfo") - -# ==== Check the provider contents ==== - -def _provider_contents_test_impl(ctx): - env = analysistest.begin(ctx) - - target_under_test = analysistest.target_under_test(env) - # If preferred, could pass these values as "expected" and "actual" keyword - # arguments. - asserts.equals(env, "some value", target_under_test[MyInfo].val) - - # If you forget to return end(), you will get an error about an analysis - # test needing to return an instance of AnalysisTestResultInfo. - return analysistest.end(env) - -# Create the testing rule to wrap the test logic. This must be bound to a global -# variable, not called in a macro's body, since macros get evaluated at loading -# time but the rule gets evaluated later, at analysis time. Since this is a test -# rule, its name must end with "_test". -provider_contents_test = analysistest.make(_provider_contents_test_impl) - -# Macro to setup the test. -def _test_provider_contents(): - # Rule under test. Be sure to tag 'manual', as this target should not be - # built using `:all` except as a dependency of the test. - myrule(name = "provider_contents_subject", tags = ["manual"]) - # Testing rule. - provider_contents_test(name = "provider_contents_test", - target_under_test = ":provider_contents_subject") - # Note the target_under_test attribute is how the test rule depends on - # the real rule target. 
-
-# Entry point from the BUILD file; macro for running each test case's macro and
-# declaring a test suite that wraps them together.
-def myrules_test_suite(name):
-    # Call all test functions and wrap their targets in a suite.
-    _test_provider_contents()
-    # ...
-
-    native.test_suite(
-        name = name,
-        tests = [
-            ":provider_contents_test",
-            # ...
-        ],
-    )
-```
-
-`//mypkg/BUILD`:
-
-```python
-load(":myrules.bzl", "myrule")
-load(":myrules_test.bzl", "myrules_test_suite")
-
-# Production use of the rule.
-myrule(
-    name = "mytarget",
-)
-
-# Call a macro that defines targets that perform the tests at analysis time,
-# and that can be executed with "bazel test" to return the result.
-myrules_test_suite(name = "myrules_test")
-```
-
-The test can be run with `bazel test //mypkg:myrules_test`.
-
-Aside from the initial `load()` statements, there are two main parts to the
-file:
-
-* The tests themselves, each of which consists of 1) an analysis-time
-  implementation function for the testing rule, 2) a declaration of the
-  testing rule via `analysistest.make()`, and 3) a loading-time function
-  (macro) for declaring the rule-under-test (and its dependencies) and testing
-  rule. If the assertions do not change between test cases, 1) and 2) may be
-  shared by multiple test cases.
-
-* The test suite function, which calls the loading-time functions for each
-  test, and declares a `test_suite` target bundling all tests together.
-
-For consistency, follow the recommended naming convention: Let `foo` stand for
-the part of the test name that describes what the test is checking
-(`provider_contents` in the above example). For example, a JUnit test method
-would be named `testFoo`.
-
-Then:
-
-* the macro which generates the test and target under test should be
-  named `_test_foo` (`_test_provider_contents`)
-
-* its test rule type should be named `foo_test` (`provider_contents_test`)
-
-* the label of the target of this rule type should be `foo_test`
-  (`provider_contents_test`)
-
-* the implementation function for the testing rule should be named
-  `_foo_test_impl` (`_provider_contents_test_impl`)
-
-* the labels of the targets of the rules under test and their dependencies
-  should be prefixed with `foo_` (`provider_contents_`)
-
-Note that the labels of all targets can conflict with other labels in the same
-BUILD package, so it's helpful to use a unique name for the test.
-
-### Failure testing
-
-It may be useful to verify that a rule fails given certain inputs or in a
-certain state. This can be done using the analysis test framework:
-
-The test rule created with `analysistest.make` should specify `expect_failure`:
-
-```python
-failure_testing_test = analysistest.make(
-    _failure_testing_test_impl,
-    expect_failure = True,
-)
-```
-
-The test rule implementation should make assertions on the nature of the
-failure that took place (specifically, the failure message):
-
-```python
-def _failure_testing_test_impl(ctx):
-    env = analysistest.begin(ctx)
-    asserts.expect_failure(env, "This rule should never work")
-    return analysistest.end(env)
-```
-
-Also make sure that your target under test is specifically tagged 'manual'.
-Without this, building all targets in your package using `:all` will result in
-a build of the intentionally-failing target and will exhibit a build failure.
With -'manual', your target under test will build only if explicitly specified, or as -a dependency of a non-manual target (such as your test rule): - -```python -def _test_failure(): - myrule(name = "this_should_fail", tags = ["manual"]) - - failure_testing_test(name = "failure_testing_test", - target_under_test = ":this_should_fail") - -# Then call _test_failure() in the macro which generates the test suite and add -# ":failure_testing_test" to the suite's test targets. -``` - -### Verifying registered actions - -You may want to write tests which make assertions about the actions that your -rule registers, for example, using `ctx.actions.run()`. This can be done in your -analysis test rule implementation function. An example: - -```python -def _inspect_actions_test_impl(ctx): - env = analysistest.begin(ctx) - - target_under_test = analysistest.target_under_test(env) - actions = analysistest.target_actions(env) - asserts.equals(env, 1, len(actions)) - action_output = actions[0].outputs.to_list()[0] - asserts.equals( - env, target_under_test.label.name + ".out", action_output.basename) - return analysistest.end(env) -``` - -Note that `analysistest.target_actions(env)` returns a list of -[`Action`](lib/Action) objects which represent actions registered by the -target under test. - -### Verifying rule behavior under different flags - -You may want to verify your real rule behaves a certain way given certain build -flags. For example, your rule may behave differently if a user specifies: - -```shell -bazel build //mypkg:real_target -c opt -``` - -versus - -```shell -bazel build //mypkg:real_target -c dbg -``` - -At first glance, this could be done by testing the target under test using the -desired build flags: - -```shell -bazel test //mypkg:myrules_test -c opt -``` - -But then it becomes impossible for your test suite to simultaneously contain a -test which verifies the rule behavior under `-c opt` and another test which -verifies the rule behavior under `-c dbg`. Both tests would not be able to run -in the same build! - -This can be solved by specifying the desired build flags when defining the test -rule: - -```python -myrule_c_opt_test = analysistest.make( - _myrule_c_opt_test_impl, - config_settings = { - "//command_line_option:compilation_mode": "opt", - }, -) -``` - -Normally, a target under test is analyzed given the current build flags. -Specifying `config_settings` overrides the values of the specified command line -options. (Any unspecified options will retain their values from the actual -command line). - -In the specified `config_settings` dictionary, command line flags must be -prefixed with a special placeholder value `//command_line_option:`, as is shown -above. - - -## Validating artifacts - -The main ways to check that your generated files are correct are: - -* You can write a test script in shell, Python, or another language, and - create a target of the appropriate `*_test` rule type. - -* You can use a specialized rule for the kind of test you want to perform. - -### Using a test target - -The most straightforward way to validate an artifact is to write a script and -add a `*_test` target to your BUILD file. The specific artifacts you want to -check should be data dependencies of this target. If your validation logic is -reusable for multiple tests, it should be a script that takes command line -arguments that are controlled by the test target's `args` attribute. Here's an -example that validates that the output of `myrule` from above is `"abc"`. 
-
-`//mypkg/myrule_validator.sh`:
-
-```shell
-if [ "$(cat $1)" = "abc" ]; then
-  echo "Passed"
-  exit 0
-else
-  echo "Failed"
-  exit 1
-fi
-```
-
-`//mypkg/BUILD`:
-
-```python
-...
-
-myrule(
-    name = "mytarget",
-)
-
-...
-
-# Needed for each target whose artifacts are to be checked.
-sh_test(
-    name = "validate_mytarget",
-    srcs = [":myrule_validator.sh"],
-    args = ["$(location :mytarget.out)"],
-    data = [":mytarget.out"],
-)
-```
-
-### Using a custom rule
-
-A more complicated alternative is to write the shell script as a template that
-gets instantiated by a new rule. This involves more indirection and Starlark
-logic, but leads to cleaner BUILD files. As a side-benefit, any argument
-preprocessing can be done in Starlark instead of the script, and the script is
-slightly more self-documenting since it uses symbolic placeholders (for
-substitutions) instead of numeric ones (for arguments).
-
-`//mypkg/myrule_validator.sh.template`:
-
-```shell
-if [ "$(cat %TARGET%)" = "abc" ]; then
-  echo "Passed"
-  exit 0
-else
-  echo "Failed"
-  exit 1
-fi
-```
-
-`//mypkg/myrule_validation.bzl`:
-
-```python
-def _myrule_validation_test_impl(ctx):
-    """Rule for instantiating myrule_validator.sh.template for a given target."""
-    exe = ctx.outputs.executable
-    target = ctx.file.target
-    ctx.actions.expand_template(output = exe,
-                                template = ctx.file._script,
-                                is_executable = True,
-                                substitutions = {
-                                    "%TARGET%": target.short_path,
-                                })
-    # This is needed to make sure the output file of myrule is visible to the
-    # resulting instantiated script.
-    return [DefaultInfo(runfiles=ctx.runfiles(files=[target]))]
-
-myrule_validation_test = rule(
-    implementation = _myrule_validation_test_impl,
-    attrs = {"target": attr.label(allow_single_file=True),
-             # You need an implicit dependency in order to access the template.
-             # A target could potentially override this attribute to modify
-             # the test logic.
-             "_script": attr.label(allow_single_file=True,
-                                   default=Label("//mypkg:myrule_validator"))},
-    test = True,
-)
-```
-
-`//mypkg/BUILD`:
-
-```python
-...
-
-myrule(
-    name = "mytarget",
-)
-
-...
-
-# Needed just once, to expose the template. Could have also used exports_files()
-# and made the _script attribute set allow_files=True.
-filegroup(
-    name = "myrule_validator",
-    srcs = [":myrule_validator.sh.template"],
-)
-
-# Needed for each target whose artifacts are to be checked. Notice that you no
-# longer have to specify the output file name in a data attribute, or its
-# $(location) expansion in an args attribute, or the label for the script
-# (unless you want to override it).
-myrule_validation_test(
-    name = "validate_mytarget",
-    target = ":mytarget",
-)
-```
-
-Alternatively, instead of using a template expansion action, you could have
-inlined the template into the .bzl file as a string and expanded it during the
-analysis phase using the `str.format` method or `%`-formatting.
-
-## Testing Starlark utilities
-
-[Skylib](https://github.com/bazelbuild/bazel-skylib)'s
-[`unittest.bzl`](https://github.com/bazelbuild/bazel-skylib/blob/main/lib/unittest.bzl)
-framework can be used to test utility functions (that is, functions that are
-neither macros nor rule implementations). Instead of using `unittest.bzl`'s
-`analysistest` library, `unittest` may be used. For such test suites, the
-convenience function `unittest.suite()` can be used to reduce boilerplate.
-
-`//mypkg/myhelpers.bzl`:
-
-```python
-def myhelper():
-    return "abc"
-```
-
-`//mypkg/myhelpers_test.bzl`:
-
-```python
-load("@bazel_skylib//lib:unittest.bzl", "asserts", "unittest")
-load(":myhelpers.bzl", "myhelper")
-
-def _myhelper_test_impl(ctx):
-    env = unittest.begin(ctx)
-    asserts.equals(env, "abc", myhelper())
-    return unittest.end(env)
-
-myhelper_test = unittest.make(_myhelper_test_impl)
-
-# No need for a test_myhelper() setup function.
-
-def myhelpers_test_suite(name):
-    # unittest.suite() takes care of instantiating the testing rules and creating
-    # a test_suite.
-    unittest.suite(
-        name,
-        myhelper_test,
-        # ...
-    )
-```
-
-`//mypkg/BUILD`:
-
-```python
-load(":myhelpers_test.bzl", "myhelpers_test_suite")
-
-myhelpers_test_suite(name = "myhelpers_tests")
-```
-
-For more examples, see Skylib's own [tests](https://github.com/bazelbuild/bazel-skylib/blob/main/tests/BUILD).
diff --git a/8.3.1/rules/verbs-tutorial.mdx b/8.3.1/rules/verbs-tutorial.mdx
deleted file mode 100644
index db7757e..0000000
--- a/8.3.1/rules/verbs-tutorial.mdx
+++ /dev/null
@@ -1,177 +0,0 @@
----
-title: 'Using Macros to Create Custom Verbs'
----
-
-
-
-Day-to-day interaction with Bazel happens primarily through a few commands:
-`build`, `test`, and `run`. At times, though, these can feel limited: you may
-want to push packages to a repository, publish documentation for end-users, or
-deploy an application with Kubernetes. But Bazel doesn't have a `publish` or
-`deploy` command – where do these actions fit in?
-
-## The bazel run command
-
-Bazel's focus on hermeticity, reproducibility, and incrementality means the
-`build` and `test` commands aren't helpful for the above tasks. These actions
-may run in a sandbox, with limited network access, and aren't guaranteed to be
-re-run with every `bazel build`.
-
-Instead, rely on `bazel run`: the workhorse for tasks that you *want* to have
-side effects. Bazel users are accustomed to rules that create executables, and
-rule authors can follow a common set of patterns to extend this to
-"custom verbs".
-
-### In the wild: rules_k8s
-For example, consider [`rules_k8s`](https://github.com/bazelbuild/rules_k8s),
-the Kubernetes rules for Bazel. Suppose you have the following target:
-
-```python
-# BUILD file in //application/k8s
-k8s_object(
-    name = "staging",
-    kind = "deployment",
-    cluster = "testing",
-    template = "deployment.yaml",
-)
-```
-
-The [`k8s_object` rule](https://github.com/bazelbuild/rules_k8s#usage) builds a
-standard Kubernetes YAML file when `bazel build` is used on the `staging`
-target. However, the `k8s_object` macro also creates additional targets with
-names like `:staging.apply` and `:staging.delete`. These targets build scripts
-that perform those actions, and when executed with `bazel run :staging.apply`,
-they behave like our own `bazel k8s-apply` or `bazel k8s-delete` commands.
-
-### Another example: ts_api_guardian_test
-
-This pattern can also be seen in the Angular project. The
-[`ts_api_guardian_test` macro](https://github.com/angular/angular/blob/16ac611a8410e6bcef8ffc779f488ca4fa102155/tools/ts-api-guardian/index.bzl#L22)
-produces two targets. The first is a standard `nodejs_test` target which compares
-some generated output against a "golden" file (that is, a file containing the
-expected output). This can be built and run with a normal `bazel test`
-invocation.
-In `angular-cli`, you can run [one such
-target](https://github.com/angular/angular-cli/blob/e1269cb520871ee29b1a4eec6e6c0e4a94f0b5fc/etc/api/BUILD)
-with `bazel test //etc/api:angular_devkit_core_api`.
-
-Over time, this golden file may need to be updated for legitimate reasons.
-Updating this manually is tedious and error-prone, so this macro also provides
-a `nodejs_binary` target that updates the golden file, instead of comparing
-against it. Effectively, the same test script can be written to run in "verify"
-or "accept" mode, based on how it's invoked. This follows the same pattern
-you've learned already: there is no native `bazel test-accept` command, but the
-same effect can be achieved with
-`bazel run //etc/api:angular_devkit_core_api.accept`.
-
-This pattern can be quite powerful, and turns out to be common once you learn
-to recognize it.
-
-## Adapting your own rules
-
-[Macros](/extending/macros) are the heart of this pattern. Macros are used like
-rules, but they can create several targets. Typically, they will create a
-target with the specified name which performs the primary build action: perhaps
-it builds a normal binary, a Docker image, or an archive of source code. In
-this pattern, additional targets are created to produce scripts performing side
-effects based on the output of the primary target, like publishing the
-resulting binary or updating the expected test output.
-
-To illustrate this, take an imaginary rule that generates a website with
-[Sphinx](https://www.sphinx-doc.org), and wrap it in a macro that creates an
-additional target allowing the user to publish the site when ready. Consider
-the following existing rule for generating a website with Sphinx:
-
-```python
-_sphinx_site = rule(
-    implementation = _sphinx_impl,
-    attrs = {"srcs": attr.label_list(allow_files = [".rst"])},
-)
-```
-
-Next, consider a rule like the following, which builds a script that, when run,
-publishes the generated pages:
-
-```python
-_sphinx_publisher = rule(
-    implementation = _publish_impl,
-    attrs = {
-        "site": attr.label(),
-        "_publisher": attr.label(
-            default = "//internal/sphinx:publisher",
-            executable = True,
-        ),
-    },
-    executable = True,
-)
-```
-
-Finally, define the following symbolic macro (available in Bazel 8 or newer) to
-create targets for both of the above rules together:
-
-```starlark
-def _sphinx_site_impl(name, visibility, srcs, **kwargs):
-    # This creates the primary target, producing the Sphinx-generated HTML. We
-    # set `visibility = visibility` to make it visible to callers of the
-    # macro.
-    _sphinx_site(name = name, visibility = visibility, srcs = srcs, **kwargs)
-    # This creates the secondary target, which produces a script for publishing
-    # the site generated above. We don't want it to be visible to callers of
-    # our macro, so we omit visibility for it.
-    _sphinx_publisher(name = "%s.publish" % name, site = name, **kwargs)
-
-sphinx_site = macro(
-    implementation = _sphinx_site_impl,
-    attrs = {"srcs": attr.label_list(allow_files = [".rst"])},
-    # Inherit common attributes like tags and testonly
-    inherit_attrs = "common",
-)
-```
-
-Or, if you need to support Bazel releases older than Bazel 8, you would instead
-define a legacy macro:
-
-```starlark
-def sphinx_site(name, srcs = [], **kwargs):
-    # This creates the primary target, producing the Sphinx-generated HTML.
-    _sphinx_site(name = name, srcs = srcs, **kwargs)
-    # This creates the secondary target, which produces a script for publishing
-    # the site generated above.
-    _sphinx_publisher(name = "%s.publish" % name, site = name, **kwargs)
-```
-
-In the `BUILD` files, use the macro as though it just creates the primary
-target:
-
-```python
-sphinx_site(
-    name = "docs",
-    srcs = ["index.rst", "providers.rst"],
-)
-```
-
-In this example, a "docs" target is created, just as though the macro were a
-standard, single Bazel rule. When built, the rule generates some configuration
-and runs Sphinx to produce an HTML site, ready for manual inspection. However,
-an additional "docs.publish" target is also created, which builds a script for
-publishing the site. Once you check the output of the primary target, you can
-use `bazel run :docs.publish` to publish it for public consumption, just like
-an imaginary `bazel publish` command.
-
-It's not immediately obvious what the implementation of the `_sphinx_publisher`
-rule might look like. Often, actions like this write a _launcher_ shell script.
-This method typically involves using
-[`ctx.actions.expand_template`](lib/actions#expand_template)
-to write a very simple shell script, in this case invoking the publisher binary
-with a path to the output of the primary target. This way, the publisher
-implementation can remain generic, the `_sphinx_site` rule can just produce
-HTML, and this small script is all that's necessary to combine the two.
-
-In `rules_k8s`, this is indeed what `.apply` does:
-[`expand_template`](https://github.com/bazelbuild/rules_k8s/blob/f10e7025df7651f47a76abf1db5ade1ffeb0c6ac/k8s/object.bzl#L213-L241)
-writes a very simple Bash script, based on
-[`apply.sh.tpl`](https://github.com/bazelbuild/rules_k8s/blob/f10e7025df7651f47a76abf1db5ade1ffeb0c6ac/k8s/apply.sh.tpl),
-which runs `kubectl` with the output of the primary target. This script can
-then be built and run with `bazel run :staging.apply`, effectively providing a
-`k8s-apply` command for `k8s_object` targets.
diff --git a/8.3.1/run/bazelrc.mdx b/8.3.1/run/bazelrc.mdx
deleted file mode 100644
index 15f89c8..0000000
--- a/8.3.1/run/bazelrc.mdx
+++ /dev/null
@@ -1,260 +0,0 @@
----
-title: 'Write bazelrc configuration files'
----
-
-
-
-Bazel accepts many options. Some options are varied frequently (for example,
-`--subcommands`) while others stay the same across several builds (such as
-`--package_path`). To avoid specifying these unchanged options for every build
-(and other commands), you can specify options in a configuration file, called
-`.bazelrc`.
-
-### Where are the `.bazelrc` files?
-
-Bazel looks for optional configuration files in the following locations,
-in the order shown below. The options are interpreted in this order, so
-options in later files can override a value from an earlier file if a
-conflict arises. All options that control which of these files are loaded are
-startup options, which means they must occur after `bazel` and
-before the command (`build`, `test`, etc.).
-
-1. **The system RC file**, unless `--nosystem_rc` is present.
-
-   Path:
-
-   - On Linux/macOS/Unixes: `/etc/bazel.bazelrc`
-   - On Windows: `%ProgramData%\bazel.bazelrc`
-
-   It is not an error if this file does not exist.
-
-   If another system-specified location is required, you must build a custom
-   Bazel binary, overriding the `BAZEL_SYSTEM_BAZELRC_PATH` value in
-   [`//src/main/cpp:option_processor`](https://github.com/bazelbuild/bazel/blob/0.28.0/src/main/cpp/BUILD#L141).
-   The system-specified location may contain environment variable references,
-   such as `${VAR_NAME}` on Unix or `%VAR_NAME%` on Windows.
-
-2. **The workspace RC file**, unless `--noworkspace_rc` is present.
-
-   Path: `.bazelrc` in your workspace directory (next to the main
-   `MODULE.bazel` file).
-
-   It is not an error if this file does not exist.
-
-3. **The home RC file**, unless `--nohome_rc` is present.
-
-   Path:
-
-   - On Linux/macOS/Unixes: `$HOME/.bazelrc`
-   - On Windows: `%USERPROFILE%\.bazelrc` if it exists, or `%HOME%/.bazelrc`
-
-   It is not an error if this file does not exist.
-
-4. **The user-specified RC file**, if specified with
-   `--bazelrc=file`
-
-   This flag is optional and can be specified multiple times.
-
-   `/dev/null` indicates that all further `--bazelrc`s will be ignored, which
-   is useful to disable the search for a user rc file, such as in release
-   builds.
-
-   For example:
-
-   ```
-   --bazelrc=x.rc --bazelrc=y.rc --bazelrc=/dev/null --bazelrc=z.rc
-   ```
-
-   - `x.rc` and `y.rc` are read.
-   - `z.rc` is ignored due to the prior `/dev/null`.
-
-In addition to this optional configuration file, Bazel looks for a global rc
-file. For more details, see the [global bazelrc section](#global-bazelrc).
-
-
-### `.bazelrc` syntax and semantics
-
-Like all UNIX "rc" files, the `.bazelrc` file is a text file with a line-based
-grammar. Empty lines and lines starting with `#` (comments) are ignored. Each
-line contains a sequence of words, which are tokenized according to the same
-rules as the Bourne shell.
-
-#### Imports
-
-Lines that start with `import` or `try-import` are special: use these to load
-other "rc" files. To specify a path that is relative to the workspace root,
-write `import %workspace%/path/to/bazelrc`.
-
-The difference between `import` and `try-import` is that Bazel fails if the
-`import`'ed file is missing (or can't be read), but not so for a `try-import`'ed
-file.
-
-Import precedence:
-
-- Options in the imported file take precedence over options specified before
-  the import statement.
-- Options specified after the import statement take precedence over the
-  options in the imported file.
-- Options in files imported later take precedence over files imported earlier.
-
-#### Option defaults
-
-Most lines of a bazelrc define default option values. The first word on each
-line specifies when these defaults are applied:
-
-- `startup`: startup options, which go before the command, and are described
-  in `bazel help startup_options`.
-- `common`: options that should be applied to all Bazel commands that support
-  them. If a command does not support an option specified in this way, the
-  option is ignored so long as it is valid for *some* other Bazel command.
-  Note that this only applies to option names: If the current command accepts
-  an option with the specified name, but doesn't support the specified value,
-  it will fail.
-- `always`: options that apply to all Bazel commands. If a command does not
-  support an option specified in this way, it will fail.
-- _`command`_: Bazel command, such as `build` or `query`, to which the options
-  apply. These options also apply to all commands that inherit from the
-  specified command. (For example, `test` inherits from `build`.)
-
-Each of these lines may be used more than once and the arguments that follow the
-first word are combined as if they had appeared on a single line. (Users of CVS,
-another tool with a "Swiss army knife" command-line interface, will find the
-syntax similar to that of `.cvsrc`.)
-For example, the lines:
-
-```posix-terminal
-build --test_tmpdir=/tmp/foo --verbose_failures
-
-build --test_tmpdir=/tmp/bar
-```
-
-are combined as:
-
-```posix-terminal
-build --test_tmpdir=/tmp/foo --verbose_failures --test_tmpdir=/tmp/bar
-```
-
-so the effective flags are `--verbose_failures` and `--test_tmpdir=/tmp/bar`.
-
-Option precedence:
-
-- Options on the command line always take precedence over those in rc files.
-  For example, if a rc file says `build -c opt` but the command line flag is
-  `-c dbg`, the command line flag takes precedence.
-- Within the rc file, precedence is governed by specificity: lines for a more
-  specific command take precedence over lines for a less specific command.
-
-  Specificity is defined by inheritance. Some commands inherit options from
-  other commands, making the inheriting command more specific than the base
-  command. For example `test` inherits from the `build` command, so all `bazel
-  build` flags are valid for `bazel test`, and all `build` lines apply also to
-  `bazel test` unless there's a `test` line for the same option. If the rc
-  file says:
-
-  ```posix-terminal
-  test -c dbg --test_env=PATH
-
-  build -c opt --verbose_failures
-  ```
-
-  then `bazel build //foo` will use `-c opt --verbose_failures`, and `bazel
-  test //foo` will use `--verbose_failures -c dbg --test_env=PATH`.
-
-  The inheritance (specificity) graph is:
-
-  * Every command inherits from `common`
-  * The following commands inherit from (and are more specific than)
-    `build`: `test`, `run`, `clean`, `mobile-install`, `info`,
-    `print_action`, `config`, `cquery`, and `aquery`
-  * `coverage`, `fetch`, and `vendor` inherit from `test`
-
-- Two lines specifying options for the same command at equal specificity are
-  parsed in the order in which they appear within the file.
-
-- Because this precedence rule does not match the file order, it helps
-  readability if you follow the precedence order within rc files: start with
-  `common` options at the top, and end with the most-specific commands at the
-  bottom of the file. This way, the order in which the options are read is the
-  same as the order in which they are applied, which is more intuitive.
-
-The arguments specified on a line of an rc file may include arguments that are
-not options, such as the names of build targets, and so on. These, like the
-options specified in the same files, have lower precedence than their siblings
-on the command line, and are always prepended to the explicit list of
-non-option arguments.
-
-#### `--config`
-
-In addition to setting option defaults, the rc file can be used to group options
-and provide a shorthand for common groupings. This is done by adding a `:name`
-suffix to the command. These options are ignored by default, but will be
-included when the option `--config=name` is present,
-either on the command line or in a `.bazelrc` file, recursively, even inside
-another config definition. The options specified by `command:name` will only be
-expanded for applicable commands, in the precedence order described above.
-
-Note: Configs can be defined in any `.bazelrc` file, and all lines of
-the form `command:name` (for applicable commands) will be expanded, across the
-different rc files. To avoid unintentional name sharing, we suggest that
-configs defined in personal rc files start with an underscore (`_`).
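-
-For example, a config that groups sanitizer-related options might look like
-this (the name `asan` and the exact flags here are illustrative, not
-prescriptive):
-
-```
-# These lines are inert until --config=asan is given on the command line.
-build:asan --copt=-fsanitize=address
-build:asan --linkopt=-fsanitize=address
-test:asan --test_env=ASAN_OPTIONS=detect_leaks=1
-```
-
-Running `bazel test --config=asan //foo:bar` then behaves as if those flags had
-been written out on the command line; the `build:asan` lines apply to `test`
-as well because `test` inherits from `build`.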
-
-`--config=foo` expands to the options defined in
-[the rc files](#bazelrc-file-locations) "in-place" so that the options
-specified for the config have the same precedence as the `--config=foo` option
-had.
-
-This syntax does not extend to the use of `startup` to set
-[startup options](#option-defaults). Setting
-`startup:config-name --some_startup_option` in the `.bazelrc` will be ignored.
-
-#### `--enable_platform_specific_config`
-
-Platform-specific configs in the `.bazelrc` can be automatically enabled using
-`--enable_platform_specific_config`. For example, if the host OS is Linux and
-the `build` command is run, the `build:linux` configuration will be
-automatically enabled. Supported OS identifiers are `linux`, `macos`, `windows`,
-`freebsd`, and `openbsd`. Enabling this flag is equivalent to using
-`--config=linux` on Linux, `--config=windows` on Windows, and so on.
-
-See [--enable_platform_specific_config](/reference/command-line-reference#flag--enable_platform_specific_config).
-
-#### Example
-
-Here's an example `~/.bazelrc` file:
-
-```
-# Bob's Bazel option defaults
-
-startup --host_jvm_args=-XX:-UseParallelGC
-import /home/bobs_project/bazelrc
-build --show_timestamps --keep_going --jobs 600
-build --color=yes
-query --keep_going
-
-# Definition of --config=memcheck
-build:memcheck --strip=never --test_timeout=3600
-```
-
-### Other files governing Bazel's behavior
-
-#### `.bazelignore`
-
-You can specify directories within the workspace
-that you want Bazel to ignore, such as related projects
-that use other build systems. Place a file called
-`.bazelignore` at the root of the workspace
-and add the directories you want Bazel to ignore, one per
-line. Entries are relative to the workspace root.
-
-### The global bazelrc file
-
-Bazel reads optional bazelrc files in this order:
-
-1. System rc-file located at `etc/bazel.bazelrc`.
-2. Workspace rc-file located at `$workspace/tools/bazel.rc`.
-3. Home rc-file located at `$HOME/.bazelrc`.
-
-Each bazelrc file listed here has a corresponding flag which can be used to
-disable them (e.g. `--nosystem_rc`, `--noworkspace_rc`, `--nohome_rc`). You can
-also make Bazel ignore all bazelrcs by passing the `--ignore_all_rc_files`
-startup option.
diff --git a/8.3.1/run/client-server.mdx b/8.3.1/run/client-server.mdx
deleted file mode 100644
index 1868635..0000000
--- a/8.3.1/run/client-server.mdx
+++ /dev/null
@@ -1,58 +0,0 @@
----
-title: 'Client/server implementation'
----
-
-
-
-The Bazel system is implemented as a long-lived server process. This allows it
-to perform many optimizations not possible with a batch-oriented implementation,
-such as caching of BUILD files, dependency graphs, and other metadata from one
-build to the next. This improves the speed of incremental builds, and allows
-different commands, such as `build` and `query`, to share the same cache of
-loaded packages, making queries very fast. Each server can handle at most one
-invocation at a time; further concurrent invocations will either block or
-fail fast (see `--block_for_lock`).
-
-When you run `bazel`, you're running the client. The client finds the server
-based on the [output base](/run/scripts#output-base-option), which by default is
-determined by the path of the base workspace directory and your userid, so if
-you build in multiple workspaces, you'll have multiple output bases and thus
-multiple Bazel server processes.
-Multiple users on the same workstation can build concurrently in the same
-workspace because their output bases will differ (different userids).
-
-If the client cannot find a running server instance, it starts a new one. It
-does this by checking whether the output base already exists, which would imply
-that the Bazel archive has already been unpacked. If the output base doesn't
-exist, the client unzips the archive's files and sets their `mtime`s to a date
-9 years in the future. Once installed, the client confirms that the `mtime`s of
-the unzipped files are equal to the far-off date to ensure no installation
-tampering has occurred.
-
-The server process will stop after a period of inactivity (3 hours, by default,
-which can be modified using the startup option `--max_idle_secs`). For the most
-part, the fact that there is a server running is invisible to the user, but
-sometimes it helps to bear this in mind. For example, if you're running scripts
-that perform a lot of automated builds in different directories, it's important
-to ensure that you don't accumulate a lot of idle servers; you can do this by
-explicitly shutting them down when you're finished with them, or by specifying
-a short timeout period.
-
-The name of a Bazel server process appears in the output of `ps x` or `ps -e f`
-as `bazel(dirname)`, where `dirname` is the basename of the
-directory enclosing the root of your workspace directory. For example:
-
-```posix-terminal
-ps -e f
-16143 ?        Sl     3:00 bazel(src-johndoe2) -server -Djava.library.path=...
-```
-
-This makes it easier to find out which server process belongs to a given
-workspace. (Beware that with certain other options to `ps`, Bazel server
-processes may be named just `java`.) Bazel servers can be stopped using the
-[shutdown](/docs/user-manual#shutdown) command.
-
-When running `bazel`, the client first checks that the server is the appropriate
-version; if not, the server is stopped and a new one started. This ensures that
-the use of a long-running server process doesn't interfere with proper
-versioning.
diff --git a/8.3.1/run/scripts.mdx b/8.3.1/run/scripts.mdx
deleted file mode 100644
index f267c90..0000000
--- a/8.3.1/run/scripts.mdx
+++ /dev/null
@@ -1,131 +0,0 @@
----
-title: 'Calling Bazel from scripts'
----
-
-
-
-You can call Bazel from scripts to perform a build, run tests, or query
-the dependency graph. Bazel has been designed to enable effective scripting, but
-this section lists some details to bear in mind to make your scripts more
-robust.
-
-### Choosing the output base
-
-The `--output_base` option controls where the Bazel process should write the
-outputs of a build, as well as various working files used internally by
-Bazel, one of which is a lock that guards against concurrent mutation of the
-output base by multiple Bazel processes.
-
-Choosing the correct output base directory for your script depends on several
-factors. If you need to put the build outputs in a specific location, this will
-dictate the output base you need to use. If you are making a "read only" call to
-Bazel (such as `bazel query`), the locking factors will be more important. In
-particular, if you need to run multiple instances of your script concurrently,
-you should be mindful that each Blaze server process can handle at most one
-invocation [at a time](/run/client-server#clientserver-implementation).
-Depending on your situation, it may make sense for each instance of your script
-to wait its turn, or it may make sense to use `--output_base` to run multiple
-Blaze servers and use those.
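-
-For example, a script that issues read-only queries concurrently could give
-each instance its own output base (the path scheme below is just an
-illustration):
-
-```posix-terminal
-bazel --output_base=/tmp/my_script_scratch_$$ query 'deps(//foo:bar)'
-```
-
-Each distinct output base gets its own lock and its own Bazel server, so
-several such invocations can proceed in parallel without contending for the
-lock of the user's interactive server.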
-
-If you use the default output base value, you will be contending for the same
-lock used by the user's interactive Bazel commands. If the user issues
-long-running commands such as builds, your script will have to wait for those
-commands to complete before it can continue.
-
-### Notes about server mode
-
-By default, Bazel uses a long-running [server process](/run/client-server) as an
-optimization. When running Bazel in a script, don't forget to call `shutdown`
-when you're finished with the server, or specify `--max_idle_secs=5` so that
-idle servers shut themselves down promptly.
-
-### What exit code will I get?
-
-Bazel attempts to differentiate failures due to the source code under
-consideration from external errors that prevent Bazel from executing properly.
-Bazel execution can result in the following exit codes:
-
-**Exit Codes common to all commands:**
-
-- `0` - Success
-- `2` - Command Line Problem, Bad or Illegal flags or command combination, or
-  Bad Environment Variables. Your command line must be modified.
-- `8` - Build Interrupted but we terminated with an orderly shutdown.
-- `9` - The server lock is held and `--noblock_for_lock` was passed.
-- `32` - External Environment Failure not on this machine.
-- `33` - Bazel ran out of memory and crashed. You need to modify your command line.
-- `34` - Reserved for Google-internal use.
-- `35` - Reserved for Google-internal use.
-- `36` - Local Environmental Issue, suspected permanent.
-- `37` - Unhandled Exception / Internal Bazel Error.
-- `38` - Transient error publishing results to the Build Event Service.
-- `39` - Blobs required by Bazel are evicted from Remote Cache.
-- `41-44` - Reserved for Google-internal use.
-- `45` - Persistent error publishing results to the Build Event Service.
-- `47` - Reserved for Google-internal use.
-- `49` - Reserved for Google-internal use.
-
-**Return codes for commands `bazel build`, `bazel test`:**
-
-- `1` - Build failed.
-- `3` - Build OK, but some tests failed or timed out.
-- `4` - Build successful but no tests were found even though testing was
-  requested.
-
-**For `bazel run`:**
-
-- `1` - Build failed.
-- If the build succeeds but the executed subprocess returns a non-zero exit
-  code, it will be the exit code of the command as well.
-
-**For `bazel query`:**
-
-- `3` - Partial success, but the query encountered 1 or more errors in the
-  input BUILD file set and therefore the results of the operation are not 100%
-  reliable. This is likely due to a `--keep_going` option on the command line.
-- `7` - Command failure.
-
-Future Bazel versions may add additional exit codes, replacing generic failure
-exit code `1` with a different non-zero value with a particular meaning.
-However, all non-zero exit values will always constitute an error.
-
-
-### Reading the .bazelrc file
-
-By default, Bazel reads the [`.bazelrc` file](/run/bazelrc) from the base
-workspace directory or the user's home directory. Whether or not this is
-desirable is a choice for your script; if your script needs to be perfectly
-hermetic (such as when doing release builds), you should disable reading the
-.bazelrc file by using the option `--bazelrc=/dev/null`. If you want to perform
-a build using the user's preferred settings, the default behavior is better.
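-
-Putting these recommendations together, a minimal wrapper script might look
-like the following sketch (the output base path and target are placeholders):
-
-```shell
-#!/bin/bash
-# Hermetic invocation: ignore all rc files, use a private output base, and
-# let the server shut itself down shortly after going idle.
-bazel --bazelrc=/dev/null --output_base="$HOME/.cache/my_script_output" \
-    --max_idle_secs=5 build //mypkg:mytarget
-exit_code=$?
-if [ "$exit_code" -eq 0 ]; then
-  echo "Build succeeded"
-elif [ "$exit_code" -eq 1 ]; then
-  echo "Build failed"
-else
-  echo "Bazel did not run normally (exit code $exit_code)"
-fi
-exit "$exit_code"
-```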
-
-### Command log
-
-The Bazel output is also available in a command log file, which you can find
-with the following command:
-
-```posix-terminal
-bazel info command_log
-```
-
-The command log file contains the interleaved stdout and stderr streams of the
-most recent Bazel command. Note that running `bazel info` will overwrite the
-contents of this file, since it then becomes the most recent Bazel command.
-However, the location of the command log file will not change unless you change
-the setting of the `--output_base` or `--output_user_root` options.
-
-### Parsing output
-
-The Bazel output is quite easy to parse for many purposes. Two options that may
-be helpful for your script are `--noshow_progress`, which suppresses progress
-messages, and `--show_result n`, which controls whether or
-not "build up-to-date" messages are printed; these messages may be parsed to
-discover which targets were successfully built, and the location of the output
-files they created. Be sure to specify a very large value of _n_ if you rely on
-these messages.
-
-## Troubleshooting performance by profiling
-
-See the [Performance Profiling](/rules/performance#performance-profiling) section.
diff --git a/8.3.1/start/android-app.mdx b/8.3.1/start/android-app.mdx
deleted file mode 100644
index b0e6f1b..0000000
--- a/8.3.1/start/android-app.mdx
+++ /dev/null
@@ -1,391 +0,0 @@
----
-title: 'Bazel Tutorial: Build an Android App'
----
-
-
-**Note:** There are known limitations on using Bazel for building Android apps.
-Visit the GitHub [team-Android hotlist](https://github.com/bazelbuild/bazel/issues?q=is%3Aissue+is%3Aopen+label%3Ateam-Android) to see the list of known issues. While the Bazel team and Open Source Software (OSS) contributors work actively to address known issues, users should be aware that Android Studio does not officially support Bazel projects.
-
-This tutorial covers how to build a simple Android app using Bazel.
-
-Bazel supports building Android apps using the
-[Android rules](/reference/be/android).
-
-This tutorial is intended for Windows, macOS, and Linux users and does not
-require experience with Bazel or Android app development. You do not need to
-write any Android code in this tutorial.
-
-## What you'll learn
-
-In this tutorial you learn how to:
-
-* Set up your environment by installing Bazel and Android Studio, and
-  downloading the sample project.
-* Set up a Bazel workspace that contains the source code
-  for the app and a `MODULE.bazel` file that identifies the top level of the
-  workspace directory.
-* Update the `MODULE.bazel` file to contain references to the required
-  external dependencies, like the Android SDK.
-* Create a `BUILD` file.
-* Build the app with Bazel.
-* Deploy and run the app on an Android emulator or physical device.
-
-## Before you begin
-
-### Install Bazel
-
-Before you begin the tutorial, install the following software:
-
-* **Bazel.** To install, follow the [installation instructions](/install).
-* **Android Studio.** To install, follow the steps to [download Android
-  Studio](https://developer.android.com/sdk/index.html).
-  Execute the setup wizard to download the SDK and configure your environment.
-* (Optional) **Git.** Use `git` to download the Android app project.
-
-### Get the sample project
-
-For the sample project, use a basic Android app project in
-[Bazel's examples repository](https://github.com/bazelbuild/examples).
- -This app has a single button that prints a greeting when clicked: - -![Button greeting](/docs/images/android_tutorial_app.png "Tutorial app button greeting") - -**Figure 1.** Android app button greeting. - -Clone the repository with `git` (or [download the ZIP file -directly](https://github.com/bazelbuild/examples/archive/master.zip)): - -```posix-terminal -git clone https://github.com/bazelbuild/examples -``` - -The sample project for this tutorial is in `examples/android/tutorial`. For -the rest of the tutorial, you will be executing commands in this directory. - -### Review the source files - -Take a look at the source files for the app. - -``` -. -├── README.md -└── src - └── main - ├── AndroidManifest.xml - └── java - └── com - └── example - └── bazel - ├── AndroidManifest.xml - ├── Greeter.java - ├── MainActivity.java - └── res - ├── layout - │ └── activity_main.xml - └── values - ├── colors.xml - └── strings.xml -``` - -The key files and directories are: - -| Name | Location | -| ----------------------- | ---------------------------------------------------------------------------------------- | -| Android manifest files | `src/main/AndroidManifest.xml` and `src/main/java/com/example/bazel/AndroidManifest.xml` | -| Android source files | `src/main/java/com/example/bazel/MainActivity.java` and `Greeter.java` | -| Resource file directory | `src/main/java/com/example/bazel/res/` | - - -## Build with Bazel - -### Set up the workspace - -A [workspace](/concepts/build-ref#workspace) is a directory that contains the -source files for one or more software projects, and has a `MODULE.bazel` file at -its root. - -The `MODULE.bazel` file may be empty or may contain references to [external -dependencies](/external/overview) required to build your project. - -First, run the following command to create an empty `MODULE.bazel` file: - -| OS | Command | -| ------------------------ | ----------------------------------- | -| Linux, macOS | `touch MODULE.bazel` | -| Windows (Command Prompt) | `type nul > MODULE.bazel` | -| Windows (PowerShell) | `New-Item MODULE.bazel -ItemType file` | - -### Running Bazel - -You can now check if Bazel is running correctly with the command: - -```posix-terminal -bazel info workspace -``` - -If Bazel prints the path of the current directory, you're good to go! If the -`MODULE.bazel` file does not exist, you may see an error message like: - -``` -ERROR: The 'info' command is only supported from within a workspace. -``` - -### Integrate with the Android SDK - -Bazel needs to run the Android SDK -[build tools](https://developer.android.com/tools/revisions/build-tools.html) -to build the app. This means that you need to add some information to your -`MODULE.bazel` file so that Bazel knows where to find them. - -Add the following line to your `MODULE.bazel` file: - -```python -bazel_dep(name = "rules_android", version = "0.5.1") -``` - -This will use the Android SDK at the path referenced by the `ANDROID_HOME` -environment variable, and automatically detect the highest API level and the -latest version of build tools installed within that location. - -You can set the `ANDROID_HOME` variable to the location of the Android SDK. Find -the path to the installed SDK using Android Studio's [SDK -Manager](https://developer.android.com/studio/intro/update#sdk-manager). 
-Assuming the SDK is installed to default locations, you can use the following
-commands to set the `ANDROID_HOME` variable:
-
-| OS                       | Command                                              |
-| ------------------------ | ---------------------------------------------------- |
-| Linux                    | `export ANDROID_HOME=$HOME/Android/Sdk/`             |
-| macOS                    | `export ANDROID_HOME=$HOME/Library/Android/sdk`      |
-| Windows (Command Prompt) | `set ANDROID_HOME=%LOCALAPPDATA%\Android\Sdk`        |
-| Windows (PowerShell)     | `$env:ANDROID_HOME="$env:LOCALAPPDATA\Android\Sdk"`  |
-
-The above commands set the variable only for the current shell session. To make
-them permanent, run the following commands:
-
-| OS                       | Command                                              |
-| ------------------------ | ---------------------------------------------------- |
-| Linux                    | `echo "export ANDROID_HOME=$HOME/Android/Sdk/" >> ~/.bashrc` |
-| macOS                    | `echo "export ANDROID_HOME=$HOME/Library/Android/sdk" >> ~/.bashrc` |
-| Windows (Command Prompt) | `setx ANDROID_HOME "%LOCALAPPDATA%\Android\Sdk"` |
-| Windows (PowerShell)     | `[System.Environment]::SetEnvironmentVariable('ANDROID_HOME', "$env:LOCALAPPDATA\Android\Sdk", [System.EnvironmentVariableTarget]::User)` |
-
-
-**Optional:** If you want to compile native code into your Android app, you
-also need to download the [Android
-NDK](https://developer.android.com/ndk/downloads/index.html)
-and use `rules_android_ndk` by adding the following line to your `MODULE.bazel` file:
-
-```python
-bazel_dep(name = "rules_android_ndk", version = "0.1.2")
-```
-
-
-For more information, read [Using the Android Native Development Kit with
-Bazel](/docs/android-ndk).
-
-It's not necessary to set the API levels to the same value for the SDK and NDK.
-[This page](https://developer.android.com/ndk/guides/stable_apis.html)
-contains a map from Android releases to NDK-supported API levels.
-
-### Create a BUILD file
-
-A [`BUILD` file](/concepts/build-files) describes the relationship
-between a set of build outputs, like compiled Android resources from `aapt` or
-class files from `javac`, and their dependencies. These dependencies may be
-source files (Java, C++) in your workspace or other build outputs. `BUILD` files
-are written in a language called **Starlark**.
-
-`BUILD` files are part of a concept in Bazel known as the *package hierarchy*.
-The package hierarchy is a logical structure that overlays the directory
-structure in your workspace. Each [package](/concepts/build-ref#packages) is a
-directory (and its subdirectories) that contains a related set of source files
-and a `BUILD` file. The package also includes any subdirectories, excluding
-those that contain their own `BUILD` file. The *package name* is the path to the
-`BUILD` file relative to the `MODULE.bazel` file.
-
-Note that Bazel's package hierarchy is conceptually different from the Java
-package hierarchy of your Android app directory where the `BUILD` file is
-located, although the directories may be organized identically.
-
-For the simple Android app in this tutorial, the source files in `src/main/`
-comprise a single Bazel package. A more complex project may have many nested
-packages.
-
-#### Add an android_library rule
-
-A `BUILD` file contains several different types of declarations for Bazel. The
-most important type is the
-[build rule](/concepts/build-files#types-of-build-rules), which tells
-Bazel how to build an intermediate or final software output from a set of source
-files or other dependencies.
-Bazel provides two build rules,
-[`android_library`](/reference/be/android#android_library) and
-[`android_binary`](/reference/be/android#android_binary), that you can use to
-build an Android app.
-
-For this tutorial, you'll first use the
-`android_library` rule to tell Bazel to build an [Android library
-module](http://developer.android.com/tools/projects/index.html#LibraryProjects)
-from the app source code and resource files. You'll then use the
-`android_binary` rule to tell Bazel how to build the Android application package.
-
-Create a new `BUILD` file in the `src/main/java/com/example/bazel` directory,
-and declare a new `android_library` target:
-
-`src/main/java/com/example/bazel/BUILD`:
-
-```python
-package(
-    default_visibility = ["//src:__subpackages__"],
-)
-
-android_library(
-    name = "greeter_activity",
-    srcs = [
-        "Greeter.java",
-        "MainActivity.java",
-    ],
-    manifest = "AndroidManifest.xml",
-    resource_files = glob(["res/**"]),
-)
-```
-
-The `android_library` build rule contains a set of attributes that specify the
-information that Bazel needs to build a library module from the source files.
-Note also that the name of the rule is `greeter_activity`. You'll reference the
-rule using this name as a dependency in the `android_binary` rule.
-
-#### Add an android_binary rule
-
-The [`android_binary`](/reference/be/android#android_binary) rule builds
-the Android application package (`.apk` file) for your app.
-
-Create a new `BUILD` file in the `src/main/` directory,
-and declare a new `android_binary` target:
-
-`src/main/BUILD`:
-
-```python
-android_binary(
-    name = "app",
-    manifest = "AndroidManifest.xml",
-    deps = ["//src/main/java/com/example/bazel:greeter_activity"],
-)
-```
-
-Here, the `deps` attribute references the output of the `greeter_activity` rule
-you added to the `BUILD` file above. This means that when Bazel builds the
-output of this rule, it checks first to see if the output of the
-`greeter_activity` library rule has been built and is up-to-date. If not, Bazel
-builds it and then uses that output to build the application package file.
-
-Now, save and close the file.
-
-### Build the app
-
-Try building the app! Run the following command to build the
-`android_binary` target:
-
-```posix-terminal
-bazel build //src/main:app
-```
-
-The [`build`](/docs/user-manual#build) subcommand instructs Bazel to build the
-target that follows. The target is specified as the name of a build rule inside
-a `BUILD` file, along with the package path relative to your workspace
-directory. For this example, the target is `app` and the package path is
-`//src/main/`.
-
-Note that you can sometimes omit the package path or target name, depending on
-your current working directory at the command line and the name of the target.
-For more details about target labels and paths, see [Labels](/concepts/labels).
-
-Bazel will start to build the sample app. During the build process, its output
-will appear similar to the following:
-
-```bash
-INFO: Analysed target //src/main:app (0 packages loaded, 0 targets configured).
-INFO: Found 1 target...
-Target //src/main:app up-to-date:
-  bazel-bin/src/main/app_deploy.jar
-  bazel-bin/src/main/app_unsigned.apk
-  bazel-bin/src/main/app.apk
-```
-
-#### Locate the build outputs
-
-Bazel puts the outputs of both intermediate and final build operations in a set
-of per-user, per-workspace output directories.
These directories are symlinked -from the following locations at the top-level of the project directory, where -the `MODULE.bazel` file is: - -* `bazel-bin` stores binary executables and other runnable build outputs -* `bazel-genfiles` stores intermediary source files that are generated by - Bazel rules -* `bazel-out` stores other types of build outputs - -Bazel stores the Android `.apk` file generated using the `android_binary` rule -in the `bazel-bin/src/main` directory, where the subdirectory name `src/main` is -derived from the name of the Bazel package. - -At a command prompt, list the contents of this directory and find the `app.apk` -file: - -| OS | Command | -| ------------------------ | ------------------------ | -| Linux, macOS | `ls bazel-bin/src/main` | -| Windows (Command Prompt) | `dir bazel-bin\src\main` | -| Windows (PowerShell) | `ls bazel-bin\src\main` | - - -### Run the app - -You can now deploy the app to a connected Android device or emulator from the -command line using the [`bazel -mobile-install`](/docs/user-manual#mobile-install) command. This command uses -the Android Debug Bridge (`adb`) to communicate with the device. You must set up -your device to use `adb` following the instructions in [Android Debug -Bridge](http://developer.android.com/tools/help/adb.html) before deployment. You -can also choose to install the app on the Android emulator included in Android -Studio. Make sure the emulator is running before executing the command below. - -Enter the following: - -```posix-terminal -bazel mobile-install //src/main:app -``` - -Next, find and launch the "Bazel Tutorial App": - -![Bazel tutorial app](/docs/images/android_tutorial_before.png "Bazel tutorial app") - -**Figure 2.** Bazel tutorial app. - -**Congratulations! You have just installed your first Bazel-built Android app.** - -Note that the `mobile-install` subcommand also supports the -[`--incremental`](/docs/user-manual#mobile-install) flag that can be used to -deploy only those parts of the app that have changed since the last deployment. - -It also supports the `--start_app` flag to start the app immediately upon -installing it. - -## Further reading - -For more details, see these pages: - -* Open issues on [GitHub](https://github.com/bazelbuild/bazel/issues) -* More information on [mobile-install](/docs/mobile-install) -* Integrate external dependencies like AppCompat, Guava and JUnit from Maven - repositories using [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external) -* Run Robolectric tests with the [robolectric-bazel](https://github.com/robolectric/robolectric-bazel) - integration. -* Testing your app with [Android instrumentation tests](/docs/android-instrumentation-test) -* Integrating C and C++ code into your Android app with the [NDK](/docs/android-ndk) -* See more Bazel example projects of: - * [a Kotlin app](https://github.com/bazelbuild/rules_jvm_external/tree/master/examples/android_kotlin_app) - * [Robolectric testing](https://github.com/bazelbuild/rules_jvm_external/tree/master/examples/android_local_test) - * [Espresso testing](https://github.com/bazelbuild/rules_jvm_external/tree/master/examples/android_instrumentation_test) - -Happy building! diff --git a/8.3.1/start/cpp.mdx b/8.3.1/start/cpp.mdx deleted file mode 100644 index adb7c71..0000000 --- a/8.3.1/start/cpp.mdx +++ /dev/null @@ -1,411 +0,0 @@ ---- -title: 'Bazel Tutorial: Build a C++ Project' ---- - - - -## Introduction - -New to Bazel? You're in the right place. 
-Follow this First Build tutorial for a
-simplified introduction to using Bazel. This tutorial defines key terms as they
-are used in Bazel's context and walks you through the basics of the Bazel
-workflow. Starting with the tools you need, you will build and run three
-projects with increasing complexity and learn how and why they get more complex.
-
-While Bazel is a [build system](https://bazel.build/basics/build-systems) that
-supports multi-language builds, this tutorial uses a C++ project as an example
-and provides the general guidelines and flow that apply to most languages.
-
-Estimated completion time: 30 minutes.
-
-### Prerequisites
-
-Start by [installing Bazel](https://bazel.build/install), if you haven't
-already. This tutorial uses Git for source control, so for best results [install
-Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) as well.
-
-Next, retrieve the sample project from Bazel's GitHub repository by running the
-following in your command-line tool of choice:
-
-```posix-terminal
-git clone https://github.com/bazelbuild/examples
-```
-
-The sample project for this tutorial is in the `examples/cpp-tutorial`
-directory.
-
-Take a look at how it's structured:
-
-```none
-examples
-└── cpp-tutorial
-    ├──stage1
-    │  ├── main
-    │  │   ├── BUILD
-    │  │   └── hello-world.cc
-    │  └── MODULE.bazel
-    ├──stage2
-    │  ├── main
-    │  │   ├── BUILD
-    │  │   ├── hello-world.cc
-    │  │   ├── hello-greet.cc
-    │  │   └── hello-greet.h
-    │  └── MODULE.bazel
-    └──stage3
-       ├── main
-       │   ├── BUILD
-       │   ├── hello-world.cc
-       │   ├── hello-greet.cc
-       │   └── hello-greet.h
-       ├── lib
-       │   ├── BUILD
-       │   ├── hello-time.cc
-       │   └── hello-time.h
-       └── MODULE.bazel
-```
-
-There are three sets of files, each set representing a stage in this tutorial.
-In the first stage, you will build a single
-[target](https://bazel.build/reference/glossary#target) residing in a single
-[package](https://bazel.build/reference/glossary#package). In the second stage,
-you will build both a binary and a library from a single package. In the third
-and final stage, you will build a project with multiple packages and build it
-with multiple targets.
-
-### Summary: Introduction
-
-By installing Bazel (and Git) and cloning the repository for this tutorial, you
-have laid the foundation for your first build with Bazel. Continue to the next
-section to define some terms and set up your
-[workspace](https://bazel.build/reference/glossary#workspace).
-
-## Getting started
-
-Before you can build a project, you need to set up its workspace. A workspace
-is a directory that holds your project's source files and Bazel's build outputs.
-It also contains these significant files:
-
-* The `MODULE.bazel` file, which identifies the directory and its contents as
-  a Bazel workspace and lives at the root of the project's directory
-  structure. It's also where you specify your external dependencies.
-* One or more [`BUILD`
-  files](https://bazel.build/reference/glossary#build-file), which tell Bazel
-  how to build different parts of the project. A directory within the
-  workspace that contains a `BUILD` file is a
-  [package](https://bazel.build/reference/glossary#package). (More on packages
-  later in this tutorial.)
-
-In future projects, to designate a directory as a Bazel workspace, create an
-empty file named `MODULE.bazel` in that directory. For the purposes of this
-tutorial, a `MODULE.bazel` file is already present in each stage.
-
-### Understand the BUILD file
-
-A `BUILD` file contains several different types of instructions for Bazel. Each
-`BUILD` file requires at least one
-[rule](https://bazel.build/reference/glossary#rule) as a set of instructions,
-which tells Bazel how to build the outputs you want, such as executable binaries
-or libraries. Each instance of a build rule in the `BUILD` file is called a
-[target](https://bazel.build/reference/glossary#target) and points to a specific
-set of source files and
-[dependencies](https://bazel.build/reference/glossary#dependency). A target can
-also point to other targets.
-
-Take a look at the `BUILD` file in the `cpp-tutorial/stage1/main` directory:
-
-```bazel
-cc_binary(
-    name = "hello-world",
-    srcs = ["hello-world.cc"],
-)
-```
-
-In our example, the `hello-world` target instantiates Bazel's built-in
-[`cc_binary` rule](https://bazel.build/reference/be/c-cpp#cc_binary). The rule
-tells Bazel to build a self-contained executable binary from the
-`hello-world.cc` source file with no dependencies.
-
-### Summary: getting started
-
-Now you are familiar with some key terms, and what they mean in the context of
-this project and Bazel in general. In the next section, you will build and test
-Stage 1 of the project.
-
-## Stage 1: single target, single package
-
-It's time to build the first part of the project. For a visual reference, the
-structure of the Stage 1 section of the project is:
-
-```none
-examples
-└── cpp-tutorial
-    └──stage1
-       ├── main
-       │   ├── BUILD
-       │   └── hello-world.cc
-       └── MODULE.bazel
-```
-
-Run the following to move to the `cpp-tutorial/stage1` directory:
-
-```posix-terminal
-cd cpp-tutorial/stage1
-```
-
-Next, run:
-
-```posix-terminal
-bazel build //main:hello-world
-```
-
-In the target label, the `//main:` part is the location of the `BUILD` file
-relative to the root of the workspace, and `hello-world` is the target name in
-the `BUILD` file.
-
-Bazel produces something that looks like this:
-
-```none
-INFO: Found 1 target...
-Target //main:hello-world up-to-date:
-  bazel-bin/main/hello-world
-INFO: Elapsed time: 2.267s, Critical Path: 0.25s
-```
-
-You just built your first Bazel target. Bazel places build outputs in the
-`bazel-bin` directory at the root of the workspace.
-
-Now test your freshly built binary:
-
-```posix-terminal
-bazel-bin/main/hello-world
-```
-
-This results in a printed "`Hello world`" message.
-
-Here's the dependency graph of Stage 1:
-
-![Dependency graph for hello-world displays a single target with a single source
-file.](/docs/images/cpp-tutorial-stage1.png "Dependency graph for hello-world
-displays a single target with a single source file.")
-
-### Summary: stage 1
-
-Now that you have completed your first build, you have a basic idea of how a
-build is structured. In the next stage, you will add complexity by adding
-another target.
-
-## Stage 2: multiple build targets
-
-While a single target is sufficient for small projects, you may want to split
-larger projects into multiple targets and packages. This allows for fast
-incremental builds – that is, Bazel only rebuilds what's changed – and speeds up
-your builds by building multiple parts of a project at once. This stage of the
-tutorial adds a target, and the next adds a package.
-
-This is the directory you are working with for Stage 2:
-
-```none
-    ├──stage2
-    │  ├── main
-    │  │   ├── BUILD
-    │  │   ├── hello-world.cc
-    │  │   ├── hello-greet.cc
-    │  │   └── hello-greet.h
-    │  └── MODULE.bazel
-```
-
-Take a look at the `BUILD` file in the `cpp-tutorial/stage2/main` directory:
-
-```bazel
-cc_library(
-    name = "hello-greet",
-    srcs = ["hello-greet.cc"],
-    hdrs = ["hello-greet.h"],
-)
-
-cc_binary(
-    name = "hello-world",
-    srcs = ["hello-world.cc"],
-    deps = [
-        ":hello-greet",
-    ],
-)
-```
-
-With this `BUILD` file, Bazel first builds the `hello-greet` library (using
-Bazel's built-in [`cc_library`
-rule](https://bazel.build/reference/be/c-cpp#cc_library)), then the
-`hello-world` binary. The `deps` attribute in the `hello-world` target tells
-Bazel that the `hello-greet` library is required to build the `hello-world`
-binary.
-
-Before you can build this new version of the project, you need to change
-directories, switching to the `cpp-tutorial/stage2` directory by running:
-
-```posix-terminal
-cd ../stage2
-```
-
-Now you can build the new binary using the following familiar command:
-
-```posix-terminal
-bazel build //main:hello-world
-```
-
-Once again, Bazel produces something that looks like this:
-
-```none
-INFO: Found 1 target...
-Target //main:hello-world up-to-date:
-  bazel-bin/main/hello-world
-INFO: Elapsed time: 2.399s, Critical Path: 0.30s
-```
-
-Now you can test your freshly built binary, which returns another "`Hello
-world`":
-
-```posix-terminal
-bazel-bin/main/hello-world
-```
-
-If you now modify `hello-greet.cc` and rebuild the project, Bazel only
-recompiles that file.
-
-Looking at the dependency graph, you can see that `hello-world` depends on an
-extra input named `hello-greet`:
-
-![Dependency graph for `hello-world` displays dependency changes after
-modification to the file.](/docs/images/cpp-tutorial-stage2.png "Dependency
-graph for `hello-world` displays dependency changes after modification to the
-file.")
-
-### Summary: stage 2
-
-You've now built the project with two targets. The `hello-world` target builds
-one source file and depends on one other target (`//main:hello-greet`), which
-builds two additional source files. In the next section, take it a step further
-and add another package.
-
-## Stage 3: multiple packages
-
-This next stage adds another layer of complication and builds a project with
-multiple packages. Take a look at the structure and contents of the
-`cpp-tutorial/stage3` directory:
-
-```none
-└──stage3
-   ├── main
-   │   ├── BUILD
-   │   ├── hello-world.cc
-   │   ├── hello-greet.cc
-   │   └── hello-greet.h
-   ├── lib
-   │   ├── BUILD
-   │   ├── hello-time.cc
-   │   └── hello-time.h
-   └── MODULE.bazel
-```
-
-You can see that now there are two sub-directories, and each contains a `BUILD`
-file. Therefore, to Bazel, the workspace now contains two packages: `lib` and
-`main`.
-
-Take a look at the `lib/BUILD` file:
-
-```bazel
-cc_library(
-    name = "hello-time",
-    srcs = ["hello-time.cc"],
-    hdrs = ["hello-time.h"],
-    visibility = ["//main:__pkg__"],
-)
-```
-
-And at the `main/BUILD` file:
-
-```bazel
-cc_library(
-    name = "hello-greet",
-    srcs = ["hello-greet.cc"],
-    hdrs = ["hello-greet.h"],
-)
-
-cc_binary(
-    name = "hello-world",
-    srcs = ["hello-world.cc"],
-    deps = [
-        ":hello-greet",
-        "//lib:hello-time",
-    ],
-)
-```
-
-The `hello-world` target in the main package depends on the `hello-time` target
-in the `lib` package (hence the target label `//lib:hello-time`); Bazel knows
-this through the `deps` attribute.
You can see this reflected in the dependency -graph: - -![Dependency graph for `hello-world` displays how the target in the main package -depends on the target in the `lib` -package.](/docs/images/cpp-tutorial-stage3.png "Dependency graph for -`hello-world` displays how the target in the main package depends on the target -in the `lib` package.") - -For the build to succeed, you make the `//lib:hello-time` target in `lib/BUILD` -explicitly visible to targets in `main/BUILD` using the visibility attribute. -This is because by default targets are only visible to other targets in the same -`BUILD` file. Bazel uses target visibility to prevent issues such as libraries -containing implementation details leaking into public APIs. - -Now build this final version of the project. Switch to the `cpp-tutorial/stage3` -directory by running: - -```posix-terminal -cd ../stage3 -``` - -Once again, run the following command: - -```posix-terminal -bazel build //main:hello-world -``` - -Bazel produces something that looks like this: - -```none -INFO: Found 1 target... -Target //main:hello-world up-to-date: - bazel-bin/main/hello-world -INFO: Elapsed time: 0.167s, Critical Path: 0.00s -``` - -Now test the last binary of this tutorial for a final `Hello world` message: - -```posix-terminal -bazel-bin/main/hello-world -``` - -### Summary: stage 3 - -You've now built the project as two packages with three targets and understand -the dependencies between them, which equips you to go forth and build future -projects with Bazel. In the next section, take a look at how to continue your -Bazel journey. - -## Next steps - -You've now completed your first basic build with Bazel, but this is just the -start. Here are some more resources to continue learning with Bazel: - -* To keep focusing on C++, read about common [C++ build use - cases](https://bazel.build/tutorials/cpp-use-cases). -* To get started with building other applications with Bazel, see the - tutorials for [Java](https://bazel.build/start/java), [Android - application](https://bazel.build/start/android-app), or [iOS - application](https://bazel.build/start/ios-app). -* To learn more about working with local and remote repositories, read about - [external dependencies](https://bazel.build/docs/external). -* To learn more about Bazel's other rules, see this [reference - guide](https://bazel.build/rules). - -Happy building! diff --git a/8.3.1/start/ios-app.mdx b/8.3.1/start/ios-app.mdx deleted file mode 100644 index 0b860ab..0000000 --- a/8.3.1/start/ios-app.mdx +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: 'Bazel Tutorial: Build an iOS App' ---- - - -This tutorial has been moved into the [bazelbuild/rules_apple](https://github.com/bazelbuild/rules_apple/blob/master/doc/tutorials/ios-app.md) repository. diff --git a/8.3.1/start/java.mdx b/8.3.1/start/java.mdx deleted file mode 100644 index b892917..0000000 --- a/8.3.1/start/java.mdx +++ /dev/null @@ -1,436 +0,0 @@ ---- -title: 'Bazel Tutorial: Build a Java Project' ---- - - - -This tutorial covers the basics of building Java applications with -Bazel. You will set up your workspace and build a simple Java project that -illustrates key Bazel concepts, such as targets and `BUILD` files. - -Estimated completion time: 30 minutes. 
- -## What you'll learn - -In this tutorial you learn how to: - -* Build a target -* Visualize the project's dependencies -* Split the project into multiple targets and packages -* Control target visibility across packages -* Reference targets through labels -* Deploy a target - -## Before you begin - -### Install Bazel - -To prepare for the tutorial, first [Install Bazel](/install) if -you don't have it installed already. - -### Install the JDK - -1. Install the JDK (version 11 is preferred, but versions 8 through 15 are supported). - -2. Set the JAVA\_HOME environment variable to point to the JDK. - * On Linux/macOS: - - export JAVA_HOME="$(dirname $(dirname $(realpath $(which javac))))" - * On Windows: - 1. Open Control Panel. - 2. Go to "System and Security" > "System" > "Advanced System Settings" > "Advanced" tab > "Environment Variables...". - 3. Under the "User variables" list (the one on the top), click "New...". - 4. In the "Variable name" field, enter `JAVA_HOME`. - 5. Click "Browse Directory...". - 6. Navigate to the JDK directory (for example `C:\Program Files\Java\jdk1.8.0_152`). - 7. Click "OK" on all dialog windows. - -### Get the sample project - -Retrieve the sample project from Bazel's GitHub repository: - -```posix-terminal -git clone https://github.com/bazelbuild/examples -``` - -The sample project for this tutorial is in the `examples/java-tutorial` -directory and is structured as follows: - -``` -java-tutorial -├── BUILD -├── src -│ └── main -│ └── java -│ └── com -│ └── example -│ ├── cmdline -│ │ ├── BUILD -│ │ └── Runner.java -│ ├── Greeting.java -│ └── ProjectRunner.java -└── MODULE.bazel -``` - -## Build with Bazel - -### Set up the workspace - -Before you can build a project, you need to set up its workspace. A workspace is -a directory that holds your project's source files and Bazel's build outputs. It -also contains files that Bazel recognizes as special: - -* The `MODULE.bazel` file, which identifies the directory and its contents as a - Bazel workspace and lives at the root of the project's directory structure, - -* One or more `BUILD` files, which tell Bazel how to build different parts of - the project. (A directory within the workspace that contains a `BUILD` file - is a *package*. You will learn about packages later in this tutorial.) - -To designate a directory as a Bazel workspace, create an empty file named -`MODULE.bazel` in that directory. - -When Bazel builds the project, all inputs and dependencies must be in the same -workspace. Files residing in different workspaces are independent of one -another unless linked, which is beyond the scope of this tutorial. - -### Understand the BUILD file - -A `BUILD` file contains several different types of instructions for Bazel. -The most important type is the *build rule*, which tells Bazel how to build the -desired outputs, such as executable binaries or libraries. Each instance -of a build rule in the `BUILD` file is called a *target* and points to a -specific set of source files and dependencies. A target can also point to other -targets. - -Take a look at the `java-tutorial/BUILD` file: - -```python -java_binary( - name = "ProjectRunner", - srcs = glob(["src/main/java/com/example/*.java"]), -) -``` - -In our example, the `ProjectRunner` target instantiates Bazel's built-in -[`java_binary` rule](/reference/be/java#java_binary). The rule tells Bazel to -build a `.jar` file and a wrapper shell script (both named after the target).
- -The attributes in the target explicitly state its dependencies and options. -While the `name` attribute is mandatory, many are optional. For example, in the -`ProjectRunner` rule target, `name` is the name of the target and `srcs` specifies -the source files that Bazel uses to build the target; `main_class`, which you -will add later in this tutorial, specifies the class that contains the main -method. (You may have noticed that our example -uses [glob](/reference/be/functions#glob) to pass a set of source files to Bazel -instead of listing them one by one.) - -### Build the project - -To build your sample project, navigate to the `java-tutorial` directory -and run: - -```posix-terminal -bazel build //:ProjectRunner -``` -In the target label, the `//` part is the location of the `BUILD` file -relative to the root of the workspace (in this case, the root itself), -and `ProjectRunner` is the target name in the `BUILD` file. (You will -learn about target labels in more detail at the end of this tutorial.) - -Bazel produces output similar to the following: - -```bash - INFO: Found 1 target... - Target //:ProjectRunner up-to-date: - bazel-bin/ProjectRunner.jar - bazel-bin/ProjectRunner - INFO: Elapsed time: 1.021s, Critical Path: 0.83s -``` - -Congratulations, you just built your first Bazel target! Bazel places build -outputs in the `bazel-bin` directory at the root of the workspace. Browse -through its contents to get an idea of Bazel's output structure. - -Now test your freshly built binary: - -```posix-terminal -bazel-bin/ProjectRunner -``` - -### Review the dependency graph - -Bazel requires build dependencies to be explicitly declared in `BUILD` files. -Bazel uses those statements to create the project's dependency graph, which -enables accurate incremental builds. - -To visualize the sample project's dependencies, you can generate a text -representation of the dependency graph by running this command at the -workspace root: - -```posix-terminal -bazel query --notool_deps --noimplicit_deps "deps(//:ProjectRunner)" --output graph -``` - -The above command tells Bazel to look for all dependencies for the target -`//:ProjectRunner` (excluding host and implicit dependencies) and format the -output as a graph. - -Then, paste the text into [GraphViz](http://www.webgraphviz.com/). - -As you can see, the project has a single target that builds two source files with -no additional dependencies: - -![Dependency graph of the target 'ProjectRunner'](/docs/images/tutorial_java_01.svg) - -Now that you have set up your workspace, built your project, and examined its -dependencies, you can add some complexity. - -## Refine your Bazel build - -While a single target is sufficient for small projects, you may want to split -larger projects into multiple targets and packages to allow for fast incremental -builds (that is, only rebuild what's changed) and to speed up your builds by -building multiple parts of a project at once. - -### Specify multiple build targets - -You can split the sample project build into two targets. Replace the contents of -the `java-tutorial/BUILD` file with the following: - -```python -java_binary( - name = "ProjectRunner", - srcs = ["src/main/java/com/example/ProjectRunner.java"], - main_class = "com.example.ProjectRunner", - deps = [":greeter"], -) - -java_library( - name = "greeter", - srcs = ["src/main/java/com/example/Greeting.java"], -) -``` - -With this configuration, Bazel first builds the `greeter` library, then the -`ProjectRunner` binary.
The `deps` attribute in `java_binary` tells Bazel that -the `greeter` library is required to build the `ProjectRunner` binary. - -To build this new version of the project, run the following command: - -```posix-terminal -bazel build //:ProjectRunner -``` - -Bazel produces output similar to the following: - -``` -INFO: Found 1 target... -Target //:ProjectRunner up-to-date: - bazel-bin/ProjectRunner.jar - bazel-bin/ProjectRunner -INFO: Elapsed time: 2.454s, Critical Path: 1.58s -``` - -Now test your freshly built binary: - -```posix-terminal -bazel-bin/ProjectRunner -``` - -If you now modify `ProjectRunner.java` and rebuild the project, Bazel only -recompiles that file. - -Looking at the dependency graph, you can see that `ProjectRunner` depends on the -same inputs as it did before, but the structure of the build is different: - -![Dependency graph of the target 'ProjectRunner' after adding a dependency]( -/docs/images/tutorial_java_02.svg) - -You've now built the project with two targets. The `ProjectRunner` target builds -one source file and depends on one other target (`:greeter`), which builds -one additional source file. - -### Use multiple packages - -Let’s now split the project into multiple packages. If you take a look at the -`src/main/java/com/example/cmdline` directory, you can see that it also contains -a `BUILD` file, plus some source files. Therefore, to Bazel, the workspace now -contains two packages, `//src/main/java/com/example/cmdline` and `//` (since -there is a `BUILD` file at the root of the workspace). - -Take a look at the `src/main/java/com/example/cmdline/BUILD` file: - -```python -java_binary( - name = "runner", - srcs = ["Runner.java"], - main_class = "com.example.cmdline.Runner", - deps = ["//:greeter"], -) -``` - -The `runner` target depends on the `greeter` target in the `//` package (hence -the target label `//:greeter`) - Bazel knows this through the `deps` attribute. -Take a look at the dependency graph: - -![Dependency graph of the target 'runner'](/docs/images/tutorial_java_03.svg) - -However, for the build to succeed, you must explicitly make the `greeter` target -in the `//` package visible to the `runner` target in -`//src/main/java/com/example/cmdline/BUILD` using the `visibility` attribute. -This is because by default targets are only visible to other targets in the same -`BUILD` file. (Bazel uses target visibility to prevent issues such as libraries -containing implementation details leaking into public APIs.) - -To do this, add the `visibility` attribute to the `greeter` target in -`java-tutorial/BUILD` as shown below: - -```python -java_library( - name = "greeter", - srcs = ["src/main/java/com/example/Greeting.java"], - visibility = ["//src/main/java/com/example/cmdline:__pkg__"], -) -``` - -Now you can build the new package by running the following command at the root -of the workspace: - -```posix-terminal -bazel build //src/main/java/com/example/cmdline:runner -``` - -Bazel produces output similar to the following: - -``` -INFO: Found 1 target... -Target //src/main/java/com/example/cmdline:runner up-to-date: - bazel-bin/src/main/java/com/example/cmdline/runner.jar - bazel-bin/src/main/java/com/example/cmdline/runner -INFO: Elapsed time: 1.576s, Critical Path: 0.81s -``` - -Now test your freshly built binary: - -```posix-terminal -./bazel-bin/src/main/java/com/example/cmdline/runner -``` - -You've now modified the project to build as two packages, each containing one -target, and understand the dependencies between them.
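- -As a quick check, you can also ask Bazel whether the visibility declaration works as intended. This is a sketch using the `visible` function of [`bazel query`](/query/language); the labels are the ones from this tutorial: - -```posix-terminal -bazel query 'visible(//src/main/java/com/example/cmdline:runner, //:greeter)' -``` - -If the output lists `//:greeter`, the `runner` target is allowed to depend on it; an empty result would point to a visibility error.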
- - -## Use labels to reference targets - -In `BUILD` files and at the command line, Bazel uses target labels to reference -targets - for example, `//:ProjectRunner` or -`//src/main/java/com/example/cmdline:runner`. Their syntax is as follows: - -``` -//path/to/package:target-name -``` - -If the target is a rule target, then `path/to/package` is the path to the -directory containing the `BUILD` file, and `target-name` is what you named the -target in the `BUILD` file (the `name` attribute). If the target is a file -target, then `path/to/package` is the path to the root of the package, and -`target-name` is the name of the target file, including its full path relative -to the root of the package. - -When referencing targets at the repository root, the package path is empty; -just use `//:target-name`. When referencing targets within the same `BUILD` -file, you can even skip the `//` workspace root identifier and just use -`:target-name`. - -For example, for targets in the `java-tutorial/BUILD` file, you did not have to -specify a package path, since the workspace root is itself a package (`//`), and -your two target labels were simply `//:ProjectRunner` and `//:greeter`. - -However, for targets in the `//src/main/java/com/example/cmdline/BUILD` file, you -had to specify the full package path of `//src/main/java/com/example/cmdline` -and your target label was `//src/main/java/com/example/cmdline:runner`. - -## Package a Java target for deployment - -Let’s now package a Java target for deployment by building the binary with all -of its runtime dependencies. This lets you run the binary outside of your -development environment. - -As you remember, the [java_binary](/reference/be/java#java_binary) build rule -produces a `.jar` and a wrapper shell script. Take a look at the contents of -`runner.jar` using this command: - -```posix-terminal -jar tf bazel-bin/src/main/java/com/example/cmdline/runner.jar -``` - -The contents are: - -``` -META-INF/ -META-INF/MANIFEST.MF -com/ -com/example/ -com/example/cmdline/ -com/example/cmdline/Runner.class -``` -As you can see, `runner.jar` contains `Runner.class`, but not its dependency, -`Greeting.class`. The `runner` script that Bazel generates adds `greeter.jar` -to the classpath, so if you leave it like this, it will run locally, but it -won't run standalone on another machine. Fortunately, the `java_binary` rule -allows you to build a self-contained, deployable binary. To build it, append -`_deploy.jar` to the target name: - -```posix-terminal -bazel build //src/main/java/com/example/cmdline:runner_deploy.jar -``` - -Bazel produces output similar to the following: - -``` -INFO: Found 1 target... -Target //src/main/java/com/example/cmdline:runner_deploy.jar up-to-date: - bazel-bin/src/main/java/com/example/cmdline/runner_deploy.jar -INFO: Elapsed time: 1.700s, Critical Path: 0.23s -``` -You have just built `runner_deploy.jar`, which you can run standalone away from -your development environment since it contains the required runtime -dependencies.
Take a look at the contents of this standalone JAR using the -same command as before: - -```posix-terminal -jar tf bazel-bin/src/main/java/com/example/cmdline/runner_deploy.jar -``` - -The contents include all of the necessary classes to run: - -``` -META-INF/ -META-INF/MANIFEST.MF -build-data.properties -com/ -com/example/ -com/example/cmdline/ -com/example/cmdline/Runner.class -com/example/Greeting.class -``` - -## Further reading - -For more details, see: - -* [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external) for - rules to manage transitive Maven dependencies. - -* [External Dependencies](/docs/external) to learn more about working with - local and remote repositories. - -* The [other rules](/rules) to learn more about Bazel. - -* The [C++ build tutorial](/start/cpp) to get started with building - C++ projects with Bazel. - -* The [Android application tutorial](/start/android-app) and - [iOS application tutorial](/start/ios-app) to get started with - building mobile applications for Android and iOS with Bazel. - -Happy building! diff --git a/8.3.1/tutorials/cpp-dependency.mdx b/8.3.1/tutorials/cpp-dependency.mdx deleted file mode 100644 index 194cc73..0000000 --- a/8.3.1/tutorials/cpp-dependency.mdx +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: 'Review the dependency graph' ---- - - - -A successful build has all of its dependencies explicitly stated in the `BUILD` -file. Bazel uses those statements to create the project's dependency graph, -which enables accurate incremental builds. - -To visualize the sample project's dependencies, you can generate a text -representation of the dependency graph by running this command at the -workspace root: - -``` -bazel query --notool_deps --noimplicit_deps "deps(//main:hello-world)" \ - --output graph -``` - -The above command tells Bazel to look for all dependencies for the target -`//main:hello-world` (excluding host and implicit dependencies) and format the -output as a graph. - -Then, paste the text into [GraphViz](http://www.webgraphviz.com/). - -On Ubuntu, you can view the graph locally by installing GraphViz and the xdot -Dot Viewer: - -``` -sudo apt update && sudo apt install graphviz xdot -``` - -Then you can generate and view the graph by piping the text output above -straight to xdot: - -``` -xdot <(bazel query --notool_deps --noimplicit_deps "deps(//main:hello-world)" \ - --output graph) -``` - -As you can see, the first stage of the sample project has a single target -that builds a single source file with no additional dependencies: - -![Dependency graph for 'hello-world'](/docs/images/cpp-tutorial-stage1.png "Dependency graph") - -**Figure 1.** Dependency graph for `hello-world` displays a single target with a single -source file. - -Now that you have set up your workspace, built your project, and examined its -dependencies, you can add some complexity. diff --git a/8.3.1/tutorials/cpp-labels.mdx b/8.3.1/tutorials/cpp-labels.mdx deleted file mode 100644 index 78d0dbc..0000000 --- a/8.3.1/tutorials/cpp-labels.mdx +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: 'Use labels to reference targets' ---- - - - -In `BUILD` files and at the command line, Bazel uses *labels* to reference -targets - for example, `//main:hello-world` or `//lib:hello-time`.
Their syntax -is: - -``` -//path/to/package:target-name -``` - -If the target is a rule target, then `path/to/package` is the path from the -workspace root (the directory containing the `MODULE.bazel` file) to the directory -containing the `BUILD` file, and `target-name` is what you named the target -in the `BUILD` file (the `name` attribute). If the target is a file target, -then `path/to/package` is the path to the root of the package, and -`target-name` is the name of the target file, including its full -path relative to the root of the package (the directory containing the -package's `BUILD` file). - -When referencing targets at the repository root, the package path is empty; -just use `//:target-name`. When referencing targets within the same `BUILD` -file, you can even skip the `//` workspace root identifier and just use -`:target-name`. diff --git a/8.3.1/tutorials/cpp-use-cases.mdx b/8.3.1/tutorials/cpp-use-cases.mdx deleted file mode 100644 index 6695cce..0000000 --- a/8.3.1/tutorials/cpp-use-cases.mdx +++ /dev/null @@ -1,180 +0,0 @@ ---- -title: 'Common C++ Build Use Cases' ---- - - - -Here you will find some of the most common use cases for building C++ projects -with Bazel. If you have not done so already, get started with building C++ -projects with Bazel by completing the tutorial -[Introduction to Bazel: Build a C++ Project](/start/cpp). - -For information on `cc_library` and `hdrs` header files, see -[`cc_library`](/reference/be/c-cpp#cc_library). - -## Including multiple files in a target - -You can include multiple files in a single target with -[`glob`](/reference/be/functions#glob). -For example: - -```python -cc_library( - name = "build-all-the-files", - srcs = glob(["*.cc"]), - hdrs = glob(["*.h"]), -) -``` - -With this target, Bazel will build all the `.cc` and `.h` files it finds in the -same directory as the `BUILD` file that contains this target (excluding -subdirectories). - -## Using transitive includes - -If a file includes a header, then any rule with that file as a source (that is, -having that file in the `srcs`, `hdrs`, or `textual_hdrs` attribute) should -depend on the included header's library rule. Conversely, only direct -dependencies need to be specified as dependencies. For example, suppose -`sandwich.h` includes `bread.h` and `bread.h` includes `flour.h`. `sandwich.h` -doesn't include `flour.h` (who wants flour in their sandwich?), so the `BUILD` -file would look like this: - -```python -cc_library( - name = "sandwich", - srcs = ["sandwich.cc"], - hdrs = ["sandwich.h"], - deps = [":bread"], -) - -cc_library( - name = "bread", - srcs = ["bread.cc"], - hdrs = ["bread.h"], - deps = [":flour"], -) - -cc_library( - name = "flour", - srcs = ["flour.cc"], - hdrs = ["flour.h"], -) -``` - -Here, the `sandwich` library depends on the `bread` library, which depends -on the `flour` library. - -## Adding include paths - -Sometimes you cannot (or do not want to) root include paths at the workspace -root. An existing library might already have an include directory that doesn't -match its path in your workspace. For example, suppose you have the following -directory structure: - -``` -└── my-project - ├── legacy - │   └── some_lib - │   ├── BUILD - │   ├── include - │   │   └── some_lib.h - │   └── some_lib.cc - └── MODULE.bazel -``` - -Bazel will expect `some_lib.h` to be included as -`legacy/some_lib/include/some_lib.h`, but suppose `some_lib.cc` includes -`"some_lib.h"`.
To make that include path valid, -`legacy/some_lib/BUILD` will need to specify that the `some_lib/include` -directory is an include directory: - -```python -cc_library( - name = "some_lib", - srcs = ["some_lib.cc"], - hdrs = ["include/some_lib.h"], - copts = ["-Ilegacy/some_lib/include"], -) -``` - -This is especially useful for external dependencies, as their header files -must otherwise be included with a `/` prefix. - -## Include external libraries - -Suppose you are using [Google Test](https://github.com/google/googletest). -You can add a dependency on it in the `MODULE.bazel` file to -download Google Test and make it available in your repository: - -```python -bazel_dep(name = "googletest", version = "1.15.2") -``` - -## Writing and running C++ tests - -For example, you could create a test `./test/hello-test.cc`, such as: - -```cpp -#include "gtest/gtest.h" -#include "main/hello-greet.h" - -TEST(HelloTest, GetGreet) { - EXPECT_EQ(get_greet("Bazel"), "Hello Bazel"); -} -``` - -Then create a `./test/BUILD` file for your tests: - -```python -cc_test( - name = "hello-test", - srcs = ["hello-test.cc"], - copts = [ - "-Iexternal/gtest/googletest/include", - "-Iexternal/gtest/googletest", - ], - deps = [ - "@googletest//:main", - "//main:hello-greet", - ], -) -``` - -To make `hello-greet` visible to `hello-test`, you must add -`"//test:__pkg__",` to the `visibility` attribute in `./main/BUILD`. - -Now you can use `bazel test` to run the test: - -``` -bazel test test:hello-test -``` - -This produces the following output: - -``` -INFO: Found 1 test target... -Target //test:hello-test up-to-date: - bazel-bin/test/hello-test -INFO: Elapsed time: 4.497s, Critical Path: 2.53s -//test:hello-test PASSED in 0.3s - -Executed 1 out of 1 tests: 1 test passes. -``` - - -## Adding dependencies on precompiled libraries - -If you want to use a library of which you only have a compiled version (for -example, headers and a `.so` file), wrap it in a `cc_library` rule: - -```python -cc_library( - name = "mylib", - srcs = ["mylib.so"], - hdrs = ["mylib.h"], -) -``` - -This way, other C++ targets in your workspace can depend on this rule. diff --git a/8.3.1/versions/index.mdx b/8.3.1/versions/index.mdx deleted file mode 100644 index 4290e57..0000000 --- a/8.3.1/versions/index.mdx +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: 'Documentation Versions' ---- - - - -The default documentation on this website represents the latest version at HEAD. -Each major and minor supported release will have a snapshot of the narrative and -reference documentation that follows the lifecycle of Bazel's version support. - -To see documentation for stable Bazel versions, use the "Versioned docs" -drop-down. - -To see documentation for older Bazel versions prior to Feb 2022, go to -[docs.bazel.build](https://docs.bazel.build/). diff --git a/8.4.2/about/faq.mdx b/8.4.2/about/faq.mdx deleted file mode 100644 index dd5be8a..0000000 --- a/8.4.2/about/faq.mdx +++ /dev/null @@ -1,209 +0,0 @@ ---- -title: 'FAQ' ---- - - - -If you have questions or need support, see [Getting Help](/help). - -## What is Bazel? - -Bazel is a tool that automates software builds and tests. Supported build tasks include running compilers and linkers to produce executable programs and libraries, and assembling deployable packages for Android, iOS and other target environments. Bazel is similar to other tools like Make, Ant, Gradle, Buck, Pants and Maven. - -## What is special about Bazel? - -Bazel was designed to fit the way software is developed at Google.
It has the following features: - -* Multi-language support: Bazel supports [many languages](/reference/be/overview), and can be extended to support arbitrary programming languages. -* High-level build language: Projects are described in the `BUILD` language, a concise text format that describes a project as sets of small interconnected libraries, binaries and tests. In contrast, with tools like Make, you have to describe individual files and compiler invocations. -* Multi-platform support: The same tool and the same `BUILD` files can be used to build software for different architectures, and even different platforms. At Google, we use Bazel to build everything from server applications running on systems in our data centers to client apps running on mobile phones. -* Reproducibility: In `BUILD` files, each library, test and binary must specify its direct dependencies completely. Bazel uses this dependency information to know what must be rebuilt when you make changes to a source file, and which tasks can run in parallel. This means that all builds are incremental and will always produce the same result. -* Scalable: Bazel can handle large builds; at Google, it is common for a server binary to have 100k source files, and builds where no files were changed take about 200ms. - -## Why doesn’t Google use...? - -* Make, Ninja: These tools give very exact control over what commands get invoked to build files, but it’s up to the user to write rules that are correct. - * Users interact with Bazel on a higher level. For example, Bazel has built-in rules for “Java test”, “C++ binary”, and notions such as “target platform” and “host platform”. These rules have been battle-tested to be foolproof. -* Ant and Maven: Ant and Maven are primarily geared toward Java, while Bazel handles multiple languages. Bazel encourages subdividing codebases into smaller reusable units, and can rebuild only the ones that need rebuilding. This speeds up development when working with larger codebases. -* Gradle: Bazel configuration files are much more structured than Gradle’s, letting Bazel understand exactly what each action does. This allows for more parallelism and better reproducibility. -* Pants, Buck: Both tools were created and developed by ex-Googlers: Pants at Twitter and Foursquare, and Buck at Facebook. They have been modeled after Bazel, but their feature sets are different, so they aren’t viable alternatives for us. - -## Where did Bazel come from? - -Bazel is a flavor of the tool that Google uses to build its server software internally. It has expanded to build other software as well, like mobile apps (iOS, Android) that connect to our servers. - -## Did you rewrite your internal tool as open-source? Is it a fork? - -Bazel shares most of its code with the internal tool and its rules are used for millions of builds every day. - -## Why did Google build Bazel? - -A long time ago, Google built its software using large, generated Makefiles. These led to slow and unreliable builds, which began to interfere with our developers’ productivity and the company’s agility. Bazel was a way to solve these problems. - -## Does Bazel require a build cluster? - -Bazel runs build operations locally by default. However, Bazel can also connect to a build cluster for even faster builds and tests. See our documentation on [remote execution and caching](/remote/rbe) and [remote caching](/remote/caching) for further details. - -## How does the Google development process work?
- -For our server code base, we use the following development workflow: - -* All our server code is in a single, gigantic version control system. -* Everybody builds their software with Bazel. -* Different teams own different parts of the source tree, and make their components available as `BUILD` targets. -* Branching is primarily used for managing releases, so everybody develops their software at the head revision. - -Bazel is a cornerstone of this philosophy: since Bazel requires all dependencies to be fully specified, we can predict which programs and tests are affected by a change, and vet them before submission. - -More background on the development process at Google can be found on the [eng tools blog](http://google-engtools.blogspot.com/). - -## Why did you open up Bazel? - -Building software should be fun and easy. Slow and unpredictable builds take the fun out of programming. - -## Why would I want to use Bazel? - -* Bazel may give you faster build times because it can recompile only the files that need to be recompiled. Similarly, it can skip re-running tests that it knows haven’t changed. -* Bazel produces deterministic results. This eliminates skew between incremental and clean builds, laptop and CI system, etc. -* Bazel can build different client and server apps with the same tool from the same workspace. For example, you can change a client/server protocol in a single commit, and test that the updated mobile app works with the updated server, building both with the same tool, reaping all the aforementioned benefits of Bazel. - -## Can I see examples? - -Yes; see a [simple example](https://github.com/bazelbuild/bazel/blob/master/examples/cpp/BUILD) -or read the [Bazel source code](https://github.com/bazelbuild/bazel/blob/master/src/BUILD) for a more complex example. - - -## What is Bazel best at? - -Bazel shines at building and testing projects with the following properties: - -* Projects with a large codebase -* Projects written in (multiple) compiled languages -* Projects that deploy on multiple platforms -* Projects that have extensive tests - -## Where can I run Bazel? - -Bazel runs on Linux, macOS (OS X), and Windows. - -Porting to other UNIX platforms should be relatively easy, as long as a JDK is available for the platform. - -## What should I not use Bazel for? - -* Bazel tries to be smart about caching. This means that it is not good for running build operations whose outputs should not be cached. For example, the following steps should not be run from Bazel: - * A compilation step that fetches data from the internet. - * A test step that connects to the QA instance of your site. - * A deployment step that changes your site’s cloud configuration. -* If your build consists of a few long, sequential steps, Bazel may not be able to help much. You’ll get more speed by breaking long steps into smaller, discrete targets that Bazel can run in parallel. - -## How stable is Bazel’s feature set? - -The core features (C++, Java, and shell rules) have extensive use inside Google, so they are thoroughly tested and have very little churn. Similarly, we test new versions of Bazel across hundreds of thousands of targets every day to find regressions, and we release new versions multiple times every month. - -In short, except for features marked as experimental, Bazel should Just Work. Changes to non-experimental rules will be backward compatible. A more detailed list of feature support statuses can be found in our [support document](/contribute/support). 
- -## How stable is Bazel as a binary? - -Inside Google, we make sure that Bazel crashes are very rare. This should also hold for our open source codebase. - -## How can I start using Bazel? - -See [Getting Started](/start/). - -## Doesn’t Docker solve the reproducibility problems? - -With Docker you can easily create sandboxes with fixed OS releases, for example, Ubuntu 12.04 or Fedora 21. This solves the problem of reproducibility for the system environment – that is, “which version of /usr/bin/c++ do I need?” - -Docker does not address reproducibility with regard to changes in the source code. Running Make with an imperfectly written Makefile inside a Docker container can still yield unpredictable results. - -Inside Google, we check tools into source control for reproducibility. In this way, we can vet changes to tools (“upgrade GCC to 4.6.1”) with the same mechanism as changes to base libraries (“fix bounds check in OpenSSL”). - -## Can I build binaries for deployment on Docker? - -With Bazel, you can build standalone, statically linked binaries in C/C++, and self-contained jar files for Java. These run with few dependencies on normal UNIX systems, and as such should be simple to install inside a Docker container. - -Bazel has conventions for structuring more complex programs, for example, a Java program that consumes a set of data files, or runs another program as subprocess. It is possible to package up such environments as standalone archives, so they can be deployed on different systems, including Docker images. - -## Can I build Docker images with Bazel? - -Yes, you can use our [Docker rules](https://github.com/bazelbuild/rules_docker) to build reproducible Docker images. - -## Will Bazel make my builds reproducible automatically? - -For Java and C++ binaries, yes, assuming you do not change the toolchain. If you have build steps that involve custom recipes (for example, executing binaries through a shell script inside a rule), you will need to take some extra care: - -* Do not use dependencies that were not declared. Sandboxed execution (`--spawn_strategy=sandboxed`, only on Linux) can help find undeclared dependencies. -* Avoid storing timestamps and user-IDs in generated files. ZIP files and other archives are especially prone to this. -* Avoid connecting to the network. Sandboxed execution can help here too. -* Avoid processes that use random numbers; in particular, dictionary traversal is randomized in many programming languages. - -## Do you have binary releases? - -Yes, you can find the latest [release binaries](https://github.com/bazelbuild/bazel/releases/latest) and review our [release policy](/release/). - -## I use Eclipse/IntelliJ/Xcode. How does Bazel interoperate with IDEs? - -For IntelliJ, check out the [IntelliJ with Bazel plugin](https://ij.bazel.build/). - -For Xcode, check out [Tulsi](http://tulsi.bazel.build/). - -For Eclipse, check out the [E4B plugin](https://github.com/bazelbuild/e4b). - -For other IDEs, check out the [blog post](https://blog.bazel.build/2016/06/10/ide-support.html) on how these plugins work. - -## I use Jenkins/CircleCI/TravisCI. How does Bazel interoperate with CI systems? - -Bazel returns a non-zero exit code if the build or test invocation fails, and this should be enough for basic CI integration. Since Bazel does not need clean builds for correctness, the CI system should not be configured to clean before starting a build/test run. - -Further details on exit codes are in the [User Manual](/docs/user-manual).
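- -For example, a minimal CI step can rely on the exit code alone (a sketch; substitute your own target pattern): - -```posix-terminal -# No `bazel clean` beforehand: incremental state is safe to reuse. -# The non-zero exit code on a failure is what fails the CI job. -bazel test //... -```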
- -## What future features can we expect in Bazel? - -See our [Roadmaps](/about/roadmap). - -## Can I use Bazel for my INSERT LANGUAGE HERE project? - -Bazel is extensible. Anyone can add support for new languages. Many languages are supported: see the [build encyclopedia](/reference/be/overview) for a list of recommendations and [awesomebazel.com](https://awesomebazel.com/) for a more comprehensive list. - -If you would like to develop extensions or learn how they work, see the documentation for [extending Bazel](/extending/concepts). - -## Can I contribute to the Bazel code base? - -See our [contribution guidelines](/contribute/). - -## Why isn’t all development done in the open? - -We still have to refactor the interfaces between the public code in Bazel and our internal extensions frequently. This makes it hard to do much development in the open. - -## Are you done open sourcing Bazel? - -Open sourcing Bazel is a work-in-progress. In particular, we’re still working on open sourcing: - -* Many of our unit and integration tests (which should make contributing patches easier). -* Full IDE integration. - -Beyond code, we’d like to eventually have all code reviews, bug tracking, and design decisions happen publicly, with the Bazel community involved. We are not there yet, so some changes will simply appear in the Bazel repository without clear explanation. Despite this lack of transparency, we want to support external developers and collaborate. Thus, we are opening up the code, even though some of the development is still happening internal to Google. Please let us know if anything seems unclear or unjustified as we transition to an open model. - -## Are there parts of Bazel that will never be open sourced? - -Yes, some parts of the code base either integrate with Google-specific technology, or are code we have been looking for an excuse to get rid of (or some combination of the two). These parts of the code base are not available on GitHub and probably never will be. - -## How do I contact the team? - -We are reachable at bazel-discuss@googlegroups.com. - -## Where do I report bugs? - -Open an issue [on GitHub](https://github.com/bazelbuild/bazel/issues). - -## What’s up with the word “Blaze” in the codebase? - -This is an internal name for the tool. Please refer to Blaze as Bazel. - -## Why do other Google projects (Android, Chrome) use other build tools? - -Until the first (Alpha) release, Bazel was not available externally, so open source projects such as Chromium and Android could not use it. In addition, the original lack of Windows support was a problem for building Windows applications, such as Chrome. Since the project has matured and become more stable, the [Android Open Source Project](https://source.android.com/) is in the process of migrating to Bazel. - -## How do you pronounce “Bazel”? - -The same way as “basil” (the herb) in US English: “BAY-zel”. It rhymes with “hazel”. IPA: /ˈbeɪzˌəl/ diff --git a/8.4.2/about/intro.mdx b/8.4.2/about/intro.mdx deleted file mode 100644 index a531ac2..0000000 --- a/8.4.2/about/intro.mdx +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: 'Intro to Bazel' ---- - - - -Bazel is an open-source build and test tool similar to Make, Maven, and Gradle. -It uses a human-readable, high-level build language. Bazel supports projects in -multiple languages and builds outputs for multiple platforms. Bazel supports -large codebases across multiple repositories, and large numbers of users.
- -## Benefits - -Bazel offers the following advantages: - -* **High-level build language.** Bazel uses an abstract, human-readable - language to describe the build properties of your project at a high - semantic level. Unlike other tools, Bazel operates on the *concepts* - of libraries, binaries, scripts, and data sets, shielding you from the - complexity of writing individual calls to tools such as compilers and - linkers. - -* **Bazel is fast and reliable.** Bazel caches all previously done work and - tracks changes to both file content and build commands. This way, Bazel - knows when something needs to be rebuilt, and rebuilds only that. To further - speed up your builds, you can set up your project to build in a highly - parallel and incremental fashion. - -* **Bazel is multi-platform.** Bazel runs on Linux, macOS, and Windows. Bazel - can build binaries and deployable packages for multiple platforms, including - desktop, server, and mobile, from the same project. - -* **Bazel scales.** Bazel maintains agility while handling builds with 100k+ - source files. It works with multiple repositories and user bases in the tens - of thousands. - -* **Bazel is extensible.** Many [languages](/rules) are - supported, and you can extend Bazel to support any other language or - framework. - -## Using Bazel - -To build or test a project with Bazel, you typically do the following: - -1. **Set up Bazel.** Download and [install Bazel](/install). - -2. **Set up a project [workspace](/concepts/build-ref#workspaces)**, which is a - directory where Bazel looks for build inputs and `BUILD` files, and where it - stores build outputs. - -3. **Write a `BUILD` file**, which tells Bazel what to build and how to - build it. - - You write your `BUILD` file by declaring build targets using - [Starlark](/rules/language), a domain-specific language. (See example - [here](https://github.com/bazelbuild/bazel/blob/master/examples/cpp/BUILD).) - - A build target specifies a set of input artifacts that Bazel will build plus - their dependencies, the build rule Bazel will use to build it, and options - that configure the build rule. - - A build rule specifies the build tools Bazel will use, such as compilers and - linkers, and their configurations. Bazel ships with a number of build rules - covering the most common artifact types in the supported languages on - supported platforms. - -4. **Run Bazel** from the [command line](/reference/command-line-reference). Bazel - places your outputs within the workspace. - -In addition to building, you can also use Bazel to run -[tests](/reference/test-encyclopedia) and [query](/query/guide) the build -to trace dependencies in your code. - -## Bazel build process - -When running a build or a test, Bazel does the following: - -1. **Loads** the `BUILD` files relevant to the target. - -2. **Analyzes** the inputs and their - [dependencies](/concepts/dependencies), applies the specified build - rules, and produces an [action](/extending/concepts#evaluation-model) - graph. - -3. **Executes** the build actions on the inputs until the final build outputs - are produced. - -Since all previous build work is cached, Bazel can identify and reuse cached -artifacts and only rebuild or retest what's changed. To further enforce -correctness, you can set up Bazel to run builds and tests -[hermetically](/basics/hermeticity) through sandboxing, minimizing skew -and maximizing [reproducibility](/run/build#correct-incremental-rebuilds).
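- -For example, all three phases sit behind a single command, and you can request sandboxed execution explicitly (a sketch; the target label is illustrative): - -```posix-terminal -# Sandboxing is typically the default where the OS supports it; -# this flag just makes the choice of strategy explicit. -bazel build --spawn_strategy=sandboxed //main:hello -```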
- -### Action graph - -The action graph represents the build artifacts, the relationships between them, -and the build actions that Bazel will perform. Thanks to this graph, Bazel can -[track](/run/build#build-consistency) changes to -file content as well as changes to actions, such as build or test commands, and -know what build work has previously been done. The graph also enables you to -easily [trace dependencies](/query/guide) in your code. - -## Getting started tutorials - -To get started with Bazel, see [Getting Started](/start/) or jump -directly to the Bazel tutorials: - -* [Tutorial: Build a C++ Project](/start/cpp) -* [Tutorial: Build a Java Project](/start/java) -* [Tutorial: Build an Android Application](/start/android-app) -* [Tutorial: Build an iOS Application](/start/ios-app) diff --git a/8.4.2/about/roadmap.mdx b/8.4.2/about/roadmap.mdx deleted file mode 100644 index 2e18b78..0000000 --- a/8.4.2/about/roadmap.mdx +++ /dev/null @@ -1,147 +0,0 @@ ---- -title: 'Bazel roadmap' ---- - - - -## Overview - -As the Bazel project continues to evolve in response to your needs, we want to -share our 2024 update. - -This roadmap describes current initiatives and predictions for the future of -Bazel development, giving you visibility into current priorities and ongoing -projects. - -## Bazel 8.0 Release - -We plan to bring Bazel 8.0 [long term support -(LTS)](https://bazel.build/release/versioning) to you in late 2024. -The following features are planned to be implemented. - -### Bzlmod: external dependency management system - -[Bzlmod](https://bazel.build/docs/bzlmod) automatically resolves transitive -dependencies, allowing projects to scale while staying fast and -resource-efficient. - -With Bazel 8, we will disable WORKSPACE support by default (it will still be -possible to enable it using `--enable_workspace`); with Bazel 9 WORKSPACE -support will be removed. Starting with Bazel 7.1, you can set -`--noenable_workspace` to opt into the new behavior. - -Bazel 8.0 will contain a number of enhancements to -[Bazel's external dependency management](https://docs.google.com/document/d/1moQfNcEIttsk6vYanNKIy3ZuK53hQUFq1b1r0rmsYVg/edit#heading=h.lgyp7ubwxmjc) -functionality, including: - -* The new flag `--enable_workspace` can be set to `false` to completely - disable WORKSPACE functionality. -* New directory watching API (see - [#21435](https://github.com/bazelbuild/bazel/pull/21435), shipped in Bazel - 7.1). -* Improved scheme for generating canonical repository names for better - cacheability of actions across dependency version updates. - ([#21316](https://github.com/bazelbuild/bazel/pull/21316), shipped in Bazel - 7.1) -* An improved shared repository cache (see - [#12227](https://github.com/bazelbuild/bazel/issues/12227)). -* Vendor and offline mode support — allows users to run builds with - pre-downloaded dependencies (see - [#19563](https://github.com/bazelbuild/bazel/issues/19563)). -* Reduced merge conflicts in lock files - ([#20369](https://github.com/bazelbuild/bazel/issues/20369)). -* Segmented MODULE.bazel - ([#17880](https://github.com/bazelbuild/bazel/issues/17880)). -* Allow overriding module extension generated repository - ([#19301](https://github.com/bazelbuild/bazel/issues/19301)). -* Improved documentation (e.g. - [#18030](https://github.com/bazelbuild/bazel/issues/18030), - [#15821](https://github.com/bazelbuild/bazel/issues/15821)) and migration - guide and migration tooling.
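- -For example, assuming Bazel 7.1 or newer, you can already test a build against the upcoming Bazel 8 default on a single invocation: - -```posix-terminal -bazel build --noenable_workspace //... -```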
- -### Remote execution improvements - -* Add support for asynchronous execution, speeding up remote execution by - increasing parallelism via the `--jobs` flag. -* Make it easier to debug cache misses with a new compact execution log, - which reduces log size by 100x and significantly lowers runtime overhead (see - [#18643](https://github.com/bazelbuild/bazel/issues/18643)). -* Implement garbage collection for the disk cache (see - [#5139](https://github.com/bazelbuild/bazel/issues/5139)). -* Implement remote output service to allow lazy downloading of arbitrary build - outputs (see - [#20933](https://github.com/bazelbuild/bazel/discussions/20933)). - -### Migration of Android, C++, Java, Python, and Proto rules - -Complete migration of Android, C++, Java, and Python rulesets to dedicated -repositories and decoupling them from the Bazel releases. This effort allows -Bazel users and rule authors to - -* Update rules independently of Bazel. -* Update and customize rules as needed. - -The new location of the rulesets is going to be `bazelbuild/rules_android`, -`rules_cc`, `rules_java`, `rules_python` and `google/protobuf`. `rules_proto` is -going to be deprecated. - -Bazel 8 will provide a temporary migration flag that will automatically use the -rulesets that were previously part of the binary from their repositories. All -the users of those rulesets are expected to eventually depend on their -repositories and load them similarly to other rulesets that were never part of -Bazel. - -Bazel 8 will also improve the existing rule extension and subrule APIs and -mark them as non-experimental. - -### Starlark improvements - -* Symbolic Macros are a new way of writing macros that is friendlier to - `BUILD` users, macro authors, and tooling. Compared to legacy macros, which - Bazel has only limited insight into, symbolic macros help users avoid common - pitfalls and enforce best practices. -* Package finalizers are a proposed feature for adding first-class support for - custom package validation logic. They are intended to help us deprecate - `native.existing_rules()`. - -### Configurability - -* Output path mapping continues to stabilize: promising better remote cache - performance and build speed for rule designers who use transitions. -* Automatically set build flags suitable for a given `--platforms`. -* Define project-supported flag combinations and automatically build targets - with default flags without having to set bazelrcs. -* Don't redo build analysis every time build flags change. - -### Project Skyfocus - minimize retained data structures - -Bazel holds a lot of state in RAM for fast incremental builds. However, -developers often change a small subset of the source files (e.g. almost never -one of the external dependencies). With Skyfocus, Bazel will provide an -experimental way to drop unnecessary incremental state and reduce Bazel's memory -footprint, while still providing the same fast incremental build experience. - -The initial scope aims to improve the retained heap metric only. Peak heap -reduction is a possibility, but not included in the initial scope. - -### Misc - -* Mobile install v3, a simpler and better maintained approach to incrementally - deploy Android applications. -* Garbage collection for repository caches and Bazel's `install_base`. -* Reduced sandboxing overhead. - -### Bazel-JetBrains* IntelliJ IDEA support - -Incremental IntelliJ plugin updates to support the latest JetBrains plugin -release. - -*This roadmap is a snapshot of current targets and should not be taken as a -guarantee.
-Priorities are subject to change in response to developer and customer -feedback, or new market opportunities.* - -*To be notified of new features — including updates to this roadmap — join the -[Google Group](https://groups.google.com/g/bazel-discuss) community.* - -*Copyright © 2022 JetBrains s.r.o. JetBrains and IntelliJ are registered trademarks of JetBrains s.r.o. diff --git a/8.4.2/about/vision.mdx b/8.4.2/about/vision.mdx deleted file mode 100644 index da0ed02..0000000 --- a/8.4.2/about/vision.mdx +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: 'Bazel Vision' ---- - - - -Any software developer can efficiently build, test, and package -any project, of any size or complexity, with tooling that's easy to adopt and -extend. - -* **Engineers can take build fundamentals for granted.** Software developers - focus on the creative process of authoring code because the mechanical - process of build and test is solved. When customizing the build system to - support new languages or unique organizational needs, users focus on the - aspects of extensibility that are unique to their use case, without having - to reinvent the basic plumbing. - -* **Engineers can easily contribute to any project.** A developer who wants to - start working on a new project can simply clone the project and run the - build. There's no need for local configuration - it just works. With - cross-platform remote execution, they can work on any machine anywhere and - fully test their changes against all platforms the project targets. - Engineers can quickly configure the build for a new project or incrementally - migrate an existing build. - -* **Projects can scale to any size codebase, any size team.** Fast, - incremental testing allows teams to fully validate every change before it is - committed. This remains true even as repos grow, projects span multiple - repos, and multiple languages are introduced. Infrastructure does not force - developers to trade test coverage for build speed. - -**We believe Bazel has the potential to fulfill this vision.** - -Bazel was built from the ground up to enable builds that are reproducible (a -given set of inputs will always produce the same outputs) and portable (a build -can be run on any machine without affecting the output). - -These characteristics support safe incrementality (rebuilding only changed -inputs doesn't introduce the risk of corruption) and distributability (build -actions are isolated and can be offloaded). By minimizing the work needed to do -a correct build and parallelizing that work across multiple cores and remote -systems, Bazel can make any build fast. - -Bazel's abstraction layer — instructions specific to languages, platforms, and -toolchains implemented in a simple extensibility language — allows it to be -easily applied to any context. - -## Bazel core competencies - -1. Bazel supports **multi-language, multi-platform** builds and tests. You can - run a single command to build and test your entire source tree, no matter - which combination of languages and platforms you target. -1. Bazel builds are **fast and correct**. Every build and test run is - incremental, on your developers' machines and on CI. -1. Bazel provides a **uniform, extensible language** to define builds for any - language or platform. -1. Bazel allows your builds **to scale** by connecting to remote execution and - caching services. -1. Bazel works across **all major development platforms** (Linux, macOS, and - Windows). -1. 
We accept that adopting Bazel requires effort, but **gradual adoption** is - possible. Bazel interfaces with de-facto standard tools for a given - language/platform. - -## Serving language communities - -Software engineering evolves in the context of language communities — typically, -self-organizing groups of people who use common tools and practices. - -To be of use to members of a language community, high-quality Bazel rules must be -available that integrate with the workflows and conventions of that community. - -Bazel is committed to be extensible and open, and to support good rulesets for -any language. - -### Requirements of a good ruleset - -1. The rules need to support efficient **building and testing** for the - language, including code coverage. -1. The rules need to **interface with a widely-used "package manager"** for the - language (such as Maven for Java), and support incremental migration paths - from other widely-used build systems. -1. The rules need to be **extensible and interoperable**, following - ["Bazel sandwich"](https://github.com/bazelbuild/bazel-website/blob/master/designs/_posts/2016-08-04-extensibility-for-native-rules.md) - principles. -1. The rules need to be **remote-execution ready**. In practice, this means - **configurable using the [toolchains](/extending/toolchains) mechanism**. -1. The rules (and Bazel) need to interface with a **widely-used IDE** for the - language, if there is one. -1. The rules need to have **thorough, usable documentation**, with introductory - material for new users and comprehensive docs for expert users. - -Each of these items is essential and only together do they deliver on Bazel's -competencies for their particular ecosystem. - -They are also, by and large, sufficient - once all are fulfilled, Bazel fully -delivers its value to members of that language community. diff --git a/8.4.2/about/why.mdx b/8.4.2/about/why.mdx deleted file mode 100644 index 97cfa36..0000000 --- a/8.4.2/about/why.mdx +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: 'Why Bazel?' ---- - - - -Bazel is a [fast](#fast), [correct](#correct), and [extensible](#extensible) -build tool with [integrated testing](#integrated-testing) that supports multiple -[languages](#multi-language), [repositories](#multi-repository), and -[platforms](#multi-platform) in an industry-leading [ecosystem](#ecosystem). - -## Bazel is fast - -Bazel knows exactly what input files each build command needs, avoiding -unnecessary work by re-running only when the set of input files has -changed between builds. -It runs build commands with as much parallelism as possible, either within the -same computer or on [remote build nodes](/remote/rbe). If the structure of the build -allows for it, it can run thousands of build or test commands at the same time. - -This is supported by multiple caching layers, in memory, on disk and on the -remote build farm, if available. At Google, we routinely achieve cache hit rates -north of 99%. - -## Bazel is correct - -Bazel ensures that your binaries are built *only* from your own -source code. Bazel actions run in individual sandboxes and Bazel tracks -every input file of the build, only and always re-running build -commands when it needs to. This keeps your binaries up-to-date so that the -[same source code always results in the same binary](/basics/hermeticity), bit -by bit. - -Say goodbye to endless `make clean` invocations and to chasing phantom bugs -that were in fact resolved in source code that never got built.
-
-## Bazel is extensible
-
-Harness the full power of Bazel by writing your own rules and macros to
-customize Bazel for your specific needs across a wide range of projects.
-
-Bazel rules are written in [Starlark](/rules/language), our
-in-house programming language that's a subset of Python. Starlark makes
-rule-writing accessible to most developers, while also creating rules that can
-be used across the ecosystem.
-
-## Integrated testing
-
-Bazel's [integrated test runner](/docs/user-manual#running-tests)
-knows and runs only those tests needing to be re-run, using remote execution
-(if available) to run them in parallel. Detect flakes early by using remote
-execution to quickly run a test thousands of times.
-
-Bazel [provides facilities](/remote/bep) to upload test results to a central
-location, thereby facilitating efficient communication of test outcomes, be it
-on CI or by individual developers.
-
-## Multi-language support
-
-Bazel supports many common programming languages including C++, Java,
-Kotlin, Python, Go, and Rust. You can build multiple binaries (for example,
-backend, web UI and mobile app) in the same Bazel invocation without being
-constrained to one language's idiomatic build tool.
-
-## Multi-repository support
-
-Bazel can [gather source code from multiple locations](/external/overview): you
-don't need to vendor your dependencies (but you can!); instead, you can point
-Bazel to the location of your source code or prebuilt artifacts (e.g. a git
-repository or Maven Central), and it takes care of the rest.
-
-## Multi-platform support
-
-Bazel can simultaneously build projects for multiple platforms including Linux,
-macOS, Windows, and Android. It also provides powerful
-[cross-compilation capabilities](/extending/platforms) to build code for one
-platform while running the build on another.
-
-## Wide ecosystem
-
-[Industry leaders](/community/users) love Bazel, building a large
-community of developers who use and contribute to Bazel. Find tools, services,
-and documentation, including [consulting and SaaS offerings](/community/experts),
-that support Bazel. Explore extensions like support for programming languages in
-our [open source software repositories](/rules).
diff --git a/8.4.2/advanced/performance/build-performance-breakdown.mdx b/8.4.2/advanced/performance/build-performance-breakdown.mdx
deleted file mode 100644
index 477e757..0000000
--- a/8.4.2/advanced/performance/build-performance-breakdown.mdx
+++ /dev/null
@@ -1,235 +0,0 @@
----
-title: 'Breaking down build performance'
----
-
-
-
-Bazel is complex and does a lot of different things over the course of a build,
-some of which can have an impact on build performance. This page attempts to map
-some of these Bazel concepts to their implications on build performance. While
-not exhaustive, we have included some examples of how to detect build performance
-issues through [extracting metrics](/configure/build-performance-metrics)
-and what you can do to fix them. With this, we hope you can apply these concepts
-when investigating build performance regressions.
-
-### Clean vs Incremental builds
-
-A clean build is one that builds everything from scratch, while an incremental
-build reuses some already completed work.
-
-We suggest looking at clean and incremental builds separately, especially when
-you are collecting / aggregating metrics that are dependent on the state of
-Bazel’s caches (for example
-[build request size metrics](#deterministic-build-metrics-as-a-proxy-for-build-performance)
-). 
They also represent two different user experiences. As compared to starting
-a clean build from scratch (which takes longer due to a cold cache), incremental
-builds happen far more frequently as developers iterate on code (typically
-faster since the cache is usually already warm).
-
-You can use the `CumulativeMetrics.num_analyses` field in the BEP to classify
-builds. If `num_analyses <= 1`, it is a clean build; otherwise, we can broadly
-categorize it as likely being an incremental build - the user could have switched
-to different flags or different targets causing an effectively clean build. Any
-more rigorous definition of incrementality will likely have to come in the form
-of a heuristic, for example looking at the number of packages loaded
-(`PackageMetrics.packages_loaded`).
-
-### Deterministic build metrics as a proxy for build performance
-
-Measuring build performance can be difficult due to the non-deterministic nature
-of certain metrics (for example Bazel’s CPU time or queue times on a remote
-cluster). As such, it can be useful to use deterministic metrics as a proxy for
-the amount of work done by Bazel, which in turn affects its performance.
-
-The size of a build request can have significant implications on build
-performance. A larger build could represent more work in analyzing and
-constructing the build graphs. Builds grow organically with
-development, as more dependencies are added/created, and thus they grow in
-complexity and become more expensive to build.
-
-We can slice this problem into the various build phases, and use the following
-metrics as proxy metrics for work done at each phase:
-
-1. `PackageMetrics.packages_loaded`: the number of packages successfully loaded.
-   A regression here represents more work that needs to be done to read and parse
-   each additional BUILD file in the loading phase.
-   - This is often due to the addition of dependencies and having to load their
-     transitive closure.
-   - Use [query](/query/quickstart) / [cquery](/query/cquery) to find
-     where new dependencies might have been added.
-
-2. `TargetMetrics.targets_configured`: representing the number of targets and
-   aspects configured in the build. A regression represents more work in
-   constructing and traversing the configured target graph.
-   - This is often due to the addition of dependencies and having to construct
-     the graph of their transitive closure.
-   - Use [cquery](/query/cquery) to find where new
-     dependencies might have been added.
-
-3. `ActionSummary.actions_created`: represents the actions created in the build,
-   and a regression represents more work in constructing the action graph. Note
-   that this also includes unused actions that might not have been executed.
-   - Use [aquery](/query/aquery) for debugging regressions;
-     we suggest starting with
-     [`--output=summary`](/reference/command-line-reference#flag--output)
-     before further drilling down with
-     [`--skyframe_state`](/reference/command-line-reference#flag--skyframe_state).
-
-4. `ActionSummary.actions_executed`: the number of actions executed, a
-   regression directly represents more work in executing these actions.
-   - The [BEP](/remote/bep) writes out the action statistics
-     `ActionData` that shows the most executed action types. 
By default, it
-     collects the top 20 action types, but you can pass the
-     [`--experimental_record_metrics_for_all_mnemonics`](/reference/command-line-reference#flag--experimental_record_metrics_for_all_mnemonics)
-     flag to collect this data for all action types that were executed.
-   - This should help you figure out what kinds of additional actions were
-     executed.
-
-5. `BuildGraphSummary.outputArtifactCount`: the number of artifacts created by
-   the executed actions.
-   - If the number of actions executed did not increase, then it is likely that
-     a rule implementation was changed.
-
-
-These metrics are all affected by the state of the local cache; hence you will
-want to ensure that the builds you extract these metrics from are
-**clean builds**.
-
-We have noted that a regression in any of these metrics can be accompanied by
-regressions in wall time, CPU time and memory usage.
-
-### Usage of local resources
-
-Bazel consumes a variety of resources on your local machine (both for analyzing
-the build graph and driving the execution, and for running local actions). This
-can affect the performance and availability of your machine while it performs
-the build, as well as its ability to perform other tasks.
-
-#### Time spent
-
-Perhaps the metrics most susceptible to noise (and that can vary greatly from
-build to build) are those measuring time; in particular - wall time, CPU time
-and system time. You can
-use [bazel-bench](https://github.com/bazelbuild/bazel-bench) to get
-a benchmark for these metrics, and with a sufficient number of `--runs`, you can
-increase the statistical significance of your measurement.
-
-- **Wall time** is the real world time elapsed.
-  - If _only_ wall time regresses, we suggest collecting a
-    [JSON trace profile](/advanced/performance/json-trace-profile) and looking
-    for differences. Otherwise, it would likely be more efficient to
-    investigate other regressed metrics as they could have affected the wall
-    time.
-
-- **CPU time** is the time spent by the CPU executing user code.
-  - If the CPU time regresses across two project commits, we suggest collecting
-    a Starlark CPU profile. You should probably also use `--nobuild` to
-    restrict the build to the analysis phase since that is where most of the
-    CPU heavy work is done.
-
-- **System time** is the time spent by the CPU in the kernel.
-  - If system time regresses, it is mostly correlated with I/O when Bazel reads
-    files from your file system.
-
-#### System-wide load profiling
-
-Using the
-[`--experimental_collect_load_average_in_profiler`](https://github.com/bazelbuild/bazel/blob/6.0.0/src/main/java/com/google/devtools/build/lib/runtime/CommonCommandOptions.java#L306-L312)
-flag introduced in Bazel 6.0, the
-[JSON trace profiler](/advanced/performance/json-trace-profile) collects the
-system load average during the invocation.
-
-![Profile that includes system load average](/docs/images/json-trace-profile-system-load-average.png "Profile that includes system load average")
-
-**Figure 1.** Profile that includes system load average.
-
-A high load during a Bazel invocation can be an indication that Bazel schedules
-too many local actions in parallel for your machine. You might want to look into
-adjusting
-[`--local_cpu_resources`](/reference/command-line-reference#flag--local_cpu_resources)
-and [`--local_ram_resources`](/reference/command-line-reference#flag--local_ram_resources),
-especially in container environments (at least until
-[#16512](https://github.com/bazelbuild/bazel/pull/16512) is merged).
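-
-For example, a minimal sketch of capping local scheduling while collecting the
-load average in the profile (the target pattern and values are illustrative,
-not recommendations):
-
-```sh
-# Cap local action scheduling at half the host's cores and 4 GB of RAM.
-bazel build //... \
-  --experimental_collect_load_average_in_profiler \
-  --local_cpu_resources="HOST_CPUS*.5" \
-  --local_ram_resources=4096
-```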
-
-
-#### Monitoring Bazel memory usage
-
-There are two main sources for Bazel’s memory usage: Bazel `info` and the
-[BEP](/remote/bep).
-
-- `bazel info used-heap-size-after-gc`: The amount of used memory in bytes after
-  a call to `System.gc()`.
-  - [Bazel bench](https://github.com/bazelbuild/bazel-bench)
-    provides benchmarks for this metric as well.
-  - Additionally, there are `peak-heap-size`, `max-heap-size`, `used-heap-size`
-    and `committed-heap-size` (see
-    [documentation](/docs/user-manual#configuration-independent-data)), but they
-    are less relevant.
-
-- [BEP](/remote/bep)’s
-  `MemoryMetrics.peak_post_gc_heap_size`: The peak JVM heap size in
-  bytes post GC (requires setting
-  [`--memory_profile`](/reference/command-line-reference#flag--memory_profile),
-  which attempts to force a full GC).
-
-A regression in memory usage is usually a result of a regression in
-[build request size metrics](#deterministic-build-metrics-as-a-proxy-for-build-performance),
-which are often due to the addition of dependencies or a change in the rule
-implementation.
-
-To analyze Bazel’s memory footprint on a more granular level, we recommend using
-the [built-in memory profiler](/rules/performance#memory-profiling)
-for rules.
-
-#### Memory profiling of persistent workers
-
-While [persistent workers](/remote/persistent) can help to speed up builds
-significantly (especially for interpreted languages) their memory footprint can
-be problematic. Bazel collects metrics on its workers; in particular, the
-`WorkerMetrics.WorkerStats.worker_memory_in_kb` field tells you how much memory
-workers use (by mnemonic).
-
-The [JSON trace profiler](/advanced/performance/json-trace-profile) also
-collects persistent worker memory usage during the invocation by passing in the
-[`--experimental_collect_worker_data_in_profiler`](https://github.com/bazelbuild/bazel/blob/6.0.0/src/main/java/com/google/devtools/build/lib/runtime/CommonCommandOptions.java#L314-L320)
-flag (new in Bazel 6.0).
-
-![Profile that includes workers memory usage](/docs/images/json-trace-profile-workers-memory-usage.png "Profile that includes workers memory usage")
-
-**Figure 2.** Profile that includes workers memory usage.
-
-Lowering the value of
-[`--worker_max_instances`](/reference/command-line-reference#flag--worker_max_instances)
-(default 4) might help to reduce
-the amount of memory used by persistent workers. We are actively working on
-making Bazel’s resource manager and scheduler smarter so that such fine tuning
-will be required less often in the future.
-
-### Monitoring network traffic for remote builds
-
-In remote execution, Bazel downloads artifacts that were built as a result of
-executing actions. As such, your network bandwidth can affect the performance
-of your build.
-
-If you are using remote execution for your builds, you might want to consider
-monitoring the network traffic during the invocation using the
-`NetworkMetrics.SystemNetworkStats` proto from the [BEP](/remote/bep)
-(requires passing `--experimental_collect_system_network_usage`).
-
-Furthermore, [JSON trace profiles](/advanced/performance/json-trace-profile)
-allow you to view system-wide network usage throughout the course of the build
-by passing the `--experimental_collect_system_network_usage` flag (new in Bazel
-6.0).
-
-![Profile that includes system-wide network usage](/docs/images/json-trace-profile-network-usage.png "Profile that includes system-wide network usage")
-
-**Figure 3.** Profile that includes system-wide network usage.
-
-A high but rather flat network usage when using remote execution might indicate
-that the network is the bottleneck in your build; if you are not using it already,
-consider turning on Build without the Bytes by passing
-[`--remote_download_minimal`](/reference/command-line-reference#flag--remote_download_minimal).
-This will speed up your builds by avoiding the download of unnecessary intermediate artifacts.
-
-Another option is to configure a local
-[disk cache](/reference/command-line-reference#flag--disk_cache) to save on
-download bandwidth.
diff --git a/8.4.2/advanced/performance/build-performance-metrics.mdx b/8.4.2/advanced/performance/build-performance-metrics.mdx
deleted file mode 100644
index 8391ea8..0000000
--- a/8.4.2/advanced/performance/build-performance-metrics.mdx
+++ /dev/null
@@ -1,97 +0,0 @@
----
-title: 'Extracting build performance metrics'
----
-
-
-
-Probably every Bazel user has experienced builds that were slow or slower than
-anticipated. Improving the performance of individual builds has particular value
-for targets with significant impact, such as:
-
-1. Core developer targets that are frequently iterated on and (re)built.
-
-2. Common libraries widely depended upon by other targets.
-
-3. A representative target from a class of targets (e.g. custom rules);
-   diagnosing and fixing issues in one build might help to resolve issues at a
-   larger scale.
-
-An important step to improving the performance of builds is to understand where
-resources are spent. This page lists different metrics you can collect.
-[Breaking down build performance](/configure/build-performance-breakdown) showcases
-how you can use these metrics to detect and fix build performance issues.
-
-There are a few main ways to extract metrics from your Bazel builds, namely:
-
-## Build Event Protocol (BEP)
-
-Bazel outputs a variety of protocol buffers
-[`build_event_stream.proto`](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto)
-through the [Build Event Protocol (BEP)](/remote/bep), which
-can be aggregated by a backend specified by you. Depending on your use cases,
-you might decide to aggregate the metrics in various ways, but here we will go
-over some concepts and proto fields that are generally useful to consider.
-
-## Bazel’s query / cquery / aquery commands
-
-Bazel provides 3 different query modes ([query](/query/quickstart),
-[cquery](/query/cquery) and [aquery](/query/aquery)) that allow users
-to query the target graph, configured target graph and action graph
-respectively. The query language provides a
-[suite of functions](/query/language#functions) usable across the different
-query modes, allowing you to customize your queries according to your needs.
-
-## JSON Trace Profiles
-
-For every build-like Bazel invocation, Bazel writes a trace profile in JSON
-format. The [JSON trace profile](/advanced/performance/json-trace-profile) can
-be very useful to quickly understand what Bazel spent time on during the
-invocation.
-
-## Execution Log
-
-The [execution log](/remote/cache-remote) can help you to troubleshoot and fix
-missing remote cache hits due to machine and environment differences or
-non-deterministic actions. 
If you pass the flag
-[`--experimental_execution_log_spawn_metrics`](/reference/command-line-reference#flag--experimental_execution_log_spawn_metrics)
-(available from Bazel 5.2), it will also contain detailed spawn metrics, both for
-locally and remotely executed actions. You can use these metrics, for example, to
-make comparisons between local and remote machine performance or to find out
-which part of the spawn execution is consistently slower than expected (for
-example due to queuing).
-
-## Execution Graph Log
-
-While the JSON trace profile contains the critical path information, sometimes
-you need additional information on the dependency graph of the executed actions.
-Starting with Bazel 6.0, you can pass the flags
-`--experimental_execution_graph_log` and
-`--experimental_execution_graph_log_dep_type=all` to write out a log about the
-executed actions and their inter-dependencies.
-
-This information can be used to understand the drag that is added by a node on
-the critical path. The drag is the amount of time that can potentially be saved
-by removing a particular node from the execution graph.
-
-The data helps you predict the impact of changes to the build and action graph
-before you actually make them.
-
-## Benchmarking with bazel-bench
-
-[Bazel bench](https://github.com/bazelbuild/bazel-bench) is a
-benchmarking tool for Git projects to benchmark build performance in the
-following cases:
-
-* **Project benchmark:** Benchmarking two git commits against each other at a
-  single Bazel version. Used to detect regressions in your build (often through
-  the addition of dependencies).
-
-* **Bazel benchmark:** Benchmarking two versions of Bazel against each other at
-  a single git commit. Used to detect regressions within Bazel itself (if you
-  happen to maintain / fork Bazel).
-
-Benchmarks monitor wall time, CPU time, system time and Bazel’s retained
-heap size.
-
-It is also recommended to run Bazel bench on dedicated, physical machines that
-are not running other processes so as to reduce sources of variability.
diff --git a/8.4.2/advanced/performance/iteration-speed.mdx b/8.4.2/advanced/performance/iteration-speed.mdx
deleted file mode 100644
index 2bbf839..0000000
--- a/8.4.2/advanced/performance/iteration-speed.mdx
+++ /dev/null
@@ -1,93 +0,0 @@
----
-title: 'Optimize Iteration Speed'
----
-
-
-
-This page describes how to optimize Bazel's build performance when running Bazel
-repeatedly.
-
-## Bazel's Runtime State
-
-A Bazel invocation involves several interacting parts.
-
-* The `bazel` command line interface (CLI) is the user-facing front-end tool
-  and receives commands from the user.
-
-* The CLI tool starts a [*Bazel server*](https://bazel.build/run/client-server)
-  for each distinct [output base](https://bazel.build/remote/output-directories).
-  The Bazel server is generally persistent, but will shut down after some idle
-  time so as to not waste resources.
-
-* The Bazel server performs the loading and analysis steps for a given command
-  (`build`, `run`, `cquery`, etc.), in which it constructs the necessary parts
-  of the build graph in memory. The resulting data structures are retained in
-  the Bazel server as part of the *analysis cache*.
-
-* The Bazel server can also perform the action execution, or it can send
-  actions off for remote execution if it is set up to do so. The results of
-  action executions are also cached, namely in the *action cache* (or
-  *execution cache*, which may be either local or remote, and may be shared
-  among Bazel servers). 
-
-* The result of the Bazel invocation is made available in the output tree.
-
-## Running Bazel Iteratively
-
-In a typical developer workflow, it is common to build (or run) a piece of code
-repeatedly, often at a very high frequency (e.g. to resolve some compilation
-error or investigate a failing test). In this situation, it is important that
-repeated invocations of `bazel` have as little overhead as possible relative to
-the underlying, repeated action (e.g. invoking a compiler, or executing a test).
-
-With this in mind, we take another look at Bazel's runtime state:
-
-The analysis cache is a critical piece of data. A significant amount of time can
-be spent just on the loading and analysis phases of a cold run (i.e. a run just
-after the Bazel server was started or when the analysis cache was discarded).
-For a single, successful cold build (e.g. for a production release) this cost is
-bearable, but for repeatedly building the same target it is important that this
-cost be amortized and not repeated on each invocation.
-
-The analysis cache is rather volatile. First off, it is part of the in-process
-state of the Bazel server, so losing the server loses the cache. But the cache
-is also *invalidated* very easily: for example, many `bazel` command line flags
-cause the cache to be discarded. This is because many flags affect the build
-graph (e.g. because of
-[configurable attributes](https://bazel.build/configure/attributes)). Some flag
-changes can also cause the Bazel server to be restarted (e.g. changing
-[startup options](https://bazel.build/docs/user-manual#startup-options)).
-
-A good execution cache is also valuable for build performance. An execution
-cache can be kept locally
-[on disk](https://bazel.build/remote/caching#disk-cache), or
-[remotely](https://bazel.build/remote/caching). The cache can be shared among
-Bazel servers, and indeed among developers.
-
-## Avoid discarding the analysis cache
-
-Bazel will print a warning if either the analysis cache was discarded or the
-server was restarted. Either of these should be avoided during iterative use:
-
-* Be mindful of changing `bazel` flags in the middle of an iterative
-  workflow. For example, mixing a `bazel build -c opt` with a `bazel cquery`
-  causes each command to discard the analysis cache of the other. In general,
-  try to use a fixed set of flags for the duration of a particular workflow.
-
-* Losing the Bazel server loses the analysis cache. The Bazel server has a
-  [configurable](https://bazel.build/docs/user-manual#max-idle-secs) idle
-  time, after which it shuts down. You can configure this time via your
-  bazelrc file to suit your needs. The server is also restarted when startup
-  flags change, so, again, avoid changing those flags if possible.
-
-* Beware that the Bazel server is killed if you press
-  Ctrl-C repeatedly while Bazel is running. It is tempting to try to save time
-  by interrupting a running build that is no longer needed, but only press
-  Ctrl-C once to request a graceful end of the current invocation.
-
-* If you want to use multiple sets of flags from the same workspace, you can
-  use multiple, distinct output bases, switched with the `--output_base`
-  flag, as shown in the sketch below. Each output base gets its own Bazel
-  server. 
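-
-For example, a minimal sketch of keeping two flag sets in separate output bases
-(paths and the target are hypothetical), so that neither invocation invalidates
-the other's analysis cache:
-
-```sh
-# Each output base gets its own Bazel server and its own analysis cache.
-bazel --output_base="$HOME/.cache/bazel-opt" build -c opt //pkg:target
-bazel --output_base="$HOME/.cache/bazel-dbg" build -c dbg //pkg:target
-```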
-
-To make this condition an error rather than a warning, you can use the
-`--noallow_analysis_cache_discard` flag (introduced in Bazel 6.4.0).
diff --git a/8.4.2/advanced/performance/json-trace-profile.mdx b/8.4.2/advanced/performance/json-trace-profile.mdx
deleted file mode 100644
index 56e278c..0000000
--- a/8.4.2/advanced/performance/json-trace-profile.mdx
+++ /dev/null
@@ -1,200 +0,0 @@
----
-title: 'JSON Trace Profile'
----
-
-
-
-The JSON trace profile can be very useful to quickly understand what Bazel spent
-time on during the invocation.
-
-By default, for all build-like commands and query, Bazel writes a profile into
-the output base named `command-$INVOCATION_ID.profile.gz`, where
-`$INVOCATION_ID` is the invocation identifier of the command. Bazel also creates
-a symlink called `command.profile.gz` in the output base that points to the
-profile of the latest command. You can configure whether a profile is written
-with the
-[`--generate_json_trace_profile`](/reference/command-line-reference#flag--generate_json_trace_profile)
-flag, and the location it is written to with the
-[`--profile`](/docs/user-manual#profile) flag. Locations ending with `.gz` are
-compressed with GZIP. Bazel keeps the last 5 profiles, configurable by
-[`--profiles_to_retain`](/reference/command-line-reference#flag--profiles_to_retain),
-in the output base by default for post-build analysis. Explicitly passing a
-profile path with `--profile` disables automatic garbage collection.
-
-## Tools
-
-You can load this profile into `chrome://tracing` or analyze and
-post-process it with other tools.
-
-### `chrome://tracing`
-
-To visualize the profile, open `chrome://tracing` in a Chrome browser tab,
-click "Load" and pick the (potentially compressed) profile file. For more
-detailed results, click the boxes in the lower left corner.
-
-Example profile:
-
-![Example profile](/docs/images/json-trace-profile.png "Example profile")
-
-**Figure 1.** Example profile.
-
-You can use these keyboard controls to navigate:
-
-* Press `1` for "select" mode. In this mode, you can select
-  particular boxes to inspect the event details (see lower left corner).
-  Select multiple events to get a summary and aggregated statistics.
-* Press `2` for "pan" mode. Then drag the mouse to move the view. You
-  can also use `a`/`d` to move left/right.
-* Press `3` for "zoom" mode. Then drag the mouse to zoom. You can
-  also use `w`/`s` to zoom in/out.
-* Press `4` for "timing" mode where you can measure the distance
-  between two events.
-* Press `?` to learn about all controls.
-
-### `bazel analyze-profile`
-
-The Bazel subcommand [`analyze-profile`](/docs/user-manual#analyze-profile)
-consumes a profile and prints cumulative statistics for
-different task types for each build phase and an analysis of the critical path.
-
-For example, the commands
-
-```
-$ bazel build --profile=/tmp/profile.gz //path/to:target
-...
-$ bazel analyze-profile /tmp/profile.gz
-```
-
-may yield output of this form:
-
-```
-INFO: Profile created on Tue Jun 16 08:59:40 CEST 2020, build ID: 0589419c-738b-4676-a374-18f7bbc7ac23, output base: /home/johndoe/.cache/bazel/_bazel_johndoe/d8eb7a85967b22409442664d380222c0
-
-=== PHASE SUMMARY INFORMATION ===
-
-Total launch phase time         1.070 s   12.95%
-Total init phase time           0.299 s    3.62%
-Total loading phase time        0.878 s   10.64%
-Total analysis phase time       1.319 s   15.98%
-Total preparation phase time    0.047 s    0.57%
-Total execution phase time      4.629 s   56.05%
-Total finish phase time         0.014 s    0.18%
-------------------------------------------------
-Total run time                  8.260 s  100.00%
-
-Critical path (4.245 s):
-       Time   Percentage   Description
-    8.85 ms       0.21%    _Ccompiler_Udeps for @local_config_cc// compiler_deps
-    3.839 s      90.44%    action 'Compiling external/com_google_protobuf/src/google/protobuf/compiler/php/php_generator.cc [for host]'
-     270 ms       6.36%    action 'Linking external/com_google_protobuf/protoc [for host]'
-    0.25 ms       0.01%    runfiles for @com_google_protobuf// protoc
-     126 ms       2.97%    action 'ProtoCompile external/com_google_protobuf/python/google/protobuf/compiler/plugin_pb2.py'
-    0.96 ms       0.02%    runfiles for //tools/aquery_differ aquery_differ
-```
-
-### Bazel Invocation Analyzer
-
-The open-source
-[Bazel Invocation Analyzer](https://github.com/EngFlow/bazel_invocation_analyzer)
-consumes a profile and prints suggestions on how to improve
-the build’s performance. This analysis can be performed using its CLI or on
-[https://analyzer.engflow.com](https://analyzer.engflow.com).
-
-### `jq`
-
-`jq` is like `sed` for JSON data. An example usage of `jq` to extract all
-durations of the sandbox creation step in local action execution:
-
-```
-$ zcat $(../bazel-6.0.0rc1-linux-x86_64 info output_base)/command.profile.gz | jq '.traceEvents | .[] | select(.name == "sandbox.createFileSystem") | .dur'
-6378
-7247
-11850
-13756
-6555
-7445
-8487
-15520
-[...]
-```
-
-## Profile information
-
-The profile contains multiple rows. Usually the bulk of rows represent Bazel
-threads and their corresponding events, but some special rows are also included.
-
-The special rows included depend on the version of Bazel invoked when the
-profile was created, and may be customized by different flags.
-
-Figure 1 shows a profile created with Bazel v5.3.1 and includes these rows:
-
-* `action count`: Displays how many concurrent actions were in flight. Click
-  on it to see the actual value. Should go up to the value of
-  [`--jobs`](/reference/command-line-reference#flag--jobs) in clean
-  builds.
-* `CPU usage (Bazel)`: For each second of the build, displays the amount of
-  CPU that was used by Bazel (a value of 1 equals one core being 100% busy).
-* `Critical Path`: Displays one block for each action on the critical path.
-* `Main Thread`: Bazel’s main thread. Useful to get a high-level picture of
-  what Bazel is doing, for example "Launch Blaze", "evaluateTargetPatterns",
-  and "runAnalysisPhase".
-* `Garbage Collector`: Displays minor and major Garbage Collection (GC)
-  pauses.
-
-## Common performance issues
-
-When analyzing performance profiles, look for:
-
-* Slower than expected analysis phase (`runAnalysisPhase`), especially on
-  incremental builds. This can be a sign of a poor rule implementation, for
-  example one that flattens depsets. Package loading can be slowed down by an
-  excessive number of targets, complex macros or recursive globs.
-* Individual slow actions, especially those on the critical path. 
It might be
-  possible to split large actions into multiple smaller actions or reduce the
-  set of (transitive) dependencies to speed them up. Also check for an
-  unusually high non-`PROCESS_TIME` (such as `REMOTE_SETUP` or `FETCH`).
-* Bottlenecks, that is, a small number of threads being busy while all others
-  are idle, waiting for the result (see around 22s and 29s in Figure 1).
-  Optimizing this will most likely require touching the rule implementations
-  or Bazel itself to introduce more parallelism. This can also happen when
-  there is an unusually high amount of GC.
-
-## Profile file format
-
-The top-level object contains metadata (`otherData`) and the actual tracing data
-(`traceEvents`). The metadata contains extra info, for example the invocation ID
-and date of the Bazel invocation.
-
-Example:
-
-```json
-{
-  "otherData": {
-    "build_id": "101bff9a-7243-4c1a-8503-9dc6ae4c3b05",
-    "date": "Wed Oct 26 08:22:35 CEST 2022",
-    "profile_finish_ts": "1677666095162000",
-    "output_base": "/usr/local/google/_bazel_johndoe/573d4be77eaa72b91a3dfaa497bf8cd0"
-  },
-  "traceEvents": [
-    {"name":"thread_name","ph":"M","pid":1,"tid":0,"args":{"name":"Critical Path"}},
-    ...
-    {"cat":"build phase marker","name":"Launch Blaze","ph":"X","ts":-1306000,"dur":1306000,"pid":1,"tid":21},
-    ...
-    {"cat":"package creation","name":"foo","ph":"X","ts":2685358,"dur":784,"pid":1,"tid":246},
-    ...
-    {"name":"thread_name","ph":"M","pid":1,"tid":11,"args":{"name":"Garbage Collector"}},
-    {"cat":"gc notification","name":"minor GC","ph":"X","ts":825986,"dur":11000,"pid":1,"tid":11},
-    ...
-    {"cat":"action processing","name":"Compiling foo/bar.c","ph":"X","ts":54413389,"dur":357594,"pid":1,"args":{"mnemonic":"CppCompile"},"tid":341},
-  ]
-}
-```
-
-Timestamps (`ts`) and durations (`dur`) in the trace events are given in
-microseconds. The category (`cat`) is one of the enum values of `ProfilerTask`.
-Note that some events are merged together if they are very short and close to
-each other; pass
-[`--noslim_profile`](/reference/command-line-reference#flag--slim_profile)
-if you would like to prevent event merging.
-
-See also the
-[Chrome Trace Event Format Specification](https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview).
diff --git a/8.4.2/advanced/performance/memory.mdx b/8.4.2/advanced/performance/memory.mdx
deleted file mode 100644
index 844e691..0000000
--- a/8.4.2/advanced/performance/memory.mdx
+++ /dev/null
@@ -1,97 +0,0 @@
----
-title: 'Optimize Memory'
----
-
-
-
-This page describes how to limit and reduce the memory Bazel uses.
-
-## Running Bazel with Limited RAM
-
-In certain situations, you may want Bazel to use minimal memory. You can set the
-maximum heap via the startup flag
-[`--host_jvm_args`](/docs/user-manual#host-jvm-args),
-like `--host_jvm_args=-Xmx2g`.
-
-### Trade incremental build speeds for memory
-
-If your builds are too big, Bazel may throw an `OutOfMemoryError` (OOM) when
-it doesn't have enough memory. You can make Bazel use less memory, at the cost
-of slower incremental builds, by passing the following command flags:
-[`--discard_analysis_cache`](/docs/user-manual#discard-analysis-cache),
-[`--nokeep_state_after_build`](/reference/command-line-reference#flag--keep_state_after_build),
-and
-[`--notrack_incremental_state`](/reference/command-line-reference#flag--track_incremental_state).
-
-These flags will minimize the memory that Bazel uses in a build, at the cost of
-making future builds slower than a standard incremental build would be. 
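-
-For example, a minimal sketch of a memory-constrained invocation combining all
-three flags (the target is hypothetical):
-
-```sh
-# Minimizes retained memory; the next incremental build will be slower.
-bazel build //pkg:target \
-  --discard_analysis_cache \
-  --notrack_incremental_state \
-  --nokeep_state_after_build
-```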
-
-You can also pass any one of these flags individually:
-
- * `--discard_analysis_cache` will reduce the memory used during execution (not
-analysis). Incremental builds will not have to redo package loading, but will
-have to redo analysis and execution (although the on-disk action cache can
-prevent most re-execution).
- * `--notrack_incremental_state` will not store any edges in Bazel's internal
- dependency graph, rendering it unusable for incremental builds. The next build
- will discard that data, but until then it is preserved for internal debugging,
- unless `--nokeep_state_after_build` is specified.
- * `--nokeep_state_after_build` will discard all data after the build, so that
- incremental builds have to build from scratch (except for the on-disk action
- cache). On its own, it does not affect the high-water mark of the current build.
-
-### Trade build flexibility for memory with Skyfocus (Experimental)
-
-If you want to make Bazel use less memory *and* retain incremental build speeds,
-you can tell Bazel the working set of files that you will be modifying, and
-Bazel will only keep the state needed to correctly and incrementally rebuild
-changes to those files. This feature is called **Skyfocus**.
-
-To use Skyfocus, pass the `--experimental_enable_skyfocus` flag:
-
-```sh
-bazel build //pkg:target --experimental_enable_skyfocus
-```
-
-By default, the working set will be the set of files next to the target being
-built. In the example, all files in `//pkg` will be kept in the working set, and
-changes to files outside of the working set will be disallowed, until you issue
-`bazel clean` or restart the Bazel server.
-
-If you want to specify an exact set of files or directories, use the
-`--experimental_working_set` flag, like so:
-
-```sh
-bazel build //pkg:target --experimental_enable_skyfocus \
-  --experimental_working_set=path/to/another/dir,path/to/tests/dir
-```
-
-You can also pass `--experimental_skyfocus_dump_post_gc_stats` to show the
-amount of memory reclaimed.
-
-Putting it all together, you should see something like this:
-
-```none
-$ bazel test //pkg:target //tests/... --experimental_enable_skyfocus --experimental_working_set dir1,dir2,dir3/subdir --experimental_skyfocus_dump_post_gc_stats
-INFO: --experimental_enable_skyfocus is enabled. Blaze will reclaim memory not needed to build the working set. Run 'blaze dump --skyframe=working_set' to show the working set, after this command.
-WARNING: Changes outside of the working set will cause a build error.
-INFO: Analyzed 149 targets (4533 packages loaded, 169438 targets configured).
-INFO: Found 25 targets and 124 test targets...
-INFO: Updated working set successfully.
-INFO: Focusing on 334 roots, 3 leafs... (use --experimental_skyfocus_dump_keys to show them)
-INFO: Heap: 1237MB -> 676MB (-45.31%)
-INFO: Elapsed time: 192.670s ...
-INFO: Build completed successfully, 62303 total actions
-```
-
-For this example, using Skyfocus allowed Bazel to drop 561MB (45%) of memory.
-Incremental builds handling changes to files under `dir1`, `dir2`, and
-`dir3/subdir` will retain their fast speeds, with the tradeoff that Bazel cannot
-rebuild changed files outside of these directories.
-
-## Memory Profiling
-
-Bazel comes with a built-in memory profiler that can help you check your rule’s
-memory use. Read more about this process in the
-[Memory Profiling section](/rules/performance#memory-profiling) of our
-documentation on how to improve the performance of custom rules. 
diff --git a/8.4.2/basics/artifact-based-builds.mdx b/8.4.2/basics/artifact-based-builds.mdx
deleted file mode 100644
index 79f3514..0000000
--- a/8.4.2/basics/artifact-based-builds.mdx
+++ /dev/null
@@ -1,279 +0,0 @@
----
-title: 'Artifact-Based Build Systems'
----
-
-
-
-This page covers artifact-based build systems and the philosophy behind their
-creation. Bazel is an artifact-based build system. While task-based build
-systems are a good step above build scripts, they give too much power to
-individual engineers by letting them define their own tasks.
-
-Artifact-based build systems have a small number of tasks defined by the system
-that engineers can configure in a limited way. Engineers still tell the system
-**what** to build, but the build system determines **how** to build it. As with
-task-based build systems, artifact-based build systems, such as Bazel, still
-have buildfiles, but the contents of those buildfiles are very different. Rather
-than being an imperative set of commands in a Turing-complete scripting language
-describing how to produce an output, buildfiles in Bazel are a declarative
-manifest describing a set of artifacts to build, their dependencies, and a
-limited set of options that affect how they’re built. When engineers run `bazel`
-on the command line, they specify a set of targets to build (the **what**), and
-Bazel is responsible for configuring, running, and scheduling the compilation
-steps (the **how**). Because the build system now has full control over what
-tools to run when, it can make much stronger guarantees that allow it to be far
-more efficient while still guaranteeing correctness.
-
-## A functional perspective
-
-It’s easy to make an analogy between artifact-based build systems and functional
-programming. Traditional imperative programming languages (such as Java, C, and
-Python) specify lists of statements to be executed one after another, in the
-same way that task-based build systems let programmers define a series of steps
-to execute. Functional programming languages (such as Haskell and ML), in
-contrast, are structured more like a series of mathematical equations. In
-functional languages, the programmer describes a computation to perform, but
-leaves the details of when and exactly how that computation is executed to the
-compiler.
-
-This maps to the idea of declaring a manifest in an artifact-based build system
-and letting the system figure out how to execute the build. Many problems can't
-be easily expressed using functional programming, but the ones that can benefit
-greatly from it: the language is often able to trivially parallelize such
-programs and make strong guarantees about their correctness that would be
-impossible in an imperative language. The easiest problems to express using
-functional programming are the ones that simply involve transforming one piece
-of data into another using a series of rules or functions. And that’s exactly
-what a build system is: the whole system is effectively a mathematical function
-that takes source files (and tools like the compiler) as inputs and produces
-binaries as outputs. So, it’s not surprising that it works well to base a build
-system around the tenets of functional programming.
-
-## Understanding artifact-based build systems
-
-Google's build system, Blaze, was the first artifact-based build system. Bazel
-is the open-sourced version of Blaze. 
-
-Here’s what a buildfile (normally named `BUILD`) looks like in Bazel:
-
-```python
-java_binary(
-    name = "MyBinary",
-    srcs = ["MyBinary.java"],
-    deps = [
-        ":mylib",
-    ],
-)
-java_library(
-    name = "mylib",
-    srcs = ["MyLibrary.java", "MyHelper.java"],
-    visibility = ["//java/com/example/myproduct:__subpackages__"],
-    deps = [
-        "//java/com/example/common",
-        "//java/com/example/myproduct/otherlib",
-    ],
-)
-```
-
-In Bazel, `BUILD` files define targets—the two types of targets here are
-`java_binary` and `java_library`. Every target corresponds to an artifact that
-can be created by the system: binary targets produce binaries that can be
-executed directly, and library targets produce libraries that can be used by
-binaries or other libraries. Every target has:
-
-* `name`: how the target is referenced on the command line and by other
-  targets
-* `srcs`: the source files to be compiled to create the artifact for the target
-* `deps`: other targets that must be built before this target and linked into
-  it
-
-Dependencies can either be within the same package (such as `MyBinary`’s
-dependency on `:mylib`) or on a different package in the same source hierarchy
-(such as `mylib`’s dependency on `//java/com/example/common`).
-
-As with task-based build systems, you perform builds using Bazel’s command-line
-tool. To build the `MyBinary` target, you run `bazel build :MyBinary`. After
-entering that command for the first time in a clean repository, Bazel:
-
-1. Parses every `BUILD` file in the workspace to create a graph of dependencies
-   among artifacts.
-1. Uses the graph to determine the transitive dependencies of `MyBinary`; that
-   is, every target that `MyBinary` depends on and every target that those
-   targets depend on, recursively.
-1. Builds each of those dependencies, in order. Bazel starts by building each
-   target that has no other dependencies and keeps track of which dependencies
-   still need to be built for each target. As soon as all of a target’s
-   dependencies are built, Bazel starts building that target. This process
-   continues until every one of `MyBinary`’s transitive dependencies has been
-   built.
-1. Builds `MyBinary` to produce a final executable binary that links in all of
-   the dependencies that were built in step 3.
-
-Fundamentally, it might not seem like what’s happening here is that much
-different from what happened when using a task-based build system. Indeed, the
-end result is the same binary, and the process for producing it involved
-analyzing a bunch of steps to find dependencies among them, and then running
-those steps in order. But there are critical differences. The first one appears
-in step 3: because Bazel knows that each target only produces a Java library, it
-knows that all it has to do is run the Java compiler rather than an arbitrary
-user-defined script, so it knows that it’s safe to run these steps in parallel.
-This can produce an order of magnitude performance improvement over building
-targets one at a time on a multicore machine, and is only possible because the
-artifact-based approach leaves the build system in charge of its own execution
-strategy so that it can make stronger guarantees about parallelism.
-
-The benefits extend beyond parallelism, though. The next thing that this
-approach gives us becomes apparent when the developer types `bazel
-build :MyBinary` a second time without making any changes: Bazel exits in less
-than a second with a message saying that the target is up to date. 
This is
-possible due to the functional programming paradigm we talked about
-earlier—Bazel knows that each target is the result only of running a Java
-compiler, and it knows that the output from the Java compiler depends only on
-its inputs, so as long as the inputs haven’t changed, the output can be reused.
-And this analysis works at every level; if `MyBinary.java` changes, Bazel knows
-to rebuild `MyBinary` but reuse `mylib`. If a source file for
-`//java/com/example/common` changes, Bazel knows to rebuild that library,
-`mylib`, and `MyBinary`, but reuse `//java/com/example/myproduct/otherlib`.
-Because Bazel knows about the properties of the tools it runs at every step,
-it’s able to rebuild only the minimum set of artifacts each time while
-guaranteeing that it won’t produce stale builds.
-
-Reframing the build process in terms of artifacts rather than tasks is subtle
-but powerful. By reducing the flexibility exposed to the programmer, the build
-system can know more about what is being done at every step of the build. It can
-use this knowledge to make the build far more efficient by parallelizing build
-processes and reusing their outputs. But this is really just the first step, and
-these building blocks of parallelism and reuse form the basis for a distributed
-and highly scalable build system.
-
-## Other nifty Bazel tricks
-
-Artifact-based build systems fundamentally solve the problems with parallelism
-and reuse that are inherent in task-based build systems. But there are still a
-few problems that came up earlier that we haven’t addressed. Bazel has clever
-ways of solving each of these, and we should discuss them before moving on.
-
-### Tools as dependencies
-
-One problem we ran into earlier was that builds depended on the tools installed
-on our machine, and reproducing builds across systems could be difficult due to
-different tool versions or locations. The problem becomes even more difficult
-when your project uses languages that require different tools based on which
-platform they’re being built on or compiled for (such as Windows versus Linux),
-and each of those platforms requires a slightly different set of tools to do the
-same job.
-
-Bazel solves the first part of this problem by treating tools as dependencies of
-each target. Every `java_library` in the workspace implicitly depends on a Java
-compiler, which defaults to a well-known compiler. Whenever Bazel builds a
-`java_library`, it checks to make sure that the specified compiler is available
-at a known location. Just like any other dependency, if the Java compiler
-changes, every artifact that depends on it is rebuilt.
-
-Bazel solves the second part of the problem, platform independence, by setting
-[build configurations](/run/build#build-config-cross-compilation). Rather than
-targets depending directly on their tools, they depend on types of configurations:
-
-* **Host configuration**: building tools that run during the build
-* **Target configuration**: building the binary you ultimately requested
-
-### Extending the build system
-
-Bazel comes with targets for several popular programming languages out of the
-box, but engineers will always want to do more—part of the benefit of task-based
-systems is their flexibility in supporting any kind of build process, and it
-would be better not to give that up in an artifact-based build system.
-Fortunately, Bazel allows its supported target types to be extended by
-[adding custom rules](/extending/rules). 
-
-To define a rule in Bazel, the rule author declares the inputs that the rule
-requires (in the form of attributes passed in the `BUILD` file) and the fixed
-set of outputs that the rule produces. The author also defines the actions that
-will be generated by that rule. Each action declares its inputs and outputs,
-runs a particular executable or writes a particular string to a file, and can be
-connected to other actions via its inputs and outputs. This means that actions
-are the lowest-level composable unit in the build system—an action can do
-whatever it wants so long as it uses only its declared inputs and outputs, and
-Bazel takes care of scheduling actions and caching their results as appropriate.
-
-The system isn’t foolproof given that there’s no way to stop an action developer
-from doing something like introducing a nondeterministic process as part of
-their action. But this doesn’t happen very often in practice, and pushing the
-possibilities for abuse all the way down to the action level greatly decreases
-opportunities for errors. Rules supporting many common languages and tools are
-widely available online, and most projects will never need to define their own
-rules. Even for those that do, rules only need to be defined in one
-central place in the repository, meaning most engineers will be able to use
-those rules without ever having to worry about their implementation.
-
-### Isolating the environment
-
-Actions sound like they might run into the same problems as tasks in other
-systems—isn’t it still possible to write actions that both write to the same
-file and end up conflicting with one another? Actually, Bazel makes these
-conflicts impossible by using _[sandboxing](/docs/sandboxing)_. On supported
-systems, every action is isolated from every other action via a filesystem
-sandbox. Effectively, each action can see only a restricted view of the
-filesystem that includes the inputs it has declared and any outputs it has
-produced. This is enforced by systems such as LXC on Linux, the same technology
-behind Docker. This means that it’s impossible for actions to conflict with one
-another because they are unable to read any files they don’t declare, and any
-files that they write but don’t declare will be thrown away when the action
-finishes. Bazel also uses sandboxes to restrict actions from communicating via
-the network.
-
-### Making external dependencies deterministic
-
-There’s still one problem remaining: build systems often need to download
-dependencies (whether tools or libraries) from external sources rather than
-directly building them. This can be seen in the example via the
-`@com_google_common_guava_guava//jar` dependency, which downloads a `JAR` file
-from Maven.
-
-Depending on files outside of the current workspace is risky. Those files could
-change at any time, potentially requiring the build system to constantly check
-whether they’re fresh. If a remote file changes without a corresponding change
-in the workspace source code, it can also lead to unreproducible builds—a build
-might work one day and fail the next for no obvious reason due to an unnoticed
-dependency change. Finally, an external dependency can introduce a huge security
-risk when it is owned by a third party: if an attacker is able to infiltrate
-that third-party server, they can replace the dependency file with something of
-their own design, potentially giving them full control over your build
-environment and its output. 
- -The fundamental problem is that we want the build system to be aware of these -files without having to check them into source control. Updating a dependency -should be a conscious choice, but that choice should be made once in a central -place rather than managed by individual engineers or automatically by the -system. This is because even with a “Live at Head” model, we still want builds -to be deterministic, which implies that if you check out a commit from last -week, you should see your dependencies as they were then rather than as they are -now. - -Bazel and some other build systems address this problem by requiring a -workspacewide manifest file that lists a _cryptographic hash_ for every external -dependency in the workspace. The hash is a concise way to uniquely represent the -file without checking the entire file into source control. Whenever a new -external dependency is referenced from a workspace, that dependency’s hash is -added to the manifest, either manually or automatically. When Bazel runs a -build, it checks the actual hash of its cached dependency against the expected -hash defined in the manifest and redownloads the file only if the hash differs. - -If the artifact we download has a different hash than the one declared in the -manifest, the build will fail unless the hash in the manifest is updated. This -can be done automatically, but that change must be approved and checked into -source control before the build will accept the new dependency. This means that -there’s always a record of when a dependency was updated, and an external -dependency can’t change without a corresponding change in the workspace source. -It also means that, when checking out an older version of the source code, the -build is guaranteed to use the same dependencies that it was using at the point -when that version was checked in (or else it will fail if those dependencies are -no longer available). - -Of course, it can still be a problem if a remote server becomes unavailable or -starts serving corrupt data—this can cause all of your builds to begin failing -if you don’t have another copy of that dependency available. To avoid this -problem, we recommend that, for any nontrivial project, you mirror all of its -dependencies onto servers or services that you trust and control. Otherwise you -will always be at the mercy of a third party for your build system’s -availability, even if the checked-in hashes guarantee its security. diff --git a/8.4.2/basics/build-systems.mdx b/8.4.2/basics/build-systems.mdx deleted file mode 100644 index b3c6338..0000000 --- a/8.4.2/basics/build-systems.mdx +++ /dev/null @@ -1,127 +0,0 @@ ---- -title: 'Why a Build System?' ---- - - - -This page discusses what build systems are, what they do, why you should use a -build system, and why compilers and build scripts aren't the best choice as your -organization starts to scale. It's intended for developers who don't have much -experience with a build system. - -## What is a build system? - -Fundamentally, all build systems have a straightforward purpose: they transform -the source code written by engineers into executable binaries that can be read -by machines. Build systems aren't just for human-authored code; they also allow -machines to create builds automatically, whether for testing or for releases to -production. In an organization with thousands of engineers, it's common that -most builds are triggered automatically rather than directly by engineers. - -### Can't I just use a compiler? 
- -The need for a build system might not be immediately obvious. Most engineers -don't use a build system while learning to code: most start by invoking tools -like `gcc` or `javac` directly from the command line, or the equivalent in an -integrated development environment (IDE). As long as all the source code is in -the same directory, a command like this works fine: - -```posix-terminal -javac *.java -``` - -This instructs the Java compiler to take every Java source file in the current -directory and turn it into a binary class file. In the simplest case, this is -all you need. - -However, as soon as code expands, the complications begin. `javac` is smart -enough to look in subdirectories of the current directory to find code to -import. But it has no way of finding code stored in _other parts_ of the -filesystem (perhaps a library shared by several projects). It also only knows -how to build Java code. Large systems often involve different pieces written in -a variety of programming languages with webs of dependencies among those pieces, -meaning no compiler for a single language can possibly build the entire system. - -Once you're dealing with code from multiple languages or multiple compilation -units, building code is no longer a one-step process. Now you must evaluate what -your code depends on and build those pieces in the proper order, possibly using -a different set of tools for each piece. If any dependencies change, you must -repeat this process to avoid depending on stale binaries. For a codebase of even -moderate size, this process quickly becomes tedious and error-prone. - -The compiler also doesn’t know anything about how to handle external -dependencies, such as third-party `JAR` files in Java. Without a build system, -you could manage this by downloading the dependency from the internet, sticking -it in a `lib` folder on the hard drive, and configuring the compiler to read -libraries from that directory. Over time, it's difficult to maintain the -updates, versions, and source of these external dependencies. - -### What about shell scripts? - -Suppose that your hobby project starts out simple enough that you can build it -using just a compiler, but you begin running into some of the problems described -previously. Maybe you still don’t think you need a build system and can automate -away the tedious parts using some simple shell scripts that take care of -building things in the correct order. This helps out for a while, but pretty -soon you start running into even more problems: - -* It becomes tedious. As your system grows more complex, you begin spending - almost as much time working on your build scripts as on real code. Debugging - shell scripts is painful, with more and more hacks being layered on top of - one another. - -* It’s slow. To make sure you weren’t accidentally relying on stale libraries, - you have your build script build every dependency in order every time you - run it. You think about adding some logic to detect which parts need to be - rebuilt, but that sounds awfully complex and error prone for a script. Or - you think about specifying which parts need to be rebuilt each time, but - then you’re back to square one. - -* Good news: it’s time for a release! Better go figure out all the arguments - you need to pass to the jar command to make your final build. And remember - how to upload it and push it out to the central repository. And build and - push the documentation updates, and send out a notification to users. 
Hmm, - maybe this calls for another script... - -* Disaster! Your hard drive crashes, and now you need to recreate your entire - system. You were smart enough to keep all of your source files in version - control, but what about those libraries you downloaded? Can you find them - all again and make sure they were the same version as when you first - downloaded them? Your scripts probably depended on particular tools being - installed in particular places—can you restore that same environment so that - the scripts work again? What about all those environment variables you set a - long time ago to get the compiler working just right and then forgot about? - -* Despite the problems, your project is successful enough that you’re able to - begin hiring more engineers. Now you realize that it doesn’t take a disaster - for the previous problems to arise—you need to go through the same painful - bootstrapping process every time a new developer joins your team. And - despite your best efforts, there are still small differences in each - person’s system. Frequently, what works on one person’s machine doesn’t work - on another’s, and each time it takes a few hours of debugging tool paths or - library versions to figure out where the difference is. - -* You decide that you need to automate your build system. In theory, this is - as simple as getting a new computer and setting it up to run your build - script every night using cron. You still need to go through the painful - setup process, but now you don’t have the benefit of a human brain being - able to detect and resolve minor problems. Now, every morning when you get - in, you see that last night’s build failed because yesterday a developer - made a change that worked on their system but didn’t work on the automated - build system. Each time it’s a simple fix, but it happens so often that you - end up spending a lot of time each day discovering and applying these simple - fixes. - -* Builds become slower and slower as the project grows. One day, while waiting - for a build to complete, you gaze mournfully at the idle desktop of your - coworker, who is on vacation, and wish there were a way to take advantage of - all that wasted computational power. - -You’ve run into a classic problem of scale. For a single developer working on at -most a couple hundred lines of code for at most a week or two (which might have -been the entire experience thus far of a junior developer who just graduated -university), a compiler is all you need. Scripts can maybe take you a little bit -farther. But as soon as you need to coordinate across multiple developers and -their machines, even a perfect build script isn’t enough because it becomes very -difficult to account for the minor differences in those machines. At this point, -this simple approach breaks down and it’s time to invest in a real build system. diff --git a/8.4.2/basics/dependencies.mdx b/8.4.2/basics/dependencies.mdx deleted file mode 100644 index 1d3bf8f..0000000 --- a/8.4.2/basics/dependencies.mdx +++ /dev/null @@ -1,301 +0,0 @@ ---- -title: 'Dependency Management' ---- - - - -In looking through the previous pages, one theme repeats over and over: managing -your own code is fairly straightforward, but managing its dependencies is much -more difficult. 
There are all sorts of dependencies: sometimes there’s a -dependency on a task (such as “push the documentation before I mark a release as -complete”), and sometimes there’s a dependency on an artifact (such as “I need -to have the latest version of the computer vision library to build my code”). -Sometimes, you have internal dependencies on another part of your codebase, and -sometimes you have external dependencies on code or data owned by another team -(either in your organization or a third party). But in any case, the idea of “I -need that before I can have this” is something that recurs repeatedly in the -design of build systems, and managing dependencies is perhaps the most -fundamental job of a build system. - -## Dealing with Modules and Dependencies - -Projects that use artifact-based build systems like Bazel are broken into a set -of modules, with modules expressing dependencies on one another via `BUILD` -files. Proper organization of these modules and dependencies can have a huge -effect on both the performance of the build system and how much work it takes to -maintain. - -## Using Fine-Grained Modules and the 1:1:1 Rule - -The first question that comes up when structuring an artifact-based build is -deciding how much functionality an individual module should encompass. In Bazel, -a _module_ is represented by a target specifying a buildable unit like a -`java_library` or a `go_binary`. At one extreme, the entire project could be -contained in a single module by putting one `BUILD` file at the root and -recursively globbing together all of that project’s source files. At the other -extreme, nearly every source file could be made into its own module, effectively -requiring each file to list in a `BUILD` file every other file it depends on. - -Most projects fall somewhere between these extremes, and the choice involves a -trade-off between performance and maintainability. Using a single module for the -entire project might mean that you never need to touch the `BUILD` file except -when adding an external dependency, but it means that the build system must -always build the entire project all at once. This means that it won’t be able to -parallelize or distribute parts of the build, nor will it be able to cache parts -that it’s already built. One-module-per-file is the opposite: the build system -has the maximum flexibility in caching and scheduling steps of the build, but -engineers need to expend more effort maintaining lists of dependencies whenever -they change which files reference which. - -Though the exact granularity varies by language (and often even within -language), Google tends to favor significantly smaller modules than one might -typically write in a task-based build system. A typical production binary at -Google often depends on tens of thousands of targets, and even a moderate-sized -team can own several hundred targets within its codebase. For languages like -Java that have a strong built-in notion of packaging, each directory usually -contains a single package, target, and `BUILD` file (Pants, another build system -based on Bazel, calls this the 1:1:1 rule). Languages with weaker packaging -conventions frequently define multiple targets per `BUILD` file. - -The benefits of smaller build targets really begin to show at scale because they -lead to faster distributed builds and a less frequent need to rebuild targets. 
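-
-To make this concrete, a Java package directory following the 1:1:1 rule might
-contain a `BUILD` file like the following sketch (the package paths and target
-names here are hypothetical):
-
-```python
-# src/main/java/com/example/chat/BUILD
-java_library(
-    name = "chat",
-    srcs = glob(["*.java"]),
-    deps = [
-        "//src/main/java/com/example/auth",
-        "//src/main/java/com/example/storage",
-    ],
-)
-```
-
-Each directory holds exactly one package, one target, and one `BUILD` file, and
-every other package the code uses appears as an explicit dependency.
-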
-The advantages become even more compelling after testing enters the picture, as -finer-grained targets mean that the build system can be much smarter about -running only a limited subset of tests that could be affected by any given -change. Because Google believes in the systemic benefits of using smaller -targets, we’ve made some strides in mitigating the downside by investing in -tooling to automatically manage `BUILD` files to avoid burdening developers. - -Some of these tools, such as `buildifier` and `buildozer`, are available with -Bazel in the [`buildtools` -directory](https://github.com/bazelbuild/buildtools). - -## Minimizing Module Visibility - -Bazel and other build systems allow each target to specify a visibility — a -property that determines which other targets may depend on it. A private target -can only be referenced within its own `BUILD` file. A target may grant broader -visibility to the targets of an explicitly defined list of `BUILD` files, or, in -the case of public visibility, to every target in the workspace. - -As with most programming languages, it is usually best to minimize visibility as -much as possible. Generally, teams at Google will make targets public only if -those targets represent widely used libraries available to any team at Google. -Teams that require others to coordinate with them before using their code will -maintain an allowlist of customer targets as their target’s visibility. Each -team’s internal implementation targets will be restricted to only directories -owned by the team, and most `BUILD` files will have only one target that isn’t -private. - -## Managing Dependencies - -Modules need to be able to refer to one another. The downside of breaking a -codebase into fine-grained modules is that you need to manage the dependencies -among those modules (though tools can help automate this). Expressing these -dependencies usually ends up being the bulk of the content in a `BUILD` file. - -### Internal dependencies - -In a large project broken into fine-grained modules, most dependencies are -likely to be internal; that is, on another target defined and built in the same -source repository. Internal dependencies differ from external dependencies in -that they are built from source rather than downloaded as a prebuilt artifact -while running the build. This also means that there’s no notion of “version” for -internal dependencies—a target and all of its internal dependencies are always -built at the same commit/revision in the repository. One issue that should be -handled carefully with regard to internal dependencies is how to treat -transitive dependencies (Figure 1). Suppose target A depends on target B, which -depends on a common library target C. Should target A be able to use classes -defined in target C? - -[![Transitive -dependencies](/images/transitive-dependencies.png)](/images/transitive-dependencies.png) - -**Figure 1**. Transitive dependencies - -As far as the underlying tools are concerned, there’s no problem with this; both -B and C will be linked into target A when it is built, so any symbols defined in -C are known to A. Bazel allowed this for many years, but as Google grew, we -began to see problems. Suppose that B was refactored such that it no longer -needed to depend on C. If B’s dependency on C was then removed, A and any other -target that used C via a dependency on B would break. Effectively, a target’s -dependencies became part of its public contract and could never be safely -changed. 
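-
-A minimal sketch of the situation in Figure 1, using hypothetical `cc_library`
-targets, looks like this:
-
-```python
-cc_library(
-    name = "c",
-    srcs = ["c.cc"],
-)
-
-cc_library(
-    name = "b",
-    srcs = ["b.cc"],
-    deps = [":c"],
-)
-
-# a.cc also uses symbols from :c, but the target declares only :b.
-cc_library(
-    name = "a",
-    srcs = ["a.cc"],
-    deps = [":b"],
-)
-```
-
-Nothing in the underlying tools stops `a.cc` from using symbols defined in
-`:c`, but if `:b` later drops its dependency on `:c`, target `:a` breaks.
-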
Dependencies thus accumulated over time, and builds at Google started to
-slow down.
-
-Google eventually solved this issue by introducing a “strict transitive
-dependency mode” in Bazel. In this mode, Bazel detects whether a target tries to
-reference a symbol without depending on it directly and, if so, fails with an
-error and a shell command that can be used to automatically insert the
-dependency. Rolling this change out across Google’s entire codebase and
-refactoring every one of our millions of build targets to explicitly list their
-dependencies was a multiyear effort, but it was well worth it. Our builds are
-now much faster given that targets have fewer unnecessary dependencies, and
-engineers are empowered to remove dependencies they don’t need without worrying
-about breaking targets that depend on them.
-
-As usual, enforcing strict transitive dependencies involved a trade-off. It made
-build files more verbose, as frequently used libraries now need to be listed
-explicitly in many places rather than pulled in incidentally, and engineers
-needed to spend more effort adding dependencies to `BUILD` files. We’ve since
-developed tools that reduce this toil by automatically detecting many missing
-dependencies and adding them to `BUILD` files without any developer
-intervention. But even without such tools, we’ve found the trade-off to be well
-worth it as the codebase scales: explicitly adding a dependency to a `BUILD`
-file is a one-time cost, but dealing with implicit transitive dependencies can
-cause ongoing problems as long as the build target exists. Bazel [enforces
-strict transitive
-dependencies](https://blog.bazel.build/2017/06/28/sjd-unused_deps.html)
-on Java code by default.
-
-### External dependencies
-
-If a dependency isn’t internal, it must be external. External dependencies are
-those on artifacts that are built and stored outside of the build system. The
-dependency is imported directly from an artifact repository (typically accessed
-over the internet) and used as-is rather than being built from source. One of
-the biggest differences between external and internal dependencies is that
-external dependencies have versions, and those versions exist independently of
-the project’s source code.
-
-### Automatic versus manual dependency management
-
-Build systems can allow the versions of external dependencies to be managed
-either manually or automatically. When managed manually, the buildfile
-explicitly lists the version it wants to download from the artifact repository,
-often using a [semantic version string](https://semver.org/) such
-as `1.1.4`. When managed automatically, the source file specifies a range of
-acceptable versions, and the build system always downloads the latest one. For
-example, Gradle allows a dependency version to be declared as “1.+” to specify
-that any minor or patch version of a dependency is acceptable so long as the
-major version is 1.
-
-Automatically managed dependencies can be convenient for small projects, but
-they’re usually a recipe for disaster on projects of nontrivial size or that are
-being worked on by more than one engineer. The problem with automatically
-managed dependencies is that you have no control over when the version is
-updated. There’s no way to guarantee that external parties won’t make breaking
-updates (even when they claim to use semantic versioning), so a build that
-worked one day might be broken the next with no easy way to detect what changed
-or to roll it back to a working state. Even if the build doesn’t break, there
-can be subtle behavior or performance changes that are impossible to track down.
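-
-For comparison, manual management pins every version in a file that lives in
-source control. A sketch of what this looks like in Bazel's `MODULE.bazel`
-file (the dependency names and version numbers are purely illustrative):
-
-```python
-# MODULE.bazel
-bazel_dep(name = "rules_cc", version = "0.0.9")
-bazel_dep(name = "platforms", version = "0.0.10")
-```
-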
-In contrast, because manually managed dependencies like these require a change
-in source control, they can be easily discovered and rolled back, and it’s
-possible to check out an older version of the repository to build with older
-dependencies. Bazel requires that versions of all dependencies be specified
-manually. At even moderate scales, the overhead of manual version management is
-well worth it for the stability it provides.
-
-### The One-Version Rule
-
-Different versions of a library are usually represented by different artifacts,
-so in theory there’s no reason that different versions of the same external
-dependency couldn’t both be declared in the build system under different names.
-That way, each target could choose which version of the dependency it wanted to
-use. This causes a lot of problems in practice, so Google enforces a strict
-[One-Version
-Rule](https://opensource.google/docs/thirdparty/oneversion/) for
-all third-party dependencies in our codebase.
-
-The biggest problem with allowing multiple versions is the diamond dependency
-issue. Suppose that target A depends on target B and on v1 of an external
-library. If target B is later refactored to add a dependency on v2 of the same
-external library, target A will break because it now depends implicitly on two
-different versions of the same library. Effectively, it’s never safe to add a
-new dependency from a target to any third-party library with multiple versions,
-because any of that target’s users could already be depending on a different
-version. Following the One-Version Rule makes this conflict impossible—if a
-target adds a dependency on a third-party library, any existing dependencies
-will already be on that same version, so they can happily coexist.
-
-### Transitive external dependencies
-
-Dealing with the transitive dependencies of an external dependency can be
-particularly difficult. Many artifact repositories, such as Maven Central,
-allow artifacts to specify dependencies on particular versions of other
-artifacts in the repository. Build tools like Maven or Gradle often recursively
-download each transitive dependency by default, meaning that adding a single
-dependency in your project could potentially cause dozens of artifacts to be
-downloaded in total.
-
-This is very convenient: when adding a dependency on a new library, it would be
-a big pain to have to track down each of that library’s transitive dependencies
-and add them all manually. But there’s also a huge downside: because different
-libraries can depend on different versions of the same third-party library, this
-strategy necessarily violates the One-Version Rule and leads to the diamond
-dependency problem. If your target depends on two external libraries that use
-different versions of the same dependency, there’s no telling which one you’ll
-get. This also means that updating an external dependency could cause seemingly
-unrelated failures throughout the codebase if the new version begins pulling in
-conflicting versions of some of its dependencies.
-
-Bazel did not always download transitive dependencies automatically. It
-originally employed a `WORKSPACE` file that required all transitive
-dependencies to be listed, which led to a lot of pain when managing external
-dependencies. Bazel has since added support for automatic transitive external
-dependency management in the form of the `MODULE.bazel` file.
See [external dependency -overview](/external/overview) for more details. - -Yet again, the choice here is one between convenience and scalability. Small -projects might prefer not having to worry about managing transitive dependencies -themselves and might be able to get away with using automatic transitive -dependencies. This strategy becomes less and less appealing as the organization -and codebase grows, and conflicts and unexpected results become more and more -frequent. At larger scales, the cost of manually managing dependencies is much -less than the cost of dealing with issues caused by automatic dependency -management. - -### Caching build results using external dependencies - -External dependencies are most often provided by third parties that release -stable versions of libraries, perhaps without providing source code. Some -organizations might also choose to make some of their own code available as -artifacts, allowing other pieces of code to depend on them as third-party rather -than internal dependencies. This can theoretically speed up builds if artifacts -are slow to build but quick to download. - -However, this also introduces a lot of overhead and complexity: someone needs to -be responsible for building each of those artifacts and uploading them to the -artifact repository, and clients need to ensure that they stay up to date with -the latest version. Debugging also becomes much more difficult because different -parts of the system will have been built from different points in the -repository, and there is no longer a consistent view of the source tree. - -A better way to solve the problem of artifacts taking a long time to build is to -use a build system that supports remote caching, as described earlier. Such a -build system saves the resulting artifacts from every build to a location that -is shared across engineers, so if a developer depends on an artifact that was -recently built by someone else, the build system automatically downloads it -instead of building it. This provides all of the performance benefits of -depending directly on artifacts while still ensuring that builds are as -consistent as if they were always built from the same source. This is the -strategy used internally by Google, and Bazel can be configured to use a remote -cache. - -### Security and reliability of external dependencies - -Depending on artifacts from third-party sources is inherently risky. There’s an -availability risk if the third-party source (such as an artifact repository) -goes down, because your entire build might grind to a halt if it’s unable to -download an external dependency. There’s also a security risk: if the -third-party system is compromised by an attacker, the attacker could replace the -referenced artifact with one of their own design, allowing them to inject -arbitrary code into your build. Both problems can be mitigated by mirroring any -artifacts you depend on onto servers you control and blocking your build system -from accessing third-party artifact repositories like Maven Central. The -trade-off is that these mirrors take effort and resources to maintain, so the -choice of whether to use them often depends on the scale of the project. The -security issue can also be completely prevented with little overhead by -requiring the hash of each third-party artifact to be specified in the source -repository, causing the build to fail if the artifact is tampered with. Another -alternative that completely sidesteps the issue is to vendor your project’s -dependencies. 
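-
-Before turning to vendoring, here is what the hash-checking approach just
-described looks like in Bazel, where `http_archive` fails the build if the
-downloaded artifact does not match the checked-in `sha256` (the URL and hash
-below are illustrative):
-
-```python
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-
-http_archive(
-    name = "com_github_gflags_gflags",
-    urls = ["https://github.com/gflags/gflags/archive/v2.2.2.tar.gz"],
-    sha256 = "34af2f15cf7367513b352bdcd2493ab14ce43692d2dcd9dfc499492966c64dcf",
-)
-```
-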
When a project vendors its dependencies, it checks them into -source control alongside the project’s source code, either as source or as -binaries. This effectively means that all of the project’s external dependencies -are converted to internal dependencies. Google uses this approach internally, -checking every third-party library referenced throughout Google into a -`third_party` directory at the root of Google’s source tree. However, this works -at Google only because Google’s source control system is custom built to handle -an extremely large monorepo, so vendoring might not be an option for all -organizations. diff --git a/8.4.2/basics/distributed-builds.mdx b/8.4.2/basics/distributed-builds.mdx deleted file mode 100644 index c32f44f..0000000 --- a/8.4.2/basics/distributed-builds.mdx +++ /dev/null @@ -1,137 +0,0 @@ ---- -title: 'Distributed Builds' ---- - - - -When you have a large codebase, chains of dependencies can become very deep. -Even simple binaries can often depend on tens of thousands of build targets. At -this scale, it’s simply impossible to complete a build in a reasonable amount -of time on a single machine: no build system can get around the fundamental -laws of physics imposed on a machine’s hardware. The only way to make this work -is with a build system that supports distributed builds wherein the units of -work being done by the system are spread across an arbitrary and scalable -number of machines. Assuming we’ve broken the system’s work into small enough -units (more on this later), this would allow us to complete any build of any -size as quickly as we’re willing to pay for. This scalability is the holy grail -we’ve been working toward by defining an artifact-based build system. - -## Remote caching - -The simplest type of distributed build is one that only leverages _remote -caching_, which is shown in Figure 1. - -[![Distributed build with remote caching](/images/distributed-build-remote-cache.png)](/images/distributed-build-remote-cache.png) - -**Figure 1**. A distributed build showing remote caching - -Every system that performs builds, including both developer workstations and -continuous integration systems, shares a reference to a common remote cache -service. This service might be a fast and local short-term storage system like -Redis or a cloud service like Google Cloud Storage. Whenever a user needs to -build an artifact, whether directly or as a dependency, the system first checks -with the remote cache to see if that artifact already exists there. If so, it -can download the artifact instead of building it. If not, the system builds the -artifact itself and uploads the result back to the cache. This means that -low-level dependencies that don’t change very often can be built once and shared -across users rather than having to be rebuilt by each user. At Google, many -artifacts are served from a cache rather than built from scratch, vastly -reducing the cost of running our build system. - -For a remote caching system to work, the build system must guarantee that builds -are completely reproducible. That is, for any build target, it must be possible -to determine the set of inputs to that target such that the same set of inputs -will produce exactly the same output on any machine. This is the only way to -ensure that the results of downloading an artifact are the same as the results -of building it oneself. 
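-
-As a deliberately simplified sketch of this scheme (plain Python, not Bazel's
-actual implementation), the core caching logic might look like:
-
-```python
-import hashlib
-
-def action_key(target_name, input_contents, command):
-    """Key an artifact by its target plus a hash of everything that affects it."""
-    h = hashlib.sha256()
-    h.update(target_name.encode())
-    h.update(command.encode())
-    for content in input_contents:
-        h.update(hashlib.sha256(content).digest())
-    return h.hexdigest()
-
-def build_artifact(cache, target_name, input_contents, command, run_action):
-    key = action_key(target_name, input_contents, command)
-    artifact = cache.get(key)    # Check the shared remote cache first.
-    if artifact is None:
-        artifact = run_action()  # Cache miss: build locally...
-        cache[key] = artifact    # ...and share the result with everyone else.
-    return artifact
-```
-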
Note that this requires that each artifact in the cache -be keyed on both its target and a hash of its inputs—that way, different -engineers could make different modifications to the same target at the same -time, and the remote cache would store all of the resulting artifacts and serve -them appropriately without conflict. - -Of course, for there to be any benefit from a remote cache, downloading an -artifact needs to be faster than building it. This is not always the case, -especially if the cache server is far from the machine doing the build. Google’s -network and build system is carefully tuned to be able to quickly share build -results. - -## Remote execution - -Remote caching isn’t a true distributed build. If the cache is lost or if you -make a low-level change that requires everything to be rebuilt, you still need -to perform the entire build locally on your machine. The true goal is to support -remote execution, in which the actual work of doing the build can be spread -across any number of workers. Figure 2 depicts a remote execution system. - -[![Remote execution system](/images/remote-execution-system.png)](/images/remote-execution-system.png) - -**Figure 2**. A remote execution system - -The build tool running on each user’s machine (where users are either human -engineers or automated build systems) sends requests to a central build master. -The build master breaks the requests into their component actions and schedules -the execution of those actions over a scalable pool of workers. Each worker -performs the actions asked of it with the inputs specified by the user and -writes out the resulting artifacts. These artifacts are shared across the other -machines executing actions that require them until the final output can be -produced and sent to the user. - -The trickiest part of implementing such a system is managing the communication -between the workers, the master, and the user’s local machine. Workers might -depend on intermediate artifacts produced by other workers, and the final output -needs to be sent back to the user’s local machine. To do this, we can build on -top of the distributed cache described previously by having each worker write -its results to and read its dependencies from the cache. The master blocks -workers from proceeding until everything they depend on has finished, in which -case they’ll be able to read their inputs from the cache. The final product is -also cached, allowing the local machine to download it. Note that we also need a -separate means of exporting the local changes in the user’s source tree so that -workers can apply those changes before building. - -For this to work, all of the parts of the artifact-based build systems described -earlier need to come together. Build environments must be completely -self-describing so that we can spin up workers without human intervention. Build -processes themselves must be completely self-contained because each step might -be executed on a different machine. Outputs must be completely deterministic so -that each worker can trust the results it receives from other workers. Such -guarantees are extremely difficult for a task-based system to provide, which -makes it nigh-impossible to build a reliable remote execution system on top of -one. - -## Distributed builds at Google - -Since 2008, Google has been using a distributed build system that employs both -remote caching and remote execution, which is illustrated in Figure 3. 
- -[![High-level build system](/images/high-level-build-system.png)](/images/high-level-build-system.png) - -**Figure 3**. Google’s distributed build system - -Google’s remote cache is called ObjFS. It consists of a backend that stores -build outputs in Bigtables distributed throughout our fleet of production -machines and a frontend FUSE daemon named objfsd that runs on each developer’s -machine. The FUSE daemon allows engineers to browse build outputs as if they -were normal files stored on the workstation, but with the file content -downloaded on-demand only for the few files that are directly requested by the -user. Serving file contents on-demand greatly reduces both network and disk -usage, and the system is able to build twice as fast compared to when we stored -all build output on the developer’s local disk. - -Google’s remote execution system is called Forge. A Forge client in Blaze -(Bazel's internal equivalent) called -the Distributor sends requests for each action to a job running in our -datacenters called the Scheduler. The Scheduler maintains a cache of action -results, allowing it to return a response immediately if the action has already -been created by any other user of the system. If not, it places the action into -a queue. A large pool of Executor jobs continually read actions from this queue, -execute them, and store the results directly in the ObjFS Bigtables. These -results are available to the executors for future actions, or to be downloaded -by the end user via objfsd. - -The end result is a system that scales to efficiently support all builds -performed at Google. And the scale of Google’s builds is truly massive: Google -runs millions of builds executing millions of test cases and producing petabytes -of build outputs from billions of lines of source code every day. Not only does -such a system let our engineers build complex codebases quickly, it also allows -us to implement a huge number of automated tools and systems that rely on our -build. diff --git a/8.4.2/basics/hermeticity.mdx b/8.4.2/basics/hermeticity.mdx deleted file mode 100644 index 282aad8..0000000 --- a/8.4.2/basics/hermeticity.mdx +++ /dev/null @@ -1,109 +0,0 @@ ---- -title: 'Hermeticity' ---- - - - -This page covers hermeticity, the benefits of using hermetic builds, and -strategies for identifying non-hermetic behavior in your builds. - -## Overview - -When given the same input source code and product configuration, a hermetic -build system always returns the same output by isolating the build from changes -to the host system. - -In order to isolate the build, hermetic builds are insensitive to libraries and -other software installed on the local or remote host machine. They depend on -specific versions of build tools, such as compilers, and dependencies, such as -libraries. This makes the build process self-contained as it doesn't rely on -services external to the build environment. - -The two important aspects of hermeticity are: - -* **Isolation**: Hermetic build systems treat tools as source code. They - download copies of tools and manage their storage and use inside managed file - trees. This creates isolation between the host machine and local user, - including installed versions of languages. -* **Source identity**: Hermetic build systems try to ensure the sameness of - inputs. Code repositories, such as Git, identify sets of code mutations with a - unique hash code. Hermetic build systems use this hash to identify changes to - the build's input. 
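-
-As a small illustration of both aspects, compare these two hypothetical
-`genrule` targets:
-
-```python
-# Non-hermetic: reads host state, so the output differs on every run.
-genrule(
-    name = "stamp_nonhermetic",
-    outs = ["stamp.txt"],
-    cmd = "date > $@",
-)
-
-# Hermetic: the output is fully determined by the declared input.
-genrule(
-    name = "stamp_hermetic",
-    srcs = ["version.txt"],
-    outs = ["version_stamp.txt"],
-    cmd = "cat $(location version.txt) > $@",
-)
-```
-
-The second rule's output is identified purely by the hash of its declared input
-and its command line; the first depends on host state that the build system
-cannot see.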
-
-## Benefits
-
-The major benefits of hermetic builds are:
-
-* **Speed**: The output of an action can be cached, and the action need not be
-  run again unless inputs change.
-* **Parallel execution**: For a given input and output, the build system can
-  construct a graph of all actions to calculate efficient and parallel
-  execution. The build system loads the rules and calculates an action graph
-  and hash inputs to look up in the cache.
-* **Multiple builds**: You can run multiple hermetic builds on the same
-  machine, each build using different tools and versions.
-* **Reproducibility**: Hermetic builds are good for troubleshooting because you
-  know the exact conditions that produced the build.
-
-## Identifying non-hermeticity
-
-If you are preparing to switch to Bazel, migration is easier if you improve
-your existing builds' hermeticity in advance. Some common sources of
-non-hermeticity in builds are:
-
-* Arbitrary processing in `.mk` files
-* Actions or tooling that create files non-deterministically, usually involving
-  build IDs or timestamps
-* System binaries that differ across hosts (such as `/usr/bin` binaries, absolute
-  paths, system C++ compilers for native C++ rules autoconfiguration)
-* Writing to the source tree during the build. This prevents the same source
-  tree from being used for another target. The first build writes to the source
-  tree, fixing the source tree for target A. Then trying to build target B may
-  fail.
-
-## Troubleshooting non-hermetic builds
-
-Starting with local execution, issues that affect local cache hits reveal
-non-hermetic actions.
-
-* Ensure null sequential builds: If you run `make` and get a successful build,
-  running the build again should not rebuild any targets. If you run each build
-  step twice or on different systems and a hash of the file contents differs,
-  the build is not reproducible.
-* Run steps to
-  [debug local cache hits](/remote/cache-remote#troubleshooting-cache-hits)
-  from a variety of potential client machines to ensure that you catch any
-  cases of client environment leaking into the actions.
-* Execute a build within a Docker container that contains nothing but the
-  checked-out source tree and an explicit list of host tools. Build breakages
-  and error messages will catch implicit system dependencies.
-* Discover and fix hermeticity problems using
-  [remote execution rules](/remote/rules#overview).
-* Enable strict [sandboxing](/docs/sandboxing)
-  at the per-action level, since actions in a build can be stateful and affect
-  the build or the output.
-* [Workspace rules](/remote/workspace)
-  allow developers to add dependencies to external workspaces, but they are
-  rich enough to allow arbitrary processing along the way. You can
-  get a log of some potentially non-hermetic actions in Bazel workspace rules by
-  adding the flag
-  `--experimental_workspace_rules_log_file=PATH` to
-  your Bazel command.
-
-Note: Make your build fully hermetic when mixing remote and local execution,
-using Bazel’s “dynamic strategy” functionality. Running Bazel inside the remote
-Docker container will enable the build to execute the same in both environments.
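-
-One way to apply the advice above is to build twice (or on two machines) and
-fingerprint the output trees. A small standalone sketch in Python, assuming two
-hypothetical output directories `build-a` and `build-b`:
-
-```python
-import hashlib
-import pathlib
-
-def digest_tree(output_dir):
-    """Hash every file under output_dir to fingerprint a build's outputs."""
-    h = hashlib.sha256()
-    for path in sorted(pathlib.Path(output_dir).rglob("*")):
-        if path.is_file():
-            h.update(str(path.relative_to(output_dir)).encode())
-            h.update(path.read_bytes())
-    return h.hexdigest()
-
-# Identical digests are expected for a reproducible build; any difference
-# points at a non-hermetic action, such as an embedded timestamp.
-print(digest_tree("build-a") == digest_tree("build-b"))
-```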
- -## Hermeticity with Bazel - -For more information about how other projects have had success using hermetic -builds with Bazel, see these BazelCon talks: - -* [Building Real-time Systems with Bazel](https://www.youtube.com/watch?v=t_3bckhV_YI) (SpaceX) -* [Bazel Remote Execution and Remote Caching](https://www.youtube.com/watch?v=_bPyEbAyC0s) (Uber and TwoSigma) -* [Faster Builds With Remote Execution and Caching](https://www.youtube.com/watch?v=MyuJRUwT5LI) -* [Fusing Bazel: Faster Incremental Builds](https://www.youtube.com/watch?v=rQd9Zd1ONOw) -* [Remote Execution vs Local Execution](https://www.youtube.com/watch?v=C8wHmIln--g) -* [Improving the Usability of Remote Caching](https://www.youtube.com/watch?v=u5m7V3ZRHLA) (IBM) -* [Building Self Driving Cars with Bazel](https://www.youtube.com/watch?v=Gh4SJuYUoQI&list=PLxNYxgaZ8Rsf-7g43Z8LyXct9ax6egdSj&index=4&t=0s) (BMW) -* [Building Self Driving Cars with Bazel + Q&A](https://www.youtube.com/watch?v=fjfFe98LTm8&list=PLxNYxgaZ8Rsf-7g43Z8LyXct9ax6egdSj&index=29) (GM Cruise) diff --git a/8.4.2/basics/index.mdx b/8.4.2/basics/index.mdx deleted file mode 100644 index f3c833f..0000000 --- a/8.4.2/basics/index.mdx +++ /dev/null @@ -1,57 +0,0 @@ ---- -title: 'Build Basics' ---- - - - -A build system is one of the most important parts of an engineering organization -because each developer interacts with it potentially dozens or hundreds of times -per day. A fully featured build system is necessary to enable developer -productivity as an organization scales. For individual developers, it's -straightforward to just compile your code and so a build system might seem -excessive. But at a larger scale, having a build system helps with managing -shared dependencies, such as relying on another part of the code base, or an -external resource, such as a library. Build systems help to make sure that you -have everything you need to build your code before it starts building. Build -systems also increase velocity when they're set up to help engineers share -resources and results. - -This section covers some history and basics of building and build systems, -including design decisions that went into making Bazel. If you're -familiar with artifact-based build systems, such as Bazel, Buck, and Pants, you -can skip this section, but it's a helpful overview to understand why -artifact-based build systems are excellent at enabling scale. - -Note: Much of this section's content comes from the _Build Systems and -Build Philosophy_ chapter of the -[_Software Engineering at Google_ book](https://abseil.io/resources/swe-book/html/ch18.html). -Thank you to the original author, Erik Kuefler, for allowing its reuse and -modification here! - -* **[Why a Build System?](/basics/build-systems)** - - If you haven't used a build system before, start here. This page covers why - you should use a build system, and why compilers and build scripts aren't - the best choice once your organization starts to scale beyond a few - developers. - -* **[Task-Based Build Systems](/basics/task-based-builds)** - - This page discusses task-based build systems (such as Make, Maven, and - Gradle) and some of their challenges. - -* **[Artifact-Based Build Systems](/basics/artifact-based-builds)** - - This page discusses artifact-based build systems in response to the pain - points of task-based build systems. - -* **[Distributed Builds](/basics/distributed-builds)** - - This page covers distributed builds, or builds that are executed outside of - your local machine. 
This requires more robust infrastructure to share - resources and build results (and is where the true wizardry happens!) - -* **[Dependency Management](/basics/dependencies)** - - This page covers some complications of dependencies at a large scale and - strategies to counteract those complications. diff --git a/8.4.2/basics/task-based-builds.mdx b/8.4.2/basics/task-based-builds.mdx deleted file mode 100644 index 9dd3f8c..0000000 --- a/8.4.2/basics/task-based-builds.mdx +++ /dev/null @@ -1,216 +0,0 @@ ---- -title: 'Task-Based Build Systems' ---- - - - -This page covers task-based build systems, how they work and some of the -complications that can occur with task-based systems. After shell scripts, -task-based build systems are the next logical evolution of building. - - -## Understanding task-based build systems - -In a task-based build system, the fundamental unit of work is the task. Each -task is a script that can execute any sort of logic, and tasks specify other -tasks as dependencies that must run before them. Most major build systems in use -today, such as Ant, Maven, Gradle, Grunt, and Rake, are task based. Instead of -shell scripts, most modern build systems require engineers to create build files -that describe how to perform the build. - -Take this example from the -[Ant manual](https://ant.apache.org/manual/using.html): - -```xml - - - simple example build file - - - - - - - - - - - - - - - - - - - - - - - - - - - - -``` - -The buildfile is written in XML and defines some simple metadata about the build -along with a list of tasks (the `` tags in the XML). (Ant uses the word -_target_ to represent a _task_, and it uses the word _task_ to refer to -_commands_.) Each task executes a list of possible commands defined by Ant, -which here include creating and deleting directories, running `javac`, and -creating a JAR file. This set of commands can be extended by user-provided -plug-ins to cover any sort of logic. Each task can also define the tasks it -depends on via the depends attribute. These dependencies form an acyclic graph, -as seen in Figure 1. - -[![Acrylic graph showing dependencies](/images/task-dependencies.png)](/images/task-dependencies.png) - -Figure 1. An acyclic graph showing dependencies - -Users perform builds by providing tasks to Ant’s command-line tool. For example, -when a user types `ant dist`, Ant takes the following steps: - -1. Loads a file named `build.xml` in the current directory and parses it to - create the graph structure shown in Figure 1. -1. Looks for the task named `dist` that was provided on the command line and - discovers that it has a dependency on the task named `compile`. -1. Looks for the task named `compile` and discovers that it has a dependency on - the task named `init`. -1. Looks for the task named `init` and discovers that it has no dependencies. -1. Executes the commands defined in the `init` task. -1. Executes the commands defined in the `compile` task given that all of that - task’s dependencies have been run. -1. Executes the commands defined in the `dist` task given that all of that - task’s dependencies have been run. - -In the end, the code executed by Ant when running the `dist` task is equivalent -to the following shell script: - -```posix-terminal -./createTimestamp.sh - -mkdir build/ - -javac src/* -d build/ - -mkdir -p dist/lib/ - -jar cf dist/lib/MyProject-$(date --iso-8601).jar build/* -``` - -When the syntax is stripped away, the buildfile and the build script actually -aren’t too different. 
But we’ve already gained a lot by doing this. We can -create new buildfiles in other directories and link them together. We can easily -add new tasks that depend on existing tasks in arbitrary and complex ways. We -need only pass the name of a single task to the `ant` command-line tool, and it -determines everything that needs to be run. - -Ant is an old piece of software, originally released in 2000. Other tools like -Maven and Gradle have improved on Ant in the intervening years and essentially -replaced it by adding features like automatic management of external -dependencies and a cleaner syntax without any XML. But the nature of these newer -systems remains the same: they allow engineers to write build scripts in a -principled and modular way as tasks and provide tools for executing those tasks -and managing dependencies among them. - -## The dark side of task-based build systems - -Because these tools essentially let engineers define any script as a task, they -are extremely powerful, allowing you to do pretty much anything you can imagine -with them. But that power comes with drawbacks, and task-based build systems can -become difficult to work with as their build scripts grow more complex. The -problem with such systems is that they actually end up giving _too much power to -engineers and not enough power to the system_. Because the system has no idea -what the scripts are doing, performance suffers, as it must be very conservative -in how it schedules and executes build steps. And there’s no way for the system -to confirm that each script is doing what it should, so scripts tend to grow in -complexity and end up being another thing that needs debugging. - -### Difficulty of parallelizing build steps - -Modern development workstations are quite powerful, with multiple cores that are -capable of executing several build steps in parallel. But task-based systems are -often unable to parallelize task execution even when it seems like they should -be able to. Suppose that task A depends on tasks B and C. Because tasks B and C -have no dependency on each other, is it safe to run them at the same time so -that the system can more quickly get to task A? Maybe, if they don’t touch any -of the same resources. But maybe not—perhaps both use the same file to track -their statuses and running them at the same time causes a conflict. There’s no -way in general for the system to know, so either it has to risk these conflicts -(leading to rare but very difficult-to-debug build problems), or it has to -restrict the entire build to running on a single thread in a single process. -This can be a huge waste of a powerful developer machine, and it completely -rules out the possibility of distributing the build across multiple machines. - -### Difficulty performing incremental builds - -A good build system allows engineers to perform reliable incremental builds such -that a small change doesn’t require the entire codebase to be rebuilt from -scratch. This is especially important if the build system is slow and unable to -parallelize build steps for the aforementioned reasons. But unfortunately, -task-based build systems struggle here, too. Because tasks can do anything, -there’s no way in general to check whether they’ve already been done. Many tasks -simply take a set of source files and run a compiler to create a set of -binaries; thus, they don’t need to be rerun if the underlying source files -haven’t changed. 
But without additional information, the system can’t say this -for sure—maybe the task downloads a file that could have changed, or maybe it -writes a timestamp that could be different on each run. To guarantee -correctness, the system typically must rerun every task during each build. Some -build systems try to enable incremental builds by letting engineers specify the -conditions under which a task needs to be rerun. Sometimes this is feasible, but -often it’s a much trickier problem than it appears. For example, in languages -like C++ that allow files to be included directly by other files, it’s -impossible to determine the entire set of files that must be watched for changes -without parsing the input sources. Engineers often end up taking shortcuts, and -these shortcuts can lead to rare and frustrating problems where a task result is -reused even when it shouldn’t be. When this happens frequently, engineers get -into the habit of running clean before every build to get a fresh state, -completely defeating the purpose of having an incremental build in the first -place. Figuring out when a task needs to be rerun is surprisingly subtle, and is -a job better handled by machines than humans. - -### Difficulty maintaining and debugging scripts - -Finally, the build scripts imposed by task-based build systems are often just -difficult to work with. Though they often receive less scrutiny, build scripts -are code just like the system being built, and are easy places for bugs to hide. -Here are some examples of bugs that are very common when working with a -task-based build system: - -* Task A depends on task B to produce a particular file as output. The owner - of task B doesn’t realize that other tasks rely on it, so they change it to - produce output in a different location. This can’t be detected until someone - tries to run task A and finds that it fails. -* Task A depends on task B, which depends on task C, which is producing a - particular file as output that’s needed by task A. The owner of task B - decides that it doesn’t need to depend on task C any more, which causes task - A to fail even though task B doesn’t care about task C at all! -* The developer of a new task accidentally makes an assumption about the - machine running the task, such as the location of a tool or the value of - particular environment variables. The task works on their machine, but fails - whenever another developer tries it. -* A task contains a nondeterministic component, such as downloading a file - from the internet or adding a timestamp to a build. Now, people get - potentially different results each time they run the build, meaning that - engineers won’t always be able to reproduce and fix one another’s failures - or failures that occur on an automated build system. -* Tasks with multiple dependencies can create race conditions. If task A - depends on both task B and task C, and task B and C both modify the same - file, task A gets a different result depending on which one of tasks B and C - finishes first. - -There’s no general-purpose way to solve these performance, correctness, or -maintainability problems within the task-based framework laid out here. So long -as engineers can write arbitrary code that runs during the build, the system -can’t have enough information to always be able to run builds quickly and -correctly. 
To solve the problem, we need to take some power out of the hands of -engineers and put it back in the hands of the system and reconceptualize the -role of the system not as running tasks, but as producing artifacts. - -This approach led to the creation of artifact-based build systems, like Blaze -and Bazel. diff --git a/8.4.2/brand/index.mdx b/8.4.2/brand/index.mdx deleted file mode 100644 index 2a21cd4..0000000 --- a/8.4.2/brand/index.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: 'Bazel Brand Guidelines' ---- - - - -The Bazel trademark and logo ("Bazel Trademarks") are trademarks of Google, and -are treated separately from the copyright or patent license grants contained in -the Apache-licensed Bazel repositories on GitHub. Any use of the Bazel -Trademarks other than those permitted in these guidelines must be approved in -advance. - -## Purpose of the Brand Guidelines - -These guidelines exist to ensure that the Bazel project can share its technology -under open source licenses while making sure that the "Bazel" brand is protected -as a meaningful source identifier in a way that's consistent with trademark law. -By adhering to these guidelines, you help to promote the freedom to use and -develop high-quality Bazel technology. - -## Acceptable Uses - -Given the open nature of Bazel, you may use the Bazel trademark to refer to the -project without prior written permission. Examples of these approved references -include the following: - -* To refer to the Bazel Project itself; -* To link to bazel.build; -* To refer to unmodified source code or other files shared by the Bazel - repositories on GitHub; -* In blog posts, news articles, or educational materials about Bazel; -* To accurately identify that your design or implementation is based on, is - for use with, or is compatible with Bazel technology. - -Examples: - -* \[Your Product\] for Bazel -* \[Your Product\] is compatible with Bazel -* \[XYZ\] Conference for Bazel Users - -## General Guidelines - -* The Bazel name may never be used or registered in a manner that would cause - confusion as to Google's sponsorship, affiliation, or endorsement. -* Don't use the Bazel name as part of your company name, product name, domain - name, or social media profile. -* Other than as permitted by these guidelines, the Bazel name should not be - combined with other trademarks, terms, or source identifiers. -* Don't remove, distort or alter any element of the Bazel Trademarks. That - includes modifying the Bazel Trademark, for example, through hyphenation, - combination or abbreviation. Do not shorten, abbreviate, or create acronyms - out of the Bazel Trademarks. -* Don't display the word Bazel using any different stylization, color, or font - from the surrounding text. -* Don't use the term Bazel as a verb or use it in possessive form. -* Don't use the Bazel logo on any website, product UI, or promotional - materials without prior written permission from - [product@bazel.build](mailto:product@bazel.build). - -## Usage for Events and Community Groups - -The Bazel word mark may be used referentially in events, community groups, or -other gatherings related to the Bazel build system, but it may not be used in a -manner that implies official status or endorsement. - -Examples of appropriate naming conventions are: - -* \[XYZ\] Bazel User Group -* Bazel Community Day at \[XYZ\] -* \[XYZ\] Conference for Bazel Users - -where \[XYZ\] represents the location and optionally other wordings. 
- -Any naming convention that may imply official status or endorsement requires -review for approval from [product@bazel.build](mailto:product@bazel.build). - -Examples of naming conventions that require prior written permission: - -* BazelCon -* Bazel Conference - -## Contact Us - -Please do not hesitate to contact us at -[product@bazel.build](mailto:product@bazel.build) if you are unsure whether your -intended use of the Bazel Trademarks is in compliance with these guidelines, or -to ask for permission to use the Bazel Trademarks, clearly describing the -intended usage and duration. diff --git a/8.4.2/build/share-variables.mdx b/8.4.2/build/share-variables.mdx deleted file mode 100644 index b248034..0000000 --- a/8.4.2/build/share-variables.mdx +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: 'Sharing Variables' ---- - - - -`BUILD` files are intended to be simple and declarative. They will typically -consist of a series of target declarations. As your code base and your `BUILD` -files get larger, you will probably notice some duplication, such as: - -``` python -cc_library( - name = "foo", - copts = ["-DVERSION=5"], - srcs = ["foo.cc"], -) - -cc_library( - name = "bar", - copts = ["-DVERSION=5"], - srcs = ["bar.cc"], - deps = [":foo"], -) -``` - -Code duplication in `BUILD` files is usually fine. This can make the file more -readable: each declaration can be read and understood without any context. This -is important, not only for humans, but also for external tools. For example, a -tool might be able to read and update `BUILD` files to add missing dependencies. -Code refactoring and code reuse might prevent this kind of automated -modification. - -If it is useful to share values (for example, if values must be kept in sync), -you can introduce a variable: - -``` python -COPTS = ["-DVERSION=5"] - -cc_library( - name = "foo", - copts = COPTS, - srcs = ["foo.cc"], -) - -cc_library( - name = "bar", - copts = COPTS, - srcs = ["bar.cc"], - deps = [":foo"], -) -``` - -Multiple declarations now use the value `COPTS`. By convention, use uppercase -letters to name global constants. - -## Sharing variables across multiple BUILD files - -If you need to share a value across multiple `BUILD` files, you have to put it -in a `.bzl` file. `.bzl` files contain definitions (variables and functions) -that can be used in `BUILD` files. - -In `path/to/variables.bzl`, write: - -``` python -COPTS = ["-DVERSION=5"] -``` - -Then, you can update your `BUILD` files to access the variable: - -``` python -load("//path/to:variables.bzl", "COPTS") - -cc_library( - name = "foo", - copts = COPTS, - srcs = ["foo.cc"], -) - -cc_library( - name = "bar", - copts = COPTS, - srcs = ["bar.cc"], - deps = [":foo"], -) -``` diff --git a/8.4.2/build/style-guide.mdx b/8.4.2/build/style-guide.mdx deleted file mode 100644 index 19a5216..0000000 --- a/8.4.2/build/style-guide.mdx +++ /dev/null @@ -1,232 +0,0 @@ ---- -title: 'BUILD Style Guide' ---- - - - -`BUILD` file formatting follows the same approach as Go, where a standardized -tool takes care of most formatting issues. -[Buildifier](https://github.com/bazelbuild/buildifier) is a tool that parses and -emits the source code in a standard style. Every `BUILD` file is therefore -formatted in the same automated way, which makes formatting a non-issue during -code reviews. It also makes it easier for tools to understand, edit, and -generate `BUILD` files. - -`BUILD` file formatting must match the output of `buildifier`. 
## Formatting example

```python
# Test code implementing the Foo controller.
package(default_testonly = True)

py_test(
    name = "foo_test",
    srcs = glob(["*.py"]),
    data = [
        "//data/production/foo:startfoo",
        "//foo",
        "//third_party/java/jdk:jdk-k8",
    ],
    flaky = True,
    deps = [
        ":check_bar_lib",
        ":foo_data_check",
        ":pick_foo_port",
        "//pyglib",
        "//testing/pybase",
    ],
)
```

## File structure

**Recommendation**: Use the following order (every element is optional):

* Package description (a comment)

* All `load()` statements

* The `package()` function

* Calls to rules and macros

Buildifier makes a distinction between a standalone comment and a comment attached to an element. If a comment is not attached to a specific element, use an empty line after it. The distinction is important when doing automated changes (for example, to keep or remove a comment when deleting a rule).

```python
# Standalone comment (such as to make a section in a file)

# Comment for the cc_library below
cc_library(name = "cc")
```

## References to targets in the current package

Files should be referred to by their paths relative to the package directory (without ever using up-references, such as `..`). Generated files should be prefixed with "`:`" to indicate that they are not sources. Source files should not be prefixed with `:`. Rules should be prefixed with `:`. For example, assuming `x.cc` is a source file:

```python
cc_library(
    name = "lib",
    srcs = ["x.cc"],
    hdrs = [":gen_header"],
)

genrule(
    name = "gen_header",
    srcs = [],
    outs = ["x.h"],
    cmd = "echo 'int x();' > $@",
)
```

## Target naming

Target names should be descriptive. If a target contains one source file, the target should generally have a name derived from that source (for example, a `cc_library` for `chat.cc` could be named `chat`, or a `java_library` for `DirectMessage.java` could be named `direct_message`).

The eponymous target for a package (the target with the same name as the containing directory) should provide the functionality described by the directory name. If there is no such target, do not create an eponymous target.

Prefer using the short name when referring to an eponymous target (`//x` instead of `//x:x`). If you are in the same package, prefer the local reference (`:x` instead of `//x`).

Avoid using "reserved" target names which have special meaning. This includes `all`, `__pkg__`, and `__subpackages__`; these names have special semantics and can cause confusion and unexpected behaviors when they are used.

In the absence of a prevailing team convention, these are some non-binding recommendations that are broadly used at Google:

* In general, use ["snake_case"](https://en.wikipedia.org/wiki/Snake_case)
    * For a `java_library` with one `src` this means using a name that is not the same as the filename without the extension
    * For Java `*_binary` and `*_test` rules, use ["Upper CamelCase"](https://en.wikipedia.org/wiki/Camel_case). This allows for the target name to match one of the `src`s. For `java_test`, this makes it possible for the `test_class` attribute to be inferred from the name of the target.
* If there are multiple variants of a particular target, add a suffix to disambiguate (such as `:foo_dev` and `:foo_prod`, or `:bar_x86` and `:bar_x64`)
* Suffix `_test` targets with `_test`, `_unittest`, `Test`, or `Tests`
* Avoid meaningless suffixes like `_lib` or `_library` (unless necessary to avoid conflicts between a `_library` target and its corresponding `_binary`)
* For proto-related targets:
    * `proto_library` targets should have names ending in `_proto`
    * Language-specific `*_proto_library` rules should match the underlying proto but replace `_proto` with a language-specific suffix such as:
        * **`cc_proto_library`**: `_cc_proto`
        * **`java_proto_library`**: `_java_proto`
        * **`java_lite_proto_library`**: `_java_proto_lite`

## Visibility

Visibility should be scoped as tightly as possible, while still allowing access by tests and reverse dependencies. Use `__pkg__` and `__subpackages__` as appropriate.

Avoid setting package `default_visibility` to `//visibility:public`. `//visibility:public` should be individually set only for targets in the project's public API. These could be libraries that are designed to be depended on by external projects, or binaries that could be used by an external project's build process.

## Dependencies

Dependencies should be restricted to direct dependencies (dependencies needed by the sources listed in the rule). Do not list transitive dependencies.

Package-local dependencies should be listed first and referred to in a way compatible with the [References to targets in the current package](#targets-current-package) section above (not by their absolute package name).

Prefer to list dependencies directly, as a single list. Putting the "common" dependencies of several targets into a variable reduces maintainability, makes it impossible for tools to change the dependencies of a target, and can lead to unused dependencies.

## Globs

Indicate "no targets" with `[]`. Do not use a glob that matches nothing: it is more error-prone and less obvious than an empty list.

### Recursive

Do not use recursive globs to match source files (for example, `glob(["**/*.java"])`).

Recursive globs make `BUILD` files difficult to reason about because they skip subdirectories containing `BUILD` files.

Recursive globs are generally less efficient than having a `BUILD` file per directory with a dependency graph defined between them, as this enables better remote caching and parallelism.

It is good practice to author a `BUILD` file in each directory and define a dependency graph between them.

### Non-recursive

Non-recursive globs are generally acceptable.

## Other conventions

* Use uppercase and underscores to declare constants (such as `GLOBAL_CONSTANT`), and lowercase and underscores to declare variables (such as `my_variable`).

* Labels should never be split, even if they are longer than 79 characters. Labels should be string literals whenever possible. *Rationale*: It makes find-and-replace easy. It also improves readability.

* The value of the `name` attribute should be a literal constant string (except in macros). *Rationale*: External tools use the `name` attribute to refer to a rule. They need to find rules without having to interpret code.

* When setting boolean-type attributes, use boolean values, not integer values. For legacy reasons, rules still convert integers to booleans as needed, but this is discouraged. *Rationale*: `flaky = 1` could be misread as saying "deflake this target by rerunning it once". `flaky = True` unambiguously says "this test is flaky".
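As a quick illustration, here is a hypothetical target that follows the conventions above (all names and labels are invented for the example):

```python
# Constants use uppercase with underscores; variables use lowercase.
CHAT_COPTS = ["-DVERSION=5"]

cc_test(
    # `name` is a literal string so that external tools can find the rule.
    name = "chat_test",
    srcs = ["chat_test.cc"],
    copts = CHAT_COPTS,
    # Boolean attributes take booleans: True, not 1.
    flaky = True,
    deps = [
        # Package-local dependency, referred to with ":".
        ":chat",
        # A long label stays on a single line rather than being split.
        "//chat/testing/support:integration_test_helpers_library",
    ],
)
```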
## Differences with Python style guide

Although compatibility with the [Python style guide](https://www.python.org/dev/peps/pep-0008/) is a goal, there are a few differences:

* No strict line length limit. Long comments and long strings are often split to 79 columns, but it is not required. It should not be enforced in code reviews or presubmit scripts. *Rationale*: Labels can be long and exceed this limit. It is common for `BUILD` files to be generated or edited by tools, which does not go well with a line length limit.

* Implicit string concatenation is not supported. Use the `+` operator. *Rationale*: `BUILD` files contain many string lists. It is easy to forget a comma, which leads to a completely different result. This has created many bugs in the past. [See also this discussion.](https://lwn.net/Articles/551438/)

* Use spaces around the `=` sign for keyword arguments in rules. *Rationale*: Named arguments are much more frequent than in Python and are always on a separate line. Spaces improve readability. This convention has been around for a long time, and it is not worth modifying all existing `BUILD` files.

* By default, use double quotation marks for strings. *Rationale*: This is not specified in the Python style guide, but it recommends consistency. So we decided to use only double-quoted strings. Many languages use double quotes for string literals.

* Use a single blank line between two top-level definitions. *Rationale*: The structure of a `BUILD` file is not like a typical Python file. It has only top-level statements. Using a single blank line makes `BUILD` files shorter.

diff --git a/8.4.2/community/recommended-rules.mdx b/8.4.2/community/recommended-rules.mdx
deleted file mode 100644
index 86daa05..0000000
--- a/8.4.2/community/recommended-rules.mdx
+++ /dev/null
@@ -1,54 +0,0 @@
---
title: 'Recommended Rules'
---

In the documentation, we provide a list of [recommended rules](/rules).

This is a set of high-quality rules, which will provide a good experience to our users. We make a distinction between the supported rules and the hundreds of rules you can find on the Internet.

## Nomination

If a ruleset meets the requirements below, a rule maintainer can nominate it to be part of the _recommended rules_ by filing a [GitHub issue](https://github.com/bazelbuild/bazel/).

After a review by the [Bazel core team](/contribute/policy), it will be recommended on the Bazel website.

## Requirements for the rule maintainers

* The ruleset provides an important feature, useful to a large number of Bazel users (for example, support for a widely popular language).
* The ruleset is well maintained. There must be at least two active maintainers.
* The ruleset is well documented, with examples, and easy to use.
* The ruleset follows best practices and is performant (see [the performance guide](/rules/performance)).
* The ruleset has sufficient test coverage.
* The ruleset is tested on [BuildKite](https://github.com/bazelbuild/continuous-integration/blob/master/buildkite/README.md) with the latest version of Bazel. Tests should always pass (when used as a presubmit check).
* The ruleset is also tested with the upcoming incompatible changes. Breakages should be fixed within two weeks. Migration issues should be reported to the Bazel team quickly.

## Requirements for Bazel developers

* Recommended rules are frequently tested with Bazel at head (at least once a day).
* No change in Bazel may break a recommended rule (with the default set of flags). If it happens, the change should be fixed or rolled back.

## Demotion

If there is a concern that a particular ruleset is no longer meeting the requirements, a [GitHub issue](https://github.com/bazelbuild/bazel/) should be filed.

Rule maintainers will be contacted and need to respond within two weeks. Based on the outcome, the Bazel core team might decide to demote the ruleset.

diff --git a/8.4.2/community/remote-execution-services.mdx b/8.4.2/community/remote-execution-services.mdx
deleted file mode 100644
index bede2b8..0000000
--- a/8.4.2/community/remote-execution-services.mdx
+++ /dev/null
@@ -1,28 +0,0 @@
---
title: 'Remote Execution Services'
---

Use the following services to run Bazel with remote execution:

* Manual

    * Use the [gRPC protocol](https://github.com/bazelbuild/remote-apis) directly to create your own remote execution service.

* Self-service

    * [Buildbarn](https://github.com/buildbarn)
    * [Buildfarm](https://github.com/bazelbuild/bazel-buildfarm)
    * [BuildGrid](https://gitlab.com/BuildGrid/buildgrid)
    * [NativeLink](https://github.com/TraceMachina/nativelink)

* Commercial

    * [Aspect Build](https://www.aspect.build/) – Self-hosted remote cache and remote execution services.
    * [Bitrise](https://bitrise.io/why/features/mobile-build-caching-for-better-build-test-performance) – Providing the world's leading mobile-first CI/CD and remote build caching platform.
    * [BuildBuddy](https://www.buildbuddy.io) – Remote build execution, caching, and results UI.
    * [EngFlow Remote Execution](https://www.engflow.com) – Remote execution and remote caching service with Build and Test UI. Can be self-hosted or hosted.

diff --git a/8.4.2/community/roadmaps-starlark.mdx b/8.4.2/community/roadmaps-starlark.mdx
deleted file mode 100644
index 5ce476d..0000000
--- a/8.4.2/community/roadmaps-starlark.mdx
+++ /dev/null
@@ -1,91 +0,0 @@
---
title: 'Starlark Roadmap'
---

*Last verified: 2020-04-21* ([update history](https://github.com/bazelbuild/bazel-website/commits/master/roadmaps/starlark.md))

*Point of contact:* [laurentlb](https://github.com/laurentlb)

## Goal

Our goal is to make Bazel more extensible. Users should be able to easily implement their own rules, and support new languages and tools. We want to improve the experience of writing and maintaining those rules.

We focus on two areas:

* Make the language and API simple, yet powerful.
* Provide better tooling for reading, writing, updating, debugging, and testing the code.

## Q2 2020

Build health and best practices:

* P0. Discourage macros without a name, and ensure the name is a unique string literal. This work is focused on the Google codebase, but may impact tooling available publicly.
* P0. Make Buildozer commands reliable with regard to selects and variables.
* P1. Make Buildifier remove duplicates in lists that we don't sort because of comments.
* P1. Update the Buildifier linter to recommend inlining trivial expressions.
* P2. Study use cases for native.existing_rule[s]() and propose alternatives.
* P2. Study use cases for the prelude file and propose alternatives.

Performance:

* P1. Optimize the Starlark interpreter using flat environments and bytecode compilation.

Technical debt reduction:

* P0. Add ability to port native symbols to Starlark underneath @bazel_tools.
* P1. Delete obsolete flags (some of them are still used at Google, so we need to clean the codebase first): `incompatible_always_check_depset_elements`, `incompatible_disable_deprecated_attr_params`, `incompatible_no_support_tools_in_action_inputs`, `incompatible_new_actions_api`.
* P1. Ensure the following flags can be flipped in Bazel 4.0: `incompatible_disable_depset_items`, `incompatible_no_implicit_file_export`, `incompatible_run_shell_command_string`, `incompatible_restrict_string_escapes`.
* P1. Finish lib.syntax work (API cleanup, separation from Bazel).
* P2. Reduce by 50% the build+test latency of a trivial edit to Bazel's Java packages.

Community:

* `rules_python` is active and well-maintained by the community.
* Continuous support for rules_jvm_external (no outstanding pull requests, issue triage, making releases).
* Maintain Bazel documentation infrastructure: centralize and canonicalize CSS styles across bazel-website, bazel-blog, and docs.
* Bazel docs: add CI tests for the e2e doc site build to prevent regressions.

## Q1 2020

Build health and best practices:

* Allow targets to track their macro call stack, for exporting via `bazel query`.
* Implement `--incompatible_no_implicit_file_export`.
* Remove the deprecated depset APIs (#5817, #10313, #9017).
* Add a cross-file analyzer in Buildifier; implement a check for deprecated functions.

Performance:

* Make Bazel's own Java-based tests 2x faster.
* Implement a Starlark CPU profiler.

Technical debt reduction:

* Remove 8 incompatible flags (after flipping them).
* Finish lib.syntax cleanup work (break dependencies).
* Starlark optimization: flat environment, bytecode compilation.
* Delete all serialization from the analysis phase, if possible.
* Make a plan for simplifying/optimizing lib.packages.

Community:

* Publish a glossary containing definitions for all the Bazel-specific terms.

diff --git a/8.4.2/community/sig.mdx b/8.4.2/community/sig.mdx
deleted file mode 100644
index ae5f918..0000000
--- a/8.4.2/community/sig.mdx
+++ /dev/null
@@ -1,158 +0,0 @@
---
title: 'Bazel Special Interest Groups'
---

Bazel hosts Special Interest Groups (SIGs) to focus collaboration on particular areas and to support communication and coordination between [Bazel owners, maintainers, and contributors](/contribute/policy). This policy applies to [`bazelbuild`](http://github.com/bazelbuild).

SIGs do their work in public. The ideal scope for a SIG covers a well-defined domain, where the majority of participation is from the community. SIGs may focus on community-maintained repositories in `bazelbuild` (such as language rules) or focus on areas of code in the Bazel repository (such as Remote Execution).

While not all SIGs will have the same level of energy, breadth of scope, or governance models, there should be sufficient evidence that there are community members willing to engage and contribute should the interest group be established. Before joining, review the group's work, and then get in touch with the SIG leader. Membership policies vary on a per-SIG basis.

See the complete list of [Bazel SIGs](https://github.com/bazelbuild/community/tree/main/sigs).

### Non-goals: What a SIG is not

SIGs are intended to facilitate collaboration on shared work.
A SIG is therefore:

- *Not a support forum:* a mailing list and a SIG are not the same thing
- *Not immediately required:* early on in a project's life, you may not know if you have shared work or collaborators
- *Not free labor:* energy is required to grow and coordinate the work collaboratively

Bazel Owners take a conservative approach to SIG creation—thanks to the ease of starting projects on GitHub, there are many avenues where collaboration can happen without the need for a SIG.

## SIG lifecycle

This section covers how to create a SIG.

### Research and consultation

To propose a new SIG group, first gather evidence for approval, as specified below. Some possible avenues to consider are:

- A well-defined problem or set of problems the group would solve
- Consultation with community members who would benefit, assessing both the benefit and their willingness to commit
- For existing projects, evidence from issues and PRs that contributors care about the topic
- Potential goals for the group to achieve
- Resource requirements of running the group

Even if the need for a SIG seems self-evident, the research and consultation are still important to the success of the group.

### Create the new group

The new group should follow the process below for chartering. In particular, it must demonstrate:

- A clear purpose and benefit to Bazel (either around a sub-project or application area)
- Two or more contributors willing to act as group leads, existence of other contributors, and evidence of demand for the group
- Each group needs to use at least one publicly accessible mailing list. A SIG may reuse one of the public lists, such as [bazel-discuss](https://groups.google.com/g/bazel-discuss), ask for a list for @bazel.build, or create their own list
- Resources the SIG initially requires (usually, a mailing list and a regular video call)
- SIGs can serve documents and files from their directory in [`bazelbuild/community`](https://github.com/bazelbuild/community) or from their own repository in the [`bazelbuild`](https://github.com/bazelbuild) GitHub organization. SIGs may link to external resources if they choose to organize their work outside of the `bazelbuild` GitHub organization
- Bazel Owners approve or reject SIG applications and consult other stakeholders as necessary

Before entering the formal parts of the process, you should consult with the Bazel product team, at product@bazel.build. Most SIGs require conversation and iteration before approval.

The formal request for the new group is done by submitting a charter as a PR to [`bazelbuild/community`](https://github.com/bazelbuild/community), and including the request in the comments on the PR following the template below. On approval, the PR for the group is merged and the required resources created.

### Template Request for New SIG

To request a new SIG, use the template in the community repo: [SIG-request-template.md](https://github.com/bazelbuild/community/blob/main/governance/SIG-request-template.md).

### Chartering

To establish a group, you need a charter and must follow the Bazel [code of conduct](https://github.com/bazelbuild/bazel/blob/HEAD/CODE_OF_CONDUCT.md). Archives of the group will be public. Membership may either be open to all without approval, or available on request, pending approval of the group administrator.

The charter must nominate an administrator.
As well as an administrator, the group must include at least one person as lead (these may be the same person), who serves as point of contact for coordination as required with the Bazel product team.

Group creators must post their charter to the group mailing list. The community repository in the Bazel GitHub organization archives such documents and policies. As groups evolve their practices and conventions, they should update their charters within the relevant part of the community repository.

### Collaboration and inclusion

While not mandated, the group is encouraged to hold its meetings via scheduled conference calls or chat channels. Any such meetings should be advertised on the mailing list, and notes posted to the mailing list afterwards. Regular meetings help drive accountability and progress in a SIG.

Bazel product team members may proactively monitor the group and encourage discussion and action as appropriate.

### Launch a SIG

Required activities:

- Notify Bazel general discussion groups ([bazel-discuss](https://groups.google.com/g/bazel-discuss), [bazel-dev](https://groups.google.com/g/bazel-dev)).

Optional activities:

- Create a blog post for the Bazel blog

### Health and termination of SIGs

The Bazel owners make a best effort to ensure the health of SIGs. Bazel owners occasionally request the SIG lead to report on the SIG's work, to inform the broader Bazel community of the group's activity.

If a SIG no longer has a useful purpose or interested community, it may be archived and cease operation. The Bazel product team reserves the right to archive such inactive SIGs to maintain the overall health of the project, though this is a less preferable outcome. A SIG may also opt to disband if it recognizes it has reached the end of its useful life.

## Note

*This content has been adapted from TensorFlow's [SIG playbook](https://www.tensorflow.org/community/sig_playbook) with modifications.*

diff --git a/8.4.2/community/update.mdx b/8.4.2/community/update.mdx
deleted file mode 100644
index be0e07d..0000000
--- a/8.4.2/community/update.mdx
+++ /dev/null
@@ -1,18 +0,0 @@
---
title: 'Community updates'
---

Join Bazel developer relations engineers for the monthly community update livestream, or catch up on past ones.

Title | Date | Description | Speakers
-------- | -------- | -------- | --------
[Roadmap Introduction](https://www.youtube.com/watch?v=gYrZDl7K9JM) | 5/19/2022 | The inaugural Bazel Community Update, introducing the community to some of Google's Bazel leadership to talk about the general state of the project and its upcoming roadmap | Sven Tiffe, Tony Aiuto, Radhika Advani
[Hands-On with Bzlmod](https://www.youtube.com/watch?v=MuW5XNcFukE) | 6/23/2022 | This month, we're joined by Google engineers Yun Peng and Xudong Yang to talk about Bzlmod, the new dependency system that is expected to go GA later this year. We'll cover the motivation behind the change, the new capabilities it brings to the table, and walk through some examples of it in action. | Yun Peng, Xudong Yang
[Extending Gazelle to generate BUILD files](https://www.youtube.com/watch?v=E1-U7EAfhXw) | 7/21/2022 | This month we're joined by Son Luong Ngoc, who will be showing the Gazelle language extension system. We'll briefly touch on how it works under the covers, existing extensions, and how to go about writing your own extensions to ease the migration to Bazel. | Son Luong Ngoc
[Using Bazel for JavaScript Projects](https://www.youtube.com/watch?v=RIfYqX0JJYk) | 8/18/2022 | In this update, Alex Eagle joins us to talk about running JavaScript build tooling under Bazel. We'll look at a couple of examples: a Vue.js frontend and a Nest backend. We'll cover the migration to the newer rules_js provided by Aspect, and study how the tooling allows for fetching third-party dependencies and resolving them in the Node.js runtime. | Alex Eagle
[Like Peanut Butter & Jelly: Integrating Bazel with JetBrains IntelliJ](https://www.youtube.com/watch?v=wMrua-W-LC4) | 9/15/2022 | Bazel is awesome. IntelliJ is awesome. Naturally, they are more awesome together. Bazel IntelliJ plugin gurus Mai Hussien from Google and Justin Kaeser from JetBrains join us this month to give a live demo and walkthrough of the plugin's capabilities. Both new and experienced plugin users are welcome to come with questions. | Mai Hussien, Justin Kaeser
[Bazel at scale for surgical robots](https://www.youtube.com/watch?v=kCs1xa45yjM) | 10/27/2022 | What do you do when CMake CI runs for four hours? Join Guillaume Maudoux of Tweag to learn how they migrated large, embedded robotic applications to Bazel. Topics include configuring toolchains for cross-compilation, improving CI performance, managing third-party dependencies, and creating a positive developer experience — everything needed to ensure that Bazel lives up to "{Fast, Correct} — Choose Two". | Guillaume Maudoux
[The Ghosts of Bazel Past, Present, and Future](https://www.youtube.com/watch?v=uRjSghJQlsw) | 12/22/2022 | For our special holiday Community Update and last of 2022, I'll be joined by Google's Sven Tiffe and Radhika Advani, where we'll be visited by the ghosts of Bazel Past (2022 year in review), Present (Bazel 6.0 release), and Future (what to expect in 2023). | Sven Tiffe, Radhika Advani

diff --git a/8.4.2/concepts/build-ref.mdx b/8.4.2/concepts/build-ref.mdx
deleted file mode 100644
index e8839d4..0000000
--- a/8.4.2/concepts/build-ref.mdx
+++ /dev/null
@@ -1,105 +0,0 @@
---
title: 'Repositories, workspaces, packages, and targets'
---

Bazel builds software from source code organized in directory trees called repositories. A defined set of repositories comprises the workspace. Source files in repositories are organized in a nested hierarchy of packages, where each package is a directory that contains a set of related source files and one `BUILD` file. The `BUILD` file specifies what software outputs can be built from the source.

## Repositories

Source files used in a Bazel build are organized in _repositories_ (often shortened to _repos_). A repo is a directory tree with a boundary marker file at its root; such a boundary marker file could be `MODULE.bazel`, `REPO.bazel`, or in legacy contexts, `WORKSPACE` or `WORKSPACE.bazel`.

The repo in which the current Bazel command is being run is called the _main repo_. Other (external) repos are defined by _repo rules_; see the [external dependencies overview](/external/overview) for more information.

## Workspace

A _workspace_ is the environment shared by all Bazel commands run from the same main repo. It encompasses the main repo and the set of all defined external repos.

Note that historically the concepts of "repository" and "workspace" have been conflated; the term "workspace" has often been used to refer to the main repository, and sometimes even used as a synonym of "repository".
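For instance, a minimal `MODULE.bazel` at the main repo's root both marks the repo boundary and declares the external repos that, together with the main repo, make up the workspace. A sketch, where the module name is hypothetical and the dependency is only illustrative:

```python
# MODULE.bazel at the main repo's root: the boundary marker file.
module(name = "my_project")  # hypothetical module name

# Each bazel_dep brings an external repo into the workspace.
bazel_dep(name = "rules_cc", version = "0.0.9")
```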
## Packages

The primary unit of code organization in a repository is the _package_. A package is a collection of related files and a specification of how they can be used to produce output artifacts.

A package is defined as a directory containing a [`BUILD` file](/concepts/build-files) named either `BUILD` or `BUILD.bazel`. A package includes all files in its directory, plus all subdirectories beneath it, except those which themselves contain a `BUILD` file. From this definition, no file or directory may be a part of two different packages.

For example, in the following directory tree there are two packages, `my/app`, and the subpackage `my/app/tests`. Note that `my/app/data` is not a package, but a directory belonging to package `my/app`.

```
src/my/app/BUILD
src/my/app/app.cc
src/my/app/data/input.txt
src/my/app/tests/BUILD
src/my/app/tests/test.cc
```

## Targets

A package is a container of _targets_, which are defined in the package's `BUILD` file. Most targets are one of two principal kinds, _files_ and _rules_.

Files are further divided into two kinds. _Source files_ are usually written by the efforts of people, and checked in to the repository. _Generated files_, sometimes called derived files or output files, are not checked in, but are generated from source files.

The second kind of target is declared with a _rule_. Each rule instance specifies the relationship between a set of input and a set of output files. The inputs to a rule may be source files, but they also may be the outputs of other rules.

Whether the input to a rule is a source file or a generated file is in most cases immaterial; what matters is only the contents of that file. This fact makes it easy to replace a complex source file with a generated file produced by a rule, such as happens when the burden of manually maintaining a highly structured file becomes too tiresome, and someone writes a program to derive it. No change is required to the consumers of that file. Conversely, a generated file may easily be replaced by a source file with only local changes.

The inputs to a rule may also include _other rules_. The precise meaning of such relationships is often quite complex and language- or rule-dependent, but intuitively it is simple: a C++ library rule A might have another C++ library rule B for an input. The effect of this dependency is that B's header files are available to A during compilation, B's symbols are available to A during linking, and B's runtime data is available to A during execution.

An invariant of all rules is that the files generated by a rule always belong to the same package as the rule itself; it is not possible to generate files into another package. It is not uncommon for a rule's inputs to come from another package, though.

Package groups are sets of packages whose purpose is to limit accessibility of certain rules. Package groups are defined by the `package_group` function. They have three properties: the list of packages they contain, their name, and other package groups they include. The only allowed ways to refer to them are from the `visibility` attribute of rules or from the `default_visibility` attribute of the `package` function; they do not generate or consume files. For more information, refer to the [`package_group` documentation](/reference/be/functions#package_group).
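To make those three properties concrete, here is a short sketch with hypothetical names:

```python
package_group(
    name = "chat_clients",                     # its name
    packages = ["//chat/...", "//frontend"],   # the packages it contains
    includes = [":chat_team"],                 # other package groups it includes
)

cc_library(
    name = "chat_internal",
    srcs = ["chat_internal.cc"],
    # The only allowed use of a package_group: a visibility attribute.
    visibility = [":chat_clients"],
)
```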
diff --git a/8.4.2/concepts/platforms.mdx b/8.4.2/concepts/platforms.mdx
deleted file mode 100644
index e560ea4..0000000
--- a/8.4.2/concepts/platforms.mdx
+++ /dev/null
@@ -1,429 +0,0 @@
---
title: 'Migrating to Platforms'
---

Bazel has sophisticated [support](#background) for modeling [platforms][Platforms] and [toolchains][Toolchains] for multi-architecture and cross-compiled builds.

This page summarizes the state of this support.

Key Point: Bazel's platform and toolchain APIs are available today. Not all languages support them. Use these APIs with your project if you can. Bazel is migrating all major languages so eventually all builds will be platform-based.

See also:

* [Platforms][Platforms]
* [Toolchains][Toolchains]
* [Background][Background]

## Status

### C++

C++ rules use platforms to select toolchains when `--incompatible_enable_cc_toolchain_resolution` is set.

This means you can configure a C++ project with:

```posix-terminal
bazel build //:my_cpp_project --platforms=//:myplatform
```

instead of the legacy:

```posix-terminal
bazel build //:my_cpp_project --cpu=... --crosstool_top=... --compiler=...
```

This will be enabled by default in Bazel 7.0 ([#7260](https://github.com/bazelbuild/bazel/issues/7260)).

To test your C++ project with platforms, see [Migrating Your Project](#migrating-your-project) and [Configuring C++ toolchains].

### Java

Java rules use platforms to select toolchains.

This replaces the legacy flags `--java_toolchain`, `--host_java_toolchain`, `--javabase`, and `--host_javabase`.

See [Java and Bazel](/docs/bazel-and-java) for details.

### Android

Android rules use platforms to select toolchains when `--incompatible_enable_android_toolchain_resolution` is set.

This means you can configure an Android project with:

```posix-terminal
bazel build //:my_android_project --android_platforms=//:my_android_platform
```

instead of with legacy flags like `--android_crosstool_top`, `--android_cpu`, and `--fat_apk_cpu`.

This will be enabled by default in Bazel 7.0 ([#16285](https://github.com/bazelbuild/bazel/issues/16285)).

To test your Android project with platforms, see [Migrating Your Project](#migrating-your-project).

### Apple

[Apple rules] do not support platforms and are not yet scheduled for support.

You can still use platform APIs with Apple builds (for example, when building with a mixture of Apple rules and pure C++) with [platform mappings](#platform-mappings).

### Other languages

* [Go rules] fully support platforms.
* [Rust rules] fully support platforms.

If you own a language rule set, see [Migrating your rule set] for adding support.

## Background

*Platforms* and *toolchains* were introduced to standardize how software projects target different architectures and cross-compile.

This was [inspired][Inspiration] by the observation that language maintainers were already doing this in ad hoc, incompatible ways. For example, C++ rules used `--cpu` and `--crosstool_top` to declare a target CPU and toolchain. Neither of these correctly models a "platform". This produced awkward and incorrect builds.

Java, Android, and other languages evolved their own flags for similar purposes, none of which interoperated with each other. This made cross-language builds confusing and complicated.

Bazel is intended for large, multi-language, multi-platform projects.
This demands more principled support for these concepts, including a clear standard API.

### Need for migration

Upgrading to the new API requires two efforts: releasing the API and upgrading rule logic to use it.

The first is done but the second is ongoing. This consists of ensuring language-specific platforms and toolchains are defined, language logic reads toolchains through the new API instead of old flags like `--crosstool_top`, and `config_setting`s select on the new API instead of old flags.

This work is straightforward but requires a distinct effort for each language, plus fair warning for project owners to test against upcoming changes.

This is why this is an ongoing migration.

### Goal

This migration is complete when all projects build with the form:

```posix-terminal
bazel build //:myproject --platforms=//:myplatform
```

This implies:

1. Your project's rules choose the right toolchains for `//:myplatform`.
1. Your project's dependencies choose the right toolchains for `//:myplatform`.
1. `//:myplatform` references [common declarations][Common Platform Declarations] of `CPU`, `OS`, and other generic, language-independent properties.
1. All relevant [`select()`s][select()] properly match `//:myplatform`.
1. `//:myplatform` is defined in a clear, accessible place: in your project's repo if the platform is unique to your project, or some common place all consuming projects can find it.

Old flags like `--cpu`, `--crosstool_top`, and `--fat_apk_cpu` will be deprecated and removed as soon as it's safe to do so.

Ultimately, this will be the *sole* way to configure architectures.

## Migrating your project

If you build with languages that support platforms, your build should already work with an invocation like:

```posix-terminal
bazel build //:myproject --platforms=//:myplatform
```

See [Status](#status) and your language's documentation for precise details.

If a language requires a flag to enable platform support, you also need to set that flag. See [Status](#status) for details.

For your project to build, you need to check the following:

1. `//:myplatform` must exist. It's generally the project owner's responsibility to define platforms because different projects target different machines. See [Default platforms](#default-platforms).

1. The toolchains you want to use must exist. If using stock toolchains, the language owners should include instructions for how to register them. If writing your own custom toolchains, you need to [register](https://bazel.build/extending/toolchains#registering-building-toolchains) them in your `MODULE.bazel` file or with [`--extra_toolchains`](https://bazel.build/reference/command-line-reference#flag--extra_toolchains).

1. `select()`s and [configuration transitions][Starlark transitions] must resolve properly. See [select()](#select) and [Transitions](#transitions).

1. If your build mixes languages that do and don't support platforms, you may need platform mappings to help the legacy languages work with the new API. See [Platform mappings](#platform-mappings) for details.

If you still have problems, [reach out](#questions) for support.

### Default platforms

Project owners should define explicit [platforms][Defining Constraints and Platforms] to describe the architectures they want to build for. These are then triggered with `--platforms`.

When `--platforms` isn't set, Bazel defaults to a `platform` representing the local build machine.
This is auto-generated at `@platforms//host` (aliased as `@bazel_tools//tools:host_platform`), so there's no need to explicitly define it. It maps the local machine's `OS` and `CPU` with `constraint_value`s declared in [`@platforms`](https://github.com/bazelbuild/platforms).

### `select()`

Projects can [`select()`][select()] on [`constraint_value` targets][constraint_value Rule] but not complete platforms. This is intentional so `select()` supports as wide a variety of machines as possible. A library with `ARM`-specific sources should support *all* `ARM`-powered machines unless there's reason to be more specific.

To select on one or more `constraint_value`s, use:

```python
config_setting(
    name = "is_arm",
    constraint_values = [
        "@platforms//cpu:arm",
    ],
)
```

This is equivalent to traditionally selecting on `--cpu`:

```python
config_setting(
    name = "is_arm",
    values = {
        "cpu": "arm",
    },
)
```

More details [here][select() Platforms].

`select`s on `--cpu`, `--crosstool_top`, etc. don't understand `--platforms`. When migrating your project to platforms, you must either convert them to `constraint_values` or use [platform mappings](#platform-mappings) to support both styles during migration.

### Transitions

[Starlark transitions][Starlark transitions] change flags down parts of your build graph. If your project uses a transition that sets `--cpu`, `--crosstool_top`, or other legacy flags, rules that read `--platforms` won't see these changes.

When migrating your project to platforms, you must either convert changes like `return { "//command_line_option:cpu": "arm" }` to `return { "//command_line_option:platforms": "//:my_arm_platform" }` or use [platform mappings](#platform-mappings) to support both styles during migration.

## Migrating your rule set

If you own a rule set and want to support platforms, you need to:

1. Have rule logic resolve toolchains with the toolchain API. See [toolchain API][Toolchains] (`ctx.toolchains`).

1. Optional: define an `--incompatible_enable_platforms_for_my_language` flag so rule logic alternately resolves toolchains through the new API or old flags like `--crosstool_top` during migration testing.

1. Define the relevant properties that make up platform components. See [Common platform properties](#common-platform-properties).

1. Define standard toolchains and make them accessible to users through your rule's registration instructions ([details](https://bazel.build/extending/toolchains#registering-building-toolchains)).

1. Ensure [`select()`s](#select) and [configuration transitions](#transitions) support platforms. This is the biggest challenge. It's particularly challenging for multi-language projects (which may fail unless *all* languages can read `--platforms`).

If you need to mix with rules that don't support platforms, you may need [platform mappings](#platform-mappings) to bridge the gap.

### Common platform properties

Common, cross-language platform properties like `OS` and `CPU` should be declared in [`@platforms`](https://github.com/bazelbuild/platforms). This encourages sharing, standardization, and cross-language compatibility.

Properties unique to your rules should be declared in your rule's repo. This lets you maintain clear ownership over the specific concepts your rules are responsible for.

If your rules use custom-purpose OSes or CPUs, these should be declared in your rule's repo rather than in
[`@platforms`](https://github.com/bazelbuild/platforms).

## Platform mappings

*Platform mappings* is a temporary API that lets platform-aware logic mix with legacy logic in the same build. This is a blunt tool that's only intended to smooth incompatibilities with different migration timeframes.

Caution: Only use this if necessary, and expect to eventually eliminate it.

A platform mapping is a map of either a `platform()` to a corresponding set of legacy flags, or the reverse. For example:

```python
platforms:
  # Maps "--platforms=//platforms:ios" to "--ios_multi_cpus=x86_64 --apple_platform_type=ios".
  //platforms:ios
    --ios_multi_cpus=x86_64
    --apple_platform_type=ios

flags:
  # Maps "--ios_multi_cpus=x86_64 --apple_platform_type=ios" to "--platforms=//platforms:ios".
  --ios_multi_cpus=x86_64
  --apple_platform_type=ios
  //platforms:ios

  # Maps "--cpu=darwin_x86_64 --apple_platform_type=macos" to "--platforms=//platforms:macos".
  --cpu=darwin_x86_64
  --apple_platform_type=macos
  //platforms:macos
```

Bazel uses this to guarantee all settings, both platform-based and legacy, are consistently applied throughout the build, including through [transitions](#transitions).

By default, Bazel reads mappings from the `platform_mappings` file in your workspace root. You can also set `--platform_mappings=//:my_custom_mapping`.

See the [platform mappings design] for details.

## API review

A [`platform`][platform Rule] is a collection of [`constraint_value` targets][constraint_value Rule]:

```python
platform(
    name = "myplatform",
    constraint_values = [
        "@platforms//os:linux",
        "@platforms//cpu:arm",
    ],
)
```

A [`constraint_value`][constraint_value Rule] is a machine property. Values of the same "kind" are grouped under a common [`constraint_setting`][constraint_setting Rule]:

```python
constraint_setting(name = "os")
constraint_value(
    name = "linux",
    constraint_setting = ":os",
)
constraint_value(
    name = "mac",
    constraint_setting = ":os",
)
```

A [`toolchain`][Toolchains] is a [Starlark rule][Starlark rule]. Its attributes declare a language's tools (like `compiler = "//mytoolchain:custom_gcc"`). Its [providers][Starlark Provider] pass this information to rules that need to build with these tools.

Toolchains declare the `constraint_value`s of machines they can [target][target_compatible_with Attribute] (`target_compatible_with = ["@platforms//os:linux"]`) and machines their tools can [run on][exec_compatible_with Attribute] (`exec_compatible_with = ["@platforms//os:mac"]`).

When building `$ bazel build //:myproject --platforms=//:myplatform`, Bazel automatically selects a toolchain that can run on the build machine and build binaries for `//:myplatform`. This is known as *toolchain resolution*.

The set of available toolchains can be registered in the `MODULE.bazel` file with [`register_toolchains`][register_toolchains Function] or at the command line with [`--extra_toolchains`][extra_toolchains Flag].

For more information, see [here][Toolchains].

## Questions

For general support and questions about the migration timeline, contact [bazel-discuss] or the owners of the appropriate rules.

For discussions on the design and evolution of the platform/toolchain APIs, contact [bazel-dev].
## See also

* [Configurable Builds - Part 1]
* [Platforms]
* [Toolchains]
* [Bazel Platforms Cookbook]
* [Platforms examples]
* [Example C++ toolchain]

[Android Rules]: /docs/bazel-and-android
[Apple Rules]: https://github.com/bazelbuild/rules_apple
[Background]: #background
[Bazel Platforms Cookbook]: https://docs.google.com/document/d/1UZaVcL08wePB41ATZHcxQV4Pu1YfA1RvvWm8FbZHuW8/
[bazel-dev]: https://groups.google.com/forum/#!forum/bazel-dev
[bazel-discuss]: https://groups.google.com/forum/#!forum/bazel-discuss
[Common Platform Declarations]: https://github.com/bazelbuild/platforms
[constraint_setting Rule]: /reference/be/platforms-and-toolchains#constraint_setting
[constraint_value Rule]: /reference/be/platforms-and-toolchains#constraint_value
[Configurable Builds - Part 1]: https://blog.bazel.build/2019/02/11/configurable-builds-part-1.html
[Configuring C++ toolchains]: /tutorials/ccp-toolchain-config
[Defining Constraints and Platforms]: /extending/platforms#constraints-platforms
[Example C++ toolchain]: https://github.com/gregestren/snippets/tree/master/custom_cc_toolchain_with_platforms
[exec_compatible_with Attribute]: /reference/be/platforms-and-toolchains#toolchain.exec_compatible_with
[extra_toolchains Flag]: /reference/command-line-reference#flag--extra_toolchains
[Go Rules]: https://github.com/bazelbuild/rules_go
[Inspiration]: https://blog.bazel.build/2019/02/11/configurable-builds-part-1.html
[Migrating your rule set]: #migrating-your-rule-set
[Platforms]: /extending/platforms
[Platforms examples]: https://github.com/hlopko/bazel_platforms_examples
[platform mappings design]: https://docs.google.com/document/d/1Vg_tPgiZbSrvXcJ403vZVAGlsWhH9BUDrAxMOYnO0Ls/edit
[platform Rule]: /reference/be/platforms-and-toolchains#platform
[register_toolchains Function]: /rules/lib/globals/module#register_toolchains
[Rust rules]: https://github.com/bazelbuild/rules_rust
[select()]: /docs/configurable-attributes
[select() Platforms]: /docs/configurable-attributes#platforms
[Starlark provider]: /extending/rules#providers
[Starlark rule]: /extending/rules
[Starlark transitions]: /extending/config#user-defined-transitions
[target_compatible_with Attribute]: /reference/be/platforms-and-toolchains#toolchain.target_compatible_with
[Toolchains]: /extending/toolchains

diff --git a/8.4.2/concepts/visibility.mdx b/8.4.2/concepts/visibility.mdx
deleted file mode 100644
index cb7441d..0000000
--- a/8.4.2/concepts/visibility.mdx
+++ /dev/null
@@ -1,610 +0,0 @@
---
title: 'Visibility'
---

This page covers Bazel's two visibility systems: [target visibility](#target-visibility) and [load visibility](#load-visibility).

Both types of visibility help other developers distinguish between your library's public API and its implementation details, and help enforce structure as your workspace grows. You can also use visibility when deprecating a public API to allow current users while denying new ones.

## Target visibility

**Target visibility** controls who may depend on your target — that is, who may use your target's label inside an attribute such as `deps`. A target will fail to build during the [analysis](/reference/glossary#analysis-phase) phase if it violates the visibility of one of its dependencies.

Generally, a target `A` is visible to a target `B` if they are in the same location, or if `A` grants visibility to `B`'s location.
In the absence of [symbolic macros](/extending/macros), the term "location" can be simplified to just "package"; see [below](#symbolic-macros) for more on symbolic macros.

Visibility is specified by listing allowed packages. Allowing a package does not necessarily mean that its subpackages are also allowed. For more details on packages and subpackages, see [Concepts and terminology](/concepts/build-ref).

For prototyping, you can disable target visibility enforcement by setting the flag `--check_visibility=false`. This shouldn't be done for production usage in submitted code.

The primary way to control visibility is with a rule's [`visibility`](/reference/be/common-definitions#common.visibility) attribute. The following subsections describe the attribute's format, how to apply it to various kinds of targets, and the interaction between the visibility system and symbolic macros.

### Visibility specifications

All rule targets have a `visibility` attribute that takes a list of labels. Each label has one of the following forms. With the exception of the last form, these are just syntactic placeholders that don't correspond to any actual target.

* `"//visibility:public"`: Grants access to all packages.

* `"//visibility:private"`: Does not grant any additional access; only targets in this location's package can use this target.

* `"//foo/bar:__pkg__"`: Grants access to `//foo/bar` (but not its subpackages).

* `"//foo/bar:__subpackages__"`: Grants access to `//foo/bar` and all of its direct and indirect subpackages.

* `"//some_pkg:my_package_group"`: Grants access to all of the packages that are part of the given [`package_group`](/reference/be/functions#package_group).

    * Package groups use a [different syntax](/reference/be/functions#package_group.packages) for specifying packages. Within a package group, the forms `"//foo/bar:__pkg__"` and `"//foo/bar:__subpackages__"` are respectively replaced by `"//foo/bar"` and `"//foo/bar/..."`. Likewise, `"//visibility:public"` and `"//visibility:private"` are just `"public"` and `"private"`.

For example, if `//some/package:mytarget` has its `visibility` set to `[":__subpackages__", "//tests:__pkg__"]`, then it could be used by any target that is part of the `//some/package/...` source tree, as well as targets declared in `//tests/BUILD`, but not by targets defined in `//tests/integration/BUILD`.

**Best practice:** To make several targets visible to the same set of packages, use a `package_group` instead of repeating the list in each target's `visibility` attribute. This increases readability and prevents the lists from getting out of sync.

**Best practice:** When granting visibility to another team's project, prefer `__subpackages__` over `__pkg__` to avoid needless visibility churn as that project evolves and adds new subpackages.

Note: The `visibility` attribute may not specify non-`package_group` targets. Doing so triggers a "Label does not refer to a package group" or "Cycle in dependency graph" error.

### Rule target visibility

A rule target's visibility is determined by taking its `visibility` attribute -- or a suitable default if not given -- and appending the location where the target was declared.
For targets not declared in a symbolic macro, if the package specifies a [`default_visibility`](/reference/be/functions#package.default_visibility), this default is used; for all other packages and for targets declared in a symbolic macro, the default is just `["//visibility:private"]`.

```starlark
# //mypkg/BUILD

package(default_visibility = ["//friend:__pkg__"])

cc_library(
    name = "t1",
    ...
    # No visibility explicitly specified.
    # Effective visibility is ["//friend:__pkg__", "//mypkg:__pkg__"].
    # If no default_visibility were given in package(...), the visibility would
    # instead default to ["//visibility:private"], and the effective visibility
    # would be ["//mypkg:__pkg__"].
)

cc_library(
    name = "t2",
    ...
    visibility = [":clients"],
    # Effective visibility is ["//mypkg:clients", "//mypkg:__pkg__"], which will
    # expand to ["//another_friend:__subpackages__", "//mypkg:__pkg__"].
)

cc_library(
    name = "t3",
    ...
    visibility = ["//visibility:private"],
    # Effective visibility is ["//mypkg:__pkg__"]
)

package_group(
    name = "clients",
    packages = ["//another_friend/..."],
)
```

**Best practice:** Avoid setting `default_visibility` to public. It may be convenient for prototyping or in small codebases, but the risk of inadvertently creating public targets increases as the codebase grows. It's better to be explicit about which targets are part of a package's public interface.

### Generated file target visibility

A generated file target has the same visibility as the rule target that generates it.

```starlark
# //mypkg/BUILD

java_binary(
    name = "foo",
    ...
    visibility = ["//friend:__pkg__"],
)
```

```starlark
# //friend/BUILD

some_rule(
    name = "bar",
    deps = [
        # Allowed directly by visibility of foo.
        "//mypkg:foo",
        # Also allowed. The java_binary's "_deploy.jar" implicit output file
        # target has the same visibility as the rule target itself.
        "//mypkg:foo_deploy.jar",
    ]
    ...
)
```

### Source file target visibility

Source file targets can either be explicitly declared using [`exports_files`](/reference/be/functions#exports_files), or implicitly created by referring to their filename in a label attribute of a rule (outside of a symbolic macro). As with rule targets, the location of the call to `exports_files`, or the `BUILD` file that referred to the input file, is always automatically appended to the file's visibility.

Files declared by `exports_files` can have their visibility set by the `visibility` parameter to that function. If this parameter is not given, the visibility is public.

Note: `exports_files` may not be used to override the visibility of a generated file.

For files that do not appear in a call to `exports_files`, the visibility depends on the value of the flag [`--incompatible_no_implicit_file_export`](https://github.com/bazelbuild/bazel/issues/10225):

* If the flag is true, the visibility is private.

* Else, the legacy behavior applies: the visibility is the same as the `BUILD` file's `default_visibility`, or private if a default visibility is not specified.

Avoid relying on the legacy behavior. Always write an `exports_files` declaration whenever a source file target needs non-private visibility.

**Best practice:** When possible, prefer to expose a rule target rather than a source file. For example, instead of calling `exports_files` on a `.java` file, wrap the file in a non-private `java_library` target.
Generally, rule targets should only directly reference source files that live in the same package.

#### Example

File `//frobber/data/BUILD`:

```starlark
exports_files(["readme.txt"])
```

File `//frobber/bin/BUILD`:

```starlark
cc_binary(
    name = "my-program",
    data = ["//frobber/data:readme.txt"],
)
```

### Config setting visibility

Historically, Bazel has not enforced visibility for [`config_setting`](/reference/be/general#config_setting) targets that are referenced in the keys of a [`select()`](/reference/be/functions#select). There are two flags to remove this legacy behavior:

* [`--incompatible_enforce_config_setting_visibility`](https://github.com/bazelbuild/bazel/issues/12932) enables visibility checking for these targets. To assist with migration, it also causes any `config_setting` that does not specify a `visibility` to be considered public (regardless of package-level `default_visibility`).

* [`--incompatible_config_setting_private_default_visibility`](https://github.com/bazelbuild/bazel/issues/12933) causes `config_setting`s that do not specify a `visibility` to respect the package's `default_visibility` and to fall back on private visibility, just like any other rule target. It is a no-op if `--incompatible_enforce_config_setting_visibility` is not set.

Avoid relying on the legacy behavior. Any `config_setting` that is intended to be used outside the current package should have an explicit `visibility`, if the package does not already specify a suitable `default_visibility`.

### Package group target visibility

`package_group` targets do not have a `visibility` attribute. They are always publicly visible.

### Visibility of implicit dependencies

Some rules have [implicit dependencies](/extending/rules#private_attributes_and_implicit_dependencies) — dependencies that are not spelled out in a `BUILD` file but are inherent to every instance of that rule. For example, a `cc_library` rule might create an implicit dependency from each of its rule targets to an executable target representing a C++ compiler.

The visibility of such an implicit dependency is checked with respect to the package containing the `.bzl` file in which the rule (or aspect) is defined. In our example, the C++ compiler could be private so long as it lives in the same package as the definition of the `cc_library` rule. As a fallback, if the implicit dependency is not visible from the definition, it is checked with respect to the `cc_library` target.

If you want to restrict the usage of a rule to certain packages, use [load visibility](#load-visibility) instead.

### Visibility and symbolic macros

This section describes how the visibility system interacts with [symbolic macros](/extending/macros).

#### Locations within symbolic macros

A key detail of the visibility system is how we determine the location of a declaration. For targets that are not declared in a symbolic macro, the location is just the package where the target lives -- the package of the `BUILD` file. But for targets created in a symbolic macro, the location is the package containing the `.bzl` file where the macro's definition (the `my_macro = macro(...)` statement) appears. When a target is created inside multiple nested macros, it is always the innermost symbolic macro's definition that is used.

The same system is used to determine what location to check against a given dependency's visibility.
If the consuming target was created inside a macro, we look at the innermost macro's definition rather than the package the consuming target lives in.

This means that all macros whose code is defined in the same package are automatically "friends" with one another. Any target directly created by a macro defined in `//lib:defs.bzl` can be seen from any other macro defined in `//lib`, regardless of what packages the macros are actually instantiated in. Likewise, they can see, and can be seen by, targets declared directly in `//lib/BUILD` and its legacy macros. Conversely, targets that live in the same package cannot necessarily see one another if at least one of them is created by a symbolic macro.

Within a symbolic macro's implementation function, the `visibility` parameter has the effective value of the macro's `visibility` attribute after appending the location where the macro was called. The standard way for a macro to export one of its targets to its caller is to forward this value along to the target's declaration, as in `some_rule(..., visibility = visibility)`. Targets that omit this attribute won't be visible to the caller of the macro unless the caller happens to be in the same package as the macro definition. This behavior composes, in the sense that a chain of nested calls to submacros may each pass `visibility = visibility`, re-exporting the inner macro's exported targets to the caller at each level, without exposing any of the macros' implementation details.

#### Delegating privileges to a submacro

The visibility model has a special feature to allow a macro to delegate its permissions to a submacro. This is important for factoring and composing macros.

Suppose you have a macro `my_macro` that creates a dependency edge using a rule `some_library` from another package:

```starlark
# //macro/defs.bzl
load("//lib:defs.bzl", "some_library")

def _impl(name, visibility, ...):
    ...
    native.genrule(
        name = name + "_dependency",
        ...
    )
    some_library(
        name = name + "_consumer",
        deps = [name + "_dependency"],
        ...
    )

my_macro = macro(implementation = _impl, ...)
```

```starlark
# //pkg/BUILD

load("//macro:defs.bzl", "my_macro")

my_macro(name = "foo", ...)
```

The `//pkg:foo_dependency` target has no `visibility` specified, so it is only visible within `//macro`, which works fine for the consuming target. Now, what happens if the author of `//lib` refactors `some_library` to instead be implemented using a macro?

```starlark
# //lib:defs.bzl

def _impl(name, visibility, deps, ...):
    some_rule(
        # Main target, exported.
        name = name,
        visibility = visibility,
        deps = deps,
        ...
    )

some_library = macro(implementation = _impl, ...)
```

With this change, `//pkg:foo_consumer`'s location is now `//lib` rather than `//macro`, so its usage of `//pkg:foo_dependency` violates the dependency's visibility. The author of `my_macro` can't be expected to pass `visibility = ["//lib"]` to the declaration of the dependency just to work around this implementation detail.

For this reason, when a dependency of a target is also an attribute value of the macro that declared the target, we check the dependency's visibility against the location of the macro instead of the location of the consuming target.
- -In this example, to validate whether `//pkg:foo_consumer` can see -`//pkg:foo_dependency`, we see that `//pkg:foo_dependency` was also passed as an -input to the call to `some_library` inside of `my_macro`, and instead check the -dependency's visibility against the location of this call, `//macro`. - -This process can repeat recursively, as long as a target or macro declaration is -inside of another symbolic macro taking the dependency's label in one of its -label-typed attributes. - -Note: Visibility delegation does not work for labels that were not passed into -the macro, such as labels derived by string manipulation. - -#### Finalizers - -Targets declared in a rule finalizer (a symbolic macro with `finalizer = True`), -in addition to seeing targets following the usual symbolic macro visibility -rules, can *also* see all targets which are visible to the finalizer target's -package. - -In other words, if you migrate a `native.existing_rules()`-based legacy macro to -a finalizer, the targets declared by the finalizer will still be able to see -their old dependencies. - -It is possible to define targets that a finalizer can introspect using -`native.existing_rules()`, but which it cannot use as dependencies under the -visibility system. For example, if a macro-defined target is not visible to its -own package or to the finalizer macro's definition, and is not delegated to the -finalizer, the finalizer cannot see such a target. Note, however, that a -`native.existing_rules()`-based legacy macro will also be unable to see such a -target. - -## Load visibility - -**Load visibility** controls whether a `.bzl` file may be loaded from other -`BUILD` or `.bzl` files outside the current package. - -In the same way that target visibility protects source code that is encapsulated -by targets, load visibility protects build logic that is encapsulated by `.bzl` -files. For instance, a `BUILD` file author might wish to factor some repetitive -target declarations into a macro in a `.bzl` file. Without the protection of -load visibility, they might find their macro reused by other collaborators in -the same workspace, so that modifying the macro breaks other teams' builds. - -Note that a `.bzl` file may or may not have a corresponding source file target. -If it does, there is no guarantee that the load visibility and the target -visibility coincide. That is, the same `BUILD` file might be able to load the -`.bzl` file but not list it in the `srcs` of a [`filegroup`](/reference/be/general#filegroup), -or vice versa. This can sometimes cause problems for rules that wish to consume -`.bzl` files as source code, such as for documentation generation or testing. - -For prototyping, you may disable load visibility enforcement by setting -`--check_bzl_visibility=false`. As with `--check_visibility=false`, this should -not be done for submitted code. - -Load visibility is available as of Bazel 6.0. - -### Declaring load visibility - -To set the load visibility of a `.bzl` file, call the -[`visibility()`](/rules/lib/globals/bzl#visibility) function from within the file. -The argument to `visibility()` is a list of package specifications, just like -the [`packages`](/reference/be/functions#package_group.packages) attribute of -`package_group`. However, `visibility()` does not accept negative package -specifications. - -The call to `visibility()` must only occur once per file, at the top level (not -inside a function), and ideally immediately following the `load()` statements. 
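-
-For instance, a minimal placement sketch (the package and helper names here are
-hypothetical):
-
-```starlark
-# //mypkg/defs.bzl
-
-load("@bazel_skylib//lib:selects.bzl", "selects")
-
-# Exactly one top-level visibility() call, right after the load() statements.
-visibility(["//mypkg/..."])
-
-def my_helper():
-    ...
-```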
-
-Unlike target visibility, the default load visibility is always public. Files
-that do not call `visibility()` are always loadable from anywhere in the
-workspace. It is a good idea to add `visibility("private")` to the top of any
-new `.bzl` file that is not specifically intended for use outside the package.
-
-### Example
-
-```starlark
-# //mylib/internal_defs.bzl
-
-# Available to subpackages and to mylib's tests.
-visibility(["//mylib/...", "//tests/mylib/..."])
-
-def helper(...):
-    ...
-```
-
-```starlark
-# //mylib/rules.bzl
-
-load(":internal_defs.bzl", "helper")
-# Set visibility explicitly, even though public is the default.
-# Note the [] can be omitted when there's only one entry.
-visibility("public")
-
-myrule = rule(
-    ...
-)
-```
-
-```starlark
-# //someclient/BUILD
-
-load("//mylib:rules.bzl", "myrule")          # ok
-load("//mylib:internal_defs.bzl", "helper")  # error
-
-...
-```
-
-### Load visibility practices
-
-This section describes tips for managing load visibility declarations.
-
-#### Factoring visibilities
-
-When multiple `.bzl` files should have the same visibility, it can be helpful to
-factor their package specifications into a common list. For example:
-
-```starlark
-# //mylib/internal_defs.bzl
-
-visibility("private")
-
-clients = [
-    "//foo",
-    "//bar/baz/...",
-    ...
-]
-```
-
-```starlark
-# //mylib/feature_A.bzl
-
-load(":internal_defs.bzl", "clients")
-visibility(clients)
-
-...
-```
-
-```starlark
-# //mylib/feature_B.bzl
-
-load(":internal_defs.bzl", "clients")
-visibility(clients)
-
-...
-```
-
-This helps prevent accidental skew between the various `.bzl` files'
-visibilities. It is also more readable when the `clients` list is large.
-
-#### Composing visibilities
-
-Sometimes a `.bzl` file might need to be visible to an allowlist that is
-composed of multiple smaller allowlists. This is analogous to how a
-`package_group` can incorporate other `package_group`s via its
-[`includes`](/reference/be/functions#package_group.includes) attribute.
-
-Suppose you are deprecating a widely used macro. You want it to be visible only
-to existing users and to the packages owned by your own team. You might write:
-
-```starlark
-# //mylib/macros.bzl
-
-load(":internal_defs.bzl", "our_packages")
-load("//some_big_client:defs.bzl", "their_remaining_uses")
-
-# List concatenation. Duplicates are fine.
-visibility(our_packages + their_remaining_uses)
-```
-
-#### Deduplicating with package groups
-
-Unlike target visibility, you cannot define a load visibility in terms of a
-`package_group`. If you want to reuse the same allowlist for both target
-visibility and load visibility, it's best to move the list of package
-specifications into a `.bzl` file, where both kinds of declarations may refer to
-it. Building off the example in [Factoring visibilities](#factoring-visibilities)
-above, you might write:
-
-```starlark
-# //mylib/BUILD
-
-load(":internal_defs.bzl", "clients")
-
-package_group(
-    name = "my_pkg_grp",
-    packages = clients,
-)
-```
-
-This only works if the list does not contain any negative package
-specifications.
-
-#### Protecting individual symbols
-
-Any Starlark symbol whose name begins with an underscore cannot be loaded from
-another file. This makes it easy to create private symbols, but does not allow
-you to share these symbols with a limited set of trusted files. On the other
-hand, load visibility gives you control over what other packages may see your
-`.bzl` file, but does not allow you to prevent any non-underscored symbol from
-being loaded. 
-
-Luckily, you can combine these two features to get fine-grained control.
-
-```starlark
-# //mylib/internal_defs.bzl
-
-# Can't be public, because internal_helper shouldn't be exposed to the world.
-visibility("private")
-
-# Can't be underscore-prefixed, because this is
-# needed by other .bzl files in mylib.
-def internal_helper(...):
-    ...
-
-def public_util(...):
-    ...
-```
-
-```starlark
-# //mylib/defs.bzl
-
-load(":internal_defs.bzl", "internal_helper", _public_util = "public_util")
-visibility("public")
-
-# internal_helper, as a loaded symbol, is available for use in this file but
-# can't be imported by clients who load this file.
-...
-
-# Re-export public_util from this file by assigning it to a global variable.
-# We needed to import it under a different name ("_public_util") in order for
-# this assignment to be legal.
-public_util = _public_util
-```
-
-#### bzl-visibility Buildifier lint
-
-There is a [Buildifier lint](https://github.com/bazelbuild/buildtools/blob/master/WARNINGS.md#bzl-visibility)
-that provides a warning if users load a file from a directory named `internal`
-or `private`, when the user's file is not itself underneath the parent of that
-directory. This lint predates the load visibility feature and is unnecessary in
-workspaces where `.bzl` files declare visibilities.
diff --git a/8.4.2/configure/attributes.mdx b/8.4.2/configure/attributes.mdx
deleted file mode 100644
index 7bc3f41..0000000
--- a/8.4.2/configure/attributes.mdx
+++ /dev/null
@@ -1,1097 +0,0 @@
----
-title: 'Configurable Build Attributes'
----
-
-
-
-**_Configurable attributes_**, commonly known as
-[`select()`](/reference/be/functions#select), is a Bazel feature that lets users
-toggle the values of build rule attributes at the command line.
-
-This can be used, for example, for a multiplatform library that automatically
-chooses the appropriate implementation for the architecture, or for a
-feature-configurable binary that can be customized at build time.
-
-## Example
-
-```python
-# myapp/BUILD
-
-cc_binary(
-    name = "mybinary",
-    srcs = ["main.cc"],
-    deps = select({
-        ":arm_build": [":arm_lib"],
-        ":x86_debug_build": [":x86_dev_lib"],
-        "//conditions:default": [":generic_lib"],
-    }),
-)
-
-config_setting(
-    name = "arm_build",
-    values = {"cpu": "arm"},
-)
-
-config_setting(
-    name = "x86_debug_build",
-    values = {
-        "cpu": "x86",
-        "compilation_mode": "dbg",
-    },
-)
-```
-
-This declares a `cc_binary` that "chooses" its deps based on the flags at the
-command line. Specifically, `deps` becomes:
-
-| Command                                          | `deps =`           |
-| ------------------------------------------------ | ------------------ |
-| `bazel build //myapp:mybinary --cpu=arm`         | `[":arm_lib"]`     |
-| `bazel build //myapp:mybinary -c dbg --cpu=x86`  | `[":x86_dev_lib"]` |
-| `bazel build //myapp:mybinary --cpu=ppc`         | `[":generic_lib"]` |
-| `bazel build //myapp:mybinary -c dbg --cpu=ppc`  | `[":generic_lib"]` |
- -`select()` serves as a placeholder for a value that will be chosen based on -*configuration conditions*, which are labels referencing [`config_setting`](/reference/be/general#config_setting) -targets. By using `select()` in a configurable attribute, the attribute -effectively adopts different values when different conditions hold. - -Matches must be unambiguous: if multiple conditions match then either -* They all resolve to the same value. For example, when running on linux x86, this is unambiguous - `{"@platforms//os:linux": "Hello", "@platforms//cpu:x86_64": "Hello"}` because both branches resolve to "hello". -* One's `values` is a strict superset of all others'. For example, `values = {"cpu": "x86", "compilation_mode": "dbg"}` - is an unambiguous specialization of `values = {"cpu": "x86"}`. - -The built-in condition [`//conditions:default`](#default-condition) automatically matches when -nothing else does. - -While this example uses `deps`, `select()` works just as well on `srcs`, -`resources`, `cmd`, and most other attributes. Only a small number of attributes -are *non-configurable*, and these are clearly annotated. For example, -`config_setting`'s own -[`values`](/reference/be/general#config_setting.values) attribute is non-configurable. - -## `select()` and dependencies - -Certain attributes change the build parameters for all transitive dependencies -under a target. For example, `genrule`'s `tools` changes `--cpu` to the CPU of -the machine running Bazel (which, thanks to cross-compilation, may be different -than the CPU the target is built for). This is known as a -[configuration transition](/reference/glossary#transition). - -Given - -```python -#myapp/BUILD - -config_setting( - name = "arm_cpu", - values = {"cpu": "arm"}, -) - -config_setting( - name = "x86_cpu", - values = {"cpu": "x86"}, -) - -genrule( - name = "my_genrule", - srcs = select({ - ":arm_cpu": ["g_arm.src"], - ":x86_cpu": ["g_x86.src"], - }), - tools = select({ - ":arm_cpu": [":tool1"], - ":x86_cpu": [":tool2"], - }), -) - -cc_binary( - name = "tool1", - srcs = select({ - ":arm_cpu": ["armtool.cc"], - ":x86_cpu": ["x86tool.cc"], - }), -) -``` - -running - -```sh -$ bazel build //myapp:my_genrule --cpu=arm -``` - -on an `x86` developer machine binds the build to `g_arm.src`, `tool1`, and -`x86tool.cc`. Both of the `select`s attached to `my_genrule` use `my_genrule`'s -build parameters, which include `--cpu=arm`. The `tools` attribute changes -`--cpu` to `x86` for `tool1` and its transitive dependencies. The `select` on -`tool1` uses `tool1`'s build parameters, which include `--cpu=x86`. - -## Configuration conditions - -Each key in a configurable attribute is a label reference to a -[`config_setting`](/reference/be/general#config_setting) or -[`constraint_value`](/reference/be/platforms-and-toolchains#constraint_value). - -`config_setting` is just a collection of -expected command line flag settings. By encapsulating these in a target, it's -easy to maintain "standard" conditions users can reference from multiple places. - -`constraint_value` provides support for [multi-platform behavior](#platforms). - -### Built-in flags - -Flags like `--cpu` are built into Bazel: the build tool natively understands -them for all builds in all projects. 
These are specified with -[`config_setting`](/reference/be/general#config_setting)'s -[`values`](/reference/be/general#config_setting.values) attribute: - -```python -config_setting( - name = "meaningful_condition_name", - values = { - "flag1": "value1", - "flag2": "value2", - ... - }, -) -``` - -`flagN` is a flag name (without `--`, so `"cpu"` instead of `"--cpu"`). `valueN` -is the expected value for that flag. `:meaningful_condition_name` matches if -*every* entry in `values` matches. Order is irrelevant. - -`valueN` is parsed as if it was set on the command line. This means: - -* `values = { "compilation_mode": "opt" }` matches `bazel build -c opt` -* `values = { "force_pic": "true" }` matches `bazel build --force_pic=1` -* `values = { "force_pic": "0" }` matches `bazel build --noforce_pic` - -`config_setting` only supports flags that affect target behavior. For example, -[`--show_progress`](/docs/user-manual#show-progress) isn't allowed because -it only affects how Bazel reports progress to the user. Targets can't use that -flag to construct their results. The exact set of supported flags isn't -documented. In practice, most flags that "make sense" work. - -### Custom flags - -You can model your own project-specific flags with -[Starlark build settings][BuildSettings]. Unlike built-in flags, these are -defined as build targets, so Bazel references them with target labels. - -These are triggered with [`config_setting`](/reference/be/general#config_setting)'s -[`flag_values`](/reference/be/general#config_setting.flag_values) -attribute: - -```python -config_setting( - name = "meaningful_condition_name", - flag_values = { - "//myflags:flag1": "value1", - "//myflags:flag2": "value2", - ... - }, -) -``` - -Behavior is the same as for [built-in flags](#built-in-flags). See [here](https://github.com/bazelbuild/examples/tree/HEAD/configurations/select_on_build_setting) -for a working example. - -[`--define`](/reference/command-line-reference#flag--define) -is an alternative legacy syntax for custom flags (for example -`--define foo=bar`). This can be expressed either in the -[values](/reference/be/general#config_setting.values) attribute -(`values = {"define": "foo=bar"}`) or the -[define_values](/reference/be/general#config_setting.define_values) attribute -(`define_values = {"foo": "bar"}`). `--define` is only supported for backwards -compatibility. Prefer Starlark build settings whenever possible. - -`values`, `flag_values`, and `define_values` evaluate independently. The -`config_setting` matches if all values across all of them match. - -## The default condition - -The built-in condition `//conditions:default` matches when no other condition -matches. - -Because of the "exactly one match" rule, a configurable attribute with no match -and no default condition emits a `"no matching conditions"` error. This can -protect against silent failures from unexpected settings: - -```python -# myapp/BUILD - -config_setting( - name = "x86_cpu", - values = {"cpu": "x86"}, -) - -cc_library( - name = "x86_only_lib", - srcs = select({ - ":x86_cpu": ["lib.cc"], - }), -) -``` - -```sh -$ bazel build //myapp:x86_only_lib --cpu=arm -ERROR: Configurable attribute "srcs" doesn't match this configuration (would -a default condition help?). -Conditions checked: - //myapp:x86_cpu -``` - -For even clearer errors, you can set custom messages with `select()`'s -[`no_match_error`](#custom-error-messages) attribute. 
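-
-For instance, a minimal fix for the example above is to add a default branch.
-Whether an empty fallback is appropriate depends on the library; this is just a
-sketch:
-
-```python
-cc_library(
-    name = "x86_only_lib",
-    srcs = select({
-        ":x86_cpu": ["lib.cc"],
-        # Fall back to no sources on other CPUs instead of failing the build.
-        "//conditions:default": [],
-    }),
-)
-```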
-
-## Platforms
-
-While the ability to specify multiple flags on the command line provides
-flexibility, it can also be burdensome to individually set each one every time
-you want to build a target. [Platforms](/extending/platforms) let you
-consolidate these into simple bundles.
-
-```python
-# myapp/BUILD
-
-sh_binary(
-    name = "my_rocks",
-    srcs = select({
-        ":basalt": ["pyroxene.sh"],
-        ":marble": ["calcite.sh"],
-        "//conditions:default": ["feldspar.sh"],
-    }),
-)
-
-config_setting(
-    name = "basalt",
-    constraint_values = [
-        ":black",
-        ":igneous",
-    ],
-)
-
-config_setting(
-    name = "marble",
-    constraint_values = [
-        ":white",
-        ":metamorphic",
-    ],
-)
-
-# constraint_setting acts as an enum type, and constraint_value as an enum value.
-constraint_setting(name = "color")
-constraint_value(name = "black", constraint_setting = "color")
-constraint_value(name = "white", constraint_setting = "color")
-constraint_setting(name = "texture")
-constraint_value(name = "smooth", constraint_setting = "texture")
-constraint_setting(name = "type")
-constraint_value(name = "igneous", constraint_setting = "type")
-constraint_value(name = "metamorphic", constraint_setting = "type")
-
-platform(
-    name = "basalt_platform",
-    constraint_values = [
-        ":black",
-        ":igneous",
-    ],
-)
-
-platform(
-    name = "marble_platform",
-    constraint_values = [
-        ":white",
-        ":smooth",
-        ":metamorphic",
-    ],
-)
-```
-
-The platform can be specified on the command line. It activates the
-`config_setting`s that contain a subset of the platform's `constraint_values`,
-allowing those `config_setting`s to match in `select()` expressions.
-
-For example, in order to set the `srcs` attribute of `my_rocks` to `calcite.sh`,
-you can simply run
-
-```sh
-bazel build //myapp:my_rocks --platforms=//myapp:marble_platform
-```
-
-Without platforms, this might look something like
-
-```sh
-bazel build //myapp:my_rocks --define color=white --define texture=smooth --define type=metamorphic
-```
-
-`select()` can also directly read `constraint_value`s:
-
-```python
-constraint_setting(name = "type")
-constraint_value(name = "igneous", constraint_setting = "type")
-constraint_value(name = "metamorphic", constraint_setting = "type")
-sh_binary(
-    name = "my_rocks",
-    srcs = select({
-        ":igneous": ["igneous.sh"],
-        ":metamorphic": ["metamorphic.sh"],
-    }),
-)
-```
-
-This saves the need for boilerplate `config_setting`s when you only need to
-check against single values.
-
-Platforms are still under development. See the
-[documentation](/concepts/platforms) for details.
-
-## Combining `select()`s
-
-`select` can appear multiple times in the same attribute:
-
-```python
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"] +
-           select({
-               ":armeabi_mode": ["armeabi_src.sh"],
-               ":x86_mode": ["x86_src.sh"],
-           }) +
-           select({
-               ":opt_mode": ["opt_extras.sh"],
-               ":dbg_mode": ["dbg_extras.sh"],
-           }),
-)
-```
-
-Note: Some restrictions apply to what can be combined in the `select`s' values:
-
- - Duplicate labels can appear in different paths of the same `select`.
- - Duplicate labels can *not* appear within the same path of a `select`.
- - Duplicate labels can *not* appear across multiple combined `select`s (no matter what path).
-
-`select` cannot appear inside another `select`. If you need to nest `selects`
-and your attribute takes other targets as values, use an intermediate target:
-
-```python
-sh_binary(
-    name = "my_target",
-    srcs = ["always_include.sh"],
-    deps = select({
-        ":armeabi_mode": [":armeabi_lib"],
-        ...
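-        # Each branch resolves to an intermediate target (see the sh_library
-        # below) that carries its own select(), which simulates nesting.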
- }), -) - -sh_library( - name = "armeabi_lib", - srcs = select({ - ":opt_mode": ["armeabi_with_opt.sh"], - ... - }), -) -``` - -If you need a `select` to match when multiple conditions match, consider [AND -chaining](#and-chaining). - -## OR chaining - -Consider the following: - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1": [":standard_lib"], - ":config2": [":standard_lib"], - ":config3": [":standard_lib"], - ":config4": [":special_lib"], - }), -) -``` - -Most conditions evaluate to the same dep. But this syntax is hard to read and -maintain. It would be nice to not have to repeat `[":standard_lib"]` multiple -times. - -One option is to predefine the value as a BUILD variable: - -```python -STANDARD_DEP = [":standard_lib"] - -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1": STANDARD_DEP, - ":config2": STANDARD_DEP, - ":config3": STANDARD_DEP, - ":config4": [":special_lib"], - }), -) -``` - -This makes it easier to manage the dependency. But it still causes unnecessary -duplication. - -For more direct support, use one of the following: - -### `selects.with_or` - -The -[with_or](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectswith_or) -macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md) -module supports `OR`ing conditions directly inside a `select`: - -```python -load("@bazel_skylib//lib:selects.bzl", "selects") -``` - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = selects.with_or({ - (":config1", ":config2", ":config3"): [":standard_lib"], - ":config4": [":special_lib"], - }), -) -``` - -### `selects.config_setting_group` - - -The -[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group) -macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md) -module supports `OR`ing multiple `config_setting`s: - -```python -load("@bazel_skylib//lib:selects.bzl", "selects") -``` - - -```python -config_setting( - name = "config1", - values = {"cpu": "arm"}, -) -config_setting( - name = "config2", - values = {"compilation_mode": "dbg"}, -) -selects.config_setting_group( - name = "config1_or_2", - match_any = [":config1", ":config2"], -) -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1_or_2": [":standard_lib"], - "//conditions:default": [":other_lib"], - }), -) -``` - -Unlike `selects.with_or`, different targets can share `:config1_or_2` across -different attributes. - -It's an error for multiple conditions to match unless one is an unambiguous -"specialization" of the others or they all resolve to the same value. See [here](#configurable-build-example) for details. 
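-
-To illustrate "specialization" concretely, here is a sketch (the
-`config_setting` names are hypothetical, but the flag values come from the
-earlier examples):
-
-```python
-config_setting(
-    name = "x86",
-    values = {"cpu": "x86"},
-)
-
-config_setting(
-    name = "x86_dbg",
-    values = {
-        "cpu": "x86",
-        "compilation_mode": "dbg",
-    },
-)
-```
-
-When both conditions match, a `select()` containing both keys unambiguously
-chooses `:x86_dbg`, because its `values` is a strict superset of `:x86`'s.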
- -## AND chaining - -If you need a `select` branch to match when multiple conditions match, use the -[Skylib](https://github.com/bazelbuild/bazel-skylib) macro -[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group): - -```python -config_setting( - name = "config1", - values = {"cpu": "arm"}, -) -config_setting( - name = "config2", - values = {"compilation_mode": "dbg"}, -) -selects.config_setting_group( - name = "config1_and_2", - match_all = [":config1", ":config2"], -) -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1_and_2": [":standard_lib"], - "//conditions:default": [":other_lib"], - }), -) -``` - -Unlike OR chaining, existing `config_setting`s can't be directly `AND`ed -inside a `select`. You have to explicitly wrap them in a `config_setting_group`. - -## Custom error messages - -By default, when no condition matches, the target the `select()` is attached to -fails with the error: - -```sh -ERROR: Configurable attribute "deps" doesn't match this configuration (would -a default condition help?). -Conditions checked: - //tools/cc_target_os:darwin - //tools/cc_target_os:android -``` - -This can be customized with the [`no_match_error`](/reference/be/functions#select) -attribute: - -```python -cc_library( - name = "my_lib", - deps = select( - { - "//tools/cc_target_os:android": [":android_deps"], - "//tools/cc_target_os:windows": [":windows_deps"], - }, - no_match_error = "Please build with an Android or Windows toolchain", - ), -) -``` - -```sh -$ bazel build //myapp:my_lib -ERROR: Configurable attribute "deps" doesn't match this configuration: Please -build with an Android or Windows toolchain -``` - -## Rules compatibility - -Rule implementations receive the *resolved values* of configurable -attributes. For example, given: - -```python -# myapp/BUILD - -some_rule( - name = "my_target", - some_attr = select({ - ":foo_mode": [":foo"], - ":bar_mode": [":bar"], - }), -) -``` - -```sh -$ bazel build //myapp/my_target --define mode=foo -``` - -Rule implementation code sees `ctx.attr.some_attr` as `[":foo"]`. - -Macros can accept `select()` clauses and pass them through to native -rules. But *they cannot directly manipulate them*. For example, there's no way -for a macro to convert - -```python -select({"foo": "val"}, ...) -``` - -to - -```python -select({"foo": "val_with_suffix"}, ...) -``` - -This is for two reasons. - -First, macros that need to know which path a `select` will choose *cannot work* -because macros are evaluated in Bazel's [loading phase](/run/build#loading), -which occurs before flag values are known. -This is a core Bazel design restriction that's unlikely to change any time soon. - -Second, macros that just need to iterate over *all* `select` paths, while -technically feasible, lack a coherent UI. Further design is necessary to change -this. - -## Bazel query and cquery - -Bazel [`query`](/query/guide) operates over Bazel's -[loading phase](/reference/glossary#loading-phase). -This means it doesn't know what command line flags a target uses since those -flags aren't evaluated until later in the build (in the -[analysis phase](/reference/glossary#analysis-phase)). -So it can't determine which `select()` branches are chosen. - -Bazel [`cquery`](/query/cquery) operates after Bazel's analysis phase, so it has -all this information and can accurately resolve `select()`s. 
- -Consider: - -```python -load("@bazel_skylib//rules:common_settings.bzl", "string_flag") -``` -```python -# myapp/BUILD - -string_flag( - name = "dog_type", - build_setting_default = "cat" -) - -cc_library( - name = "my_lib", - deps = select({ - ":long": [":foo_dep"], - ":short": [":bar_dep"], - }), -) - -config_setting( - name = "long", - flag_values = {":dog_type": "dachshund"}, -) - -config_setting( - name = "short", - flag_values = {":dog_type": "pug"}, -) -``` - -`query` overapproximates `:my_lib`'s dependencies: - -```sh -$ bazel query 'deps(//myapp:my_lib)' -//myapp:my_lib -//myapp:foo_dep -//myapp:bar_dep -``` - -while `cquery` shows its exact dependencies: - -```sh -$ bazel cquery 'deps(//myapp:my_lib)' --//myapp:dog_type=pug -//myapp:my_lib -//myapp:bar_dep -``` - -## FAQ - -### Why doesn't select() work in macros? - -select() *does* work in rules! See [Rules compatibility](#rules-compatibility) for -details. - -The key issue this question usually means is that select() doesn't work in -*macros*. These are different than *rules*. See the -documentation on [rules](/extending/rules) and [macros](/extending/macros) -to understand the difference. -Here's an end-to-end example: - -Define a rule and macro: - -```python -# myapp/defs.bzl - -# Rule implementation: when an attribute is read, all select()s have already -# been resolved. So it looks like a plain old attribute just like any other. -def _impl(ctx): - name = ctx.attr.name - allcaps = ctx.attr.my_config_string.upper() # This works fine on all values. - print("My name is " + name + " with custom message: " + allcaps) - -# Rule declaration: -my_custom_bazel_rule = rule( - implementation = _impl, - attrs = {"my_config_string": attr.string()}, -) - -# Macro declaration: -def my_custom_bazel_macro(name, my_config_string): - allcaps = my_config_string.upper() # This line won't work with select(s). - print("My name is " + name + " with custom message: " + allcaps) -``` - -Instantiate the rule and macro: - -```python -# myapp/BUILD - -load("//myapp:defs.bzl", "my_custom_bazel_rule") -load("//myapp:defs.bzl", "my_custom_bazel_macro") - -my_custom_bazel_rule( - name = "happy_rule", - my_config_string = select({ - "//third_party/bazel_platforms/cpu:x86_32": "first string", - "//third_party/bazel_platforms/cpu:ppc": "second string", - }), -) - -my_custom_bazel_macro( - name = "happy_macro", - my_config_string = "fixed string", -) - -my_custom_bazel_macro( - name = "sad_macro", - my_config_string = select({ - "//third_party/bazel_platforms/cpu:x86_32": "first string", - "//third_party/bazel_platforms/cpu:ppc": "other string", - }), -) -``` - -Building fails because `sad_macro` can't process the `select()`: - -```sh -$ bazel build //myapp:all -ERROR: /myworkspace/myapp/BUILD:17:1: Traceback - (most recent call last): -File "/myworkspace/myapp/BUILD", line 17 -my_custom_bazel_macro(name = "sad_macro", my_config_stri..."})) -File "/myworkspace/myapp/defs.bzl", line 4, in - my_custom_bazel_macro -my_config_string.upper() -type 'select' has no method upper(). -ERROR: error loading package 'myapp': Package 'myapp' contains errors. -``` - -Building succeeds when you comment out `sad_macro`: - -```sh -# Comment out sad_macro so it doesn't mess up the build. -$ bazel build //myapp:all -DEBUG: /myworkspace/myapp/defs.bzl:5:3: My name is happy_macro with custom message: FIXED STRING. -DEBUG: /myworkspace/myapp/hi.bzl:15:3: My name is happy_rule with custom message: FIRST STRING. 
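-# The macro's message printed at load time; the rule's at analysis time.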
-
-```
-
-This is impossible to change because *by definition* macros are evaluated before
-Bazel reads the build's command line flags. That means there isn't enough
-information to evaluate select()s.
-
-Macros can, however, pass `select()`s as opaque blobs to rules:
-
-```python
-# myapp/defs.bzl
-
-def my_custom_bazel_macro(name, my_config_string):
-    print("Invoking macro " + name)
-    my_custom_bazel_rule(
-        name = name + "_as_target",
-        my_config_string = my_config_string,
-    )
-```
-
-```sh
-$ bazel build //myapp:sad_macro_less_sad
-DEBUG: /myworkspace/myapp/defs.bzl:23:3: Invoking macro sad_macro_less_sad.
-DEBUG: /myworkspace/myapp/defs.bzl:15:3: My name is sad_macro_less_sad with custom message: FIRST STRING.
-```
-
-### Why does select() always return true?
-
-Because *macros* (but not rules) by definition
-[can't evaluate `select()`s](#faq-select-macro), any attempt to do so
-usually produces an error:
-
-```sh
-ERROR: /myworkspace/myapp/BUILD:17:1: Traceback
-  (most recent call last):
-File "/myworkspace/myapp/BUILD", line 17
-my_custom_bazel_macro(name = "sad_macro", my_config_stri..."}))
-File "/myworkspace/myapp/defs.bzl", line 4, in
-  my_custom_bazel_macro
-my_config_string.upper()
-type 'select' has no method upper().
-```
-
-Booleans are a special case that fail silently, so you should be particularly
-vigilant with them:
-
-```sh
-$ cat myapp/defs.bzl
-def my_boolean_macro(boolval):
-  print("TRUE" if boolval else "FALSE")
-
-$ cat myapp/BUILD
-load("//myapp:defs.bzl", "my_boolean_macro")
-my_boolean_macro(
-    boolval = select({
-        "//third_party/bazel_platforms/cpu:x86_32": True,
-        "//third_party/bazel_platforms/cpu:ppc": False,
-    }),
-)
-
-$ bazel build //myapp:all --cpu=x86
-DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE.
-$ bazel build //myapp:all --cpu=ppc
-DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE.
-```
-
-This happens because macros don't understand the contents of `select()`.
-So what they're really evaluating is the `select()` object itself. According to
-[Pythonic](https://docs.python.org/release/2.5.2/lib/truth.html) design
-standards, all objects aside from a very small number of exceptions are
-automatically true.
-
-### Can I read select() like a dict?
-
-Macros [can't](#faq-select-macro) evaluate `select()`s because macros evaluate before
-Bazel knows what the build's command line parameters are. Can they at least read
-the `select()`'s dictionary to, for example, add a suffix to each value?
-
-Conceptually this is possible, but it isn't yet a Bazel feature. 
-What you *can* do today is prepare a straight dictionary, then feed it into a
-`select()`:
-
-```sh
-$ cat myapp/defs.bzl
-def selecty_genrule(name, select_cmd):
-    for key in select_cmd.keys():
-        select_cmd[key] += " WITH SUFFIX"
-    native.genrule(
-        name = name,
-        outs = [name + ".out"],
-        srcs = [],
-        cmd = "echo " + select(select_cmd | {"//conditions:default": "default"}) + " > $@",
-    )
-
-$ cat myapp/BUILD
-selecty_genrule(
-    name = "selecty",
-    select_cmd = {
-        "//third_party/bazel_platforms/cpu:x86_32": "x86 mode",
-    },
-)
-
-$ bazel build //myapp:selecty --cpu=x86 && cat bazel-genfiles/myapp/selecty.out
-x86 mode WITH SUFFIX
-```
-
-If you'd like to support both `select()` and native types, you can do this:
-
-```sh
-$ cat myapp/defs.bzl
-def selecty_genrule(name, select_cmd):
-    cmd_suffix = ""
-    if type(select_cmd) == "string":
-        cmd_suffix = select_cmd + " WITH SUFFIX"
-    elif type(select_cmd) == "dict":
-        for key in select_cmd.keys():
-            select_cmd[key] += " WITH SUFFIX"
-        cmd_suffix = select(select_cmd | {"//conditions:default": "default"})
-
-    native.genrule(
-        name = name,
-        outs = [name + ".out"],
-        srcs = [],
-        cmd = "echo " + cmd_suffix + " > $@",
-    )
-```
-
-### Why doesn't select() work with bind()?
-
-First of all, do not use `bind()`. It is deprecated in favor of `alias()`.
-
-The technical answer is that [`bind()`](/reference/be/workspace#bind) is a repo
-rule, not a BUILD rule.
-
-Repo rules do not have a specific configuration, and aren't evaluated in
-the same way as BUILD rules. Therefore, a `select()` in a `bind()` can't
-actually evaluate to any specific branch.
-
-Instead, you should use [`alias()`](/reference/be/general#alias), with a `select()` in
-the `actual` attribute, to perform this type of run-time determination. This
-works correctly, since `alias()` is a BUILD rule, and is evaluated with a
-specific configuration.
-
-```sh
-$ cat WORKSPACE
-workspace(name = "myapp")
-bind(name = "openssl", actual = "//:ssl")
-http_archive(name = "alternative", ...)
-http_archive(name = "boringssl", ...)
-
-$ cat BUILD
-config_setting(
-    name = "alt_ssl",
-    define_values = {
-        "ssl_library": "alternative",
-    },
-)
-
-alias(
-    name = "ssl",
-    actual = select({
-        "//:alt_ssl": "@alternative//:ssl",
-        "//conditions:default": "@boringssl//:ssl",
-    }),
-)
-```
-
-With this setup, you can pass `--define ssl_library=alternative`, and any target
-that depends on either `//:ssl` or `//external:ssl` will see the alternative
-located at `@alternative//:ssl`.
-
-But really, stop using `bind()`.
-
-### Why doesn't my select() choose what I expect?
-
-If `//myapp:foo` has a `select()` that doesn't choose the condition you expect,
-use [cquery](/query/cquery) and `bazel config` to debug:
-
-If `//myapp:foo` is the top-level target you're building, run:
-
-```sh
-$ bazel cquery //myapp:foo
-//myapp:foo (12e23b9a2b534a)
-```
-
-If you're building some other target `//bar` that depends on
-`//myapp:foo` somewhere in its subgraph, run:
-
-```sh
-$ bazel cquery 'somepath(//bar, //myapp:foo)'
-//bar:bar (3ag3193fee94a2)
-//bar:intermediate_dep (12e23b9a2b534a)
-//myapp:foo (12e23b9a2b534a)
-```
-
-The `(12e23b9a2b534a)` next to `//myapp:foo` is a *hash* of the
-configuration that resolves `//myapp:foo`'s `select()`. You can inspect its
-values with `bazel config`:
-
-```sh
-$ bazel config 12e23b9a2b534a
-BuildConfigurationValue 12e23b9a2b534a
-Fragment com.google.devtools.build.lib.analysis.config.CoreOptions {
-  cpu: darwin
-  compilation_mode: fastbuild
-  ...
-} -Fragment com.google.devtools.build.lib.rules.cpp.CppOptions { - linkopt: [-Dfoo=bar] - ... -} -... -``` - -Then compare this output against the settings expected by each `config_setting`. - -`//myapp:foo` may exist in different configurations in the same build. See the -[cquery docs](/query/cquery) for guidance on using `somepath` to get the right -one. - -Caution: To prevent restarting the Bazel server, invoke `bazel config` with the -same command line flags as the `bazel cquery`. The `config` command relies on -the configuration nodes from the still-running server of the previous command. - -### Why doesn't `select()` work with platforms? - -Bazel doesn't support configurable attributes checking whether a given platform -is the target platform because the semantics are unclear. - -For example: - -```py -platform( - name = "x86_linux_platform", - constraint_values = [ - "@platforms//cpu:x86", - "@platforms//os:linux", - ], -) - -cc_library( - name = "lib", - srcs = [...], - linkopts = select({ - ":x86_linux_platform": ["--enable_x86_optimizations"], - "//conditions:default": [], - }), -) -``` - -In this `BUILD` file, which `select()` should be used if the target platform has both the -`@platforms//cpu:x86` and `@platforms//os:linux` constraints, but is **not** the -`:x86_linux_platform` defined here? The author of the `BUILD` file and the user -who defined the separate platform may have different ideas. - -#### What should I do instead? - -Instead, define a `config_setting` that matches **any** platform with -these constraints: - -```py -config_setting( - name = "is_x86_linux", - constraint_values = [ - "@platforms//cpu:x86", - "@platforms//os:linux", - ], -) - -cc_library( - name = "lib", - srcs = [...], - linkopts = select({ - ":is_x86_linux": ["--enable_x86_optimizations"], - "//conditions:default": [], - }), -) -``` - -This process defines specific semantics, making it clearer to users what -platforms meet the desired conditions. - -#### What if I really, really want to `select` on the platform? - -If your build requirements specifically require checking the platform, you -can flip the value of the `--platforms` flag in a `config_setting`: - -```py -config_setting( - name = "is_specific_x86_linux_platform", - values = { - "platforms": ["//package:x86_linux_platform"], - }, -) - -cc_library( - name = "lib", - srcs = [...], - linkopts = select({ - ":is_specific_x86_linux_platform": ["--enable_x86_optimizations"], - "//conditions:default": [], - }), -) -``` - -The Bazel team doesn't endorse doing this; it overly constrains your build and -confuses users when the expected condition does not match. - -[BuildSettings]: /extending/config#user-defined-build-settings diff --git a/8.4.2/configure/best-practices.mdx b/8.4.2/configure/best-practices.mdx deleted file mode 100644 index abef72e..0000000 --- a/8.4.2/configure/best-practices.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: 'Best Practices' ---- - - - -This page assumes you are familiar with Bazel and provides guidelines and -advice on structuring your projects to take full advantage of Bazel's features. - -The overall goals are: - -- To use fine-grained dependencies to allow parallelism and incrementality. -- To keep dependencies well-encapsulated. -- To make code well-structured and testable. -- To create a build configuration that is easy to understand and maintain. - -These guidelines are not requirements: few projects will be able to adhere to -all of them. 
As the man page for lint says, "A special reward will be presented
-to the first person to produce a real program that produces no errors with
-strict checking." However, incorporating as many of these principles as possible
-should make a project more readable, less error-prone, and faster to build.
-
-This page uses the requirement levels described in
-[this RFC](https://www.ietf.org/rfc/rfc2119.txt).
-
-## Running builds and tests
-
-A project should always be able to run `bazel build //...` and
-`bazel test //...` successfully on its stable branch. Targets that are necessary
-but do not build under certain circumstances (such as requiring specific build
-flags, not building on a certain platform, or requiring license agreements)
-should be tagged as specifically as possible (for example, "`requires-osx`"). This
-tagging allows targets to be filtered at a more fine-grained level than the
-"manual" tag and allows someone inspecting the `BUILD` file to understand what
-a target's restrictions are.
-
-## Third-party dependencies
-
-You may declare third-party dependencies:
-
-* Either declare them as remote repositories in the `MODULE.bazel` file.
-* Or put them in a directory called `third_party/` under your workspace directory.
-
-## Depending on binaries
-
-Everything should be built from source whenever possible. Generally this means
-that, instead of depending on a library `some-library.so`, you'd create a
-`BUILD` file and build `some-library.so` from its sources, then depend on that
-target.
-
-Always building from source ensures that a build is not using a library that
-was built with incompatible flags or a different architecture. There are also
-some features like coverage, static analysis, or dynamic analysis that only
-work on the source.
-
-## Versioning
-
-Prefer building all code from head whenever possible. When versions must be
-used, avoid including the version in the target name (for example, `//guava`,
-not `//guava-20.0`). This naming makes the library easier to update (only one
-target needs to be updated). It's also more resilient to diamond dependency
-issues: if one library depends on `guava-19.0` and one depends on `guava-20.0`,
-you could end up with a library that tries to depend on two different versions.
-If you created a misleading alias to point both targets to one `guava` library,
-then the `BUILD` files are misleading.
-
-## Using the `.bazelrc` file
-
-For project-specific options, use the configuration file
-`<workspace>/.bazelrc` (see [bazelrc format](/run/bazelrc)).
-
-If you want to support per-user options for your project that you **do not**
-want to check into source control, include the line:
-
-```
-try-import %workspace%/user.bazelrc
-```
-(or any other file name) in your `<workspace>/.bazelrc`
-and add `user.bazelrc` to your `.gitignore`.
-
-## Packages
-
-Every directory that contains buildable files should be a package. If a `BUILD`
-file refers to files in subdirectories (such as `srcs = ["a/b/C.java"]`) it's
-a sign that a `BUILD` file should be added to that subdirectory. The longer
-this structure exists, the more likely circular dependencies will be
-inadvertently created, a target's scope will creep, and an increasing number
-of reverse dependencies will have to be updated.
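-
-For example (the paths and target names here are hypothetical), instead of:
-
-```python
-# foo/BUILD
-
-java_library(
-    name = "lib",
-    srcs = ["a/b/C.java"],  # Reaches into a subdirectory.
-)
-```
-
-prefer giving the subdirectory its own package:
-
-```python
-# foo/a/b/BUILD
-
-java_library(
-    name = "b",
-    srcs = ["C.java"],
-    visibility = ["//foo:__pkg__"],
-)
-```
-
-and depending on `//foo/a/b:b` from `foo/BUILD`.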
diff --git a/8.4.2/configure/coverage.mdx b/8.4.2/configure/coverage.mdx deleted file mode 100644 index 9a50db0..0000000 --- a/8.4.2/configure/coverage.mdx +++ /dev/null @@ -1,130 +0,0 @@ ---- -title: 'Code coverage with Bazel' ---- - - - -Bazel features a `coverage` sub-command to produce code coverage -reports on repositories that can be tested with `bazel coverage`. Due -to the idiosyncrasies of the various language ecosystems, it is not -always trivial to make this work for a given project. - -This page documents the general process for creating and viewing -coverage reports, and also features some language-specific notes for -languages whose configuration is well-known. It is best read by first -reading [the general section](#creating-a-coverage-report), and then -reading about the requirements for a specific language. Note also the -[remote execution section](#remote-execution), which requires some -additional considerations. - -While a lot of customization is possible, this document focuses on -producing and consuming [`lcov`][lcov] reports, which is currently the -most well-supported route. - -## Creating a coverage report - -### Preparation - -The basic workflow for creating coverage reports requires the -following: - -- A basic repository with test targets -- A toolchain with the language-specific code coverage tools installed -- A correct "instrumentation" configuration - -The former two are language-specific and mostly straightforward, -however the latter can be more difficult for complex projects. - -"Instrumentation" in this case refers to the coverage tools that are -used for a specific target. Bazel allows turning this on for a -specific subset of files using the -[`--instrumentation_filter`](/reference/command-line-reference#flag--instrumentation_filter) -flag, which specifies a filter for targets that are tested with the -instrumentation enabled. To enable instrumentation for tests, the -[`--instrument_test_targets`](/reference/command-line-reference#flag--instrument_test_targets) -flag is required. - -By default, bazel tries to match the target package(s), and prints the -relevant filter as an `INFO` message. - -### Running coverage - -To produce a coverage report, use [`bazel coverage ---combined_report=lcov -[target]`](/reference/command-line-reference#coverage). This runs the -tests for the target, generating coverage reports in the lcov format -for each file. - -Once finished, bazel runs an action that collects all the produced -coverage files, and merges them into one, which is then finally -created under `$(bazel info -output_path)/_coverage/_coverage_report.dat`. - -Coverage reports are also produced if tests fail, though note that -this does not extend to the failed tests - only passing tests are -reported. - -### Viewing coverage - -The coverage report is only output in the non-human-readable `lcov` -format. From this, we can use the `genhtml` utility (part of [the lcov -project][lcov]) to produce a report that can be viewed in a web -browser: - -```console -genhtml --branch-coverage --output genhtml "$(bazel info output_path)/_coverage/_coverage_report.dat" -``` - -Note that `genhtml` reads the source code as well, to annotate missing -coverage in these files. For this to work, it is expected that -`genhtml` is executed in the root of the bazel project. - -To view the result, simply open the `index.html` file produced in the -`genhtml` directory in any web browser. 
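-
-Putting the pieces together, an end-to-end run using the commands above might
-look like the following (`//mypackage:all` is a placeholder for your own test
-targets):
-
-```console
-bazel coverage --combined_report=lcov //mypackage:all
-genhtml --branch-coverage --output genhtml "$(bazel info output_path)/_coverage/_coverage_report.dat"
-```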
- -For further help and information around the `genhtml` tool, or the -`lcov` coverage format, see [the lcov project][lcov]. - -## Remote execution - -Running with remote test execution currently has a few caveats: - -- The report combination action cannot yet run remotely. This is - because Bazel does not consider the coverage output files as part of - its graph (see [this issue][remote_report_issue]), and can therefore - not correctly treat them as inputs to the combination action. To - work around this, use `--strategy=CoverageReport=local`. - - Note: It may be necessary to specify something like - `--strategy=CoverageReport=local,remote` instead, if Bazel is set - up to try `local,remote`, due to how Bazel resolves strategies. -- `--remote_download_minimal` and similar flags can also not be used - as a consequence of the former. -- Bazel will currently fail to create coverage information if tests - have been cached previously. To work around this, - `--nocache_test_results` can be set specifically for coverage runs, - although this of course incurs a heavy cost in terms of test times. -- `--experimental_split_coverage_postprocessing` and - `--experimental_fetch_all_coverage_outputs` - - Usually coverage is run as part of the test action, and so by - default, we don't get all coverage back as outputs of the remote - execution by default. These flags override the default and obtain - the coverage data. See [this issue][split_coverage_issue] for more - details. - -## Language-specific configuration - -### Java - -Java should work out-of-the-box with the default configuration. The -[bazel toolchains][bazel_toolchains] contain everything necessary for -remote execution, as well, including JUnit. - -### Python - -See the [`rules_python` coverage docs](https://github.com/bazelbuild/rules_python/blob/main/docs/sphinx/coverage.md) -for additional steps needed to enable coverage support in Python. - -[lcov]: https://github.com/linux-test-project/lcov -[bazel_toolchains]: https://github.com/bazelbuild/bazel-toolchains -[remote_report_issue]: https://github.com/bazelbuild/bazel/issues/4685 -[split_coverage_issue]: https://github.com/bazelbuild/bazel/issues/4685 diff --git a/8.4.2/contribute/breaking-changes.mdx b/8.4.2/contribute/breaking-changes.mdx deleted file mode 100644 index 5dda1b9..0000000 --- a/8.4.2/contribute/breaking-changes.mdx +++ /dev/null @@ -1,147 +0,0 @@ ---- -title: 'Guide for rolling out breaking changes' ---- - - - -It is inevitable that we will make breaking changes to Bazel. We will have to -change our designs and fix the things that do not quite work. However, we need -to make sure that community and Bazel ecosystem can follow along. To that end, -Bazel project has adopted a -[backward compatibility policy](/release/backward-compatibility). -This document describes the process for Bazel contributors to make a breaking -change in Bazel to adhere to this policy. - -1. Follow the [design document policy](/contribute/design-documents). - -1. [File a GitHub issue.](#github-issue) - -1. [Implement the change.](#implementation) - -1. [Update labels.](#labels) - -1. [Update repositories.](#update-repos) - -1. [Flip the incompatible flag.](#flip-flag) - -## GitHub issue - -[File a GitHub issue](https://github.com/bazelbuild/bazel/issues) -in the Bazel repository. -[See example.](https://github.com/bazelbuild/bazel/issues/6611) - -We recommend that: - -* The title starts with the name of the flag (the flag name will start with - `incompatible_`). 
-
-* You add the label
-  [`incompatible-change`](https://github.com/bazelbuild/bazel/labels/incompatible-change).
-
-* The description contains a description of the change and a link to relevant
-  design documents.
-
-* The description contains a migration recipe, to explain to users how they
-  should update their code. Ideally, when the change is mechanical, include a
-  link to a migration tool.
-
-* The description includes an example of the error message users will get if
-  they don't migrate. This will make the GitHub issue more discoverable from
-  search engines. Make sure that the error message is helpful and actionable.
-  When possible, the error message should include the name of the incompatible
-  flag.
-
-For the migration tool, consider contributing to
-[Buildifier](https://github.com/bazelbuild/buildtools/blob/master/buildifier/README.md).
-It is able to apply automated fixes to `BUILD`, `WORKSPACE`, and `.bzl` files.
-It may also report warnings.
-
-## Implementation
-
-Create a new flag in Bazel. The default value must be false. The help text
-should contain the URL of the GitHub issue. As the flag name starts with
-`incompatible_`, it needs metadata tags:
-
-```java
-      metadataTags = {
-        OptionMetadataTag.INCOMPATIBLE_CHANGE,
-      },
-```
-
-In the commit description, add a brief summary of the flag.
-Also add [`RELNOTES:`](release-notes.md) in the following form:
-`RELNOTES: --incompatible_name_of_flag has been added. See #xyz for details`
-
-The commit should also update the relevant documentation, so that there is no
-window of commits in which the code is inconsistent with the docs. Since our
-documentation is versioned, changes to the docs will not be inadvertently
-released prematurely.
-
-## Labels
-
-Once the commit is merged and the incompatible change is ready to be adopted, add the label
-[`migration-ready`](https://github.com/bazelbuild/bazel/labels/migration-ready)
-to the GitHub issue.
-
-If a problem is found with the flag and users are not expected to migrate yet,
-remove the `migration-ready` label.
-
-If you plan to flip the flag in the next major release, add the label
-`breaking-change-X.0` to the issue.
-
-## Updating repositories
-
-Bazel CI tests a list of important projects at
-[Bazel@HEAD + Downstream](https://buildkite.com/bazel/bazel-at-head-plus-downstream). Most of them are often
-dependencies of other Bazel projects; therefore, it's important to migrate them to unblock the migration for the broader community. To monitor the migration status of those projects, you can use the [`bazelisk-plus-incompatible-flags` pipeline](https://buildkite.com/bazel/bazelisk-plus-incompatible-flags).
-Check how this pipeline works [here](https://github.com/bazelbuild/continuous-integration/tree/master/buildkite#checking-incompatible-changes-status-for-downstream-projects).
-
-Our dev support team monitors the [`migration-ready`](https://github.com/bazelbuild/bazel/labels/migration-ready) label. Once you add this label to the GitHub issue, they will handle the following:
-
-1. Create a comment in the GitHub issue to track the list of failures and downstream projects that need to be migrated ([see example](https://github.com/bazelbuild/bazel/issues/17032#issuecomment-1353077469))
-
-1. File GitHub issues to notify the owners of every downstream project broken by your incompatible change ([see example](https://github.com/bazelbuild/intellij/issues/4208))
-
-1. 
Follow up to make sure all issues are addressed before the target release date - -Migrating projects in the downstream pipeline is NOT entirely the responsibility of the incompatible change author, but you can do the following to accelerate the migration and make life easier for both Bazel users and the Bazel Green Team. - -1. Send PRs to fix downstream projects. - -1. Reach out to the Bazel community for help on migration (e.g. [Bazel Rules Authors SIG](https://bazel-contrib.github.io/SIG-rules-authors/)). - -## Flipping the flag - -Before flipping the default value of the flag to true, please make sure that: - -* Core repositories in the ecosystem are migrated. - - On the [`bazelisk-plus-incompatible-flags` pipeline](https://buildkite.com/bazel/bazelisk-plus-incompatible-flags), - the flag should appear under `The following flags didn't break any passing Bazel team owned/co-owned projects`. - -* All issues in the checklist are marked as fixed/closed. - -* User concerns and questions have been resolved. - -When the flag is ready to flip in Bazel, but blocked on internal migration at Google, please consider setting the flag value to false in the internal `blazerc` file to unblock the flag flip. By doing this, we can ensure Bazel users depend on the new behaviour by default as early as possible. - -When changing the flag default to true, please: - -* Use `RELNOTES[INC]` in the commit description, with the - following format: - `RELNOTES[INC]: --incompatible_name_of_flag is flipped to true. See #xyz for - details` - You can include additional information in the rest of the commit description. -* Use `Fixes #xyz` in the description, so that the GitHub issue gets closed - when the commit is merged. -* Review and update documentation if needed. -* File a new issue `#abc` to track the removal of the flag. - -## Removing the flag - -After the flag is flipped at HEAD, it should be removed from Bazel eventually. -When you plan to remove the incompatible flag: - -* Consider leaving more time for users to migrate if it's a major incompatible change. - Ideally, the flag should be available in at least one major release. -* For the commit that removes the flag, use `Fixes #abc` in the description - so that the GitHub issue gets closed when the commit is merged. diff --git a/8.4.2/contribute/codebase.mdx b/8.4.2/contribute/codebase.mdx deleted file mode 100644 index 8a13611..0000000 --- a/8.4.2/contribute/codebase.mdx +++ /dev/null @@ -1,1670 +0,0 @@ ---- -title: 'The Bazel codebase' ---- - - - -This document is a description of the codebase and how Bazel is structured. It -is intended for people willing to contribute to Bazel, not for end-users. - -## Introduction - -The codebase of Bazel is large (~350KLOC production code and ~260 KLOC test -code) and no one is familiar with the whole landscape: everyone knows their -particular valley very well, but few know what lies over the hills in every -direction. - -In order for people midway upon the journey not to find themselves within a -forest dark with the straightforward pathway being lost, this document tries to -give an overview of the codebase so that it's easier to get started with -working on it. - -The public version of the source code of Bazel lives on GitHub at -[github.com/bazelbuild/bazel](http://github.com/bazelbuild/bazel). This is not -the "source of truth"; it's derived from a Google-internal source tree that -contains additional functionality that is not useful outside Google. The -long-term goal is to make GitHub the source of truth. 
- -Contributions are accepted through the regular GitHub pull request mechanism, -and manually imported by a Googler into the internal source tree, then -re-exported back out to GitHub. - -## Client/server architecture - -The bulk of Bazel resides in a server process that stays in RAM between builds. -This allows Bazel to maintain state between builds. - -This is why the Bazel command line has two kinds of options: startup and -command. In a command line like this: - -``` - bazel --host_jvm_args=-Xmx8G build -c opt //foo:bar -``` - -Some options (`--host_jvm_args=`) are before the name of the command to be run -and some are after (`-c opt`); the former kind is called a "startup option" and -affects the server process as a whole, whereas the latter kind, the "command -option", only affects a single command. - -Each server instance has a single associated workspace (collection of source -trees known as "repositories") and each workspace usually has a single active -server instance. This can be circumvented by specifying a custom output base -(see the "Directory layout" section for more information). - -Bazel is distributed as a single ELF executable that is also a valid .zip file. -When you type `bazel`, the above ELF executable implemented in C++ (the -"client") gets control. It sets up an appropriate server process using the -following steps: - -1. Checks whether it has already extracted itself. If not, it does that. This - is where the implementation of the server comes from. -2. Checks whether there is an active server instance that works: it is running, - it has the right startup options and uses the right workspace directory. It - finds the running server by looking at the directory `$OUTPUT_BASE/server` - where there is a lock file with the port the server is listening on. -3. If needed, kills the old server process -4. If needed, starts up a new server process - -After a suitable server process is ready, the command that needs to be run is -communicated to it over a gRPC interface, then the output of Bazel is piped back -to the terminal. Only one command can be running at the same time. This is -implemented using an elaborate locking mechanism with parts in C++ and parts in -Java. There is some infrastructure for running multiple commands in parallel, -since the inability to run `bazel version` in parallel with another command -is somewhat embarrassing. The main blocker is the life cycle of `BlazeModule`s -and some state in `BlazeRuntime`. - -At the end of a command, the Bazel server transmits the exit code the client -should return. An interesting wrinkle is the implementation of `bazel run`: the -job of this command is to run something Bazel just built, but it can't do that -from the server process because it doesn't have a terminal. So instead it tells -the client what binary it should `exec()` and with what arguments. - -When one presses Ctrl-C, the client translates it to a Cancel call on the gRPC -connection, which tries to terminate the command as soon as possible. After the -third Ctrl-C, the client sends a SIGKILL to the server instead. - -The source code of the client is under `src/main/cpp` and the protocol used to -communicate with the server is in `src/main/protobuf/command_server.proto` . - -The main entry point of the server is `BlazeRuntime.main()` and the gRPC calls -from the client are handled by `GrpcServerImpl.run()`. - -## Directory layout - -Bazel creates a somewhat complicated set of directories during a build. 
A full -description is available in [Output directory layout](/remote/output-directories). - -The "main repo" is the source tree Bazel is run in. It usually corresponds to -something you checked out from source control. The root of this directory is -known as the "workspace root". - -Bazel puts all of its data under the "output user root". This is usually -`$HOME/.cache/bazel/_bazel_${USER}`, but can be overridden using the -`--output_user_root` startup option. - -The "install base" is where Bazel is extracted to. This is done automatically -and each Bazel version gets a subdirectory based on its checksum under the -install base. It's at `$OUTPUT_USER_ROOT/install` by default and can be changed -using the `--install_base` command line option. - -The "output base" is the place where the Bazel instance attached to a specific -workspace writes to. Each output base has at most one Bazel server instance -running at any time. It's usually at `$OUTPUT_USER_ROOT/<checksum of the path of the workspace>`. It can be changed using the `--output_base` startup option, -which is, among other things, useful for getting around the limitation that only -one Bazel instance can be running in any workspace at any given time. - -The output directory contains, among other things: - -* The fetched external repositories at `$OUTPUT_BASE/external`. -* The exec root, a directory that contains symlinks to all the source - code for the current build. It's located at `$OUTPUT_BASE/execroot`. During - the build, the working directory is `$EXECROOT/<name of the main repository>`. We are planning to change this to `$EXECROOT`, although it's a - long-term plan because it's a very incompatible change. -* Files built during the build. - -## The process of executing a command - -Once the Bazel server gets control and is informed about a command it needs to -execute, the following sequence of events happens: - -1. `BlazeCommandDispatcher` is informed about the new request. It decides - whether the command needs a workspace to run in (almost every command except - for ones that don't have anything to do with source code, such as version or - help) and whether another command is running. - -2. The right command is found. Each command must implement the interface - `BlazeCommand` and must have the `@Command` annotation (this is a bit of an - antipattern; it would be nice if all the metadata a command needs were - described by methods on `BlazeCommand`). - -3. The command line options are parsed. Each command has different command line - options, which are described in the `@Command` annotation. - -4. An event bus is created. The event bus is a stream for events that happen - during the build. Some of these are exported to outside of Bazel under the - aegis of the Build Event Protocol in order to tell the world how the build - goes. - -5. The command gets control. The most interesting commands are those that run a - build: build, test, run, coverage and so on: this functionality is - implemented by `BuildTool`. - -6. The set of target patterns on the command line is parsed and wildcards like - `//pkg:all` and `//pkg/...` are resolved. This is implemented in - `AnalysisPhaseRunner.evaluateTargetPatterns()` and reified in Skyframe as - `TargetPatternPhaseValue`. - -7. The loading/analysis phase is run to produce the action graph (a directed - acyclic graph of commands that need to be executed for the build). - -8. The execution phase is run. This means that every action required to build - the requested top-level targets is run.
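To make steps 1–3 concrete, here is a minimal sketch of the shape of a command implementation. The command, its options class, and the annotation fields shown here are invented for illustration, and the real `BlazeCommand`, `@Command`, and `@Option` interfaces carry more members than shown:

```java
// Hypothetical "bazel frobnicate" command; illustrative only.
public class FrobnicateOptions extends OptionsBase {
  @Option(
      name = "frobnication_level",
      defaultValue = "1",
      help = "How aggressively to frobnicate.")
  public int frobnicationLevel;
}

@Command(
    name = "frobnicate",
    options = {FrobnicateOptions.class}, // parsed in step 3 above
    shortDescription = "Frobnicates the workspace.",
    help = "Frobnicates the workspace in the requested way.")
public final class FrobnicateCommand implements BlazeCommand {
  @Override
  public BlazeCommandResult exec(CommandEnvironment env, OptionsParsingResult options) {
    FrobnicateOptions opts = options.getOptions(FrobnicateOptions.class);
    // ... do the actual work, reporting progress via env.getReporter() ...
    return BlazeCommandResult.success();
  }
}
```

The same `@Option` mechanism is described in more detail in the next section.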
- -## Command line options - -The command line options for a Bazel invocation are described in an -`OptionsParsingResult` object, which in turn contains a map from "option -classes" to the values of the options. An "option class" is a subclass of -`OptionsBase` and groups together command line options that are related to each -other. For example: - -1. Options related to a programming language (`CppOptions` or `JavaOptions`). - These should be a subclass of `FragmentOptions` and are eventually wrapped - into a `BuildOptions` object. -2. Options related to the way Bazel executes actions (`ExecutionOptions`) - -These options are designed to be consumed in the analysis phase (either -through `RuleContext.getFragment()` in Java or `ctx.fragments` in Starlark). -Some of them (for example, whether to do C++ include scanning or not) are read -in the execution phase, but that always requires explicit plumbing since -`BuildConfiguration` is not available then. For more information, see the -section "Configurations". - -**WARNING:** We like to pretend that `OptionsBase` instances are immutable and -use them that way (such as part of `SkyKey`s). This is not the case and -modifying them is a really good way to break Bazel in subtle ways that are hard -to debug. Unfortunately, making them actually immutable is a large endeavor. -(Modifying a `FragmentOptions` immediately after construction before anyone else -gets a chance to keep a reference to it and before `equals()` or `hashCode()` is -called on it is okay.) - -Bazel learns about option classes in the following ways: - -1. Some are hard-wired into Bazel (`CommonCommandOptions`) -2. From the `@Command` annotation on each Bazel command -3. From `ConfiguredRuleClassProvider` (these are command line options related - to individual programming languages) -4. Starlark rules can also define their own options (see - [here](/extending/config)) - -Each option (excluding Starlark-defined options) is a member variable of a -`FragmentOptions` subclass that has the `@Option` annotation, which specifies -the name and the type of the command line option along with some help text. - -The Java type of the value of a command line option is usually something simple -(a string, an integer, a Boolean, a label, etc.). However, we also support -options of more complicated types; in this case, the job of converting from the -command line string to the data type falls to an implementation of -`com.google.devtools.common.options.Converter`. - -## The source tree, as seen by Bazel - -Bazel is in the business of building software, which happens by reading and -interpreting the source code. The totality of the source code Bazel operates on -is called "the workspace" and it is structured into repositories, packages and -rules. - -### Repositories - -A "repository" is a source tree on which a developer works; it usually -represents a single project. Bazel's ancestor, Blaze, operated on a monorepo, -that is, a single source tree that contains all source code used to run the build. -Bazel, in contrast, supports projects whose source code spans multiple -repositories. The repository from which Bazel is invoked is called the "main -repository", the others are called "external repositories". - -A repository is marked by a repo boundary file (`MODULE.bazel`, `REPO.bazel`, or -in legacy contexts, `WORKSPACE` or `WORKSPACE.bazel`) in its root directory. The -main repo is the source tree where you're invoking Bazel from.
External repos -are defined in various ways; see [external dependencies -overview](/external/overview) for more information. - -Code of external repositories is symlinked or downloaded under -`$OUTPUT_BASE/external`. - -When running the build, the whole source tree needs to be pieced together; this -is done by `SymlinkForest`, which symlinks every package in the main repository -to `$EXECROOT` and every external repository to either `$EXECROOT/external` or -`$EXECROOT/..`. - -### Packages - -Every repository is composed of packages, each of which is a collection of related files -and a specification of the dependencies. These are specified by a file called -`BUILD` or `BUILD.bazel`. If both exist, Bazel prefers `BUILD.bazel`; the reason -why `BUILD` files are still accepted is that Bazel's ancestor, Blaze, used this -file name. However, `BUILD` turned out to be a commonly used path segment, especially -on Windows, where file names are case-insensitive. - -Packages are independent of each other: changes to the `BUILD` file of a package -cannot cause other packages to change. The addition or removal of `BUILD` files -_can_ change other packages, since recursive globs stop at package boundaries -and thus the presence of a `BUILD` file stops the recursion. - -The evaluation of a `BUILD` file is called "package loading". It's implemented -in the class `PackageFactory`; it works by calling the Starlark interpreter and -requires knowledge of the set of available rule classes. The result of package -loading is a `Package` object. It's mostly a map from a string (the name of a -target) to the target itself. - -A large chunk of complexity during package loading is globbing: Bazel does not -require every source file to be explicitly listed and instead can run globs -(such as `glob(["**/*.java"])`). Unlike the shell, it supports recursive globs that -descend into subdirectories (but not into subpackages). This requires access to -the file system and since that can be slow, we implement all sorts of tricks to -make it run in parallel and as efficiently as possible. - -Globbing is implemented in the following classes: - -* `LegacyGlobber`, a fast and blissfully Skyframe-unaware globber -* `SkyframeHybridGlobber`, a version that uses Skyframe and falls back to - the legacy globber in order to avoid "Skyframe restarts" (described below) - -The `Package` class itself contains some members that are exclusively used to -parse the "external" package (related to external dependencies) and which do not -make sense for real packages. This is -a design flaw because objects describing regular packages should not contain -fields that describe something else. These include: - -* The repository mappings -* The registered toolchains -* The registered execution platforms - -Ideally, there would be more separation between parsing the "external" package -and parsing regular packages so that `Package` does not need to cater for the -needs of both. This is unfortunately difficult to do because the two are -intertwined quite deeply. - -### Labels, Targets, and Rules - -Packages are composed of targets, which have the following types: - -1. **Files:** things that are either the input or the output of the build. In - Bazel parlance, we call them _artifacts_ (discussed elsewhere). Not all - files created during the build are targets; it's common for an output of - Bazel not to have an associated label. -2. **Rules:** these describe steps to derive their outputs from their inputs.
They - are generally associated with a programming language (such as `cc_library`, - `java_library` or `py_library`), but there are some language-agnostic ones - (such as `genrule` or `filegroup`). -3. **Package groups:** discussed in the [Visibility](#visibility) section. - -The name of a target is called a _Label_. The syntax of labels is -`@repo//pac/kage:name`, where `repo` is the name of the repository the Label is -in, `pac/kage` is the directory its `BUILD` file is in and `name` is the path of -the file (if the label refers to a source file) relative to the directory of the -package. When referring to a target on the command line, some parts of the label -can be omitted: - -1. If the repository is omitted, the label is taken to be in the main - repository. -2. If the package part is omitted (such as `name` or `:name`), the label is taken - to be in the package of the current working directory (relative paths - containing uplevel references (..) are not allowed) - -A kind of a rule (such as "C++ library") is called a "rule class". Rule classes may -be implemented either in Starlark (the `rule()` function) or in Java (so-called -"native rules", type `RuleClass`). In the long term, every language-specific -rule will be implemented in Starlark, but some legacy rule families (such as Java -or C++) are still in Java for the time being. - -Starlark rule classes need to be imported at the beginning of `BUILD` files -using the `load()` statement, whereas Java rule classes are "innately" known by -Bazel, by virtue of being registered with the `ConfiguredRuleClassProvider`. - -Rule classes contain information such as: - -1. Their attributes (such as `srcs`, `deps`): their types, default values, - constraints, etc. -2. The configuration transitions and aspects attached to each attribute, if any -3. The implementation of the rule -4. The transitive info providers the rule "usually" creates - -**Terminology note:** In the codebase, we often use "Rule" to mean the target -created by a rule class. But in Starlark and in user-facing documentation, -"Rule" should be used exclusively to refer to the rule class itself; the target -is just a "target". Also note that despite `RuleClass` having "class" in its -name, there is no Java inheritance relationship between a rule class and targets -of that type. - -## Skyframe - -The evaluation framework underlying Bazel is called Skyframe. Its model is that -everything that needs to be built during a build is organized into a directed -acyclic graph with edges pointing from each piece of data to its dependencies, -that is, other pieces of data that need to be known to construct it. - -The nodes in the graph are called `SkyValue`s and their names are called -`SkyKey`s. Both are deeply immutable; only immutable objects should be -reachable from them. This invariant almost always holds, and in case it doesn't -(such as for the options class `BuildOptions`, which is a member of -`BuildConfigurationValue` and its `SkyKey`) we try really hard not to change -them or to change them only in ways that are not observable from the outside. -From this it follows that everything that is computed within Skyframe (such as -configured targets) must also be immutable. - -The most convenient way to observe the Skyframe graph is to run `bazel dump ---skyframe=deps`, which dumps the graph, one `SkyValue` per line. It's best -to do it for tiny builds, since it can get pretty large. - -Skyframe lives in the `com.google.devtools.build.skyframe` package.
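The following paragraphs describe how `SkyFunction`s are evaluated; as a preview, here is a hedged sketch of the typical shape of such a function. `FooFunction`, `FooValue` and `BarValue` are hypothetical, and real functions do considerably more error handling than shown:

```java
// Hypothetical SkyFunction illustrating the getValue()/null-restart protocol
// described below.
public final class FooFunction implements SkyFunction {
  @Override
  @Nullable
  public SkyValue compute(SkyKey skyKey, Environment env)
      throws SkyFunctionException, InterruptedException {
    // Request a dependency; this also registers the edge in Skyframe's graph.
    BarValue bar = (BarValue) env.getValue(BarValue.key());
    if (bar == null) {
      // The dependency hasn't been computed yet: yield to Skyframe, which
      // will evaluate it and then restart compute() from the beginning.
      return null;
    }
    // All dependencies are available: produce this node's immutable value.
    return FooValue.create(bar);
  }
}
```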
The -similarly-named package `com.google.devtools.build.lib.skyframe` contains the -implementation of Bazel on top of Skyframe. More information about Skyframe is -available [here](/reference/skyframe). - -To evaluate a given `SkyKey` into a `SkyValue`, Skyframe will invoke the -`SkyFunction` corresponding to the type of the key. During the function's -evaluation, it may request other dependencies from Skyframe by calling the -various overloads of `SkyFunction.Environment.getValue()`. This has the -side-effect of registering those dependencies into Skyframe's internal graph, so -that Skyframe will know to re-evaluate the function when any of its dependencies -change. In other words, Skyframe's caching and incremental computation work at -the granularity of `SkyFunction`s and `SkyValue`s. - -Whenever a `SkyFunction` requests a dependency that is unavailable, `getValue()` -will return null. The function should then yield control back to Skyframe by -itself returning null. At some later point, Skyframe will evaluate the -unavailable dependency, then restart the function from the beginning — only this -time the `getValue()` call will succeed with a non-null result. - -A consequence of this is that any computation performed inside the `SkyFunction` -prior to the restart must be repeated. But this does not include work done to -evaluate dependency `SkyValue`s, which are cached. Therefore, we commonly work -around this issue by: - -1. Declaring dependencies in batches (by using `getValuesAndExceptions()`) to - limit the number of restarts. -2. Breaking up a `SkyValue` into separate pieces computed by different - `SkyFunction`s, so that they can be computed and cached independently. This - should be done strategically, since it has the potential to increase memory - usage. -3. Storing state between restarts, either using - `SkyFunction.Environment.getState()`, or keeping an ad hoc static cache - "behind the back of Skyframe". With complex `SkyFunction`s, state management - between restarts can get tricky, so - [`StateMachine`s](/contribute/statemachine-guide) were introduced for a - structured approach to logical concurrency, including hooks to suspend and - resume hierarchical computations within a `SkyFunction`. Example: - [`DependencyResolver#computeDependencies`][statemachine_example] - uses a `StateMachine` with `getState()` to compute the potentially huge set - of direct dependencies of a configured target, which otherwise can result in - expensive restarts. - -[statemachine_example]: https://developers.google.com/devsite/reference/markdown/links#reference_links - -Fundamentally, Bazel needs these types of workarounds because hundreds of -thousands of in-flight Skyframe nodes are common, and Java's support for -lightweight threads [does not outperform][virtual_threads] the -`StateMachine` implementation as of 2023. - -[virtual_threads]: /contribute/statemachine-guide#epilogue_eventually_removing_callbacks
The Java -implementation used in Bazel is currently an interpreter. - -Starlark is used in several contexts, including: - -1. **`BUILD` files.** This is where new build targets are defined. Starlark - code running in this context only has access to the contents of the `BUILD` - file itself and `.bzl` files loaded by it. -2. **The `MODULE.bazel` file.** This is where external dependencies are - defined. Starlark code running in this context only has very limited access - to a few predefined directives. -3. **`.bzl` files.** This is where new build rules, repo rules, and module - extensions are defined. Starlark code here can define new functions and load - from other `.bzl` files. - -The dialects available for `BUILD` and `.bzl` files are slightly different -because they express different things. A list of differences is available -[here](/rules/language#differences-between-build-and-bzl-files). - -More information about Starlark is available [here](/rules/language). - -## The loading/analysis phase - -The loading/analysis phase is where Bazel determines what actions are needed to -build a particular rule. Its basic unit is a "configured target", which is, -quite sensibly, a (target, configuration) pair. - -It's called the "loading/analysis phase" because it can be split into two -distinct parts, which used to be serialized but can now overlap in time: - -1. Loading packages, that is, turning `BUILD` files into the `Package` objects - that represent them -2. Analyzing configured targets, that is, running the implementation of the - rules to produce the action graph - -Each configured target in the transitive closure of the configured targets -requested on the command line must be analyzed bottom-up; that is, leaf nodes -first, then up to the ones on the command line. The inputs to the analysis of -a single configured target are: - -1. **The configuration.** ("how" to build that rule; for example, the target - platform but also things like command line options the user wants to be - passed to the C++ compiler) -2. **The direct dependencies.** Their transitive info providers are available - to the rule being analyzed. They are called that because they provide a - "roll-up" of the information in the transitive closure of the configured - target, such as all the .jar files on the classpath or all the .o files that - need to be linked into a C++ binary. -3. **The target itself**. This is the result of loading the package the target - is in. For rules, this includes its attributes, which is usually what - matters. -4. **The implementation of the configured target.** For rules, this can either - be in Starlark or in Java. All non-rule configured targets are implemented - in Java. - -The output of analyzing a configured target is: - -1. The transitive info providers that configured targets that depend on it can - access -2. The artifacts it can create and the actions that produce them. - -The API offered to Java rules is `RuleContext`, which is the equivalent of the -`ctx` argument of Starlark rules. Its API is more powerful, but at the same -time, it's easier to do Bad Things™, for example to write code whose time or -space complexity is quadratic (or worse), to make the Bazel server crash with a -Java exception or to violate invariants (such as by inadvertently modifying an -`Options` instance or by making a configured target mutable). - -The algorithm that determines the direct dependencies of a configured target -lives in `DependencyResolver.dependentNodeMap()`.
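As a rough illustration of the Java side of analysis, here is a hedged sketch of a rule implementation built on `RuleContext`. The rule is invented, the body is heavily elided, and the builder calls shown anticipate the "Configured targets" section below:

```java
// Hypothetical Java rule implementation; heavily simplified.
public final class FrobLibrary implements RuleConfiguredTargetFactory {
  @Override
  @Nullable
  public ConfiguredTarget create(RuleContext ruleContext)
      throws InterruptedException, RuleErrorException, ActionConflictException {
    // Input to analysis: the target's attributes and its direct dependencies'
    // transitive info providers.
    List<? extends TransitiveInfoCollection> deps =
        ruleContext.getPrerequisites("deps");
    // Output of analysis: artifacts, the actions that produce them, and
    // providers for reverse dependencies (all omitted here).
    return new RuleConfiguredTargetBuilder(ruleContext)
        .setFilesToBuild(NestedSetBuilder.<Artifact>emptySet(Order.STABLE_ORDER))
        .addProvider(RunfilesProvider.class, RunfilesProvider.EMPTY)
        .build();
  }
}
```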
- -### Configurations - -Configurations are the "how" of building a target: for what platform, with what -command line options, etc. - -The same target can be built for multiple configurations in the same build. This -is useful, for example, when the same code is used for a tool that's run during -the build and for the target code while we are cross-compiling, or when we are -building a fat Android app (one that contains native code for multiple CPU -architectures). - -Conceptually, the configuration is a `BuildOptions` instance. However, in -practice, `BuildOptions` is wrapped by `BuildConfiguration`, which provides -additional sundry pieces of functionality. It propagates from the top of the -dependency graph to the bottom. If it changes, the build needs to be -re-analyzed. - -This results in anomalies like having to re-analyze the whole build if, for -example, the number of requested test runs changes, even though that only -affects test targets (we have plans to "trim" configurations so that this is -not the case, but it's not ready yet). - -When a rule implementation needs part of the configuration, it needs to declare -it in its definition using `RuleClass.Builder.requiresConfigurationFragments()`. -This is both to avoid mistakes (such as Python rules using the Java fragment) and -to facilitate configuration trimming so that, for example, if Python options change, C++ -targets don't need to be re-analyzed. - -The configuration of a rule is not necessarily the same as that of its "parent" -rule. The process of changing the configuration in a dependency edge is called a -"configuration transition". It can happen in two places: - -1. On a dependency edge. These transitions are specified in - `Attribute.Builder.cfg()` and are functions from a `Rule` (where the - transition happens) and a `BuildOptions` (the original configuration) to one - or more `BuildOptions` (the output configurations). -2. On any incoming edge to a configured target. These are specified in - `RuleClass.Builder.cfg()`. - -The relevant classes are `TransitionFactory` and `ConfigurationTransition`. - -Configuration transitions are used, for example: - -1. To declare that a particular dependency is used during the build and it - should thus be built in the execution architecture -2. To declare that a particular dependency must be built for multiple - architectures (such as for native code in fat Android APKs) - -If a configuration transition results in multiple configurations, it's called a -_split transition_. - -Configuration transitions can also be implemented in Starlark (documentation -[here](/extending/config)). - -### Transitive info providers - -Transitive info providers are a way (and the _only_ way) for configured targets -to learn things about other configured targets that they depend on, and the only -way to tell things about themselves to other configured targets that depend on -them. The reason why "transitive" is in their name is that this is usually some -sort of roll-up of the transitive closure of a configured target. - -There is generally a 1:1 correspondence between Java transitive info providers -and Starlark ones (the exception is `DefaultInfo` which is an amalgamation of -`FileProvider`, `FilesToRunProvider` and `RunfilesProvider` because that API was -deemed to be more Starlark-ish than a direct transliteration of the Java one). -Their key is one of the following things: - -1. A Java Class object. This is only available for providers that are not - accessible from Starlark.
These providers are a subclass of - `TransitiveInfoProvider`. -2. A string. This is legacy and heavily discouraged since it's susceptible to - name clashes. Such transitive info providers are direct subclasses of - `build.lib.packages.Info`. -3. A provider symbol. This can be created from Starlark using the `provider()` - function and is the recommended way to create new providers. The symbol is - represented by a `Provider.Key` instance in Java. - -New providers implemented in Java should be implemented using `BuiltinProvider`. -`NativeProvider` is deprecated (we haven't had time to remove it yet) and -`TransitiveInfoProvider` subclasses cannot be accessed from Starlark. - -### Configured targets - -Configured targets are implemented as `RuleConfiguredTargetFactory`. There is a -subclass for each rule class implemented in Java. Starlark configured targets -are created through `StarlarkRuleConfiguredTargetUtil.buildRule()`. - -Configured target factories should use `RuleConfiguredTargetBuilder` to -construct their return value. It consists of the following things: - -1. Their `filesToBuild`, the hazy concept of "the set of files this rule - represents." These are the files that get built when the configured target - is on the command line or in the `srcs` of a genrule. -2. Their runfiles, regular and data. -3. Their output groups. These are various "other sets of files" the rule can - build. They can be accessed using the `output_group` attribute of the - `filegroup` rule in `BUILD` files and using the `OutputGroupInfo` provider in Java. - -### Runfiles - -Some binaries need data files to run. A prominent example is tests that need -input files. This is represented in Bazel by the concept of "runfiles". A -"runfiles tree" is a directory tree of the data files for a particular binary. -It is created in the file system as a symlink tree with individual symlinks -pointing to the files in the source or output trees. - -A set of runfiles is represented as a `Runfiles` instance. It is conceptually a -map from the path of a file in the runfiles tree to the `Artifact` instance that -represents it. It's a little more complicated than a single `Map` for two -reasons: - -* Most of the time, the runfiles path of a file is the same as its execpath. - We use this to save some RAM. -* There are various legacy kinds of entries in runfiles trees, which also need - to be represented. - -Runfiles are collected using `RunfilesProvider`: an instance of this class -represents the runfiles a configured target (such as a library) and its transitive -closure needs, and they are gathered like a nested set (in fact, they are -implemented using nested sets under the covers): each target unions the runfiles -of its dependencies, adds some of its own, then sends the resulting set upwards -in the dependency graph. A `RunfilesProvider` instance contains two `Runfiles` -instances, one for when the rule is depended on through the "data" attribute and -one for every other kind of incoming dependency. This is because a target -sometimes presents different runfiles when depended on through a data attribute -than otherwise. This is undesired legacy behavior that we haven't gotten around -to removing yet. - -Runfiles of binaries are represented as an instance of `RunfilesSupport`. This -is different from `Runfiles` because `RunfilesSupport` has the capability of -actually being built (unlike `Runfiles`, which is just a mapping).
This -necessitates the following additional components: - -* **The input runfiles manifest.** This is a serialized description of the - runfiles tree. It is used as a proxy for the contents of the runfiles tree - and Bazel assumes that the runfiles tree changes if and only if the contents - of the manifest change. -* **The output runfiles manifest.** This is used by runtime libraries that - handle runfiles trees, notably on Windows, which sometimes doesn't support - symbolic links. -* **The runfiles middleman.** In order for a runfiles tree to exist, one needs - to build the symlink tree and the artifact the symlinks point to. In order - to decrease the number of dependency edges, the runfiles middleman can be - used to represent all these. -* **Command line arguments** for running the binary whose runfiles the - `RunfilesSupport` object represents. - -### Aspects - -Aspects are a way to "propagate computation down the dependency graph". They are -described for users of Bazel -[here](/extending/aspects). A good -motivating example is protocol buffers: a `proto_library` rule should not know -about any particular language, but building the implementation of a protocol -buffer message (the "basic unit" of protocol buffers) in any programming -language should be coupled to the `proto_library` rule so that if two targets in -the same language depend on the same protocol buffer, it gets built only once. - -Just like configured targets, they are represented in Skyframe as a `SkyValue` -and the way they are constructed is very similar to how configured targets are -built: they have a factory class called `ConfiguredAspectFactory` that has -access to a `RuleContext`, but unlike configured target factories, it also knows -about the configured target it is attached to and its providers. - -The set of aspects propagated down the dependency graph is specified for each -attribute using the `Attribute.Builder.aspects()` function. There are a few -confusingly-named classes that participate in the process: - -1. `AspectClass` is the implementation of the aspect. It can be either in Java - (in which case it's a subclass) or in Starlark (in which case it's an - instance of `StarlarkAspectClass`). It's analogous to - `RuleConfiguredTargetFactory`. -2. `AspectDefinition` is the definition of the aspect; it includes the - providers it requires, the providers it provides and contains a reference to - its implementation, such as the appropriate `AspectClass` instance. It's - analogous to `RuleClass`. -3. `AspectParameters` is a way to parametrize an aspect that is propagated down - the dependency graph. It's currently a string to string map. A good example - of why it's useful is protocol buffers: if a language has multiple APIs, the - information as to which API the protocol buffers should be built for should - be propagated down the dependency graph. -4. `Aspect` represents all the data that's needed to compute an aspect that - propagates down the dependency graph. It consists of the aspect class, its - definition and its parameters. -5. `RuleAspect` is the function that determines which aspects a particular rule - should propagate. It's a `Rule` -> `Aspect` function. - -A somewhat unexpected complication is that aspects can attach to other aspects; -for example, an aspect collecting the classpath for a Java IDE will probably -want to know about all the .jar files on the classpath, but some of them are -protocol buffers. 
In that case, the IDE aspect will want to attach to the -(`proto_library` rule + Java proto aspect) pair. - -The complexity of aspects on aspects is captured in the class -`AspectCollection`. - -### Platforms and toolchains - -Bazel supports multi-platform builds, that is, builds where there may be -multiple architectures where build actions run and multiple architectures for -which code is built. These architectures are referred to as _platforms_ in Bazel -parlance (full documentation -[here](/extending/platforms)). - -A platform is described by a key-value mapping from _constraint settings_ (such as -the concept of "CPU architecture") to _constraint values_ (such as a particular CPU -like x86_64). We have a "dictionary" of the most commonly used constraint -settings and values in the `@platforms` repository. - -The concept of _toolchain_ comes from the fact that depending on what platforms -the build is running on and what platforms are targeted, one may need to use -different compilers; for example, a particular C++ toolchain may run on a -specific OS and be able to target some other OSes. Bazel must determine the C++ -compiler that is used based on the execution and target platforms that are set -(documentation for toolchains -[here](/extending/toolchains)). - -In order to do this, toolchains are annotated with the set of execution and -target platform constraints they support. To that end, the definition of -a toolchain is split into two parts: - -1. A `toolchain()` rule that describes the set of execution and target - constraints a toolchain supports and tells what kind of toolchain (such as - C++ or Java) it is (the kind is represented by the `toolchain_type()` rule) -2. A language-specific rule that describes the actual toolchain (such as - `cc_toolchain()`) - -This is done in this way because we need to know the constraints for every -toolchain in order to do toolchain resolution, and language-specific -`*_toolchain()` rules contain much more information than that, so they take more -time to load. - -Execution platforms are specified in one of the following ways: - -1. In the `MODULE.bazel` file using the `register_execution_platforms()` function -2. On the command line using the `--extra_execution_platforms` command line - option - -The set of available execution platforms is computed in -`RegisteredExecutionPlatformsFunction`. - -The target platform for a configured target is determined by -`PlatformOptions.computeTargetPlatform()`. It's a list of platforms because we -eventually want to support multiple target platforms, but it's not implemented -yet. - -The set of toolchains to be used for a configured target is determined by -`ToolchainResolutionFunction`. It is a function of: - -* The set of registered toolchains (in the `MODULE.bazel` file and the - configuration) -* The desired execution and target platforms (in the configuration) -* The set of toolchain types that are required by the configured target (in - `UnloadedToolchainContextKey`) -* The set of execution platform constraints of the configured target (the - `exec_compatible_with` attribute) and the configuration - (`--experimental_add_exec_constraints_to_targets`), in - `UnloadedToolchainContextKey` - -Its result is an `UnloadedToolchainContext`, which is essentially a map from -toolchain type (represented as a `ToolchainTypeInfo` instance) to the label of -the selected toolchain. It's called "unloaded" because it does not contain the -toolchains themselves, only their labels.
- -Then the toolchains are actually loaded using `ResolvedToolchainContext.load()` -and used by the implementation of the configured target that requested them. - -We also have a legacy system that relies on there being one single "host" -configuration and target configurations being represented by various -configuration flags, such as `--cpu`. We are gradually transitioning to the above -system. In order to handle cases where people rely on the legacy configuration -values, we have implemented -[platform mappings](https://docs.google.com/document/d/1Vg_tPgiZbSrvXcJ403vZVAGlsWhH9BUDrAxMOYnO0Ls) -to translate between the legacy flags and the new-style platform constraints. -Their code is in `PlatformMappingFunction` and uses a non-Starlark "little -language". - -### Constraints - -Sometimes one wants to designate a target as being compatible with only a few -platforms. Bazel has (unfortunately) multiple mechanisms to achieve this end: - -* Rule-specific constraints -* `environment_group()` / `environment()` -* Platform constraints - -Rule-specific constraints are mostly used within Google for Java rules; they are -on their way out and they are not available in Bazel, but the source code may -contain references to them. The attribute that governs this is called -`constraints=`. - -#### environment_group() and environment() - -These rules are a legacy mechanism and are not widely used. - -All build rules can declare which "environments" they can be built for, where an -"environment" is an instance of the `environment()` rule. - -There are various ways supported environments can be specified for a rule: - -1. Through the `restricted_to=` attribute. This is the most direct form of - specification; it declares the exact set of environments the rule supports. -2. Through the `compatible_with=` attribute. This declares environments a rule - supports in addition to "standard" environments that are supported by - default. -3. Through the package-level attributes `default_restricted_to=` and - `default_compatible_with=`. -4. Through default specifications in `environment_group()` rules. Every - environment belongs to a group of thematically related peers (such as "CPU - architectures", "JDK versions" or "mobile operating systems"). The - definition of an environment group includes which of these environments - should be supported by "default" if not otherwise specified by the - `restricted_to=` / `environment()` attributes. A rule with no such - attributes inherits all defaults. -5. Through a rule class default. This overrides global defaults for all - instances of the given rule class. This can be used, for example, to make - all `*_test` rules testable without each instance having to explicitly - declare this capability. - -`environment()` is implemented as a regular rule, whereas `environment_group()` -is both a subclass of `Target` (but not of `Rule`), namely `EnvironmentGroup`, and a -function that is available by default from Starlark -(`StarlarkLibrary.environmentGroup()`), which eventually creates an eponymous -target. This is to avoid a cyclic dependency that would arise because each -environment needs to declare the environment group it belongs to and each -environment group needs to declare its default environments. - -A build can be restricted to a certain environment with the -`--target_environment` command line option. - -The implementation of the constraint check is in -`RuleContextConstraintSemantics` and `TopLevelConstraintSemantics`.
- -#### Platform constraints - -The current "official" way to describe what platforms a target is compatible -with is by using the same constraints used to describe toolchains and platforms. -It was implemented in pull request -[#10945](https://github.com/bazelbuild/bazel/pull/10945). - -### Visibility - -If you work on a large codebase with a lot of developers (like at Google), you -want to take care to prevent everyone else from arbitrarily depending on your -code. Otherwise, as per [Hyrum's law](https://www.hyrumslaw.com/), -people _will_ come to rely on behaviors that you considered to be implementation -details. - -Bazel supports this with a mechanism called _visibility_: you can limit which -targets can depend on a particular target using the -[visibility](/reference/be/common-definitions#common-attributes) attribute. This -attribute is a little special because, although it holds a list of labels, these -labels may encode a pattern over package names rather than a pointer to any -particular target. (Yes, this is a design flaw.) - -This is implemented in the following places: - -* The `RuleVisibility` interface represents a visibility declaration. It can - be either a constant (fully public or fully private) or a list of labels. -* Labels can refer either to package groups (predefined lists of packages), to - packages directly (`//pkg:__pkg__`) or subtrees of packages - (`//pkg:__subpackages__`). This is different from the command line syntax, - which uses `//pkg:*` or `//pkg/...`. -* Package groups are implemented as their own target (`PackageGroup`) and - configured target (`PackageGroupConfiguredTarget`). We could probably - replace these with simple rules if we wanted to. Their logic is implemented - with the help of: `PackageSpecification`, which corresponds to a - single pattern like `//pkg/...`; `PackageGroupContents`, which corresponds - to a single `package_group`'s `packages` attribute; and - `PackageSpecificationProvider`, which aggregates over a `package_group` and - its transitive `includes`. -* The conversion from visibility label lists to dependencies is done in - `DependencyResolver.visitTargetVisibility` and a few other miscellaneous - places. -* The actual check is done in - `CommonPrerequisiteValidator.validateDirectPrerequisiteVisibility()`. - -### Nested sets - -Oftentimes, a configured target aggregates a set of files from its dependencies, -adds its own, and wraps the aggregate set into a transitive info provider so -that configured targets that depend on it can do the same. Examples: - -* The C++ header files used for a build -* The object files that represent the transitive closure of a `cc_library` -* The set of .jar files that need to be on the classpath for a Java rule to - compile or run -* The set of Python files in the transitive closure of a Python rule - -If we did this the naive way by using, for example, `List` or `Set`, we'd end up with -quadratic memory usage: if there is a chain of N rules and each rule adds a -file, we'd have 1+2+...+N collection members. - -In order to get around this problem, we came up with the concept of a -`NestedSet`. It's a data structure that is composed of other `NestedSet` -instances and some members of its own, thereby forming a directed acyclic graph -of sets. They are immutable and their members can be iterated over. We define -multiple iteration orders (`NestedSet.Order`): preorder, postorder, topological -(a node always comes after its ancestors) and "don't care, but it should be the -same each time".
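A hedged sketch of the aggregation pattern; the helper method and its callers are invented, but `NestedSetBuilder` and its `addTransitive()` call are the real mechanism:

```java
import com.google.devtools.build.lib.actions.Artifact;
import com.google.devtools.build.lib.collect.nestedset.NestedSet;
import com.google.devtools.build.lib.collect.nestedset.NestedSetBuilder;

final class NestedSetExample {
  // Aggregates this target's own headers with its dependencies' sets without
  // copying their members: addTransitive() links the dependency's DAG node
  // in O(1) instead of flattening it.
  static NestedSet<Artifact> transitiveHeaders(
      Iterable<Artifact> ownHeaders, Iterable<NestedSet<Artifact>> depSets) {
    NestedSetBuilder<Artifact> builder = NestedSetBuilder.stableOrder();
    builder.addAll(ownHeaders);
    for (NestedSet<Artifact> depSet : depSets) {
      builder.addTransitive(depSet);
    }
    return builder.build();
  }
}
```

Flattening such a set (for example with `toList()`) costs time and memory proportional to the size of the transitive closure, so it is best deferred for as long as possible.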
- -The same data structure is called `depset` in Starlark. - -### Artifacts and Actions - -The actual build consists of a set of commands that need to be run to produce -the output the user wants. The commands are represented as instances of the -class `Action` and the files are represented as instances of the class -`Artifact`. They are arranged in a bipartite, directed, acyclic graph called the -"action graph". - -Artifacts come in two kinds: source artifacts (ones that are available -before Bazel starts executing) and derived artifacts (ones that need to be -built). Derived artifacts can themselves be multiple kinds: - -1. **Regular artifacts.** These are checked for up-to-dateness by computing - their checksum, with mtime as a shortcut; we don't checksum the file if its - ctime hasn't changed. -2. **Unresolved symlink artifacts.** These are checked for up-to-dateness by - calling readlink(). Unlike regular artifacts, these can be dangling - symlinks. Usually used in cases where one then packs up some files into an - archive of some sort. -3. **Tree artifacts.** These are not single files, but directory trees. They - are checked for up-to-dateness by checking the set of files they contain and - their contents. They are represented as a `TreeArtifact`. -4. **Constant metadata artifacts.** Changes to these artifacts don't trigger a - rebuild. This is used exclusively for build stamp information: we don't want - to do a rebuild just because the current time changed. - -There is no fundamental reason why source artifacts cannot be tree artifacts or -unresolved symlink artifacts; it's just that we haven't implemented it yet (we -should, though -- referencing a source directory in a `BUILD` file is one of the -few known long-standing incorrectness issues with Bazel; we have an -implementation that kind of works which is enabled by the -`BAZEL_TRACK_SOURCE_DIRECTORIES=1` JVM property). - -A notable kind of `Artifact` is the middleman. Middlemen are indicated by `Artifact` -instances that are the outputs of `MiddlemanAction`. They are used for one -special case: - -* Runfiles middlemen are used to ensure the presence of a runfiles tree so - that one does not separately need to depend on the output manifest and every - single artifact referenced by the runfiles tree. - -Actions are best understood as a command that needs to be run, the environment -it needs and the set of outputs it produces. The following things are the main -components of the description of an action: - -* The command line that needs to be run -* The input artifacts it needs -* The environment variables that need to be set -* Annotations that describe the environment (such as platform) it needs to run in - -There are also a few other special cases, like writing a file whose content is -known to Bazel. They are a subclass of `AbstractAction`. Most of the actions are -a `SpawnAction` or a `StarlarkAction` (these are the same; they should arguably not be -separate classes), although Java and C++ have their own action types -(`JavaCompileAction`, `CppCompileAction` and `CppLinkAction`). - -We eventually want to move everything to `SpawnAction`; `JavaCompileAction` is -pretty close, but C++ is a bit of a special case due to .d file parsing and -include scanning. - -The action graph is mostly "embedded" into the Skyframe graph: conceptually, the -execution of an action is represented as an invocation of -`ActionExecutionFunction`.
The mapping from an action graph dependency edge to a -Skyframe dependency edge is described in -`ActionExecutionFunction.getInputDeps()` and `Artifact.key()` and has a few -optimizations in order to keep the number of Skyframe edges low: - -* Derived artifacts do not have their own `SkyValue`s. Instead, - `Artifact.getGeneratingActionKey()` is used to find out the key for the - action that generates them. -* Nested sets have their own Skyframe key. - -### Shared actions - -Some actions are generated by multiple configured targets; Starlark rules are -more limited since they are only allowed to put their derived actions into a -directory determined by their configuration and their package (but even so, -rules in the same package can conflict), but rules implemented in Java can put -derived artifacts anywhere. - -This is considered to be a misfeature, but getting rid of it is really hard -because it produces significant savings in execution time when, for example, a -source file needs to be processed somehow and that file is referenced by -multiple rules (handwave-handwave). This comes at the cost of some RAM: each -instance of a shared action needs to be stored in memory separately. - -If two actions generate the same output file, they must be exactly the same: -have the same inputs, the same outputs and run the same command line. This -equivalence relation is implemented in `Actions.canBeShared()` and it is -verified between the analysis and execution phases by looking at every Action. -This is implemented in `SkyframeActionExecutor.findAndStoreArtifactConflicts()` -and is one of the few places in Bazel that requires a "global" view of the -build. - -## The execution phase - -This is when Bazel actually starts running build actions, such as commands that -produce outputs. - -The first thing Bazel does after the analysis phase is to determine which -`Artifact`s need to be built. The logic for this is encoded in -`TopLevelArtifactHelper`; roughly speaking, it's the `filesToBuild` of the -configured targets on the command line and the contents of a special output -group for the explicit purpose of expressing "if this target is on the command -line, build these artifacts". - -The next step is creating the execution root. Since Bazel has the option to read -source packages from different locations in the file system (`--package_path`), -it needs to provide locally executed actions with a full source tree. This is -handled by the class `SymlinkForest` and works by taking note of every target -used in the analysis phase and building up a single directory tree that symlinks -every package with a used target from its actual location. An alternative would -be to pass the correct paths to commands (taking `--package_path` into account). -This is undesirable because: - -* It changes action command lines when a package is moved from one package path - entry to another (which used to be a common occurrence) -* It results in different command lines if an action is run remotely than if - it's run locally -* It requires a command line transformation specific to the tool in use - (consider the difference between, say, Java classpaths and C++ include paths) -* Changing the command line of an action invalidates its action cache entry -* `--package_path` is slowly and steadily being deprecated - -Then, Bazel starts traversing the action graph (the bipartite, directed graph -composed of actions and their input and output artifacts) and running actions.
-The execution of each action is represented by an instance of the `SkyValue` -class `ActionExecutionValue`. - -Since running an action is expensive, we have a few layers of caching that can -be hit behind Skyframe: - -* `ActionExecutionFunction.stateMap` contains data to make Skyframe restarts - of `ActionExecutionFunction` cheap -* The local action cache contains data about the state of the file system -* Remote execution systems usually also contain their own cache - -### The local action cache - -This cache is another layer that sits behind Skyframe; even if an action is -re-executed in Skyframe, it can still be a hit in the local action cache. It -represents the state of the local file system and it's serialized to disk, which -means that when one starts up a new Bazel server, one can get local action cache -hits even though the Skyframe graph is empty. - -This cache is checked for hits using the method -`ActionCacheChecker.getTokenIfNeedToExecute()`. - -Contrary to its name, it's a map from the path of a derived artifact to the -action that emitted it. The action is described as: - -1. The set of its input and output files and their checksums -2. Its "action key", which is usually the command line that was executed, but - in general, represents everything that's not captured by the checksum of the - input files (such as for `FileWriteAction`, it's the checksum of the data - that's written) - -There is also a highly experimental "top-down action cache" that is still under -development, which uses transitive hashes to avoid going to the cache as many -times. - -### Input discovery and input pruning - -Some actions are more complicated than just having a set of inputs. Changes to -the set of inputs of an action come in two forms: - -* An action may discover new inputs before its execution or decide that some - of its inputs are not actually necessary. The canonical example is C++, - where it's better to make an educated guess about what header files a C++ - file uses from its transitive closure so that we don't need to send every - file to remote executors; therefore, we have an option not to register every - header file as an "input", but scan the source file for transitively - included headers and only mark those header files as inputs that are - mentioned in `#include` statements (we overestimate so that we don't need to - implement a full C preprocessor). This option is currently hard-wired to - "false" in Bazel and is only used at Google. -* An action may realize that some files were not used during its execution. In - C++, this is called ".d files": the compiler tells us after the fact which - header files were used, and in order to avoid the embarrassment of having worse - incrementality than Make, Bazel makes use of this fact. This offers a better - estimate than the include scanner because it relies on the compiler. - -These are implemented using methods on `Action`: - -1. `Action.discoverInputs()` is called. It should return a nested set of - `Artifact`s that are determined to be required. These must be source artifacts - so that there are no dependency edges in the action graph that don't have an - equivalent in the configured target graph. -2. The action is executed by calling `Action.execute()`. -3. At the end of `Action.execute()`, the action can call - `Action.updateInputs()` to tell Bazel that not all of its inputs were - needed. This can result in incorrect incremental builds if a used input is - reported as unused.
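A hedged sketch of that sequence from the executor's point of view; the orchestration is simplified (the real logic lives in `ActionExecutionFunction` and `SkyframeActionExecutor`), but the `Action` methods are the ones listed above:

```java
// Simplified orchestration of input discovery and pruning.
static ActionResult runWithDiscovery(Action action, ActionExecutionContext ctx)
    throws ActionExecutionException, InterruptedException {
  if (action.discoversInputs()) {
    // Step 1: the action may add inputs before execution, such as headers
    // found by C++ include scanning.
    action.discoverInputs(ctx);
  }
  // Step 2: run the action. During execute(), the action may invoke its own
  // updateInputs() (step 3), such as after parsing a .d file, so that inputs
  // that turned out to be unused no longer invalidate it on the next build.
  return action.execute(ctx);
}
```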
- -When an action cache returns a hit on a fresh Action instance (such as one created -after a server restart), Bazel calls `updateInputs()` itself so that the set of -inputs reflects the result of input discovery and pruning done before. - -Starlark actions can make use of the facility to declare some inputs as unused -using the `unused_inputs_list=` argument of -`ctx.actions.run()`. - -### Various ways to run actions: Strategies/ActionContexts - -Some actions can be run in different ways. For example, a command line can be -executed locally, locally but in various kinds of sandboxes, or remotely. The -concept that embodies this is called an `ActionContext` (or `Strategy`, since we -successfully went only halfway with a rename...) - -The life cycle of an action context is as follows: - -1. When the execution phase is started, `BlazeModule` instances are asked what - action contexts they have. This happens in the constructor of - `ExecutionTool`. Action context types are identified by a Java `Class` - instance that refers to a sub-interface of `ActionContext`; this is the - interface the action context must implement. -2. The appropriate action context is selected from the available ones and is - forwarded to `ActionExecutionContext` and `BlazeExecutor`. -3. Actions request contexts using `ActionExecutionContext.getContext()` and - `BlazeExecutor.getStrategy()` (there should really be only one way to do - it…) - -Strategies are free to call other strategies to do their jobs; this is used, for -example, in the dynamic strategy that starts actions both locally and remotely, -then uses whichever finishes first. - -One notable strategy is the one that implements persistent worker processes -(`WorkerSpawnStrategy`). The idea is that some tools have a long startup time -and should therefore be reused between actions instead of starting one anew for -every action. (This does represent a potential correctness issue, since Bazel -relies on the promise of the worker process that it doesn't carry observable -state between individual requests.) - -If the tool changes, the worker process needs to be restarted. Whether a worker -can be reused is determined by computing a checksum for the tool used, using -`WorkerFilesHash`. It relies on knowing which inputs of the action represent -part of the tool and which represent inputs; this is determined by the creator -of the Action: `Spawn.getToolFiles()` and the runfiles of the `Spawn` are -counted as parts of the tool. - -More information about strategies (or action contexts!): - -* Information about various strategies for running actions is available - [here](https://jmmv.dev/2019/12/bazel-strategies.html). -* Information about the dynamic strategy, one where we run an action both - locally and remotely to see which finishes first, is available - [here](https://jmmv.dev/series.html#Bazel%20dynamic%20execution). -* Information about the intricacies of executing actions locally is available - [here](https://jmmv.dev/2019/11/bazel-process-wrapper.html). - -### The local resource manager - -Bazel _can_ run many actions in parallel. The number of local actions that -_should_ be run in parallel differs from action to action: the more resources an -action requires, the fewer instances should be running at the same time to avoid -overloading the local machine. - -This is implemented in the class `ResourceManager`: each action has to be -annotated with an estimate of the local resources it requires in the form of a -`ResourceSet` instance (CPU and RAM); a sketch of the acquire/release pattern -follows.
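This is a hedged sketch of that pattern; the estimate numbers are invented, `resourceManager` and `owner` are assumed to be in scope, and the real `ResourceManager` API surface is larger than shown:

```java
// Illustrative only: reserve estimated resources around action execution.
ResourceSet estimate = ResourceSet.createWithRamCpu(/* ramMb= */ 250, /* cpu= */ 1);
try (ResourceHandle handle = resourceManager.acquireResources(owner, estimate)) {
  // Run the action while the machine has capacity reserved for it;
  // acquireResources() blocks until the estimate fits.
} // Closing the handle returns the reserved CPU/RAM to the pool.
```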
-
-### The structure of the output directory
-
-Each action requires a separate place in the output directory where it places
-its outputs. The location of derived artifacts is usually as follows:
-
-```
-$EXECROOT/bazel-out/<configuration>/bin/<package>/<artifact name>
-```
-
-How is the name of the directory that is associated with a particular
-configuration determined? There are two conflicting desirable properties:
-
-1. If two configurations can occur in the same build, they should have
-   different directories so that both can have their own version of the same
-   action; otherwise, if the two configurations disagree about, for example,
-   the command line of an action producing the same output file, Bazel doesn't
-   know which action to choose (an "action conflict")
-2. If two configurations represent "roughly" the same thing, they should have
-   the same name so that actions executed in one can be reused for the other if
-   the command lines match: for example, changes to the command line options of
-   the Java compiler should not result in C++ compile actions being re-run.
-
-So far, we have not come up with a principled way of solving this problem, which
-has similarities to the problem of configuration trimming. A longer discussion
-of options is available
-[here](https://docs.google.com/document/d/1fZI7wHoaS-vJvZy9SBxaHPitIzXE_nL9v4sS4mErrG4/edit).
-The main problematic areas are Starlark rules (whose authors usually aren't
-intimately familiar with Bazel) and aspects, which add another dimension to the
-space of things that can produce the "same" output file.
-
-The current approach is that the path segment for the configuration is
-`<CPU>-<compilation mode>` with various suffixes added so that configuration
-transitions implemented in Java don't result in action conflicts. In addition, a
-checksum of the set of Starlark configuration transitions is added so that users
-can't cause action conflicts. It is far from perfect. This is implemented in
-`OutputDirectories.buildMnemonic()` and relies on each configuration fragment
-adding its own part to the name of the output directory.
-
-## Tests
-
-Bazel has rich support for running tests. It supports:
-
-* Running tests remotely (if a remote execution backend is available)
-* Running tests multiple times in parallel (for deflaking or gathering timing
-  data)
-* Sharding tests (splitting test cases in the same test across multiple
-  processes for speed)
-* Re-running flaky tests
-* Grouping tests into test suites
-
-Tests are regular configured targets that have a `TestProvider`, which
-describes how the test should be run:
-
-* The artifacts whose building results in the test being run. This is a "cache
-  status" file that contains a serialized `TestResultData` message
-* The number of times the test should be run
-* The number of shards the test should be split into
-* Some parameters about how the test should be run (such as the test timeout)
-
-### Determining which tests to run
-
-Determining which tests are run is an elaborate process.
-
-First, during target pattern parsing, test suites are recursively expanded. The
-expansion is implemented in `TestsForTargetPatternFunction`. A somewhat
-surprising wrinkle is that if a test suite declares no tests, it refers to
-_every_ test in its package. This is implemented in `Package.beforeBuild()` by
-adding an implicit attribute called `$implicit_tests` to test suite rules.
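-
-For example, in a hypothetical `BUILD` file like the one below, `:all_tests`
-expands to both tests in the package even though it lists none explicitly:
-
-```
-test_suite(name = "all_tests")  # No `tests=`: every test in this package.
-
-sh_test(name = "a_test", srcs = ["a_test.sh"])
-sh_test(name = "b_test", srcs = ["b_test.sh"])
-```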
-
-Then, tests are filtered for size, tags, timeout and language according to the
-command line options. This is implemented in `TestFilter` and is called from
-`TargetPatternPhaseFunction.determineTests()` during target parsing, and the
-result is put into `TargetPatternPhaseValue.getTestsToRunLabels()`. The reason
-why the rule attributes that can be filtered for are not configurable is that
-this happens before the analysis phase and therefore the configuration is not
-available.
-
-This is then processed further in `BuildView.createResult()`: targets whose
-analysis failed are filtered out and tests are split into exclusive and
-non-exclusive tests. It's then put into `AnalysisResult`, which is how
-`ExecutionTool` knows which tests to run.
-
-In order to lend some transparency to this elaborate process, the `tests()`
-query operator (implemented in `TestsFunction`) is available to tell which tests
-are run when a particular target is specified on the command line. It's
-unfortunately a reimplementation, so it probably deviates from the above in
-multiple subtle ways.
-
-### Running tests
-
-Tests are run by requesting their cache status artifacts. This then results in
-the execution of a `TestRunnerAction`, which eventually calls the
-`TestActionContext` chosen by the `--test_strategy` command line option; the
-context runs the test in the requested way.
-
-Tests are run according to an elaborate protocol that uses environment variables
-to tell tests what's expected from them. A detailed description of what Bazel
-expects from tests and what tests can expect from Bazel is available
-[here](/reference/test-encyclopedia). At the simplest, an exit code of 0 means
-success; anything else means failure.
-
-In addition to the cache status file, each test process emits a number of other
-files. They are put in the "test log directory", which is the subdirectory
-called `testlogs` of the output directory of the target configuration:
-
-* `test.xml`, a JUnit-style XML file detailing the individual test cases in
-  the test shard
-* `test.log`, the console output of the test. stdout and stderr are not
-  separated.
-* `test.outputs`, the "undeclared outputs directory"; this is used by tests
-  that want to output files in addition to what they print to the terminal.
-
-There are two things that can happen during test execution that cannot happen
-when building regular targets: exclusive test execution and output streaming.
-
-Some tests need to be executed in exclusive mode, that is, not in parallel with
-other tests. This can be elicited either by adding `tags=["exclusive"]` to the
-test rule or by running the test with `--test_strategy=exclusive`. Each
-exclusive test is run by a separate Skyframe invocation requesting the
-execution of the test after the "main" build. This is implemented in
-`SkyframeExecutor.runExclusiveTest()`.
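-
-For example (a hypothetical target):
-
-```
-sh_test(
-    name = "db_integration_test",
-    srcs = ["db_integration_test.sh"],
-    tags = ["exclusive"],  # Never run in parallel with other tests.
-)
-```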
-
-Unlike regular actions, whose terminal output is dumped when the action
-finishes, the user can request the output of tests to be streamed so that they
-are informed about the progress of a long-running test. This is specified by the
-`--test_output=streamed` command line option and implies exclusive test
-execution so that outputs of different tests are not interspersed.
-
-This is implemented in the aptly-named `StreamedTestOutput` class and works by
-polling changes to the `test.log` file of the test in question and dumping new
-bytes to the terminal where Bazel runs.
-
-Results of the executed tests are available on the event bus by observing
-various events (such as `TestAttempt`, `TestResult` or `TestingCompleteEvent`).
-They are dumped to the Build Event Protocol and they are emitted to the console
-by `AggregatingTestListener`.
-
-### Coverage collection
-
-Coverage is reported by the tests in LCOV format in the files
-`bazel-testlogs/$PACKAGE/$TARGET/coverage.dat`.
-
-To collect coverage, each test execution is wrapped in a script called
-`collect_coverage.sh`.
-
-This script sets up the environment of the test to enable coverage collection
-and determines where the coverage files are written by the coverage runtime(s).
-It then runs the test. A test may itself run multiple subprocesses and consist
-of parts written in multiple different programming languages (with separate
-coverage collection runtimes). The wrapper script is responsible for converting
-the resulting files to LCOV format if necessary and for merging them into a
-single file.
-
-The interposition of `collect_coverage.sh` is done by the test strategies and
-requires `collect_coverage.sh` to be on the inputs of the test. This is
-accomplished by the implicit attribute `:coverage_support`, which is resolved to
-the value of the configuration flag `--coverage_support` (see
-`TestConfiguration.TestOptions.coverageSupport`).
-
-Some languages do offline instrumentation, meaning that the coverage
-instrumentation is added at compile time (such as C++), and others do online
-instrumentation, meaning that coverage instrumentation is added at execution
-time.
-
-Another core concept is _baseline coverage_. This is the coverage of a library,
-binary, or test if no code in it was run. The problem it solves is that if you
-want to compute the test coverage for a binary, it is not enough to merge the
-coverage of all of the tests because there may be code in the binary that is not
-linked into any test. Therefore, what we do is emit a coverage file for every
-binary that contains only the files we collect coverage for, with no covered
-lines. The baseline coverage file for a target is at
-`bazel-testlogs/$PACKAGE/$TARGET/baseline_coverage.dat`. It is also generated
-for binaries and libraries in addition to tests if you pass the
-`--nobuild_tests_only` flag to Bazel.
-
-Baseline coverage is currently broken.
-
-We track two groups of files for coverage collection for each rule: the set of
-instrumented files and the set of instrumentation metadata files.
-
-The set of instrumented files is just that: a set of files to instrument. For
-online coverage runtimes, this can be used at runtime to decide which files to
-instrument. It is also used to implement baseline coverage.
-
-The set of instrumentation metadata files is the set of extra files a test needs
-to generate the LCOV files Bazel requires from it. In practice, this consists of
-runtime-specific files; for example, gcc emits .gcno files during compilation.
-These are added to the set of inputs of test actions if coverage mode is
-enabled.
-
-Whether or not coverage is being collected is stored in the
-`BuildConfiguration`.
-This is handy because it is an easy way to change the test action and the
-action graph depending on this bit, but it also means that if this bit is
-flipped, all targets need to be re-analyzed (some languages, such as C++,
-require different compiler options to emit code that can collect coverage,
-which mitigates this issue somewhat, since then a re-analysis is needed anyway).
-
-The coverage support files are depended on through labels in an implicit
-dependency so that they can be overridden by the invocation policy, which allows
-them to differ between the different versions of Bazel. Ideally, these
-differences would be removed, and we would standardize on one of them.
-
-We also generate a "coverage report", which merges the coverage collected for
-every test in a Bazel invocation. This is handled by
-`CoverageReportActionFactory` and is called from `BuildView.createResult()`. It
-gets access to the tools it needs by looking at the `:coverage_report_generator`
-attribute of the first test that is executed.
-
-## The query engine
-
-Bazel has a
-[little language](/query/guide)
-used to ask it various things about various graphs. The following query kinds
-are provided:
-
-* `bazel query` is used to investigate the target graph
-* `bazel cquery` is used to investigate the configured target graph
-* `bazel aquery` is used to investigate the action graph
-
-Each of these is implemented by subclassing `AbstractBlazeQueryEnvironment`.
-Additional query functions can be added by subclassing `QueryFunction`. In
-order to allow streaming query results, instead of collecting them into some
-data structure, a `query2.engine.Callback` is passed to `QueryFunction`, which
-calls it for results it wants to return.
-
-The result of a query can be emitted in various ways: labels, labels and rule
-classes, XML, protobuf and so on. These are implemented as subclasses of
-`OutputFormatter`.
-
-A subtle requirement of some query output formats (proto, definitely) is that
-Bazel needs to emit _all_ the information that package loading provides so that
-one can diff the output and determine whether a particular target has changed.
-As a consequence, attribute values need to be serializable, which is why there
-are so few attribute types and why no attribute can hold complex Starlark
-values. The usual workaround is to use a label, and attach the complex
-information to the rule with that label. It's not a very satisfying workaround
-and it would be very nice to lift this requirement.
-
-## The module system
-
-Bazel can be extended by adding modules to it. Each module must subclass
-`BlazeModule` (the name is a relic of the history of Bazel when it used to be
-called Blaze) and gets information about various events during the execution of
-a command.
-
-They are mostly used to implement various pieces of "non-core" functionality
-that only some versions of Bazel (such as the one we use at Google) need:
-
-* Interfaces to remote execution systems
-* New commands
-
-The set of extension points `BlazeModule` offers is somewhat haphazard. Don't
-use it as an example of good design principles.
-
-## The event bus
-
-The main way `BlazeModule`s communicate with the rest of Bazel is by an event
-bus (`EventBus`): a new instance is created for every build, various parts of
-Bazel can post events to it and modules can register listeners for the events
-they are interested in. For example, the following things are represented as
-events:
-
-* The list of build targets to be built has been determined
-  (`TargetParsingCompleteEvent`)
-* The top-level configurations have been determined
-  (`BuildConfigurationEvent`)
-* A target was built, successfully or not (`TargetCompleteEvent`)
-* A test was run (`TestAttempt`, `TestSummary`)
-
-Some of these events are represented outside of Bazel in the
-[Build Event Protocol](/remote/bep)
-(they are `BuildEvent`s). This allows not only `BlazeModule`s, but also things
-outside the Bazel process to observe the build. They are accessible either as a
-file that contains protocol messages or Bazel can connect to a server (called
-the Build Event Service) to stream events.
-
-This is implemented in the `build.lib.buildeventservice` and
-`build.lib.buildeventstream` Java packages.
-
-## External repositories
-
-Note: The information in this section is out of date, as code in this area has
-undergone extensive change in the past couple of years. Please refer to
-[external dependencies overview](/external/overview) for more up-to-date
-information.
-
-Whereas Bazel was originally designed to be used in a monorepo (a single source
-tree containing everything one needs to build), Bazel lives in a world where
-this is not necessarily true. "External repositories" are an abstraction used to
-bridge these two worlds: they represent code that is necessary for the build but
-is not in the main source tree.
-
-### The WORKSPACE file
-
-The set of external repositories is determined by parsing the WORKSPACE file.
-For example, a declaration like this:
-
-```
- local_repository(name="foo", path="/foo/bar")
-```
-
-results in the repository called `@foo` being available. Where this gets
-complicated is that one can define new repository rules in Starlark files, which
-can then be used to load new Starlark code, which can be used to define new
-repository rules and so on…
-
-To handle this case, the parsing of the WORKSPACE file (in
-`WorkspaceFileFunction`) is split up into chunks delineated by `load()`
-statements. The chunk index is indicated by `WorkspaceFileKey.getIndex()` and
-computing `WorkspaceFileFunction` until index X means evaluating it until the
-Xth `load()` statement.
-
-### Fetching repositories
-
-Before the code of the repository is available to Bazel, it needs to be
-_fetched_. This results in Bazel creating a directory under
-`$OUTPUT_BASE/external/`.
-
-Fetching the repository happens in the following steps:
-
-1. `PackageLookupFunction` realizes that it needs a repository and creates a
-   `RepositoryName` as a `SkyKey`, which invokes `RepositoryLoaderFunction`
-2. `RepositoryLoaderFunction` forwards the request to
-   `RepositoryDelegatorFunction` for unclear reasons (the code says it's to
-   avoid re-downloading things in case of Skyframe restarts, but it's not a
-   very solid reason)
-3. `RepositoryDelegatorFunction` finds out the repository rule it's asked to
-   fetch by iterating over the chunks of the WORKSPACE file until the requested
-   repository is found
-4. The appropriate `RepositoryFunction` is found that implements the repository
-   fetching; it's either the Starlark implementation of the repository or a
-   hard-coded map for repositories that are implemented in Java.
-
-There are various layers of caching since fetching a repository can be very
-expensive:
-
-1. There is a cache for downloaded files that is keyed by their checksum
-   (`RepositoryCache`). This requires the checksum to be available in the
-   WORKSPACE file, but that's good for hermeticity anyway. This is shared by
-   every Bazel server instance on the same workstation, regardless of which
-   workspace or output base they are running in.
-2. A "marker file" is written for each repository under `$OUTPUT_BASE/external`
-   that contains a checksum of the rule that was used to fetch it. If the Bazel
-   server restarts but the checksum does not change, it's not re-fetched. This
-   is implemented in `RepositoryDelegatorFunction.DigestWriter`.
-3. The `--distdir` command line option designates another cache that is used to
-   look up artifacts to be downloaded. This is useful in enterprise settings
-   where Bazel should not fetch random things from the Internet. This is
-   implemented by `DownloadManager`.
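-
-For example, a checksummed download in the WORKSPACE file might look like this
-(the repository name, URL and checksum are hypothetical):
-
-```
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-
-http_archive(
-    name = "my_dep",
-    urls = ["https://example.com/my_dep-1.0.tar.gz"],
-    # The checksum makes the download reproducible and lets it be served
-    # from the shared download cache.
-    sha256 = "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
-)
-```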
-
-Once a repository is downloaded, the artifacts in it are treated as source
-artifacts. This poses a problem because Bazel usually checks for up-to-dateness
-of source artifacts by calling `stat()` on them, and these artifacts are also
-invalidated when the definition of the repository they are in changes. Thus,
-`FileStateValue`s for an artifact in an external repository need to depend on
-their external repository. This is handled by `ExternalFilesHelper`.
-
-### Repository mappings
-
-It can happen that multiple repositories want to depend on the same repository,
-but in different versions (this is an instance of the "diamond dependency
-problem"). For example, if two binaries in separate repositories in the build
-want to depend on Guava, they will presumably both refer to Guava with labels
-starting `@guava//` and expect that to mean different versions of it.
-
-Therefore, Bazel allows one to re-map external repository labels so that the
-string `@guava//` can refer to one Guava repository (such as `@guava1//`) in the
-repository of one binary and another Guava repository (such as `@guava2//`) in
-the repository of the other.
-
-Alternatively, this can also be used to **join** diamonds. If a repository
-depends on `@guava1//`, and another depends on `@guava2//`, repository mapping
-allows one to re-map both repositories to use a canonical `@guava//` repository.
-
-The mapping is specified in the WORKSPACE file as the `repo_mapping` attribute
-of individual repository definitions. It then appears in Skyframe as a member of
-`WorkspaceFileValue`, where it is plumbed to:
-
-* `Package.Builder.repositoryMapping`, which is used to transform label-valued
-  attributes of rules in the package by
-  `RuleClass.populateRuleAttributeValues()`
-* `Package.repositoryMapping`, which is used in the analysis phase (for
-  resolving things like `$(location)` which are not parsed in the loading
-  phase)
-* `BzlLoadFunction` for resolving labels in load() statements
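-
-For example, a WORKSPACE declaration using this attribute might look like the
-following (the repository names are hypothetical):
-
-```
-local_repository(
-    name = "binary1",
-    path = "../binary1",
-    # Inside @binary1, labels starting with @guava// resolve to @guava1//.
-    repo_mapping = {"@guava": "@guava1"},
-)
-```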
-
-## JNI bits
-
-The server of Bazel is _mostly_ written in Java. The exceptions are the parts
-that Java cannot do by itself or couldn't do by itself when we implemented it.
-This is mostly limited to interaction with the file system, process control and
-various other low-level things.
-
-The C++ code lives under `src/main/native` and the Java classes with native
-methods are:
-
-* `NativePosixFiles` and `NativePosixFileSystem`
-* `ProcessUtils`
-* `WindowsFileOperations` and `WindowsFileProcesses`
-* `com.google.devtools.build.lib.platform`
-
-## Console output
-
-Emitting console output seems like a simple thing, but the confluence of running
-multiple processes (sometimes remotely), fine-grained caching, the desire to
-have nice and colorful terminal output and having a long-running server makes
-it non-trivial.
-
-Right after the RPC call comes in from the client, two `RpcOutputStream`
-instances are created (for stdout and stderr) that forward the data printed into
-them to the client. These are then wrapped in an `OutErr` (an (stdout, stderr)
-pair). Anything that needs to be printed on the console goes through these
-streams. Then these streams are handed over to
-`BlazeCommandDispatcher.execExclusively()`.
-
-Output is by default printed with ANSI escape sequences. When these are not
-desired (`--color=no`), they are stripped by an `AnsiStrippingOutputStream`. In
-addition, `System.out` and `System.err` are redirected to these output streams.
-This is so that debugging information can be printed using
-`System.err.println()` and still end up in the terminal output of the client
-(which is different from that of the server). Care is taken that if a process
-produces binary output (such as `bazel query --output=proto`), no munging of
-stdout takes place.
-
-Short messages (errors, warnings and the like) are expressed through the
-`EventHandler` interface. Notably, these are different from what one posts to
-the `EventBus` (this is confusing). Each `Event` has an `EventKind` (error,
-warning, info, and a few others) and they may have a `Location` (the place in
-the source code that caused the event to happen).
-
-Some `EventHandler` implementations store the events they received. This is used
-to replay information to the UI caused by various kinds of cached processing,
-for example, the warnings emitted by a cached configured target.
-
-Some `EventHandler`s also allow posting events that eventually find their way to
-the event bus (regular `Event`s do _not_ appear there). These are
-implementations of `ExtendedEventHandler` and their main use is to replay cached
-`EventBus` events. These `EventBus` events all implement `Postable`, but not
-everything that is posted to `EventBus` necessarily implements this interface;
-only those that are cached by an `ExtendedEventHandler` do (it would be nice if
-everything did, and most things do, but it's not enforced).
-
-Terminal output is _mostly_ emitted through `UiEventHandler`, which is
-responsible for all the fancy output formatting and progress reporting Bazel
-does. It has two inputs:
-
-* The event bus
-* The event stream piped into it through `Reporter`
-
-The only direct connection the command execution machinery (that is, the rest
-of Bazel) has to the RPC stream to the client is through `Reporter.getOutErr()`,
-which allows direct access to these streams. It's only used when a command needs
-to dump large amounts of possibly binary data (such as `bazel query`).
-
-## Profiling Bazel
-
-Bazel is fast. Bazel is also slow, because builds tend to grow until just the
-edge of what's bearable. For this reason, Bazel includes a profiler which can be
-used to profile builds and Bazel itself. It's implemented in a class that's
-aptly named `Profiler`.
-It's turned on by default, although it records only abridged data so that its
-overhead is tolerable; the command line option `--record_full_profiler_data`
-makes it record everything it can.
-
-It emits a profile in the Chrome profiler format; it's best viewed in Chrome.
-Its data model is that of task stacks: one can start tasks and end tasks and
-they are supposed to be neatly nested within each other. Each Java thread gets
-its own task stack. **TODO:** How does this work with actions and
-continuation-passing style?
-
-The profiler is started and stopped in `BlazeRuntime.initProfiler()` and
-`BlazeRuntime.afterCommand()`, respectively, and it attempts to be live for as
-long as possible so that we can profile everything. To add something to the
-profile, call `Profiler.instance().profile()`. It returns a `Closeable`, whose
-closure represents the end of the task. It's best used with try-with-resources
-statements.
-
-We also do rudimentary memory profiling in `MemoryProfiler`. It's also always on
-and it mostly records maximum heap sizes and GC behavior.
-
-## Testing Bazel
-
-Bazel has two main kinds of tests: ones that observe Bazel as a "black box" and
-ones that only run the analysis phase. We call the former "integration tests"
-and the latter "unit tests", although they are more like integration tests that
-are, well, less integrated. We also have some actual unit tests, where they are
-necessary.
-
-Of integration tests, we have two kinds:
-
-1. Ones implemented using a very elaborate bash test framework under
-   `src/test/shell`
-2. Ones implemented in Java. These are implemented as subclasses of
-   `BuildIntegrationTestCase`
-
-`BuildIntegrationTestCase` is the preferred integration testing framework as it
-is well-equipped for most testing scenarios. As it is a Java framework, it
-provides debuggability and seamless integration with many common development
-tools. There are many examples of `BuildIntegrationTestCase` classes in the
-Bazel repository.
-
-Analysis tests are implemented as subclasses of `BuildViewTestCase`. There is a
-scratch file system you can use to write `BUILD` files, then various helper
-methods can request configured targets, change the configuration and assert
-various things about the result of the analysis.
diff --git a/8.4.2/contribute/design-documents.mdx b/8.4.2/contribute/design-documents.mdx
deleted file mode 100644
index 1fe70b9..0000000
--- a/8.4.2/contribute/design-documents.mdx
+++ /dev/null
@@ -1,254 +0,0 @@
----
-title: 'Design Documents'
----
-
-
-If you're planning to add, change, or remove a user-facing feature, or make a
-*significant architectural change* to Bazel, you **must** write a design
-document and have it reviewed before you can submit the change.
-
-Here are some examples of significant changes:
-
-* Addition or deletion of native build rules
-* Breaking changes to native rules
-* Changes to native build rule semantics that affect the behavior of more
-  than a single rule
-* Changes to Bazel's rule definition API
-* Changes to the APIs that Bazel uses to connect to other systems
-* Changes to the Starlark language, semantics, or APIs
-* Changes that could have a pervasive effect on Bazel performance or memory
-  usage (for better or for worse)
-* Changes to widely used internal APIs
-* Changes to flags and command-line interface.
-
-## Reasons for design reviews
-
-When you write a design document, you can coordinate with other Bazel developers
-and seek guidance from Bazel's core team.
-For example, when a proposal adds, removes, or modifies any function or object
-available in BUILD, MODULE.bazel, or bzl files, add the
-[Starlark team](maintainers-guide.md) as reviewers.
-Design documents are reviewed before submission because:
-
-* Bazel is a very complex system; seemingly innocuous local changes can have
-  significant global consequences.
-* The team gets many feature requests from users; such requests need to be
-  evaluated not only for technical feasibility but also for importance relative
-  to other feature requests.
-* Bazel features are frequently implemented by people outside the core team;
-  such contributors have widely varying levels of Bazel expertise.
-* The Bazel team itself has varying levels of expertise; no single team member
-  has a complete understanding of every corner of Bazel.
-* Changes to Bazel must account for backward compatibility and avoid breaking
-  changes.
-
-Bazel's design review policy helps to maximize the likelihood that:
-
-* all feature requests get a baseline level of scrutiny.
-* the right people will weigh in on designs before we've invested in an
-  implementation that may not work.
-
-To help you get started, take a look at the design documents in the
-[Bazel Proposals Repository](https://github.com/bazelbuild/proposals).
-Designs are works in progress, so implementation details can change over time
-and with feedback. The published design documents capture the initial design,
-and *not* the ongoing changes as designs are implemented. Always go to the
-documentation for descriptions of current Bazel functionality.
-
-## Contributor Workflow
-
-As a contributor, you can write a design document, send pull requests and
-request reviewers for your proposal.
-
-### Write the design document
-
-All design documents must have a header that includes:
-
-* author
-* date of last major change
-* list of reviewers, including one (and only one)
-  [lead reviewer](#lead-reviewer)
-* current status (_draft_, _in review_, _approved_, _rejected_,
-  _being implemented_, _implemented_)
-* link to discussion thread (_to be added after the announcement_)
-
-The document can be written either [as a world-readable Google Doc](#gdocs)
-or [using Markdown](#markdown). Read below for a
-[Markdown / Google Docs comparison](#markdown-versus-gdocs).
-
-Proposals that have a user-visible impact must have a section documenting the
-impact on backward compatibility (and a rollout plan if needed).
-
-### Create a Pull Request
-
-Share your design doc by creating a pull request (PR) to add the document to
-[the design index](https://github.com/bazelbuild/proposals). Add
-your markdown file or a document link to your PR.
-
-When possible, [choose a lead reviewer](#lead-reviewer)
-and cc other reviewers. If you don't choose a lead reviewer, a Bazel
-maintainer will assign one to your PR.
-
-After you create your PR, reviewers can make preliminary comments during the
-code review. For example, the lead reviewer can suggest extra reviewers, or
-point out missing information. The lead reviewer approves the PR when they
-believe the review process can start. This doesn't mean the proposal is perfect
-or will be approved; it means that the proposal contains enough information to
-start the discussion.
-
-### Announce the new proposal
-
-Send an announcement to
-[bazel-dev](https://groups.google.com/forum/#!forum/bazel-dev) when
-the PR is submitted.
-
-You may copy other groups (for example,
-[bazel-discuss](https://groups.google.com/forum/#!forum/bazel-discuss))
-to get feedback from Bazel end-users.
-
-### Iterate with reviewers
-
-Anyone interested can comment on your proposal. Try to answer questions,
-clarify the proposal, and address concerns.
-
-Discussion should happen on the announcement thread. If the proposal is in a
-Google Doc, comments may be used instead (note that anonymous comments are
-allowed).
-
-### Update the status
-
-When iteration is complete, create a new PR to update the status of the
-proposal. Send the PR to the same lead reviewer and cc the other reviewers.
-
-To officially accept the proposal, the lead reviewer approves the PR after
-ensuring that the other reviewers agree with the decision.
-
-There must be at least 1 week between the first announcement and the approval of
-a proposal. This ensures that users have had enough time to read the document
-and share their concerns.
-
-Implementation can begin before the proposal is accepted, for example as a
-proof-of-concept or an experiment. However, you cannot submit the change
-before the review is complete.
-
-### Choosing a lead reviewer
-
-A lead reviewer should be a domain expert who is:
-
-* Knowledgeable of the relevant subsystems
-* Objective and capable of providing constructive feedback
-* Available for the entire review period to lead the process
-
-Consider checking the contacts for various [team
-labels](/contribute/maintainers-guide#team-labels).
-
-## Markdown vs Google Docs
-
-Decide what works best for you, since both are accepted.
-
-Benefits of using Google Docs:
-
-* Effective for brainstorming, since it is easy to get started with.
-* Collaborative editing.
-* Quick iteration.
-* Easy way to suggest edits.
-
-Benefits of using Markdown files:
-
-* Clean URLs for linking.
-* Explicit record of revisions.
-* No forgetting to set up access rights before publicizing a link.
-* Easily searchable with search engines.
-* Future-proof: Plain text is not at the mercy of any specific tool
-  and doesn't require an Internet connection.
-* It is possible to update them even if the author is not around anymore.
-* They can be processed automatically (update/detect dead links, fetch
-  list of authors, etc.).
-
-You can choose to first iterate on a Google Doc, and then convert it to
-Markdown for posterity.
-
-### Using Google Docs
-
-For consistency, use the [Bazel design doc template](
-https://docs.google.com/document/d/1cE5zrjrR40RXNg64XtRFewSv6FrLV6slGkkqxBumS1w/edit).
-It includes the necessary header and creates visual
-consistency with other Bazel-related documents. To do that, click on **File** >
-**Make a copy** or click this link to [make a copy of the design doc
-template](https://docs.google.com/document/d/1cE5zrjrR40RXNg64XtRFewSv6FrLV6slGkkqxBumS1w/copy).
-
-To make your document readable to the world, click on
-**Share** > **Advanced** > **Change…**, and
-choose "On - Anyone with the link". If you allow comments on the document,
-anyone can comment anonymously, even without a Google account.
-
-### Using Markdown
-
-Documents are stored on GitHub and use the
-[GitHub flavor of Markdown](https://guides.github.com/features/mastering-markdown/)
-([Specification](https://github.github.com/gfm/)).
-
-Create a PR to update an existing document. Significant changes should be
-reviewed by the document reviewers. Trivial changes (such as typos, formatting)
-can be approved by anyone.
-
-## Reviewer workflow
-
-A reviewer comments on, reviews, and approves design documents.
-
-### General reviewer responsibilities
-
-You're responsible for reviewing design documents, asking for additional
-information if needed, and approving a design that passes the review process.
-
-#### When you receive a new proposal
-
-1. Take a quick look at the document.
-1. Comment if critical information is missing, or if the design doesn't fit
-   with the goals of the project.
-1. Suggest additional reviewers.
-1. Approve the PR when it is ready for review.
-
-#### During the review process
-
-1. Engage in a dialogue with the design author about issues that are problematic
-   or require clarification.
-1. If appropriate, invite comments from non-reviewers who should be aware of
-   the design.
-1. Decide which comments must be addressed by the author as a prerequisite to
-   approval.
-1. Write "LGTM" (_Looks Good To Me_) in the discussion thread when you are
-   happy with the current state of the proposal.
-
-Follow this process for all design review requests. Do not approve designs
-affecting Bazel if they are not in the
-[design index](https://github.com/bazelbuild/proposals).
-
-### Lead reviewer responsibilities
-
-You're responsible for making the go / no-go decision on implementation
-of a pending design. If you're not able to do this, you should identify a
-suitable delegate (reassign the PR to the delegate), or reassign the bug to a
-Bazel manager for further disposition.
-
-#### During the review process
-
-1. Ensure that the comment and design iteration process moves forward
-   constructively.
-1. Prior to approval, ensure that concerns from other reviewers have been
-   resolved.
-
-#### After approval by all reviewers
-
-1. Make sure there has been at least 1 week since the announcement on the
-   mailing list.
-1. Make sure the PR updates the status.
-1. Approve the PR sent by the proposal author.
-
-#### Rejecting designs
-
-1. Make sure the proposal author sends a PR to update the status, or send them
-   one yourself.
-1. The PR updates the status of the document.
-1. Add a comment to the document explaining why the design can't be approved in
-   its current state, and outlining next steps, if any (such as "revisit invalid
-   assumptions and resubmit").
diff --git a/8.4.2/contribute/docs-style-guide.mdx b/8.4.2/contribute/docs-style-guide.mdx
deleted file mode 100644
index f50c9eb..0000000
--- a/8.4.2/contribute/docs-style-guide.mdx
+++ /dev/null
@@ -1,217 +0,0 @@
----
-title: 'Bazel docs style guide'
----
-
-
-Thank you for contributing to Bazel's documentation. This serves as a quick
-documentation style guide to get you started. For any style questions not
-answered by this guide, follow the
-[Google developer documentation style guide](https://developers.google.com/style).
-
-## Defining principles
-
-Bazel docs should uphold these principles:
-
-- **Concise.** Use as few words as possible.
-- **Clear.** Use plain language. Write without jargon for a fifth-grade
-  reading level.
-- **Consistent.** Use the same words or phrases for repeated concepts
-  throughout the docs.
-- **Correct.** Write in a way where the content stays correct for as long as
-  possible by avoiding time-based information and promises for the future.
-
-## Writing
-
-This section contains basic writing tips.
-
-### Headings
-
-- Page-level headings start at H2. (H1 headings are used as page titles.)
-- Make headings as short as is sensible. This way, they fit in the TOC
-  without wrapping.
-
-  - Yes: Permissions
-  - No: A brief note on permissions
-
-- Use sentence case for headings
-
-  - Yes: Set up your workspace
-  - No: Set Up Your Workspace
-
-- Try to make headings task-based or actionable. If headings are conceptual,
-  they may be based around understanding, but write to what the user does.
-
-  - Yes: Preserving graph order
-  - No: On the preservation of graph order
-
-### Names
-
-- Capitalize proper nouns, such as Bazel and Starlark.
-
-  - Yes: At the end of the build, Bazel prints the requested targets.
-  - No: At the end of the build, bazel prints the requested targets.
-
-- Keep it consistent. Don't introduce new names for existing concepts. Where
-  applicable, use the term defined in the
-  [Glossary](/reference/glossary).
-
-  - For example, if you're writing about issuing commands on a
-    terminal, don't use both terminal and command line on the page.
-
-### Page scope
-
-- Each page should have one purpose and that should be defined at the
-  beginning. This helps readers find what they need more quickly.
-
-  - Yes: This page covers how to install Bazel on Windows.
-  - No: (No introductory sentence.)
-
-- At the end of the page, tell the reader what to do next. For pages where
-  there is no clear action, you can include links to similar concepts,
-  examples, or other avenues for exploration.
-
-### Subject
-
-In Bazel documentation, the audience should primarily be users—the people using
-Bazel to build their software.
-
-- Address your reader as "you". (If for some reason you can't use "you",
-  use gender-neutral language, such as they.)
-  - Yes: To build Java code using Bazel,
-    you must install a JDK.
-  - **MAYBE:** For users to build Java code with Bazel, they must install a JDK.
-  - No: For a user to build Java code with
-    Bazel, he or she must install a JDK.
-
-- If your audience is NOT general Bazel users, define the audience at the
-  beginning of the page or in the section. Other audiences can include
-  maintainers, contributors, migrators, or other roles.
-- Avoid "we". In user docs, there is no author; just tell people what's
-  possible.
-  - Yes: As Bazel evolves, you should update your code base to maintain
-    compatibility.
-  - No: Bazel is evolving, and we will make changes to Bazel that at
-    times will be incompatible and require some changes from Bazel users.
-
-### Temporal
-
-Where possible, avoid terms that orient things in time, such as referencing
-specific dates (Q2 2022) or saying "now", "currently", or "soon." These go
-stale quickly and could be incorrect if it's a future projection. Instead,
-specify a version level, such as "Bazel X.x and higher supports \<feature\>",
-or link to a GitHub issue.
-
-- Yes: Bazel 0.10.0 or later supports
-  remote caching.
-- No: Bazel will soon support remote
-  caching, likely in October 2017.
-
-### Tense
-
-- Use present tense. Avoid past or future tense unless absolutely necessary
-  for clarity.
-  - Yes: Bazel issues an error when it
-    finds dependencies that don't conform to this rule.
-  - No: If Bazel finds a dependency that
-    does not conform to this rule, Bazel will issue an error.
-
-- Where possible, use active voice (where a subject acts upon an object) not
-  passive voice (where an object is acted upon by a subject). Generally,
-  active voice makes sentences clearer because it shows who is responsible. If
-  using active voice detracts from clarity, use passive voice.
-  - Yes: Bazel initiates X and uses the
-    output to build Y.
-  - No: X is initiated by Bazel and then
-    afterward Y will be built with the output.
-
-### Tone
-
-Write with a business-friendly tone.
-
-- Avoid colloquial language. It's harder to translate phrases that are
-  specific to English.
-  - Yes: Good rulesets
-  - No: So what is a good ruleset?
-
-- Avoid overly formal language. Write as though you're explaining the
-  concept to someone who is curious about tech, but doesn't know the details.
-
-## Formatting
-
-### File type
-
-For readability, wrap lines at 80 characters. Long links or code snippets
-may be longer, but should start on a new line.
-
-Note: Where possible, use Markdown instead of HTML in your files. Follow the
-[GitHub Markdown Syntax Guide](https://guides.github.com/features/mastering-markdown/#syntax)
-for recommended Markdown style.
-
-### Links
-
-- Use descriptive link text instead of "here" or "below". This practice
-  makes it easier to scan a doc and is better for screen readers.
-  - Yes: For more details, see [Installing Bazel].
-  - No: For more details, see [here].
-
-- End the sentence with the link, if possible.
-  - Yes: For more details, see [link].
-  - No: See [link] for more information.
-
-### Lists
-
-- Use an ordered list to describe how to accomplish a task with steps
-- Use an unordered list to list things that aren't task based. (There should
-  still be an order of sorts, such as alphabetical, importance, etc.)
-- Write with parallel structure. For example:
-  1. Make all the list items sentences.
-  1. Start with verbs that are the same tense.
-  1. Use an ordered list if there are steps to follow.
-
-### Placeholders
-
-- Use angle brackets to denote a variable that users should change.
-  In Markdown, escape the angle brackets with a backslash: `\<var\>`.
-  - Yes: `bazel help <command>`: Prints
-    help and options for `<command>`
-  - No: bazel help _command_: Prints help
-    and options for "command"
-
-- Especially for complicated code samples, use placeholders that make sense
-  in context.
-
-### Table of contents
-
-Use the auto-generated TOC supported by the site. Don't add a manual TOC.
-
-## Code
-
-Code samples are developers' best friends. You probably know how to write these
-already, but here are a few tips.
-
-If you're referencing a small snippet of code, you can embed it in a sentence.
-If you want the reader to use the code, such as copying a command, use a code
-block.
-
-### Code blocks
-
-- Keep it short. Eliminate all redundant or unnecessary text from a code
-  sample.
-- In Markdown, specify the type of code block by adding the sample's language
-  after the opening backticks (for example, begin the block with
-  `` ```shell ``).
-- Separate commands and output into different code blocks.
-
-### Inline code formatting
-
-- Use code style for filenames, directories, paths, and small bits of code.
-- Use inline code styling instead of _italics_, "quotes," or **bolding**.
-  - Yes: `bazel help <command>`: Prints
-    help and options for `<command>`
-  - No: bazel help _command_: Prints help
-    and options for "command"
diff --git a/8.4.2/contribute/docs.mdx b/8.4.2/contribute/docs.mdx
deleted file mode 100644
index cc240cc..0000000
--- a/8.4.2/contribute/docs.mdx
+++ /dev/null
@@ -1,43 +0,0 @@
----
-title: 'Contribute to Bazel documentation'
----
-
-
-Thank you for contributing to Bazel's documentation! There are a few ways to
-help create better docs for our community.
-
-## Documentation types
-
-This site includes a few types of content.
-
- - *Narrative documentation*, which is written by technical writers and
-   engineers. Most of this site is narrative documentation that covers
-   conceptual and task-based guides.
- - *Reference documentation*, which is documentation generated from code
-   comments. You can't make changes to the reference doc pages directly, but
-   instead need to change their source.
-
-## Documentation infrastructure
-
-Bazel documentation is served from Google and the source files are mirrored in
-Bazel's GitHub repository. You can make changes to the source files in GitHub.
-Once approved, you can merge the changes and a Bazel maintainer will update the
-website source to publish your updates.
-
-
-## Small changes
-
-You can approach small changes, such as fixing errors or typos, in a couple of
-ways.
-
- - **Pull request**. You can create a pull request in GitHub with the
-   [web-based editor](https://docs.github.com/repositories/working-with-files/managing-files/editing-files) or on a branch.
- - **Bug**. You can file a bug with details and suggested changes and the Bazel
-   documentation owners will make the update.
-
-## Large changes
-
-If you want to make substantial changes to existing documentation or propose
-new documentation, you can either create a pull request or start with a Google
-doc and contact the Bazel Owners to collaborate.
diff --git a/8.4.2/contribute/index.mdx b/8.4.2/contribute/index.mdx
deleted file mode 100644
index ee66772..0000000
--- a/8.4.2/contribute/index.mdx
+++ /dev/null
@@ -1,82 +0,0 @@
----
-title: 'Contributing to Bazel'
----
-
-
-There are many ways to help the Bazel project and ecosystem.
-
-## Provide feedback
-
-As you use Bazel, you may find things that can be improved.
-You can help by [reporting issues](http://github.com/bazelbuild/bazel/issues)
-when:
-
- - Bazel crashes or you encounter a bug that can [only be resolved using `bazel
-   clean`](/run/build#correct-incremental-rebuilds).
- - The documentation is incomplete or unclear. You can also report issues
-   from the page you are viewing by using the "Create issue"
-   link at the top right corner of the page.
- - An error message could be improved.
-
-## Participate in the community
-
-You can engage with the Bazel community by:
-
- - Answering questions [on Stack Overflow](
-   https://stackoverflow.com/questions/tagged/bazel).
- - Helping other users [on Slack](https://slack.bazel.build).
- - Improving documentation or [contributing examples](
-   https://github.com/bazelbuild/examples).
- - Sharing your experience or your tips, for example, on a blog or social media.
-
-## Contribute code
-
-Bazel is a large project and making a change to the Bazel source code
-can be difficult.
-
-You can contribute to the Bazel ecosystem by:
-
- - Helping rules maintainers by contributing pull requests.
- - Creating new rules and open-sourcing them.
- - Contributing to Bazel-related tools, for example, migration tools.
- - Improving Bazel integration with other IDEs and tools.
-
-Before making a change, [create a GitHub
-issue](http://github.com/bazelbuild/bazel/issues)
-or email [bazel-discuss@](mailto:bazel-discuss@googlegroups.com).
-
-The most helpful contributions fix bugs or add features (as opposed
-to stylistic, refactoring, or "cleanup" changes). Your change should
-include tests and documentation, keeping in mind backward compatibility,
-portability, and the impact on memory usage and performance.
-
-To learn how to submit a change, see the
-[patch acceptance process](/contribute/patch-acceptance).
-
-## Bazel's code description
-
-Bazel has a large codebase with code in multiple locations. See the
-[codebase guide](/contribute/codebase) for more details.
-
-Bazel is organized as follows:
-
-* Client code is in `src/main/cpp` and provides the command-line interface.
-* Protocol buffers are in `src/main/protobuf`.
-* Server code is in `src/main/java` and `src/test/java`.
-  * Core code, which is mostly composed of [Skyframe](/reference/skyframe)
-    and some utilities.
-  * Built-in rules are in `com.google.devtools.build.lib.rules` and in
-    `com.google.devtools.build.lib.bazel.rules`. You might want to read about
-    the [Challenges of Writing Rules](/rules/challenges) first.
-* Java native interfaces are in `src/main/native`.
-* Various tooling for language support is described in the list in the
-  [compiling Bazel](/install/compile-source) section.
-
-
-### Searching Bazel's source code
-
-To quickly search through Bazel's source code, use
-[Bazel Code Search](https://source.bazel.build/). You can navigate Bazel's
-repositories, branches, and files. You can also view history, diffs, and blame
-information. To learn more, see the
-[Bazel Code Search User Guide](/contribute/search).
diff --git a/8.4.2/contribute/maintainers-guide.mdx b/8.4.2/contribute/maintainers-guide.mdx
deleted file mode 100644
index d5edf45..0000000
--- a/8.4.2/contribute/maintainers-guide.mdx
+++ /dev/null
@@ -1,213 +0,0 @@
----
-title: 'Guide for Bazel Maintainers'
----
-
-
-This is a guide for the maintainers of the Bazel open source project.
-
-If you are looking to contribute to Bazel, please read [Contributing to
-Bazel](/contribute) instead.
-
-The objectives of this page are to:
-
-1. Serve as the maintainers' source of truth for the project’s contribution
-   process.
-1. Set expectations between the community contributors and the project
-   maintainers.
-
-Bazel's [core group of contributors](/contribute/policy) has dedicated
-subteams to manage aspects of the open source project. These are:
-
-* **Release Process**: Manage Bazel's release process.
-* **Green Team**: Grow a healthy ecosystem of rules and tools.
-* **Developer Experience Gardeners**: Encourage external contributions, review
-  issues and pull requests, and make our development workflow more open.
-
-## Releases
-
-* [Release Playbook](https://github.com/bazelbuild/continuous-integration/blob/master/docs/release-playbook.md)
-* [Testing local changes with downstream projects](https://github.com/bazelbuild/continuous-integration/blob/master/docs/downstream-testing.md)
-
-## Continuous Integration
-
-Read the Green team's guide to Bazel's CI infrastructure on the
-[bazelbuild/continuous-integration](https://github.com/bazelbuild/continuous-integration/blob/master/buildkite/README.md)
-repository.
-
-## Lifecycle of an Issue
-
-1. A user creates an issue by choosing one of the
-   [issue templates](https://github.com/bazelbuild/bazel/issues/new/choose)
-   and it enters the pool of [unreviewed open
-   issues](https://github.com/bazelbuild/bazel/issues?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+-label%3Auntriaged+-label%3Ap2+-label%3Ap1+-label%3Ap3+-label%3Ap4+-label%3Ateam-Starlark+-label%3Ateam-Rules-CPP+-label%3Ateam-Rules-Java+-label%3Ateam-XProduct+-label%3Ateam-Android+-label%3Ateam-Apple+-label%3Ateam-Configurability++-label%3Ateam-Performance+-label%3Ateam-Rules-Server+-label%3Ateam-Core+-label%3Ateam-Rules-Python+-label%3Ateam-Remote-Exec+-label%3Ateam-Local-Exec+-label%3Ateam-Bazel).
-1. A member on the Developer Experience (DevEx) subteam rotation reviews the
-   issue.
-   1. If the issue is **not a bug** or a **feature request**, the DevEx member
-      will usually close the issue and redirect the user to
-      [StackOverflow](https://stackoverflow.com/questions/tagged/bazel) and
-      [bazel-discuss](https://groups.google.com/forum/#!forum/bazel-discuss) for
-      higher visibility on the question.
-   1. If the issue belongs in one of the rules repositories owned by the
-      community, like [rules_apple](https://github.com/bazelbuild/rules_apple),
-      the DevEx member will [transfer this issue](https://docs.github.com/en/free-pro-team@latest/github/managing-your-work-on-github/transferring-an-issue-to-another-repository)
-      to the correct repository.
-   1. If the issue is vague or has missing information, the DevEx member will
-      assign the issue back to the user to request more information before
-      continuing. This usually occurs when the user does not choose the right
-      [issue template](https://github.com/bazelbuild/bazel/issues/new/choose)
-      or provides incomplete information.
-1. After reviewing the issue, the DevEx member decides if the issue requires
-   immediate attention. If it does, they will assign the **P0**
-   [priority](#priority) label and an owner from the list of team leads.
-1. The DevEx member assigns the `untriaged` label and exactly one [team
-   label](#team-labels) for routing.
-1. The DevEx member also assigns exactly one `type:` label, such as `type: bug`
-   or `type: feature request`, according to the type of the issue.
-1. For platform-specific issues, the DevEx member assigns one `platform:` label,
-   such as `platform:apple` for Mac-specific issues.
-1. If the issue is low priority and can be worked on by a new community
-   contributor, the DevEx member assigns the `good first issue` label.
-At this stage, the issue enters the pool of [untriaged open
-issues](https://github.com/bazelbuild/bazel/issues?q=is%3Aissue+is%3Aopen+label%3Auntriaged).
-
-Each Bazel subteam will triage all issues under labels they own, preferably on a
-weekly basis. The subteam will review and evaluate the issue and provide a
-resolution, if possible. If you are an owner of a team label, see [this section
-](#label-own) for more information.
-
-When an issue is resolved, it can be closed.
-
-## Lifecycle of a Pull Request
-
-1. A user creates a pull request.
-1. If you are a member of a Bazel team and are sending a PR against your own
-   area, you are responsible for assigning your team label and finding the best
-   reviewer.
-1. Otherwise, during daily triage, a DevEx member assigns one
-   [team label](#team-labels) and the team's technical lead (TL) for routing.
-   1. The TL may optionally assign someone else to review the PR.
-1. The assigned reviewer reviews the PR and works with the author until it is
-   approved or dropped.
-1. If approved, the reviewer **imports** the PR's commit(s) into Google's
-   internal version control system for further tests. As Bazel is the same build
-   system used internally at Google, we need to test all PR commits against the
-   internal test suite. This is the reason why we do not merge PRs directly.
-1. If the imported commit passes all internal tests, the commit will be squashed
-   and exported back out to GitHub.
-1. When the commit merges into master, GitHub automatically closes the PR.
-
-
-## My team owns a label. What should I do?
-
-Subteams need to triage all issues in the [labels they own](#team-labels),
-preferably on a weekly basis.
-
-### Issues
-
-1. Filter the list of issues by your team label **and** the `untriaged` label.
-1. Review the issue.
-1. Identify a [priority level](#priority) and assign the label.
-   1. The issue may have already been prioritized by the DevEx subteam if it's a
-      P0. Re-prioritize if needed.
-   1. Each issue needs to have exactly one [priority label](#priority). If an
-      issue is either P0 or P1, we assume that it is actively being worked on.
-1. Remove the `untriaged` label.
-
-Note that you need to be in the [bazelbuild
-organization](https://github.com/bazelbuild) to be able to add or remove labels.
-
-### Pull Requests
-
-1. Filter the list of pull requests by your team label.
-1. Review open pull requests.
-   1. **Optional**: If you are assigned for the review but are not the right fit
-      for it, re-assign the appropriate reviewer to perform a code review.
-1. Work with the pull request creator to complete a code review.
-1. Approve the PR.
-1. Ensure that all tests pass.
-1. Import the patch to the internal version control system and run the internal
-   presubmits.
-1. Submit the internal patch. If the patch submits and exports successfully, the
-   PR will be closed automatically by GitHub.
-
-## Priority
-
-The following definitions for priority will be used by the maintainers to triage
-issues.
-
-* [**P0**](https://github.com/bazelbuild/bazel/labels/P0) - Major broken
-  functionality that causes a Bazel release (minus release candidates) to be
-  unusable, or a downed service that severely impacts development of the Bazel
-  project. This includes regressions introduced in a new release that blocks a
-  significant number of users, or an incompatible breaking change that was not
-  compliant to the [Breaking
-  Change](https://docs.google.com/document/d/1q5GGRxKrF_mnwtaPKI487P8OdDRh2nN7jX6U-FXnHL0/edit?pli=1#heading=h.ceof6vpkb3ik)
-  policy. No practical workaround exists.
-* [**P1**](https://github.com/bazelbuild/bazel/labels/P1) - Critical defect or
-  feature which should be addressed in the next release, or a serious issue that
-  impacts many users (including the development of the Bazel project), but a
-  practical workaround exists. Typically does not require immediate action. In
-  high demand and planned in the current quarter's roadmap.
-* [**P2**](https://github.com/bazelbuild/bazel/labels/P2) - Defect or feature
-  that should be addressed but that we are not currently working on. A moderate
-  live issue in a released Bazel version that is inconvenient for users, that
-  needs to be addressed in a future release, and/or for which an easy
-  workaround exists.
-* [**P3**](https://github.com/bazelbuild/bazel/labels/P3) - Desirable minor bug
-  fix or enhancement with small impact. Not prioritized into Bazel roadmaps or
-  any imminent release, however community contributions are encouraged.
-* [**P4**](https://github.com/bazelbuild/bazel/labels/P4) - Low priority defect
-  or feature request that is unlikely to get closed. Can also be kept open for a
-  potential re-prioritization if more users are impacted.
-* [**ice-box**](https://github.com/bazelbuild/bazel/issues?q=label%3Aice-box+is%3Aclosed)
-  - Issues that we currently don't have the time to deal with, nor the time to
-  accept contributions for. We will close these issues to indicate that
-  nobody is working on them, but will continue to monitor their validity over
-  time and revive them if enough people are impacted and if we happen to have
-  resources to deal with them. As always, feel free to comment or add reactions
-  to these issues even when closed.
-
-## Team labels
-
-* [`team-Android`](https://github.com/bazelbuild/bazel/labels/team-Android): Issues for Android team
-  * Contact: [ahumesky](https://github.com/ahumesky)
-* [`team-Bazel`](https://github.com/bazelbuild/bazel/labels/team-Bazel): General Bazel product/strategy issues
-  * Contact: [meisterT](https://github.com/meisterT)
-* [`team-CLI`](https://github.com/bazelbuild/bazel/labels/team-CLI): Console UI
-  * Contact: [meisterT](https://github.com/meisterT)
-* [`team-Configurability`](https://github.com/bazelbuild/bazel/labels/team-Configurability): Issues for Configurability team. Includes: Core build configuration and transition system. Does *not* include: Changes to new or existing flags
-  * Contact: [gregestren](https://github.com/gregestren)
-* [`team-Core`](https://github.com/bazelbuild/bazel/labels/team-Core): Skyframe, bazel query, BEP, options parsing, bazelrc
-  * Contact: [haxorz](https://github.com/haxorz)
-* [`team-Documentation`](https://github.com/bazelbuild/bazel/labels/team-Documentation): Issues for Documentation team
-* [`team-ExternalDeps`](https://github.com/bazelbuild/bazel/labels/team-ExternalDeps): External dependency handling, Bzlmod, remote repositories, WORKSPACE file
-  * Contact: [meteorcloudy](https://github.com/meteorcloudy)
-* [`team-Loading-API`](https://github.com/bazelbuild/bazel/labels/team-Loading-API): BUILD file and macro processing: labels, package(), visibility, glob
-  * Contact: [brandjon](https://github.com/brandjon)
-* [`team-Local-Exec`](https://github.com/bazelbuild/bazel/labels/team-Local-Exec): Issues for Execution (Local) team
-  * Contact: [meisterT](https://github.com/meisterT)
-* [`team-OSS`](https://github.com/bazelbuild/bazel/labels/team-OSS): Issues for Bazel OSS team: installation, release process, Bazel packaging, website, docs infrastructure
-  * Contact: [meteorcloudy](https://github.com/meteorcloudy)
-* [`team-Performance`](https://github.com/bazelbuild/bazel/labels/team-Performance): Issues for Bazel Performance team
-  * Contact: [meisterT](https://github.com/meisterT)
-* [`team-Remote-Exec`](https://github.com/bazelbuild/bazel/labels/team-Remote-Exec): Issues for Execution (Remote) team
-  * Contact: [coeuvre](https://github.com/coeuvre)
-* [`team-Rules-API`](https://github.com/bazelbuild/bazel/labels/team-Rules-API): API for writing rules/aspects: providers, runfiles, actions, artifacts
-  * Contact: [comius](https://github.com/comius)
-* [`team-Rules-CPP`](https://github.com/bazelbuild/bazel/labels/team-Rules-CPP) / [`team-Rules-ObjC`](https://github.com/bazelbuild/bazel/labels/team-Rules-ObjC): Issues for C++/Objective-C rules, including native Apple rule logic
-  * Contact: [buildbreaker2021](https://github.com/buildbreaker2021)
-* [`team-Rules-Java`](https://github.com/bazelbuild/bazel/labels/team-Rules-Java): Issues for Java rules
-  * Contact: [hvadehra](https://github.com/hvadehra)
-* [`team-Rules-Python`](https://github.com/bazelbuild/bazel/labels/team-Rules-Python): Issues for the native Python rules
-  * Contact: [rickeylev](https://github.com/rickeylev)
-* [`team-Rules-Server`](https://github.com/bazelbuild/bazel/labels/team-Rules-Server): Issues for server-side rules included with Bazel
-  * Contact: [comius](https://github.com/comius)
-* [`team-Starlark-Integration`](https://github.com/bazelbuild/bazel/labels/team-Starlark-Integration): Non-API Bazel + Starlark integration. Includes: how Bazel triggers the Starlark interpreter, Stardoc, builtins injection, character encoding.
-  Does *not* include: BUILD or .bzl language issues.
-  * Contact: [brandjon](https://github.com/brandjon)
-* [`team-Starlark-Interpreter`](https://github.com/bazelbuild/bazel/labels/team-Starlark-Interpreter): Issues for the Starlark interpreter (anything in [net.starlark.java](https://github.com/bazelbuild/bazel/tree/master/src/main/java/net/starlark/java)). BUILD and .bzl API issues (which represent Bazel's *integration* with Starlark) go in `team-Build-Language`.
-  * Contact: [brandjon](https://github.com/brandjon)
-
-For new issues, we deprecated the `category: *` labels in favor of the team
-labels.
-
-See the full list of labels [here](https://github.com/bazelbuild/bazel/labels).
diff --git a/8.4.2/contribute/naming.mdx b/8.4.2/contribute/naming.mdx
deleted file mode 100644
index 144b08a..0000000
--- a/8.4.2/contribute/naming.mdx
+++ /dev/null
@@ -1,73 +0,0 @@
----
-title: 'Naming a Bazel related project'
----
-
-
-First, thank you for contributing to the Bazel ecosystem! Please reach out to
-the Bazel community on the
-[bazel-discuss mailing list](https://groups.google.com/forum/#!forum/bazel-discuss
-) to share your project and its suggested name.
-
-If you are building a Bazel related tool or sharing your Starlark rules,
-we recommend following these guidelines for the name of your project:
-
-## Naming Starlark rules
-
-See [Deploying new Starlark rules](/rules/deploying)
-in the docs.
-
-## Naming other Bazel related tools
-
-This section applies if you are building a tool to enrich the Bazel ecosystem.
-For example, a new IDE plugin or a new build system migrator.
-
-Picking a good name for your tool can be hard. If we’re not careful and use too
-many codenames, the Bazel ecosystem could become very difficult to understand
-for newcomers.
-
-Follow these guidelines for naming Bazel tools:
-
-1. Prefer **not introducing a new brand name**: "*Bazel*" is already a new brand
-for our users, so we should avoid confusing them with too many new names.
-
-2. Prefer **using a name that includes "Bazel"**: This helps to express that it
-is a Bazel related tool, and it also helps people find it with a search engine.
-
-3. Prefer **using names that are descriptive about what the tool is doing**:
-Ideally, the name should not need a subtitle for users to have a first good
-guess at what the tool does. Using English words separated by spaces is a good
-way to achieve this.
-
-4. **It is not a requirement to use a floral or food theme**: Bazel evokes
-[basil](https://en.wikipedia.org/wiki/Basil), the plant. You do not need to
-look for a name that is a plant, food or that relates to "basil."
-
-5. **If your tool relates to another third party brand, use it only as a
-descriptor**: For example, use "Bazel migrator for CMake" instead of
-"CMake Bazel migrator".
-
-These guidelines also apply to the GitHub repository URL. Reading the repository
-URL should help people understand what the tool does. Of course, the repository
-name can be shorter and must use lowercase letters and dashes instead of spaces.
-
-
-Examples of good names:
-
-* *Bazel for Eclipse*: Users will understand that if they want to use Bazel
-  with Eclipse, this is where they should be looking. It uses a third party brand
-  as a descriptor.
-* *Bazel buildfarm*: A "buildfarm" is a
-  [compile farm](https://en.wikipedia.org/wiki/Compile_farm). Users
-  will understand that this project relates to building on servers.
-
-Examples of names to avoid:
-
-* *Ocimum*: The [scientific name of basil](https://en.wikipedia.org/wiki/Ocimum)
-  does not relate enough to the Bazel project.
-* *Bazelizer*: The tool behind this name could do a lot of things; this name is
-  not descriptive enough.
-
-Note that these recommendations are aligned with the
-[guidelines](https://opensource.google.com/docs/releasing/preparing/#name)
-Google uses when open sourcing a project.
diff --git a/8.4.2/contribute/patch-acceptance.mdx b/8.4.2/contribute/patch-acceptance.mdx
deleted file mode 100644
index 87376af..0000000
--- a/8.4.2/contribute/patch-acceptance.mdx
+++ /dev/null
@@ -1,52 +0,0 @@
----
-title: 'Patch Acceptance Process'
----
-
-
-This page outlines how contributors can propose and make changes to the Bazel
-code base.
-
-1. Read the [Bazel Contribution policy](/contribute/policy).
-1. Create a [GitHub issue](https://github.com/bazelbuild/bazel/) to
-   discuss your plan and design. Pull requests that change or add behavior
-   need a corresponding issue for tracking.
-1. If you're proposing significant changes, write a
-   [design document](/contribute/design-documents).
-1. Ensure you've signed a [Contributor License
-   Agreement](https://cla.developers.google.com).
-1. Prepare a git commit that implements the feature. Don't forget to add tests
-   and update the documentation. If your change has user-visible effects, please
-   [add release notes](/contribute/release-notes). If it is an incompatible change,
-   read the [guide for rolling out breaking changes](/contribute/breaking-changes).
-1. Create a pull request on
-   [GitHub](https://github.com/bazelbuild/bazel/pulls). If you're new to GitHub,
-   read [about pull
-   requests](https://help.github.com/articles/about-pull-requests/). Note that
-   we restrict permissions to create branches on the main Bazel repository, so
-   you will need to push your commit to [your own fork of the
-   repository](https://help.github.com/articles/working-with-forks/).
-1. A Bazel maintainer should assign you a reviewer within two business days
-   (excluding holidays in the USA and Germany). If you aren't assigned a
-   reviewer in that time, you can request one by emailing
-   [bazel-discuss@googlegroups.com](mailto:bazel-discuss@googlegroups.com).
-1. Work with the reviewer to complete a code review. For each change, create a
-   new commit and push it to make changes to your pull request. If the review
-   takes too long (for instance, if the reviewer is unresponsive), send an email to
-   [bazel-discuss@googlegroups.com](mailto:bazel-discuss@googlegroups.com).
-1. After your review is complete, a Bazel maintainer applies your patch to
-   Google's internal version control system.
-
-   This triggers internal presubmit checks
-   that may suggest more changes. If you haven't expressed a preference, the
-   maintainer submitting your change adds "trivial" changes (such as
-   [linting](https://en.wikipedia.org/wiki/Lint_(software))) that don't affect
-   design. If deeper changes are required or you'd prefer to apply
-   changes directly, you and the reviewer should communicate preferences
-   clearly in review comments.
-
-   After internal submission, the patch is exported as a Git commit,
-   at which point the GitHub pull request is closed. All final changes
-   are attributed to you.
diff --git a/8.4.2/contribute/policy.mdx b/8.4.2/contribute/policy.mdx
deleted file mode 100644
index 1bf0029..0000000
--- a/8.4.2/contribute/policy.mdx
+++ /dev/null
@@ -1,78 +0,0 @@
----
-translation: human
-page_type: lcat
-title: 'Contribution policy'
----
-
-
-This page covers Bazel's governance model and contribution policy.
-
-## Governance model
-
-The [Bazel project](https://github.com/bazelbuild) is led and managed by Google
-and has a large community of contributors outside of Google. Some Bazel
-components (such as specific rules repositories under the
-[bazelbuild](https://github.com/bazelbuild) organization) are led,
-maintained, and managed by members of the community. The Google Bazel team
-reviews suggestions to add community-owned repositories (such as rules) to the
-[bazelbuild](https://github.com/bazelbuild) GitHub organization.
-
-### Contributor roles
-
-Here are the roles in the Bazel project, including their
-responsibilities:
-
-* **Owners**: The Google Bazel team. Owners are responsible for:
-  * Strategy, maintenance, and leadership of the Bazel project.
-  * Building and maintaining Bazel's core functionality.
-  * Appointing Maintainers and approving new repositories.
-* **Maintainers**: The Google Bazel team and designated GitHub users.
-  Maintainers are responsible for:
-  * Building and maintaining the primary functionality of their repository.
-  * Reviewing and approving contributions to areas of the Bazel code base.
-  * Supporting users and contributors with timely and transparent issue
-    management, PR review, and documentation.
-  * Releasing, testing and collaborating with Bazel Owners.
-* **Contributors**: All users who contribute code or documentation to the
-  Bazel project.
-  * Creating well-written PRs to contribute to Bazel's codebase and
-    documentation.
-  * Using standard channels, such as GitHub Issues, to propose changes and
-    report issues.
-
-### Becoming a Maintainer
-
-Bazel Owners may appoint Maintainers to lead well-defined areas of code, such as
-rule sets. Contributors with a record of consistent, responsible past
-contributions who are planning major contributions in the future could be
-considered to become qualified Maintainers.
-
-## Contribution policy
-
-The Bazel project accepts contributions from external contributors. Here are the
-contribution policies for Google-managed and Community-managed areas of code.
-
-* **Licensing**. All Maintainers and Contributors must sign
-  [Google’s Contributor License Agreement](https://cla.developers.google.com/clas).
-* **Contributions**. Owners and Maintainers should make every effort to accept
-  worthwhile contributions. All contributions must be:
-  * Well written and well tested
-  * Discussed and approved by the Maintainers of the relevant area of code.
-    Discussions and approvals happen on GitHub Issues and in GitHub PRs.
-    Larger contributions require a
-    [design review](/contribute/design-documents).
-  * Added to Bazel's Continuous Integration system if not already present.
-  * Supportable and aligned with Bazel product direction
-* **Code review**. All changes in all `bazelbuild` repositories require
-  review:
-  * All PRs must be approved by an Owner or Maintainer.
-  * Only Owners and Maintainers can merge PRs.
-* **Compatibility**. Owners may need to reject or request modifications to PRs
-  in the unlikely event that the change requires substantial modifications to
-  internal Google systems.
-* **Documentation**.
-  Where relevant, feature contributions should include
-  documentation updates.
-
-For more details on contributing to Bazel, see our
-[contribution guidelines](/contribute/).
diff --git a/8.4.2/contribute/release-notes.mdx b/8.4.2/contribute/release-notes.mdx
deleted file mode 100644
index 83e1d75..0000000
--- a/8.4.2/contribute/release-notes.mdx
+++ /dev/null
@@ -1,78 +0,0 @@
----
-title: 'Writing release notes'
----
-
-
-This document is targeted at Bazel contributors.
-
-Commit descriptions in Bazel include a `RELNOTES:` tag followed by a release
-note. This is used by the Bazel team to track changes in each release and write
-the release announcement.
-
-## Overview
-
-* Is your change a bugfix? In that case, you don't need a release note. Please
-  include a reference to the GitHub issue.
-
-* If the change adds / removes / changes Bazel in a user-visible way, then it
-  may be advantageous to mention it.
-
-If the change is significant, follow the [design document
-policy](/contribute/design-documents) first.
-
-## Guidelines
-
-The release notes will be read by our users, so they should be short (ideally
-one sentence), avoid jargon (Bazel-internal terminology), and focus on what the
-change is about.
-
-* Include a link to the relevant documentation. Almost any release note should
-  contain a link. If the description mentions a flag, a feature, or a command
-  name, users will probably want to know more about it.
-
-* Use backquotes around code, symbols, flags, or any word containing an
-  underscore.
-
-* Do not just copy and paste bug descriptions. They are often cryptic, only
-  make sense to us, and leave the user scratching their head. Release notes are
-  meant to explain what has changed and why in user-understandable language.
-
-* Always use present tense and the format "Bazel now supports Y" or "X now does
-  Z." We don't want our release notes to sound like bug entries. All release
-  note entries should be informative and use a consistent style and language.
-
-* If something has been deprecated or removed, use "X has been deprecated" or "X
-  has been removed." Not "is removed" or "was removed."
-
-* If Bazel now does something differently, use "X now $newBehavior instead of
-  $oldBehavior" in present tense. This lets the user know in detail what to
-  expect when they use the new release.
-
-* If Bazel now supports or no longer supports something, use "Bazel now supports
-  / no longer supports X".
-
-* Explain why something has been removed / deprecated / changed. One sentence is
-  enough but we want the user to be able to evaluate impact on their builds.
-
-* Do NOT make any promises about future functionality. Avoid "this flag will be
-  removed" or "this will be changed." It introduces uncertainty. The first thing
-  the user will wonder is "when?" and we don't want them to start worrying about
-  their current builds breaking at some unknown time.
-
-## Process
-
-As part of the [release
-process](https://github.com/bazelbuild/continuous-integration/blob/master/docs/release-playbook.md),
-we collect the `RELNOTES` tags of every commit. We copy everything in a [Google
-Doc](https://docs.google.com/document/d/1wDvulLlj4NAlPZamdlEVFORks3YXJonCjyuQMUQEmB0/edit)
-where we review, edit, and organize the notes.
-
-The release manager sends an email to the
-[bazel-dev](https://groups.google.com/forum/#!forum/bazel-dev) mailing list.
-Bazel contributors are invited to contribute to the document and make sure
-their changes are correctly reflected in the announcement.
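-
-As a reference when checking your own changes, a hypothetical commit
-description carrying a release note might look like the following (the flag
-name is invented for this example):
-
-```
-Add local fallback to the foo strategy.
-
-RELNOTES: Bazel now supports falling back to local execution for the `foo`
-strategy via `--experimental_foo_fallback`. See the execution strategies
-documentation for details.
-```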
-
-Later, the announcement will be submitted to the [Bazel
-blog](https://blog.bazel.build/), using the [bazel-blog
-repository](https://github.com/bazelbuild/bazel-blog/tree/master/_posts).
diff --git a/8.4.2/contribute/statemachine-guide.mdx b/8.4.2/contribute/statemachine-guide.mdx
deleted file mode 100644
index e98a96e..0000000
--- a/8.4.2/contribute/statemachine-guide.mdx
+++ /dev/null
@@ -1,1236 +0,0 @@
----
-title: 'A Guide to Skyframe `StateMachine`s'
----
-
-
-## Overview
-
-A Skyframe `StateMachine` is a *deconstructed* function-object that resides on
-the heap. It supports flexible evaluation without redundancy[^1] when
-required values are not immediately available but computed asynchronously. The
-`StateMachine` cannot tie up a thread resource while waiting, but instead has to
-be suspended and resumed. The deconstruction thus exposes explicit re-entry
-points so that prior computations can be skipped.
-
-`StateMachine`s can be used to express sequences, branching, structured logical
-concurrency and are tailored specifically for Skyframe interaction.
-`StateMachine`s can be composed into larger `StateMachine`s and share
-sub-`StateMachine`s. Concurrency is always hierarchical by construction and
-purely logical. Every concurrent subtask runs in the single shared parent
-SkyFunction thread.
-
-## Introduction
-
-This section briefly motivates and introduces `StateMachine`s, found in the
-[`java.com.google.devtools.build.skyframe.state`](https://github.com/bazelbuild/bazel/tree/master/src/main/java/com/google/devtools/build/skyframe/state)
-package.
-
-### A brief introduction to Skyframe restarts
-
-Skyframe is a framework that performs parallel evaluation of dependency graphs.
-Each node in the graph corresponds with the evaluation of a SkyFunction with a
-SkyKey specifying its parameters and SkyValue specifying its result. The
-computational model is such that a SkyFunction may look up SkyValues by SkyKey,
-triggering recursive, parallel evaluation of additional SkyFunctions. Instead of
-blocking, which would tie up a thread, when a requested SkyValue is not yet
-ready because some subgraph of computation is incomplete, the requesting
-SkyFunction observes a `null` `getValue` response and should return `null`
-instead of a SkyValue, signaling that it is incomplete due to missing inputs.
-Skyframe *restarts* the SkyFunctions when all previously requested SkyValues
-become available.
-
-Before the introduction of `SkyKeyComputeState`, the traditional way of handling
-a restart was to fully rerun the computation. Although this has quadratic
-complexity, functions written this way eventually complete because on each
-rerun, fewer lookups return `null`. With `SkyKeyComputeState` it is possible to
-associate hand-specified check-point data with a SkyFunction, saving significant
-recomputation.
-
-`StateMachine`s are objects that live inside `SkyKeyComputeState` and eliminate
-virtually all recomputation when a SkyFunction restarts (assuming that
-`SkyKeyComputeState` does not fall out of cache) by exposing suspend and resume
-execution hooks.
-
-### Stateful computations inside `SkyKeyComputeState`
-
-From an object-oriented design standpoint, it makes sense to consider storing
-computational objects inside `SkyKeyComputeState` instead of pure data values.
-In *Java*, the bare minimum description of a behavior-carrying object is a
-*functional interface* and it turns out to be sufficient. A `StateMachine` has
-the following, curiously recursive, definition[^2].
-
-```
-@FunctionalInterface
-public interface StateMachine {
-  StateMachine step(Tasks tasks) throws InterruptedException;
-}
-```
-
-The `Tasks` interface is analogous to `SkyFunction.Environment` but it is
-designed for asynchrony and adds support for logically concurrent subtasks[^3].
-
-The return value of `step` is another `StateMachine`, allowing the specification
-of a sequence of steps, inductively. `step` returns `DONE` when the
-`StateMachine` is done. For example:
-
-```
-class HelloWorld implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    System.out.println("hello");
-    return this::step2;  // The next step is HelloWorld.step2.
-  }
-
-  private StateMachine step2(Tasks tasks) {
-    System.out.println("world");
-    // DONE is a special value defined in the `StateMachine` interface signaling
-    // that the computation is done.
-    return DONE;
-  }
-}
-```
-
-describes a `StateMachine` with the following output.
-
-```
-hello
-world
-```
-
-Note that the method reference `this::step2` is also a `StateMachine` due to
-`step2` satisfying `StateMachine`'s functional interface definition. Method
-references are the most common way to specify the next state in a
-`StateMachine`.
-
-![Suspending and resuming](/contribute/images/suspend-resume.svg)
-
-Intuitively, breaking a computation down into `StateMachine` steps, instead of a
-monolithic function, provides the hooks needed to *suspend* and *resume* a
-computation. When `StateMachine.step` returns, there is an explicit *suspension*
-point. The continuation specified by the returned `StateMachine` value is an
-explicit *resume* point. Recomputation can thus be avoided because the
-computation can be picked up exactly where it left off.
-
-### Callbacks, continuations and asynchronous computation
-
-In technical terms, a `StateMachine` serves as a *continuation*, determining the
-subsequent computation to be executed. Instead of blocking, a `StateMachine` can
-voluntarily *suspend* by returning from the `step` function, which transfers
-control back to a [`Driver`](#drivers-and-bridging) instance. The `Driver` can
-then switch to a ready `StateMachine` or relinquish control back to Skyframe.
-
-Traditionally, *callbacks* and *continuations* are conflated into one concept.
-However, `StateMachine`s maintain a distinction between the two.
-
-* *Callback* - describes where to store the result of an asynchronous
-  computation.
-* *Continuation* - specifies the next execution state.
-
-Callbacks are required when invoking an asynchronous operation, which means that
-the actual operation doesn't occur immediately upon calling the method, as in
-the case of a SkyValue lookup. Callbacks should be kept as simple as possible.
-
-Caution: A common pitfall of callbacks is that the asynchronous computation must
-ensure the callback is called by the end of every reachable path. It's possible
-to overlook some branches and the compiler doesn't give warnings about this.
-
-*Continuations* are the `StateMachine` return values of `StateMachine`s and
-encapsulate the complex execution that follows once all asynchronous
-computations resolve. This structured approach helps to keep the complexity of
-callbacks manageable.
-
-## Tasks
-
-The `Tasks` interface provides `StateMachine`s with an API to look up SkyValues
-by SkyKey and to schedule concurrent subtasks.
-
-```
-interface Tasks {
-  void enqueue(StateMachine subtask);
-
-  void lookUp(SkyKey key, Consumer<SkyValue> sink);
-
-  <E extends Exception> void lookUp(
-      SkyKey key, Class<E> exceptionClass, ValueOrExceptionSink<E> sink);
-
-  // lookUp overloads for 2 and 3 exception types exist, but are elided here.
-}
-```
-
-Tip: When any state uses the `Tasks` interface to perform lookups or create
-subtasks, those lookups and subtasks will complete before the next state begins.
-
-Tip: (Corollary) If subtasks are complex `StateMachine`s or recursively create
-subtasks, they all *transitively* complete before the next state begins.
-
-### SkyValue lookups
-
-`StateMachine`s use `Tasks.lookUp` overloads to look up SkyValues. They are
-analogous to `SkyFunction.Environment.getValue` and
-`SkyFunction.Environment.getValueOrThrow` and have similar exception handling
-semantics. The implementation does not immediately perform the lookup, but
-instead, batches[^4] as many lookups as possible before doing so. The value
-might not be immediately available, for example, requiring a Skyframe restart,
-so the caller specifies what to do with the resulting value using a callback.
-
-The `StateMachine` processor ([`Driver`s and bridging to
-SkyFrame](#drivers-and-bridging)) guarantees that the value is available before
-the next state begins. An example follows.
-
-```
-class DoesLookup implements StateMachine, Consumer<SkyValue> {
-  private Value value;
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    tasks.lookUp(new Key(), (Consumer<SkyValue>) this);
-    return this::processValue;
-  }
-
-  // The `lookUp` call in `step` causes this to be called before `processValue`.
-  @Override  // Implementation of Consumer<SkyValue>.
-  public void accept(SkyValue value) {
-    this.value = (Value)value;
-  }
-
-  private StateMachine processValue(Tasks tasks) {
-    System.out.println(value);  // Prints the string representation of `value`.
-    return DONE;
-  }
-}
-```
-
-In the above example, the first step does a lookup for `new Key()`, passing
-`this` as the consumer. That is possible because `DoesLookup` implements
-`Consumer<SkyValue>`.
-
-Tip: When passing `this` as a value sink, it's helpful to readers to upcast it
-to the receiver type to narrow down the purpose of passing `this`. The example
-passes `(Consumer<SkyValue>) this`.
-
-By contract, before the next state `DoesLookup.processValue` begins, all the
-lookups of `DoesLookup.step` are complete. Therefore `value` is available when
-it is accessed in `processValue`.
-
-### Subtasks
-
-`Tasks.enqueue` requests the execution of logically concurrent subtasks.
-Subtasks are also `StateMachine`s and can do anything regular `StateMachine`s
-can do, including recursively creating more subtasks or looking up SkyValues.
-Much like `lookUp`, the state machine driver ensures that all subtasks are
-complete before proceeding to the next step. An example follows.
-
-```
-class Subtasks implements StateMachine {
-  private int i = 0;
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    tasks.enqueue(new Subtask1());
-    tasks.enqueue(new Subtask2());
-    // The next step is Subtasks.processResults. It won't be called until both
-    // Subtask1 and Subtask2 are complete.
-    return this::processResults;
-  }
-
-  private StateMachine processResults(Tasks tasks) {
-    System.out.println(i);  // Prints "3".
-    return DONE;  // Subtasks is done.
-  }
-
-  private class Subtask1 implements StateMachine {
-    @Override
-    public StateMachine step(Tasks tasks) {
-      i += 1;
-      return DONE;  // Subtask1 is done.
-    }
-  }
-
-  private class Subtask2 implements StateMachine {
-    @Override
-    public StateMachine step(Tasks tasks) {
-      i += 2;
-      return DONE;  // Subtask2 is done.
-    }
-  }
-}
-```
-
-Though `Subtask1` and `Subtask2` are logically concurrent, everything runs in a
-single thread so the "concurrent" update of `i` does not need any
-synchronization.
-
-### Structured concurrency
-
-Because every `lookUp` and `enqueue` must resolve before advancing to the next
-state, concurrency is naturally limited to tree structures. It's
-possible to create hierarchical[^5] concurrency as shown in the following
-example.
-
-![Structured Concurrency](/contribute/images/structured-concurrency.svg)
-
-It's hard to tell from the *UML* that the concurrency structure forms a tree.
-There's an [alternate view](#concurrency-tree-diagram) that better shows the
-tree structure.
-
-![Unstructured Concurrency](/contribute/images/unstructured-concurrency.svg)
-
-Structured concurrency is much easier to reason about.
-
-## Composition and control flow patterns
-
-This section presents examples for how multiple `StateMachine`s can be composed
-and solutions to certain control flow problems.
-
-### Sequential states
-
-This is the most common and straightforward control flow pattern. An example of
-this is shown in [Stateful computations inside
-`SkyKeyComputeState`](#stateful-computations).
-
-### Branching
-
-Branching states in `StateMachine`s can be achieved by returning different
-values using regular *Java* control flow, as shown in the following example.
-
-```
-class Branch implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    // Returns different state machines, depending on condition.
-    if (shouldUseA()) {
-      return this::performA;
-    }
-    return this::performB;
-  }
-  …
-}
-```
-
-It’s very common for certain branches to return `DONE`, for early completion.
-
-### Advanced sequential composition
-
-Since the `StateMachine` control structure is memoryless, sharing `StateMachine`
-definitions as subtasks can sometimes be awkward. Let *M1* and
-*M2* be `StateMachine` instances that share a `StateMachine`, *S*,
-with *M1* and *M2* being the sequences *<A, S, B>* and
-*<X, S, Y>* respectively. The problem is that *S* doesn’t know whether to
-continue to *B* or *Y* after it completes and `StateMachine`s don't quite keep a
-call stack. This section reviews some techniques for achieving this.
-
-#### `StateMachine` as terminal sequence element
-
-This doesn’t solve the initial problem posed. It only demonstrates sequential
-composition when the shared `StateMachine` is terminal in the sequence.
-
-```
-// S is the shared state machine.
-class S implements StateMachine { … }
-
-class M1 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performA();
-    return new S();
-  }
-}
-
-class M2 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performX();
-    return new S();
-  }
-}
-```
-
-This works even if *S* is itself a complex state machine.
-
-#### Subtask for sequential composition
-
-Since enqueued subtasks are guaranteed to complete before the next state, it’s
-sometimes possible to slightly abuse[^6] the subtask mechanism.
-
-```
-class M1 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performA();
-    // S starts after `step` returns and by contract must complete before `doB`
-    // begins. It is effectively sequential, inducing the sequence < A, S, B >.
-    tasks.enqueue(new S());
-    return this::doB;
-  }
-
-  private StateMachine doB(Tasks tasks) {
-    performB();
-    return DONE;
-  }
-}
-
-class M2 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performX();
-    // Similarly, this induces the sequence < X, S, Y>.
-    tasks.enqueue(new S());
-    return this::doY;
-  }
-
-  private StateMachine doY(Tasks tasks) {
-    performY();
-    return DONE;
-  }
-}
-```
-
-#### `runAfter` injection
-
-Sometimes, abusing `Tasks.enqueue` is impossible because there are other
-parallel subtasks or `Tasks.lookUp` calls that must be completed before *S*
-executes. In this case, injecting a `runAfter` parameter into *S* can be used to
-inform *S* of what to do next.
-
-```
-class S implements StateMachine {
-  // Specifies what to run after S completes.
-  private final StateMachine runAfter;
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    … // Performs some computations.
-    return this::processResults;
-  }
-
-  @Nullable
-  private StateMachine processResults(Tasks tasks) {
-    … // Does some additional processing.
-
-    // Executes the state machine defined by `runAfter` after S completes.
-    return runAfter;
-  }
-}
-
-class M1 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performA();
-    // Passes `this::doB` as the `runAfter` parameter of S, resulting in the
-    // sequence < A, S, B >.
-    return new S(/* runAfter= */ this::doB);
-  }
-
-  private StateMachine doB(Tasks tasks) {
-    performB();
-    return DONE;
-  }
-}
-
-class M2 implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    performX();
-    // Passes `this::doY` as the `runAfter` parameter of S, resulting in the
-    // sequence < X, S, Y >.
-    return new S(/* runAfter= */ this::doY);
-  }
-
-  private StateMachine doY(Tasks tasks) {
-    performY();
-    return DONE;
-  }
-}
-```
-
-This approach is cleaner than abusing subtasks. However, applying this too
-liberally, for example, by nesting multiple `StateMachine`s with `runAfter`, is
-the road to [Callback Hell](#callback-hell). It’s better to break up sequential
-`runAfter`s with ordinary sequential states instead.
-
-```
-  return new S(/* runAfter= */ new T(/* runAfter= */ this::nextStep))
-```
-
-can be replaced with the following.
-
-```
-  private StateMachine step1(Tasks tasks) {
-    doStep1();
-    return new S(/* runAfter= */ this::intermediateStep);
-  }
-
-  private StateMachine intermediateStep(Tasks tasks) {
-    return new T(/* runAfter= */ this::nextStep);
-  }
-```
-
-Note: It's possible to pass `DONE` as the `runAfter` parameter when there's
-nothing to run afterwards.
-
-Tip: When using `runAfter`, always annotate the parameter with `/* runAfter= */`
-to let the reader know the meaning at the callsite.
-
-#### *Forbidden* alternative: `runAfterUnlessError`
-
-In an earlier draft, we had considered a `runAfterUnlessError` that would abort
-early on errors. This was motivated by the fact that errors often end up getting
-checked twice, once by the `StateMachine` that has a `runAfter` reference and
-once by the `runAfter` machine itself.
-
-After some deliberation, we decided that uniformity of the code is more
-important than deduplicating the error checking. It would be confusing if the
-`runAfter` mechanism did not work in a consistent manner with the
-`tasks.enqueue` mechanism, which always requires error checking.
-
-Warning: When using `runAfter`, the machine that has the injected `runAfter`
-should invoke it unconditionally at completion, even on error, for consistency.
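-
-For illustration, here is a minimal sketch of honoring that warning, assuming
-hypothetical `Value`/`SomeException` types and a result sink in the style of
-[Propagating values between `StateMachine`s](#propagating-values): the machine
-reports its outcome through the callback, but returns `runAfter` on every path.
-
-```
-class S implements StateMachine {
-  interface ResultSink {
-    void acceptValue(Value value);
-    void acceptError(SomeException error);
-  }
-
-  private final ResultSink sink;
-  private final StateMachine runAfter;
-
-  private Value value;
-  private SomeException error;
-
-  … // Constructor and steps that populate exactly one of `value` and `error`.
-
-  private StateMachine complete(Tasks tasks) {
-    if (error != null) {
-      sink.acceptError(error);  // Reports the error through the callback...
-    } else {
-      sink.acceptValue(value);
-    }
-    // ...but invokes `runAfter` unconditionally, mirroring the
-    // `tasks.enqueue` contract.
-    return runAfter;
-  }
-}
-```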
-
-### Direct delegation
-
-Each time there is a formal state transition, the main `Driver` loop advances.
-As per contract, advancing states means that all previously enqueued SkyValue
-lookups and subtasks resolve before the next state executes. Sometimes the logic
-of a delegate `StateMachine` makes a phase advance unnecessary or
-counterproductive. For example, if the first `step` of the delegate performs
-SkyKey lookups that could be parallelized with lookups of the delegating state
-then a phase advance would make them sequential. It could make more sense to
-perform direct delegation, as shown in the example below.
-
-```
-class Parent implements StateMachine {
-  @Override
-  public StateMachine step(Tasks tasks) {
-    tasks.lookUp(new Key1(), this);
-    // Directly delegates to `Delegate`.
-    //
-    // The (valid) alternative:
-    //   return new Delegate(this::afterDelegation);
-    // would cause `Delegate.step` to execute after `step` completes which would
-    // cause lookups of `Key1` and `Key2` to be sequential instead of parallel.
-    return new Delegate(this::afterDelegation).step(tasks);
-  }
-
-  private StateMachine afterDelegation(Tasks tasks) {
-    …
-  }
-}
-
-class Delegate implements StateMachine {
-  private final StateMachine runAfter;
-
-  Delegate(StateMachine runAfter) {
-    this.runAfter = runAfter;
-  }
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    tasks.lookUp(new Key2(), this);
-    return …;
-  }
-
-  // Rest of implementation.
-  …
-
-  private StateMachine complete(Tasks tasks) {
-    …
-    return runAfter;
-  }
-}
-```
-
-## Data flow
-
-The focus of the previous discussion has been on managing control flow. This
-section describes the propagation of data values.
-
-### Implementing `Tasks.lookUp` callbacks
-
-There’s an example of implementing a `Tasks.lookUp` callback in [SkyValue
-lookups](#skyvalue-lookups). This section provides rationale and suggests
-approaches for handling multiple SkyValues.
-
-#### `Tasks.lookUp` callbacks
-
-The `Tasks.lookUp` method takes a callback, `sink`, as a parameter.
-
-```
-  void lookUp(SkyKey key, Consumer<SkyValue> sink);
-```
-
-The idiomatic approach would be to use a *Java* lambda to implement this:
-
-```
-  tasks.lookUp(key, value -> myValue = (MyValueClass)value);
-```
-
-with `myValue` being a member variable of the `StateMachine` instance doing the
-lookup. However, the lambda requires an extra memory allocation compared to
-implementing the `Consumer<SkyValue>` interface in the `StateMachine`
-implementation. The lambda is still useful when there are multiple lookups that
-would be ambiguous.
-
-Note: Bikeshed warning. There is a noticeable difference of approximately 1%
-end-to-end CPU usage when implementing callbacks systematically in
-`StateMachine` implementations compared to using lambdas, which makes this
-recommendation debatable. To avoid unnecessary debates, it is advised to leave
-the decision up to the individual implementing the solution.
-
-There are also error handling overloads of `Tasks.lookUp` that are analogous to
-`SkyFunction.Environment.getValueOrThrow`.
-
-```
-  <E extends Exception> void lookUp(
-      SkyKey key, Class<E> exceptionClass, ValueOrExceptionSink<E> sink);
-
-  interface ValueOrExceptionSink<E extends Exception> {
-    void acceptValueOrException(@Nullable SkyValue value, @Nullable E exception);
-  }
-```
-
-An example implementation is shown below.
-
-```
-class PerformLookupWithError implements StateMachine, ValueOrExceptionSink<MyException> {
-  private MyValue value;
-  private MyException error;
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    tasks.lookUp(new MyKey(), MyException.class, (ValueOrExceptionSink<MyException>) this);
-    return this::processResult;
-  }
-
-  @Override
-  public void acceptValueOrException(@Nullable SkyValue value, @Nullable MyException exception) {
-    if (value != null) {
-      this.value = (MyValue)value;
-      return;
-    }
-    if (exception != null) {
-      this.error = exception;
-      return;
-    }
-    throw new IllegalArgumentException("Both parameters were unexpectedly null.");
-  }
-
-  private StateMachine processResult(Tasks tasks) {
-    if (error != null) {
-      // Handles the error.
-      …
-      return DONE;
-    }
-    // Processes `value`, which is non-null.
-    …
-  }
-}
-```
-
-As with lookups without error handling, having the `StateMachine` class directly
-implement the callback saves a memory allocation for the lambda.
-
-[Error handling](#error-handling) provides a bit more detail, but essentially,
-there's not much difference between the propagation of errors and normal values.
-
-#### Consuming multiple SkyValues
-
-Multiple SkyValue lookups are often required. An approach that works much of the
-time is to switch on the type of SkyValue. The following is an example that has
-been simplified from prototype production code.
-
-```
-  @Nullable
-  private StateMachine fetchConfigurationAndPackage(Tasks tasks) {
-    var configurationKey = configuredTarget.getConfigurationKey();
-    if (configurationKey != null) {
-      tasks.lookUp(configurationKey, (Consumer<SkyValue>) this);
-    }
-
-    var packageId = configuredTarget.getLabel().getPackageIdentifier();
-    tasks.lookUp(PackageValue.key(packageId), (Consumer<SkyValue>) this);
-
-    return this::constructResult;
-  }
-
-  @Override  // Implementation of `Consumer<SkyValue>`.
-  public void accept(SkyValue value) {
-    if (value instanceof BuildConfigurationValue) {
-      this.configurationValue = (BuildConfigurationValue) value;
-      return;
-    }
-    if (value instanceof PackageValue) {
-      this.pkg = ((PackageValue) value).getPackage();
-      return;
-    }
-    throw new IllegalArgumentException("unexpected value: " + value);
-  }
-```
-
-The `Consumer<SkyValue>` callback implementation can be shared unambiguously
-because the value types are different. When that’s not the case, falling back to
-lambda-based implementations or full inner-class instances that implement the
-appropriate callbacks is viable.
-
-### Propagating values between `StateMachine`s
-
-So far, this document has only explained how to arrange work in a subtask, but
-subtasks also need to report values back to the caller. Since subtasks are
-logically asynchronous, their results are communicated back to the caller using
-a *callback*. To make this work, the subtask defines a sink interface that is
-injected via its constructor.
-
-```
-class BarProducer implements StateMachine {
-  // Callers of BarProducer implement the following interface to accept its
-  // results. Exactly one of the two methods will be called by the time
-  // BarProducer completes.
-  interface ResultSink {
-    void acceptBarValue(Bar value);
-    void acceptBarError(BarException exception);
-  }
-
-  private final ResultSink sink;
-
-  BarProducer(ResultSink sink) {
-    this.sink = sink;
-  }
-
-  … // StateMachine steps that end with this::complete.
-
-  private StateMachine complete(Tasks tasks) {
-    if (hasError()) {
-      sink.acceptBarError(getError());
-      return DONE;
-    }
-    sink.acceptBarValue(getValue());
-    return DONE;
-  }
-}
-```
-
-Tip: It would be tempting to use the more concise signature `void accept(Bar
-value)` rather than the stuttery `void acceptBarValue(Bar value)` above.
-However, `Consumer<Bar>` is a common overload of `void accept(Bar value)`,
-so doing this often leads to violations of the [Overloads: never
-split](https://google.github.io/styleguide/javaguide.html#s3.4.2-ordering-class-contents)
-style-guide rule.
-
-Tip: Using a custom `ResultSink` type instead of a generic one from
-`java.util.function` makes it easy to find implementations in the code base,
-improving readability.
-
-A caller `StateMachine` would then look like the following.
-
-```
-class Caller implements StateMachine, BarProducer.ResultSink {
-  interface ResultSink {
-    void acceptCallerValue(Bar value);
-    void acceptCallerError(BarException error);
-  }
-
-  private final ResultSink sink;
-
-  private Bar value;
-
-  Caller(ResultSink sink) {
-    this.sink = sink;
-  }
-
-  @Override
-  @Nullable
-  public StateMachine step(Tasks tasks) {
-    tasks.enqueue(new BarProducer((BarProducer.ResultSink) this));
-    return this::processResult;
-  }
-
-  @Override
-  public void acceptBarValue(Bar value) {
-    this.value = value;
-  }
-
-  @Override
-  public void acceptBarError(BarException error) {
-    sink.acceptCallerError(error);
-  }
-
-  private StateMachine processResult(Tasks tasks) {
-    // Since all enqueued subtasks resolve before `processResult` starts, one of
-    // the `BarProducer.ResultSink` callbacks must have been called by this point.
-    if (value == null) {
-      return DONE;  // There was a previously reported error.
-    }
-    var finalResult = computeResult(value);
-    sink.acceptCallerValue(finalResult);
-    return DONE;
-  }
-}
-```
-
-The preceding example demonstrates a few things. `Caller` has to propagate its
-results back and defines its own `Caller.ResultSink`. `Caller` implements the
-`BarProducer.ResultSink` callbacks. Upon resumption, `processResult` checks if
-`value` is null to determine if an error occurred. This is a common behavior
-pattern after accepting output from either a subtask or SkyValue lookup.
-
-Note that the implementation of `acceptBarError` eagerly forwards the error to
-the `Caller.ResultSink`, as required by [Error bubbling](#error-bubbling).
-
-Alternatives for top-level `StateMachine`s are described in [`Driver`s and
-bridging to SkyFunctions](#drivers-and-bridging).
-
-### Error handling
-
-There are a couple of examples of error handling already in [`Tasks.lookUp`
-callbacks](#tasks-lookup-callbacks) and [Propagating values between
-`StateMachines`](#propagating-values). Exceptions other than
-`InterruptedException` are not thrown, but instead passed around through
-callbacks as values. Such callbacks often have exclusive-or semantics, with
-exactly one of a value or error being passed.
-
-The next section describes a subtle, but important interaction with Skyframe
-error handling.
-
-#### Error bubbling (--nokeep\_going)
-
-Warning: Errors need to be eagerly propagated all the way back to the
-SkyFunction for error bubbling to function correctly.
-
-During error bubbling, a SkyFunction may be restarted even if not all requested
-SkyValues are available. In such cases, the subsequent state will never be
-reached due to the `Tasks` API contract. However, the `StateMachine` should
-still propagate the exception.
-
-Since propagation must occur regardless of whether the next state is reached,
-the error handling callback must perform this task. For an inner `StateMachine`,
-this is achieved by invoking the parent callback.
-
-At the top-level `StateMachine`, which interfaces with the SkyFunction, this can
-be done by calling the `setException` method of `ValueOrExceptionProducer`.
-`ValueOrExceptionProducer.tryProduceValue` will then throw the exception, even
-if there are missing SkyValues.
-
-If a `Driver` is used directly, it is essential to check for
-propagated errors from the SkyFunction, even if the machine has not finished
-processing.
-
-### Event Handling
-
-For SkyFunctions that need to emit events, a `StoredEventHandler` is injected
-into SkyKeyComputeState and further injected into `StateMachine`s that require
-it. Historically, the `StoredEventHandler` was needed due to Skyframe dropping
-certain events unless they were replayed, but this was subsequently fixed.
-`StoredEventHandler` injection is preserved because it simplifies the
-implementation of events emitted from error handling callbacks.
-
-## `Driver`s and bridging to SkyFunctions
-
-A `Driver` is responsible for managing the execution of `StateMachine`s,
-beginning with a specified root `StateMachine`. As `StateMachine`s can
-recursively enqueue subtask `StateMachine`s, a single `Driver` can manage
-numerous subtasks. These subtasks create a tree structure, a result of
-[Structured concurrency](#structured-concurrency). The `Driver` batches SkyValue
-lookups across subtasks for improved efficiency.
-
-There are a number of classes built around the `Driver`, with the following API.
-
-```
-public final class Driver {
-  public Driver(StateMachine root);
-  public boolean drive(SkyFunction.Environment env) throws InterruptedException;
-}
-```
-
-`Driver` takes a single root `StateMachine` as a parameter. Calling
-`Driver.drive` executes the `StateMachine` as far as it can go without a
-Skyframe restart. It returns true when the `StateMachine` completes and false
-otherwise, indicating that not all values were available.
-
-`Driver` maintains the concurrent state of the `StateMachine` and is well
-suited for embedding in `SkyKeyComputeState`.
-
-### Directly instantiating `Driver`
-
-`StateMachine` implementations conventionally communicate their results via
-callbacks. It's possible to directly instantiate a `Driver` as shown in the
-following example.
-
-The `Driver` is embedded in the `SkyKeyComputeState` implementation along with
-an implementation of the corresponding `ResultSink` to be defined a bit further
-down. At the top level, the `State` object is an appropriate receiver for the
-result of the computation as it is guaranteed to outlive `Driver`.
-
-```
-class State implements SkyKeyComputeState, ResultProducer.ResultSink {
-  // The `Driver` instance, containing the full tree of all `StateMachine`
-  // states. Responsible for calling `StateMachine.step` implementations when
-  // asynchronous values are available and performing batched SkyFrame lookups.
-  //
-  // Non-null while `result` is being computed.
-  private Driver resultProducer;
-
-  // Variable for storing the result of the `StateMachine`
-  //
-  // Will be non-null after the computation completes.
-  //
-  private ResultType result;
-
-  // Implements `ResultProducer.ResultSink`.
-  //
-  // `ResultProducer` propagates its final value through a callback that is
-  // implemented here.
-  @Override
-  public void acceptResult(ResultType result) {
-    this.result = result;
-  }
-}
-```
-
-The code below sketches the `ResultProducer`.
-
-```
-class ResultProducer implements StateMachine {
-  interface ResultSink {
-    void acceptResult(ResultType value);
-  }
-
-  private final Parameters parameters;
-  private final ResultSink sink;
-
-  … // Other internal state.
-
-  ResultProducer(Parameters parameters, ResultSink sink) {
-    this.parameters = parameters;
-    this.sink = sink;
-  }
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    … // Implementation.
-    return this::complete;
-  }
-
-  private StateMachine complete(Tasks tasks) {
-    sink.acceptResult(getResult());
-    return DONE;
-  }
-}
-```
-
-Then the code for lazily computing the result could look like the following.
-
-```
-@Nullable
-private Result computeResult(State state, SkyFunction.Environment env)
-    throws InterruptedException {
-  if (state.result != null) {
-    return state.result;
-  }
-  if (state.resultProducer == null) {
-    state.resultProducer = new Driver(new ResultProducer(
-        new Parameters(), (ResultProducer.ResultSink)state));
-  }
-  if (state.resultProducer.drive(env)) {
-    // Clears the `Driver` instance as it is no longer needed.
-    state.resultProducer = null;
-  }
-  return state.result;
-}
-```
-
-### Embedding `Driver`
-
-If the `StateMachine` produces a value and raises no exceptions, embedding
-`Driver` is another possible implementation, as shown in the following example.
-
-```
-class ResultProducer implements StateMachine {
-  private final Parameters parameters;
-  private final Driver driver;
-
-  private ResultType result;
-
-  ResultProducer(Parameters parameters) {
-    this.parameters = parameters;
-    this.driver = new Driver(this);
-  }
-
-  @Nullable  // Null when a Skyframe restart is needed.
-  public ResultType tryProduceValue(SkyFunction.Environment env)
-      throws InterruptedException {
-    if (!driver.drive(env)) {
-      return null;
-    }
-    return result;
-  }
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    … // Implementation.
-  }
-}
-```
-
-The SkyFunction may have code that looks like the following (where `State` is
-the function-specific type of `SkyKeyComputeState`).
-
-```
-@Nullable  // Null when a Skyframe restart is needed.
-Result computeResult(SkyFunction.Environment env, State state)
-    throws InterruptedException {
-  if (state.result != null) {
-    return state.result;
-  }
-  if (state.resultProducer == null) {
-    state.resultProducer = new ResultProducer(new Parameters());
-  }
-  var result = state.resultProducer.tryProduceValue(env);
-  if (result == null) {
-    return null;
-  }
-  state.resultProducer = null;
-  return state.result = result;
-}
-```
-
-Embedding `Driver` in the `StateMachine` implementation is a better fit for
-Skyframe's synchronous coding style.
-
-### StateMachines that may produce exceptions
-
-Otherwise, there are `SkyKeyComputeState`-embeddable `ValueOrExceptionProducer`
-and `ValueOrException2Producer` classes that have synchronous APIs to match
-synchronous SkyFunction code.
-
-The `ValueOrExceptionProducer` abstract class includes the following methods.
-
-```
-public abstract class ValueOrExceptionProducer<V, E extends Exception>
-    implements StateMachine {
-  @Nullable
-  public final V tryProduceValue(Environment env)
-      throws InterruptedException, E {
-    … // Implementation.
-  }
-
-  protected final void setValue(V value) { … // Implementation. }
-  protected final void setException(E exception) { … // Implementation. }
-}
-```
-
-It includes an embedded `Driver` instance and closely resembles the
-`ResultProducer` class in [Embedding driver](#embedding-driver) and interfaces
-with the SkyFunction in a similar manner. Instead of defining a `ResultSink`,
-implementations call `setValue` or `setException` when either of those occur.
-When both occur, the exception takes priority. The `tryProduceValue` method
-bridges the asynchronous callback code to synchronous code and throws an
-exception when one is set.
-
-As previously noted, during error bubbling, it's possible for an error to occur
-even if the machine is not yet done because not all inputs are available. To
-accommodate this, `tryProduceValue` throws any set exceptions, even before the
-machine is done.
-
-## Epilogue: Eventually removing callbacks
-
-`StateMachine`s are a highly efficient, but boilerplate-intensive way to perform
-asynchronous computation. Continuations (particularly in the form of `Runnable`s
-passed to `ListenableFuture`) are widespread in certain parts of *Bazel* code,
-but aren't prevalent in analysis SkyFunctions. Analysis is mostly CPU bound and
-there are no efficient asynchronous APIs for disk I/O. Eventually, it would be
-good to optimize away callbacks as they have a learning curve and impede
-readability.
-
-One of the most promising alternatives is *Java* virtual threads. Instead of
-having to write callbacks, everything is replaced with synchronous, blocking
-calls. This is possible because tying up a virtual thread resource, unlike a
-platform thread, is supposed to be cheap. However, even with virtual threads,
-replacing simple synchronous operations with thread creation and synchronization
-primitives is too expensive. We performed a migration from `StateMachine`s to
-*Java* virtual threads and they were orders of magnitude slower, leading to
-almost a 3x increase in end-to-end analysis latency. Since virtual threads are
-still a preview feature, it's possible that this migration can be performed at a
-later date when performance improves.
-
-Another approach to consider is waiting for *Loom* coroutines, if they ever
-become available. The advantage here is that it might be possible to reduce
-synchronization overhead by using cooperative multitasking.
-
-If all else fails, low-level bytecode rewriting could also be a viable
-alternative. With enough optimization, it might be possible to achieve
-performance that approaches hand-written callback code.
-
-## Appendix
-
-### Callback Hell
-
-Callback hell is an infamous problem in asynchronous code that uses callbacks.
-It stems from the fact that the continuation for a subsequent step is nested
-within the previous step. If there are many steps, this nesting can be extremely
-deep. When coupled with control flow, the code becomes unmanageable.
-
-```
-class CallbackHell implements StateMachine {
-  @Override
-  public StateMachine step(Tasks task) {
-    doA();
-    return t -> {
-      doB();
-      return t1 -> {
-        doC();
-        return DONE;
-      };
-    };
-  }
-}
-```
-
-One of the advantages of nested implementations is that the stack frame of the
-outer step can be preserved. In *Java*, captured lambda variables must be
-effectively final, so using such variables can be cumbersome. Deep nesting is
-avoided by returning method references as continuations instead of lambdas, as
-shown below.
-
-```
-class CallbackHellAvoided implements StateMachine {
-  @Override
-  public StateMachine step(Tasks task) {
-    doA();
-    return this::step2;
-  }
-
-  private StateMachine step2(Tasks tasks) {
-    doB();
-    return this::step3;
-  }
-
-  private StateMachine step3(Tasks tasks) {
-    doC();
-    return DONE;
-  }
-}
-```
-
-Callback hell may also occur if the [`runAfter` injection](#runafter-injection)
-pattern is used too densely, but this can be avoided by interspersing injections
-with sequential steps.
-
-#### Example: Chained SkyValue lookups
-
-It is often the case that the application logic requires dependent chains of
-SkyValue lookups, for example, if a second SkyKey depends on the first SkyValue.
-Approached naively, this would result in a complex, deeply nested callback
-structure.
-
-```
-private ValueType1 value1;
-private ValueType2 value2;
-
-private StateMachine step1(...) {
-  tasks.lookUp(key1, (Consumer<SkyValue>) this);  // key1 has type KeyType1.
-  return this::step2;
-}
-
-@Override
-public void accept(SkyValue value) {
-  this.value1 = (ValueType1) value;
-}
-
-private StateMachine step2(...) {
-  KeyType2 key2 = computeKey(value1);
-  tasks.lookUp(key2, this::acceptValueType2);
-  return this::step3;
-}
-
-private void acceptValueType2(SkyValue value) {
-  this.value2 = (ValueType2) value;
-}
-```
-
-However, since continuations are specified as method references, the code looks
-procedural across state transitions: `step2` follows `step1`. Note that here, a
-method reference (`this::acceptValueType2`) is used to assign `value2`. This
-makes the ordering of the code match the ordering of the computation from
-top-to-bottom.
-
-### Miscellaneous Tips
-
-#### Readability: Execution Ordering
-
-To improve readability, strive to keep the `StateMachine.step` implementations
-in execution order and callback implementations immediately following where they
-are passed in the code. This isn't always possible where the control flow
-branches. Additional comments might be helpful in such cases.
-
-In [Example: Chained SkyValue lookups](#chained-skyvalue-lookups), an
-intermediate method reference is created to achieve this. This trades a small
-amount of performance for readability, which is likely worthwhile here.
-
-#### Generational Hypothesis
-
-Medium-lived *Java* objects break the generational hypothesis of the *Java*
-garbage collector, which is designed to handle objects that live for a very
-short time or objects that live forever. By definition, objects in
-`SkyKeyComputeState` violate this hypothesis. Such objects, containing the
-constructed tree of all still-running `StateMachine`s, rooted at `Driver`, have
-an intermediate lifespan as they suspend, waiting for asynchronous computations
-to complete.
-
-It seems less bad in JDK19, but when using `StateMachine`s, it's sometimes
-possible to observe an increase in GC time, even with dramatic decreases in
-actual garbage generated. Since `StateMachine`s have an intermediate lifespan
-they could be promoted to old gen, causing it to fill up more quickly, thus
-necessitating more expensive major or full GCs to clean up.
-
-The initial precaution is to minimize the use of `StateMachine` variables, but
-it is not always feasible, for example, if a value is needed across multiple
-states. Where it is possible, local stack `step` variables are young-generation
-variables and are efficiently GC'd.
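-
-As a minimal illustration (with hypothetical names and helpers), contrast a
-step-local value, which becomes unreachable as soon as `step` returns, with a
-member variable that must survive suspension:
-
-```
-class HoldsState implements StateMachine {
-  // Needed by a later state, so it must be a member variable; it lives as
-  // long as the suspended `StateMachine` does and may be promoted to old gen.
-  private BigValue keptAcrossStates;
-
-  @Override
-  public StateMachine step(Tasks tasks) {
-    // Step-local; unreachable once `step` returns, so it is collected
-    // cheaply as young-generation garbage.
-    var scratch = new Scratch();
-    keptAcrossStates = compute(scratch);
-    return this::useIt;
-  }
-
-  private StateMachine useIt(Tasks tasks) {
-    consume(keptAcrossStates);
-    keptAcrossStates = null;  // Drops the reference once no longer needed.
-    return DONE;
-  }
-
-  … // Hypothetical: BigValue, Scratch, compute, consume.
-}
-```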
-
-For `StateMachine` variables, breaking things down into subtasks and following
-the recommended pattern for [Propagating values between
-`StateMachine`s](#propagating-values) is also helpful. Observe that when
-following the pattern, only child `StateMachine`s have references to parent
-`StateMachine`s and not vice versa. This means that as children complete and
-update the parents using result callbacks, the children naturally fall out of
-scope and become eligible for GC.
-
-Finally, in some cases, a `StateMachine` variable is needed in earlier states
-but not in later states. It can be beneficial to null out references to large
-objects once it is known that they are no longer needed.
-
-#### Naming states
-
-When naming a method, it's usually possible to name it for the behavior that
-happens within it. It's less clear how to do this in `StateMachine`s because
-there is no stack. For example, suppose method `foo` calls a sub-method `bar`.
-In a `StateMachine`, this could be translated into the state sequence `foo`,
-followed by `bar`. `foo` no longer includes the behavior `bar`. As a result,
-method names for states tend to be narrower in scope, potentially reflecting
-local behavior.
-
-### Concurrency tree diagram
-
-The following is an alternative view of the diagram in [Structured
-concurrency](#structured-concurrency) that better depicts the tree structure.
-The blocks form a small tree.
-
-![Structured Concurrency 3D](/contribute/images/structured-concurrency-3d.svg)
-
-[^1]: In contrast to Skyframe's convention of restarting from the beginning when
-    values are not available.
-[^2]: Note that `step` is permitted to throw `InterruptedException`, but the
-    examples omit this. There are a few low-level methods in *Bazel* code that
-    throw this exception and it propagates up to the `Driver`, to be described
-    later, that runs the `StateMachine`. It's fine to not declare it to be
-    thrown when unneeded.
-[^3]: Concurrent subtasks were motivated by the `ConfiguredTargetFunction` which
-    performs *independent* work for each dependency. Instead of manipulating
-    complex data structures that process all the dependencies at once,
-    introducing inefficiencies, each dependency has its own independent
-    `StateMachine`.
-[^4]: Multiple `tasks.lookUp` calls within a single step are batched together.
-    Additional batching can be created by lookups occurring within concurrent
-    subtasks.
-[^5]: This is conceptually similar to Java’s structured concurrency
-    [jeps/428](https://openjdk.org/jeps/428).
-[^6]: Doing this is similar to spawning a thread and joining it to achieve
-    sequential composition.
diff --git a/8.4.2/contribute/windows-chocolatey-maintenance.mdx b/8.4.2/contribute/windows-chocolatey-maintenance.mdx
deleted file mode 100644
index c6aee8f..0000000
--- a/8.4.2/contribute/windows-chocolatey-maintenance.mdx
+++ /dev/null
@@ -1,72 +0,0 @@
----
-title: 'Maintaining Bazel Chocolatey package on Windows'
----
-
-
-
-Note: The Chocolatey package is experimental; please provide feedback
-(`@petemounce` in issue tracker).
-
-## Prerequisites
-
-You need:
-
-* [chocolatey package manager](https://chocolatey.org) installed
-* (to publish) a chocolatey API key granting you permission to publish the
-  `bazel` package
-  * [@petemounce](https://github.com/petemounce) currently
    maintains this unofficial package.
-* (to publish) to have set up that API key for the chocolatey source locally
-  via `choco apikey -k <your_api_key> -s https://chocolatey.org/`
-
-## Build
-
-Compile bazel with the msys2 shell and `compile.sh`.
-
-```powershell
-pushd scripts/packages/chocolatey
-  ./build.ps1 -version 0.3.2 -mode local
-popd
-```
-
-This should result in `scripts/packages/chocolatey/bazel.<version>.nupkg` being
-created.
-
-The `build.ps1` script supports `mode` values `local`, `rc` and `release`.
-
-## Test
-
-0. Build the package (with `-mode local`)
-
-   * run a webserver (`python -m SimpleHTTPServer` in
-     `scripts/packages/chocolatey` is convenient and starts one on
-     `http://localhost:8000`)
-
-0. Test the install
-
-   The `test.ps1` should install the package cleanly (and error if it did not
-   install cleanly), then tell you what to do next.
-
-0. Test the uninstall
-
-   ```sh
-   choco uninstall bazel
-   # should remove bazel from the system
-   ```
-
-Chocolatey's moderation process automates checks here as well.
-
-## Release
-
-Modify `tools/parameters.json` for the new release's URI and checksum once the
-release has been published to github releases.
-
-```powershell
-./build.ps1 -version <version> -isRelease
-./test.ps1 -version <version>
-# if the test.ps1 passes
-choco push bazel.x.y.z.nupkg --source https://chocolatey.org/
-```
-
-Chocolatey.org will then run automated checks and respond to the push via email
-to the maintainers.
diff --git a/8.4.2/contribute/windows-scoop-maintenance.mdx b/8.4.2/contribute/windows-scoop-maintenance.mdx
deleted file mode 100644
index 58e2a6c..0000000
--- a/8.4.2/contribute/windows-scoop-maintenance.mdx
+++ /dev/null
@@ -1,55 +0,0 @@
----
-title: 'Maintaining Bazel Scoop package on Windows'
----
-
-
-
-Note: The Scoop package is experimental. To provide feedback, ping
-`@excitoon` in the issue tracker.
-
-## Prerequisites
-
-You need:
-
-* [Scoop package manager](https://scoop.sh/) installed
-* a GitHub account in order to publish and create pull requests to
-  [scoopinstaller/scoop-main](https://github.com/scoopinstaller/scoop-main)
-  * [@excitoon](https://github.com/excitoon) currently maintains this
-    unofficial package. Feel free to ask questions by
-    [e-mail](mailto:vladimir.chebotarev@gmail.com) or
-    [Telegram](http://telegram.me/excitoon).
-
-## Release process
-
-Scoop packages are very easy to maintain. Once you have the URL of the released
-Bazel, you need to make the appropriate changes in
-[this file](https://github.com/scoopinstaller/scoop-main/blob/master/bucket/bazel.json):
-
-- update the version
-- update dependencies if needed
-- update the URL
-- update the hash (`sha256` by default)
-
-In your filesystem, `bazel.json` is located in the directory
-`%UserProfile%/scoop/buckets/main/bucket` by default. This directory belongs to
-your clone of the Git repository
-[scoopinstaller/scoop-main](https://github.com/scoopinstaller/scoop-main).
-
-Test the result:
-
-```
-scoop uninstall bazel
-scoop install bazel
-bazel version
-bazel something_else
-```
-
-The first time, make a fork of
-[scoopinstaller/scoop-main](https://github.com/scoopinstaller/scoop-main) and
-specify it as your own remote for `%UserProfile%/scoop/buckets/main`:
-
-```
-git remote add mine FORK_URL
-```
-
-Push your changes to your fork and create a pull request.
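-
-As a minimal sketch of the hash update, the `sha256` value can be computed
-from the published release binary (the asset name below is an illustrative
-assumption; `sha256sum` is available in an msys2 shell):
-
-```
-curl -LO https://github.com/bazelbuild/bazel/releases/download/8.4.2/bazel-8.4.2-windows-x86_64.exe
-sha256sum bazel-8.4.2-windows-x86_64.exe
-```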
diff --git a/8.4.2/docs/android-build-performance.mdx b/8.4.2/docs/android-build-performance.mdx
deleted file mode 100644
index 0d5edc7..0000000
--- a/8.4.2/docs/android-build-performance.mdx
+++ /dev/null
@@ -1,97 +0,0 @@
----
-title: 'Android Build Performance'
----
-
-
-
-This page contains information on optimizing build performance for Android
-apps specifically. For general build performance optimization with Bazel, see
-[Optimizing Performance](/rules/performance).
-
-## Recommended flags
-
-The flags are in the
-[`bazelrc` configuration syntax](/run/bazelrc#bazelrc-syntax-semantics), so
-they can be pasted directly into a `bazelrc` file and invoked with
-`--config=<configuration_name>` on the command line.
-
-**Profiling performance**
-
-Bazel writes a JSON trace profile by default to a file called
-`command.profile.gz` in Bazel's output base.
-See the [JSON Profile documentation](/rules/performance#performance-profiling) for
-how to read and interact with the profile.
-
-**Persistent workers for Android build actions**
-
-A subset of Android build actions has support for
-[persistent workers](https://blog.bazel.build/2015/12/10/java-workers.html).
-
-These actions' mnemonics are:
-
-* DexBuilder
-* Javac
-* Desugar
-* AaptPackage
-* AndroidResourceParser
-* AndroidResourceValidator
-* AndroidResourceCompiler
-* RClassGenerator
-* AndroidResourceLink
-* AndroidAapt2
-* AndroidAssetMerger
-* AndroidResourceMerger
-* AndroidCompiledResourceMerger
-
-Enabling workers can result in better build performance by saving on JVM
-startup costs from invoking each of these tools, but at the cost of increased
-memory usage on the system by persisting them.
-
-To enable workers for these actions, apply these flags with
-`--config=android_workers` on the command line:
-
-```
-build:android_workers --strategy=DexBuilder=worker
-build:android_workers --strategy=Javac=worker
-build:android_workers --strategy=Desugar=worker
-
-# A wrapper flag for these resource processing actions:
-# - AndroidResourceParser
-# - AndroidResourceValidator
-# - AndroidResourceCompiler
-# - RClassGenerator
-# - AndroidResourceLink
-# - AndroidAapt2
-# - AndroidAssetMerger
-# - AndroidResourceMerger
-# - AndroidCompiledResourceMerger
-build:android_workers --persistent_android_resource_processor
-```
-
-The default number of persistent workers created per action is `4`. We have
-[measured improved build performance](https://github.com/bazelbuild/bazel/issues/8586#issuecomment-500070549)
-by capping the number of instances for each action to `1` or `2`, although this
-may vary depending on the system Bazel is running on, and the project being
-built.
-
-To cap the number of instances for an action, apply these flags:
-
-```
-build:android_workers --worker_max_instances=DexBuilder=2
-build:android_workers --worker_max_instances=Javac=2
-build:android_workers --worker_max_instances=Desugar=2
-build:android_workers --worker_max_instances=AaptPackage=2
-# .. and so on for each action you're interested in.
-```
-
-**Using AAPT2**
-
-[`aapt2`](https://developer.android.com/studio/command-line/aapt2) has improved
-performance over `aapt` and also creates smaller APKs. To use `aapt2`, use the
-`--android_aapt=aapt2` flag or set `aapt2` on the `aapt_version` attribute of
-`android_binary` and `android_local_test`.
-
-**SSD optimizations**
-
-The `--experimental_multi_threaded_digest` flag is useful for optimizing digest
-computation on SSDs.
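-
-As a sketch of how these pieces fit together, you can build with the worker
-configuration above and then inspect where the time went (the target label is
-hypothetical):
-
-```posix-terminal
-bazel build //my/app:app --config=android_workers
-
-bazel analyze-profile "$(bazel info output_base)/command.profile.gz"
-```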
diff --git a/8.4.2/docs/android-instrumentation-test.mdx b/8.4.2/docs/android-instrumentation-test.mdx deleted file mode 100644 index bf0ff76..0000000 --- a/8.4.2/docs/android-instrumentation-test.mdx +++ /dev/null @@ -1,579 +0,0 @@ ---- -title: 'Android Instrumentation Tests' ---- - - - -_If you're new to Bazel, start with the [Building Android with -Bazel](/start/android-app ) tutorial._ - -![Running Android instrumentation tests in parallel](/docs/images/android_test.gif "Android instrumentation test") - -**Figure 1.** Running parallel Android instrumentation tests. - -[`android_instrumentation_test`](/reference/be/android#android_instrumentation_test) -allows developers to test their apps on Android emulators and devices. -It utilizes real Android framework APIs and the Android Test Library. - -For hermeticity and reproducibility, Bazel creates and launches Android -emulators in a sandbox, ensuring that tests always run from a clean state. Each -test gets an isolated emulator instance, allowing tests to run in parallel -without passing states between them. - -For more information on Android instrumentation tests, check out the [Android -developer -documentation](https://developer.android.com/training/testing/unit-testing/instrumented-unit-tests.html). - -Please file issues in the [GitHub issue tracker](https://github.com/bazelbuild/bazel/issues). - -## How it works - -When you run `bazel test` on an `android_instrumentation_test` target for the -first time, Bazel performs the following steps: - -1. Builds the test APK, APK under test, and their transitive dependencies -2. Creates, boots, and caches clean emulator states -3. Starts the emulator -4. Installs the APKs -5. Runs tests utilizing the [Android Test Orchestrator](https://developer.android.com/training/testing/junit-runner.html#using-android-test-orchestrator) -6. Shuts down the emulator -7. Reports the results - -In subsequent test runs, Bazel boots the emulator from the clean, cached state -created in step 2, so there are no leftover states from previous runs. Caching -emulator state also speeds up test runs. - -## Prerequisites - -Ensure your environment satisfies the following prerequisites: - -- **Linux**. Tested on Ubuntu 16.04, and 18.04. - -- **Bazel 0.12.0** or later. Verify the version by running `bazel info release`. - -```posix-terminal -bazel info release -``` -This results in output similar to the following: - -```none {:.devsite-disable-click-to-copy} -release 4.1.0 -``` - -- **KVM**. Bazel requires emulators to have [hardware - acceleration](https://developer.android.com/studio/run/emulator-acceleration.html#accel-check) - with KVM on Linux. You can follow these - [installation instructions](https://help.ubuntu.com/community/KVM/Installation) - for Ubuntu. - -To verify that KVM has the correct configuration, run: - -```posix-terminal -apt-get install cpu-checker && kvm-ok -``` - -If it prints the following message, you have the correct configuration: - -```none {:.devsite-disable-click-to-copy} -INFO: /dev/kvm exists -KVM acceleration can be used -``` - -- **Xvfb**. To run headless tests (for example, on CI servers), Bazel requires - the [X virtual framebuffer](https://www.x.org/archive/X11R7.6/doc/man/man1/Xvfb.1.xhtml). 
-
-To install it, run:
-
-```posix-terminal
-apt-get install xvfb
-```
-Verify that `Xvfb` is installed correctly and is installed at `/usr/bin/Xvfb`
-by running:
-
-```posix-terminal
-which Xvfb
-```
-The output is the following:
-
-```{:.devsite-disable-click-to-copy}
-/usr/bin/Xvfb
-```
-
-- **32-bit Libraries**. Some of the binaries used by the test infrastructure are
-  32-bit, so on 64-bit machines, ensure that 32-bit binaries can be run. For
-  Ubuntu, install these 32-bit libraries:
-
-```posix-terminal
-sudo apt-get install libc6:i386 libncurses5:i386 libstdc++6:i386 lib32z1 libbz2-1.0:i386
-```
-
-## Getting started
-
-Here is a typical target dependency graph of an `android_instrumentation_test`:
-
-![The target dependency graph on an Android instrumentation test](/docs/images/android_instrumentation_test.png "Target dependency graph")
-
-**Figure 2.** Target dependency graph of an `android_instrumentation_test`.
-
-### BUILD file
-
-The graph translates into a `BUILD` file like this:
-
-```python
-android_instrumentation_test(
-    name = "my_test",
-    test_app = ":my_test_app",
-    target_device = "@android_test_support//tools/android/emulated_devices/generic_phone:android_23_x86",
-)
-
-# Test app and library
-android_binary(
-    name = "my_test_app",
-    instruments = ":my_app",
-    manifest = "AndroidTestManifest.xml",
-    deps = [":my_test_lib"],
-    # ...
-)
-
-android_library(
-    name = "my_test_lib",
-    srcs = glob(["javatest/**/*.java"]),
-    deps = [
-        ":my_app_lib",
-        "@maven//:androidx_test_core",
-        "@maven//:androidx_test_runner",
-        "@maven//:androidx_test_espresso_espresso_core",
-    ],
-    # ...
-)
-
-# Target app and library under test
-android_binary(
-    name = "my_app",
-    manifest = "AndroidManifest.xml",
-    deps = [":my_app_lib"],
-    # ...
-)
-
-android_library(
-    name = "my_app_lib",
-    srcs = glob(["java/**/*.java"]),
-    deps = [
-        "@maven//:androidx_appcompat_appcompat",
-        "@maven//:androidx_annotation_annotation",
-    ],
-    # ...
-)
-```
-
-The main attributes of the rule `android_instrumentation_test` are:
-
-- `test_app`: An `android_binary` target. This target contains test code and
-  dependencies like Espresso and UIAutomator. The selected `android_binary`
-  target is required to specify an `instruments` attribute pointing to another
-  `android_binary`, which is the app under test.
-
-- `target_device`: An `android_device` target. This target describes the
-  specifications of the Android emulator which Bazel uses to create, launch and
-  run the tests. See the [section on choosing an Android
-  device](#android-device-target) for more information.
-
-The test app's `AndroidManifest.xml` must include [an `<instrumentation>`
-tag](https://developer.android.com/studio/test/#configure_instrumentation_manifest_settings).
-This tag must specify the attributes for the **package of the target app** and
-the **fully qualified class name of the instrumentation test runner**,
-`androidx.test.runner.AndroidJUnitRunner`.
-
-Here is an example `AndroidTestManifest.xml` for the test app:
-
-```xml
-<?xml version="1.0" encoding="UTF-8"?>
-<manifest xmlns:android="http://schemas.android.com/apk/res/android"
-    package="com.example.android.app.test"
-    android:versionCode="1"
-    android:versionName="1.0">
-    <instrumentation
-        android:name="androidx.test.runner.AndroidJUnitRunner"
-        android:targetPackage="com.example.android.app" />
-    <uses-sdk
-        android:minSdkVersion="16"
-        android:targetSdkVersion="27" />
-    <application>
-        <uses-library android:name="android.test.runner" />
-    </application>
-</manifest>
-```
-
-### WORKSPACE dependencies
-
-In order to use this rule, your project needs to depend on these external
-repositories:
-
-- `@androidsdk`: The Android SDK. Download this through Android Studio.
-
-- `@android_test_support`: Hosts the test runner, emulator launcher, and
-  `android_device` targets. You can find the [latest release
  here](https://github.com/android/android-test/releases).
-
-Enable these dependencies by adding the following lines to your `WORKSPACE`
-file:
-
-```python
-# Android SDK
-android_sdk_repository(
-    name = "androidsdk",
-    path = "/path/to/sdk",  # or set ANDROID_HOME
-)
-
-# Android Test Support
-ATS_COMMIT = "$COMMIT_HASH"
-http_archive(
-    name = "android_test_support",
-    strip_prefix = "android-test-%s" % ATS_COMMIT,
-    urls = ["https://github.com/android/android-test/archive/%s.tar.gz" % ATS_COMMIT],
-)
-load("@android_test_support//:repo.bzl", "android_test_repositories")
-android_test_repositories()
-```
-
-## Maven dependencies
-
-For managing dependencies on Maven artifacts from repositories, such as [Google
-Maven](https://maven.google.com) or [Maven Central](https://central.maven.org),
-you should use a Maven resolver, such as
-[`rules_jvm_external`](https://github.com/bazelbuild/rules_jvm_external).
-
-The rest of this page shows how to use `rules_jvm_external` to
-resolve and fetch dependencies from Maven repositories.
-
-## Choosing an android_device target
-
-`android_instrumentation_test.target_device` specifies which Android device to
-run the tests on. These `android_device` targets are defined in
-[`@android_test_support`](https://github.com/google/android-testing-support-library/tree/master/tools/android/emulated_devices).
-
-For example, you can query for the sources for a particular target by running:
-
-```posix-terminal
-bazel query --output=build @android_test_support//tools/android/emulated_devices/generic_phone:android_23_x86
-```
-Which results in output that looks similar to:
-
-```python
-# .../external/android_test_support/tools/android/emulated_devices/generic_phone/BUILD:43:1
-android_device(
-    name = "android_23_x86",
-    visibility = ["//visibility:public"],
-    tags = ["requires-kvm"],
-    generator_name = "generic_phone",
-    generator_function = "make_device",
-    generator_location = "tools/android/emulated_devices/generic_phone/BUILD:43",
-    vertical_resolution = 800,
-    horizontal_resolution = 480,
-    ram = 2048,
-    screen_density = 240,
-    cache = 32,
-    vm_heap = 256,
-    system_image = "@android_test_support//tools/android/emulated_devices/generic_phone:android_23_x86_images",
-    default_properties = "@android_test_support//tools/android/emulated_devices/generic_phone:_android_23_x86_props",
-)
-```
-
-The device target names use this template:
-
-```
-@android_test_support//tools/android/emulated_devices/<device_type>:<system>_<api_level>_x86_qemu2
-```
-
-In order to launch an `android_device`, the `system_image` for the selected API
-level is required. To download the system image, use Android SDK's
-`tools/bin/sdkmanager`. For example, to download the system image for
-`generic_phone:android_23_x86`, run
-`$sdk/tools/bin/sdkmanager "system-images;android-23;default;x86"`.
-
-To see the full list of supported `android_device` targets in
-`@android_test_support`, run the following command:
-
-```posix-terminal
-bazel query 'filter("x86_qemu2$", kind(android_device, @android_test_support//tools/android/emulated_devices/...:*))'
-```
-
-Bazel currently supports x86-based emulators only. For better performance, use
-`QEMU2` `android_device` targets instead of `QEMU` ones.
-
-## Running tests
-
-To run tests, add these lines to your project's
-`<project root>/.bazelrc` file.
- -``` -# Configurations for testing with Bazel -# Select a configuration by running -# `bazel test //my:target --config={headless, gui, local_device}` - -# Headless instrumentation tests (No GUI) -test:headless --test_arg=--enable_display=false - -# Graphical instrumentation tests. Ensure that $DISPLAY is set. -test:gui --test_env=DISPLAY -test:gui --test_arg=--enable_display=true - -# Testing with a local emulator or device. Ensure that `adb devices` lists the -# device. -# Run tests serially. -test:local_device --test_strategy=exclusive -# Use the local device broker type, as opposed to WRAPPED_EMULATOR. -test:local_device --test_arg=--device_broker_type=LOCAL_ADB_SERVER -# Uncomment and set $device_id if there is more than one connected device. -# test:local_device --test_arg=--device_serial_number=$device_id -``` - -Then, use one of the configurations to run tests: - -- `bazel test //my/test:target --config=gui` -- `bazel test //my/test:target --config=headless` -- `bazel test //my/test:target --config=local_device` - -Use __only one configuration__ or tests will fail. - -### Headless testing - -With `Xvfb`, it is possible to test with emulators without the graphical -interface, also known as headless testing. To disable the graphical interface -when running tests, pass the test argument `--enable_display=false` to Bazel: - -```posix-terminal -bazel test //my/test:target --test_arg=--enable_display=false -``` - -### GUI testing - -If the `$DISPLAY` environment variable is set, it's possible to enable the -graphical interface of the emulator while the test is running. To do this, pass -these test arguments to Bazel: - -```posix-terminal -bazel test //my/test:target --test_arg=--enable_display=true --test_env=DISPLAY -``` - -### Testing with a local emulator or device - -Bazel also supports testing directly on a locally launched emulator or connected -device. Pass the flags -`--test_strategy=exclusive` and -`--test_arg=--device_broker_type=LOCAL_ADB_SERVER` to enable local testing mode. -If there is more than one connected device, pass the flag -`--test_arg=--device_serial_number=$device_id` where `$device_id` is the id of -the device/emulator listed in `adb devices`. - -## Sample projects - -If you are looking for canonical project samples, see the [Android testing -samples](https://github.com/googlesamples/android-testing#experimental-bazel-support) -for projects using Espresso and UIAutomator. 
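-
-Tying the configurations above together, a typical local-device session might
-look like this (a sketch; the serial number is hypothetical and should be taken
-from the `adb devices` output):
-
-```posix-terminal
-adb devices
-
-bazel test //my/test:target --config=local_device \
-    --test_arg=--device_serial_number=emulator-5554
-```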
-
-## Espresso setup
-
-If you write UI tests with [Espresso](https://developer.android.com/training/testing/espresso/)
-(`androidx.test.espresso`), you can use the following snippets to set up your
-Bazel workspace with the list of commonly used Espresso artifacts and their
-dependencies:
-
-```
-androidx.test.espresso:espresso-core
-androidx.test:rules
-androidx.test:runner
-javax.inject:javax.inject
-org.hamcrest:java-hamcrest
-junit:junit
-```
-
-One way to organize these dependencies is to create a `//:test_deps` shared
-library in your `<project root>/BUILD.bazel` file:
-
-```python
-java_library(
-    name = "test_deps",
-    visibility = ["//visibility:public"],
-    exports = [
-        "@maven//:androidx_test_espresso_espresso_core",
-        "@maven//:androidx_test_rules",
-        "@maven//:androidx_test_runner",
-        "@maven//:javax_inject_javax_inject",
-        "@maven//:org_hamcrest_java_hamcrest",
-        "@maven//:junit_junit",
-    ],
-)
-```
-
-Then, add the required dependencies in `<project root>/WORKSPACE`:
-
-```python
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-
-RULES_JVM_EXTERNAL_TAG = "2.8"
-RULES_JVM_EXTERNAL_SHA = "79c9850690d7614ecdb72d68394f994fef7534b292c4867ce5e7dec0aa7bdfad"
-
-http_archive(
-    name = "rules_jvm_external",
-    strip_prefix = "rules_jvm_external-%s" % RULES_JVM_EXTERNAL_TAG,
-    sha256 = RULES_JVM_EXTERNAL_SHA,
-    url = "https://github.com/bazelbuild/rules_jvm_external/archive/%s.zip" % RULES_JVM_EXTERNAL_TAG,
-)
-
-load("@rules_jvm_external//:defs.bzl", "maven_install")
-
-maven_install(
-    artifacts = [
-        "junit:junit:4.12",
-        "javax.inject:javax.inject:1",
-        "org.hamcrest:java-hamcrest:2.0.0.0",
-        "androidx.test.espresso:espresso-core:3.1.1",
-        "androidx.test:rules:aar:1.1.1",
-        "androidx.test:runner:aar:1.1.1",
-    ],
-    repositories = [
-        "https://maven.google.com",
-        "https://repo1.maven.org/maven2",
-    ],
-)
-```
-
-Finally, in your test `android_binary` target, add the `//:test_deps`
-dependency:
-
-```python
-android_binary(
-    name = "my_test_app",
-    instruments = "//path/to:app",
-    deps = [
-        "//:test_deps",
-        # ...
-    ],
-    # ...
-)
-```
-
-## Tips
-
-### Reading test logs
-
-Use `--test_output=errors` to print logs for failing tests, or
-`--test_output=all` to print all test output. If you're looking for an
-individual test log, go to
-`$PROJECT_ROOT/bazel-testlogs/path/to/InstrumentationTestTargetName`.
-
-For example, the test logs for the `BasicSample` canonical project are in
-`bazel-testlogs/ui/espresso/BasicSample/BasicSampleInstrumentationTest`. To
-list them, run:
-
-```posix-terminal
-tree bazel-testlogs/ui/espresso/BasicSample/BasicSampleInstrumentationTest
-```
-This results in the following output:
-
-```none
-
-$ tree bazel-testlogs/ui/espresso/BasicSample/BasicSampleInstrumentationTest
-.
-├── adb.409923.log
-├── broker_logs
-│   ├── aapt_binary.10.ok.txt
-│   ├── aapt_binary.11.ok.txt
-│   ├── adb.12.ok.txt
-│   ├── adb.13.ok.txt
-│   ├── adb.14.ok.txt
-│   ├── adb.15.fail.txt
-│   ├── adb.16.ok.txt
-│   ├── adb.17.fail.txt
-│   ├── adb.18.ok.txt
-│   ├── adb.19.fail.txt
-│   ├── adb.20.ok.txt
-│   ├── adb.21.ok.txt
-│   ├── adb.22.ok.txt
-│   ├── adb.23.ok.txt
-│   ├── adb.24.fail.txt
-│   ├── adb.25.ok.txt
-│   ├── adb.26.fail.txt
-│   ├── adb.27.ok.txt
-│   ├── adb.28.fail.txt
-│   ├── adb.29.ok.txt
-│   ├── adb.2.ok.txt
-│   ├── adb.30.ok.txt
-│   ├── adb.3.ok.txt
-│   ├── adb.4.ok.txt
-│   ├── adb.5.ok.txt
-│   ├── adb.6.ok.txt
-│   ├── adb.7.ok.txt
-│   ├── adb.8.ok.txt
-│   ├── adb.9.ok.txt
-│   ├── android_23_x86.1.ok.txt
-│   └── exec-1
-│       ├── adb-2.txt
-│       ├── emulator-2.txt
-│       └── mksdcard-1.txt
-├── device_logcat
-│   └── logcat1635880625641751077.txt
-├── emulator_itCqtc.log
-├── outputs.zip
-├── pipe.log.txt
-├── telnet_pipe.log.txt
-└── tmpuRh4cy
-    ├── watchdog.err
-    └── watchdog.out
-
-4 directories, 41 files
-```
-
-### Reading emulator logs
-
-The emulator logs for `android_device` targets are stored in the `/tmp/`
-directory with the name `emulator_xxxxx.log`, where `xxxxx` is a
-randomly-generated sequence of characters.
-
-Use this command to find the latest emulator log:
-
-```posix-terminal
-ls -1t /tmp/emulator_*.log | head -n 1
-```
-
-### Testing against multiple API levels
-
-If you would like to test against multiple API levels, you can use a list
-comprehension to create test targets for each API level. For example:
-
-```python
-API_LEVELS = [
-    "19",
-    "20",
-    "21",
-    "22",
-]
-
-[android_instrumentation_test(
-    name = "my_test_%s" % API_LEVEL,
-    test_app = ":my_test_app",
-    target_device = "@android_test_support//tools/android/emulated_devices/generic_phone:android_%s_x86_qemu2" % API_LEVEL,
-) for API_LEVEL in API_LEVELS]
-```
-
-## Known issues
-
-- [Forked adb server processes are not terminated after
-  tests](https://github.com/bazelbuild/bazel/issues/4853)
-- While APK building works on all platforms (Linux, macOS, Windows), testing
-  only works on Linux.
-- Even with `--config=local_device`, users still need to specify
-  `android_instrumentation_test.target_device`.
-- If using a local device or emulator, Bazel does not uninstall the APKs after
-  the test. Clean the packages by running this command:
-
-```posix-terminal
-adb shell pm list packages com.example.android.testing | cut -d ':' -f 2 | tr -d '\r' | xargs -L1 -t adb uninstall
-```
diff --git a/8.4.2/docs/android-ndk.mdx b/8.4.2/docs/android-ndk.mdx
deleted file mode 100644
index b10a566..0000000
--- a/8.4.2/docs/android-ndk.mdx
+++ /dev/null
@@ -1,292 +0,0 @@
----
-title: 'Using the Android Native Development Kit with Bazel'
----
-
-
-
-_If you're new to Bazel, please start with the [Building Android with
-Bazel](/start/android-app) tutorial._
-
-## Overview
-
-Bazel can run in many different build configurations, including several that use
-the Android Native Development Kit (NDK) toolchain. This means that normal
-`cc_library` and `cc_binary` rules can be compiled for Android directly within
-Bazel. Bazel accomplishes this by using the `android_ndk_repository` repository
-rule.
-
-## Prerequisites
-
-Please ensure that you have installed the Android SDK and NDK.
-
-To set up the SDK and NDK, add the following snippet to your `WORKSPACE`:
-
-```python
-android_sdk_repository(
-    name = "androidsdk",  # Required. Name *must* be "androidsdk".
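-    # Note: android_sdk_repository also accepts optional `api_level` and
-    # `build_tools_version` attributes to pin specific versions; the values
-    # below are illustrative assumptions.
-    # api_level = 30,
-    # build_tools_version = "30.0.3",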
- path = "/path/to/sdk", # Optional. Can be omitted if `ANDROID_HOME` environment variable is set. -) - -android_ndk_repository( - name = "androidndk", # Required. Name *must* be "androidndk". - path = "/path/to/ndk", # Optional. Can be omitted if `ANDROID_NDK_HOME` environment variable is set. -) -``` - -For more information about the `android_ndk_repository` rule, see the [Build -Encyclopedia entry](/reference/be/android#android_ndk_repository). - -If you're using a recent version of the Android NDK (r22 and beyond), use the -Starlark implementation of `android_ndk_repository`. -Follow the instructions in -[its README](https://github.com/bazelbuild/rules_android_ndk). - -## Quick start - -To build C++ for Android, simply add `cc_library` dependencies to your -`android_binary` or `android_library` rules. - -For example, given the following `BUILD` file for an Android app: - -```python -# In /app/src/main/BUILD.bazel - -cc_library( - name = "jni_lib", - srcs = ["cpp/native-lib.cpp"], -) - -android_library( - name = "lib", - srcs = ["java/com/example/android/bazel/MainActivity.java"], - resource_files = glob(["res/**/*"]), - custom_package = "com.example.android.bazel", - manifest = "LibraryManifest.xml", - deps = [":jni_lib"], -) - -android_binary( - name = "app", - deps = [":lib"], - manifest = "AndroidManifest.xml", -) -``` - -This `BUILD` file results in the following target graph: - -![Example results](/docs/images/android_ndk.png "Build graph results") - -**Figure 1.** Build graph of Android project with cc_library dependencies. - -To build the app, simply run: - -```posix-terminal -bazel build //app/src/main:app -``` - -The `bazel build` command compiles the Java files, Android resource files, and -`cc_library` rules, and packages everything into an APK: - -```posix-terminal -$ zipinfo -1 bazel-bin/app/src/main/app.apk -nativedeps -lib/armeabi-v7a/libapp.so -classes.dex -AndroidManifest.xml -... -res/... -... -META-INF/CERT.SF -META-INF/CERT.RSA -META-INF/MANIFEST.MF -``` - -Bazel compiles all of the cc_libraries into a single shared object (`.so`) file, -targeted for the `armeabi-v7a` ABI by default. To change this or build for -multiple ABIs at the same time, see the section on [configuring the target -ABI](#configuring-target-abi). - -## Example setup - -This example is available in the [Bazel examples -repository](https://github.com/bazelbuild/examples/tree/master/android/ndk). - -In the `BUILD.bazel` file, three targets are defined with the `android_binary`, -`android_library`, and `cc_library` rules. - -The `android_binary` top-level target builds the APK. - -The `cc_library` target contains a single C++ source file with a JNI function -implementation: - -```c++ -#include -#include - -extern "C" -JNIEXPORT jstring - -JNICALL -Java_com_example_android_bazel_MainActivity_stringFromJNI( - JNIEnv *env, - jobject /* this */) { - std::string hello = "Hello from C++"; - return env->NewStringUTF(hello.c_str()); -} -``` - -The `android_library` target specifies the Java sources, resource files, and the -dependency on a `cc_library` target. For this example, `MainActivity.java` loads -the shared object file `libapp.so`, and defines the method signature for the JNI -function: - -```java -public class MainActivity extends AppCompatActivity { - - static { - System.loadLibrary("app"); - } - - @Override - protected void onCreate(Bundle savedInstanceState) { - // ... 
-    }
-
-    public native String stringFromJNI();
-
-}
-```
-
-Note: The name of the native library is derived from the name of the top
-level `android_binary` target. In this example, it is `app`.
-
-## Configuring the target ABI
-
-To configure the target ABI, use the `--android_platforms` flag as follows:
-
-```posix-terminal
-bazel build //:app --android_platforms=<comma-separated list of platforms>
-```
-
-Just like the `--platforms` flag, the values passed to `--android_platforms` are
-the labels of [`platform`](https://bazel.build/reference/be/platforms-and-toolchains#platform)
-targets, using standard constraint values to describe your device.
-
-For example, for an Android device with a 64-bit ARM processor, you'd define
-your platform like this:
-
-```py
-platform(
-    name = "android_arm64",
-    constraint_values = [
-        "@platforms//os:android",
-        "@platforms//cpu:arm64",
-    ],
-)
-```
-
-Every Android `platform` should use the [`@platforms//os:android`](https://github.com/bazelbuild/platforms/blob/33a3b209f94856193266871b1545054afb90bb28/os/BUILD#L36)
-OS constraint. To migrate the CPU constraint, check this chart:
-
-CPU Value     | Platform
-------------- | ------------------------------------------
-`armeabi-v7a` | `@platforms//cpu:armv7`
-`arm64-v8a`   | `@platforms//cpu:arm64`
-`x86`         | `@platforms//cpu:x86_32`
-`x86_64`      | `@platforms//cpu:x86_64`
-
-And, of course, for a multi-architecture APK, you pass multiple labels, for
-example: `--android_platforms=//:arm64,//:x86_64` (assuming you defined those in
-your top-level `BUILD.bazel` file).
-
-Bazel is unable to select a default Android platform, so one must be defined and
-specified with `--android_platforms`.
-
-Depending on the NDK revision and Android API level, the following ABIs are
-available:
-
-| NDK revision | ABIs                                                        |
-|--------------|-------------------------------------------------------------|
-| 16 and lower | armeabi, armeabi-v7a, arm64-v8a, mips, mips64, x86, x86\_64 |
-| 17 and above | armeabi-v7a, arm64-v8a, x86, x86\_64                        |
-
-See [the NDK docs](https://developer.android.com/ndk/guides/abis.html)
-for more information on these ABIs.
-
-Multi-ABI Fat APKs are not recommended for release builds since they increase
-the size of the APK, but can be useful for development and QA builds.
-
-## Selecting a C++ standard
-
-Use the following flags to build according to a C++ standard:
-
-| C++ Standard | Flag                    |
-|--------------|-------------------------|
-| C++98        | Default, no flag needed |
-| C++11        | `--cxxopt=-std=c++11`   |
-| C++14        | `--cxxopt=-std=c++14`   |
-| C++17        | `--cxxopt=-std=c++17`   |
-
-For example:
-
-```posix-terminal
-bazel build //:app --cxxopt=-std=c++11
-```
-
-Read more about passing compiler and linker flags with `--cxxopt`, `--copt`, and
-`--linkopt` in the [User Manual](/docs/user-manual#cxxopt).
-
-Compiler and linker flags can also be specified as attributes in `cc_library`
-using `copts` and `linkopts`. For example:
-
-```python
-cc_library(
-    name = "jni_lib",
-    srcs = ["cpp/native-lib.cpp"],
-    copts = ["-std=c++11"],
-    linkopts = ["-ldl"],  # link against libdl
-)
-```
-
-## Building a `cc_library` for Android without using `android_binary`
-
-To build a standalone `cc_binary` or `cc_library` for Android without using an
-`android_binary`, use the `--platforms` flag.
-
-For example, assuming you have defined Android platforms in
-`my/platforms/BUILD`:
-
-```posix-terminal
-bazel build //my/cc/jni:target \
-    --platforms=//my/platforms:x86_64
-```
-
-With this approach, the entire build tree is affected.
-
-Note: All of the targets on the command line must be compatible with
-building for Android when specifying these flags, which may make it difficult to
-use [Bazel wild-cards](/run/build#specifying-build-targets) like
-`/...` and `:all`.
-
-These flags can be put into a `bazelrc` config (one for each ABI), in
-`<project>/.bazelrc`:
-
-```
-common:android_x86 --platforms=//my/platforms:x86
-
-common:android_armeabi-v7a --platforms=//my/platforms:armeabi-v7a
-
-# In general
-common:android_<abi> --platforms=//my/platforms:<abi>
-```
-
-Then, to build a `cc_library` for `x86` for example, run:
-
-```posix-terminal
-bazel build //my/cc/jni:target --config=android_x86
-```
-
-In general, use this method for low-level targets (like `cc_library`) or when
-you know exactly what you're building; rely on the automatic configuration
-transitions from `android_binary` for high-level targets where you're expecting
-to build a lot of targets you don't control.
diff --git a/8.4.2/docs/bazel-and-android.mdx b/8.4.2/docs/bazel-and-android.mdx
deleted file mode 100644
index bf3625c..0000000
--- a/8.4.2/docs/bazel-and-android.mdx
+++ /dev/null
@@ -1,45 +0,0 @@
----
-title: 'Android and Bazel'
----
-
-
-
-This page contains resources that help you use Bazel with Android projects. It
-links to a tutorial, build rules, and other information specific to building
-Android projects with Bazel.
-
-## Getting started
-
-The following resources will help you work with Bazel on Android projects:
-
-* [Tutorial: Building an Android app](/start/android-app). This
-  tutorial is a good place to start learning about Bazel commands and concepts,
-  and how to build Android apps with Bazel.
-* [Codelab: Building Android Apps with Bazel](https://developer.android.com/codelabs/bazel-android-intro#0).
-  This codelab explains how to build Android apps with Bazel.
-
-## Features
-
-Bazel has Android rules for building and testing Android apps, integrating with
-the SDK/NDK, and creating emulator images. There are also Bazel plugins for
-Android Studio and IntelliJ.
-
-* [Android rules](/reference/be/android). The Build Encyclopedia describes the rules
-  for building and testing Android apps with Bazel.
-* [Integration with Android Studio](/install/ide). Bazel is compatible with
-  Android Studio using the [Android Studio with Bazel](https://ij.bazel.build/)
-  plugin.
-* [`mobile-install` for Android](/docs/mobile-install). Bazel's `mobile-install`
-  feature provides automated build-and-deploy functionality for building and
-  testing Android apps directly on Android devices and emulators.
-* [Android instrumentation testing](/docs/android-instrumentation-test) on
-  emulators and devices.
-* [Android NDK integration](/docs/android-ndk). Bazel supports compiling to
-  native code through direct NDK integration and the C++ rules.
-* [Android build performance](/docs/android-build-performance). This page
-  provides information on optimizing build performance for Android apps.
-
-## Further reading
-
-* Integrating with dependencies from Google Maven and Maven Central with [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external).
-* Learn [How Android Builds Work in Bazel](https://blog.bazel.build/2018/02/14/how-android-builds-work-in-bazel.html).
diff --git a/8.4.2/docs/bazel-and-apple.mdx b/8.4.2/docs/bazel-and-apple.mdx deleted file mode 100644 index 6e4a06f..0000000 --- a/8.4.2/docs/bazel-and-apple.mdx +++ /dev/null @@ -1,86 +0,0 @@ ---- -title: 'Apple Apps and Bazel' ---- - - - -This page contains resources that help you use Bazel to build macOS and iOS -projects. It links to a tutorial, build rules, and other information specific to -using Bazel to build and test for those platforms. - -## Working with Bazel - -The following resources will help you work with Bazel on macOS and iOS projects: - -* [Tutorial: Building an iOS app](/start/ios-app) -* [Objective-C build rules](/reference/be/objective-c) -* [General Apple rules](https://github.com/bazelbuild/rules_apple) -* [Integration with Xcode](/install/ide) - -## Migrating to Bazel - -If you currently build your macOS and iOS projects with Xcode, follow the steps -in the migration guide to start building them with Bazel: - -* [Migrating from Xcode to Bazel](/migrate/xcode) - -## Apple apps and new rules - -**Note**: Creating new rules is for advanced build and test scenarios. -You do not need it when getting started with Bazel. - -The following modules, configuration fragments, and providers will help you -[extend Bazel's capabilities](/extending/concepts) -when building your macOS and iOS projects: - -* Modules: - - * [`apple_bitcode_mode`](/rules/lib/builtins/apple_bitcode_mode) - * [`apple_common`](/rules/lib/toplevel/apple_common) - * [`apple_platform`](/rules/lib/builtins/apple_platform) - * [`apple_platform_type`](/rules/lib/builtins/apple_platform_type) - * [`apple_toolchain`](/rules/lib/builtins/apple_toolchain) - -* Configuration fragments: - - * [`apple`](/rules/lib/fragments/apple) - -* Providers: - - * [`ObjcProvider`](/rules/lib/providers/ObjcProvider) - * [`XcodeVersionConfig`](/rules/lib/providers/XcodeVersionConfig) - -## Xcode selection - -If your build requires Xcode, Bazel will select an appropriate version based on -the `--xcode_config` and `--xcode_version` flags. The `--xcode_config` consumes -the set of available Xcode versions and sets a default version if -`--xcode_version` is not passed. This default is overridden by the -`--xcode_version` flag, as long as it is set to an Xcode version that is -represented in the `--xcode_config` target. - -If you do not pass `--xcode_config`, Bazel will use the autogenerated -[`XcodeVersionConfig`](/rules/lib/providers/XcodeVersionConfig) that represents the -Xcode versions available on your host machine. The default version is -the newest available Xcode version. This is appropriate for local execution. - -If you are performing remote builds, you should set `--xcode_config` to an -[`xcode_config`](/reference/be/objective-c#xcode_config) -target whose `versions` attribute is a list of remotely available -[`xcode_version`](/reference/be/objective-c#xcode_version) -targets, and whose `default` attribute is one of these -[`xcode_versions`](/reference/be/objective-c#xcode_version). - -If you are using dynamic execution, you should set `--xcode_config` to an -[`xcode_config`](/reference/be/objective-c#xcode_config) -target whose `remote_versions` attribute is an -[`available_xcodes`](/reference/be/workspace#available_xcodes) -target containing the remotely available Xcode versions, and whose -`local_versions` attribute is an -[`available_xcodes`](/reference/be/workspace#available_xcodes) -target containing the locally available Xcode versions. 
For `local_versions`, -you probably want to use the autogenerated -`@local_config_xcode//:host_available_xcodes`. The default Xcode version is the -newest mutually available version, if there is one, otherwise the default of the -`local_versions` target. If you prefer to use the `local_versions` default -as the default, you can pass `--experimental_prefer_mutual_default=false`. diff --git a/8.4.2/docs/bazel-and-cpp.mdx b/8.4.2/docs/bazel-and-cpp.mdx deleted file mode 100644 index 9ade384..0000000 --- a/8.4.2/docs/bazel-and-cpp.mdx +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: 'C++ and Bazel' ---- - - - -This page contains resources that help you use Bazel with C++ projects. It links -to a tutorial, build rules, and other information specific to building C++ -projects with Bazel. - -## Working with Bazel - -The following resources will help you work with Bazel on C++ projects: - -* [Tutorial: Building a C++ project](/start/cpp) -* [C++ common use cases](/tutorials/cpp-use-cases) -* [C/C++ rules](/reference/be/c-cpp) -* Essential Libraries - - [Abseil](https://abseil.io/docs/cpp/quickstart) - - [Boost](https://github.com/nelhage/rules_boost) - - [HTTPS Requests: CPR and libcurl](https://github.com/hedronvision/bazel-make-cc-https-easy) -* [C++ toolchain configuration](/docs/cc-toolchain-config-reference) -* [Tutorial: Configuring C++ toolchains](/tutorials/ccp-toolchain-config) -* [Integrating with C++ rules](/configure/integrate-cpp) - -## Best practices - -In addition to [general Bazel best practices](/configure/best-practices), below are -best practices specific to C++ projects. - -### BUILD files - -Follow the guidelines below when creating your BUILD files: - -* Each `BUILD` file should contain one [`cc_library`](/reference/be/c-cpp#cc_library) - rule target per compilation unit in the directory. - -* You should granularize your C++ libraries as much as - possible to maximize incrementality and parallelize the build. - -* If there is a single source file in `srcs`, name the library the same as - that C++ file's name. This library should contain C++ file(s), any matching - header file(s), and the library's direct dependencies. For example: - - ```python - cc_library( - name = "mylib", - srcs = ["mylib.cc"], - hdrs = ["mylib.h"], - deps = [":lower-level-lib"] - ) - ``` - -* Use one `cc_test` rule target per `cc_library` target in the file. Name the - target `[library-name]_test` and the source file `[library-name]_test.cc`. - For example, a test target for the `mylib` library target shown above would - look like this: - - ```python - cc_test( - name = "mylib_test", - srcs = ["mylib_test.cc"], - deps = [":mylib"] - ) - ``` - -### Include paths - -Follow these guidelines for include paths: - -* Make all include paths relative to the workspace directory. - -* Use quoted includes (`#include "foo/bar/baz.h"`) for non-system headers, not - angle-brackets (`#include `). - -* Avoid using UNIX directory shortcuts, such as `.` (current directory) or `..` - (parent directory). - -* For legacy or `third_party` code that requires includes pointing outside the - project repository, such as external repository includes requiring a prefix, - use the [`include_prefix`](/reference/be/c-cpp#cc_library.include_prefix) and - [`strip_include_prefix`](/reference/be/c-cpp#cc_library.strip_include_prefix) - arguments on the `cc_library` rule target. - -### Toolchain features - -The following optional [features](/docs/cc-toolchain-config-reference#features) -can improve the hygiene of a C++ project. 
They can be enabled using the
-`--features` command-line flag or the `features` attribute of
-[`repo`](/external/overview#repo.bazel),
-[`package`](/reference/be/functions#package) or `cc_*` rules:
-
-* The `parse_headers` feature makes it so that the C++ compiler is used to parse
-  (but not compile) all header files in the built targets and their dependencies
-  when using the
-  [`--process_headers_in_dependencies`](/reference/command-line-reference#flag--process_headers_in_dependencies)
-  flag. This can help catch issues in header-only libraries and ensure that
-  headers are self-contained and independent of the order in which they are
-  included.
-* The `layering_check` feature enforces that targets only include headers
-  provided by their direct dependencies. The default toolchain supports this
-  feature on Linux with `clang` as the compiler.
diff --git a/8.4.2/docs/bazel-and-java.mdx b/8.4.2/docs/bazel-and-java.mdx
deleted file mode 100644
index e9476aa..0000000
--- a/8.4.2/docs/bazel-and-java.mdx
+++ /dev/null
@@ -1,343 +0,0 @@
----
-title: 'Java and Bazel'
----
-
-
-
-This page contains resources that help you use Bazel with Java projects. It
-links to a tutorial, build rules, and other information specific to building
-Java projects with Bazel.
-
-## Working with Bazel
-
-The following resources will help you work with Bazel on Java projects:
-
-* [Tutorial: Building a Java Project](/start/java)
-* [Java rules](/reference/be/java)
-
-## Migrating to Bazel
-
-If you currently build your Java projects with Maven, follow the steps in the
-migration guide to start building your Maven projects with Bazel:
-
-* [Migrating from Maven to Bazel](/migrate/maven)
-
-## Java versions
-
-There are two relevant versions of Java that are set with configuration flags:
-
-* the version of the source files in the repository
-* the version of the Java runtime that is used to execute the code and to test
-  it
-
-### Configuring the version of the source code in your repository
-
-Without additional configuration, Bazel assumes all Java source files in the
-repository are written in a single Java version. To specify the version of the
-sources in the repository, add `build --java_language_version={ver}` to the
-`.bazelrc` file, where `{ver}` is, for example, `11`. Bazel repository owners
-should set this flag so that Bazel and its users can reference the source code's
-Java version number. For more details, see
-[Java language version flag](/docs/user-manual#java-language-version).
-
-### Configuring the JVM used to execute and test the code
-
-Bazel uses one JDK for compilation and another JVM to execute and test the code.
-
-By default Bazel compiles the code using a JDK it downloads, and it executes and
-tests the code with the JVM installed on the local machine. Bazel searches for
-the JVM using `JAVA_HOME` or the `PATH`.
-
-The resulting binaries are compatible with the locally installed JVM and its
-system libraries, which means the resulting binaries depend on what is installed
-on the machine.
-
-To configure the JVM used for execution and testing, use the
-`--java_runtime_version` flag. The default value is `local_jdk`.
-
-### Hermetic testing and compilation
-
-To create a hermetic compile, you can use the command line flag
-`--java_runtime_version=remotejdk_11`. The code is compiled for, executed, and
-tested on the JVM downloaded from a remote repository. For more details, see
-[Java runtime version flag](/docs/user-manual#java_runtime_version).
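-
-For example, a hermetic run can also be requested entirely from the command
-line (a sketch; the test target is hypothetical):
-
-```posix-terminal
-bazel test //java/my:test --java_language_version=11 --java_runtime_version=remotejdk_11
-```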
-
-### Configuring compilation and execution of build tools in Java
-
-There is a second pair of JDK and JVM used to build and execute tools, which are
-used in the build process, but are not in the build results. That JDK and JVM
-are controlled using `--tool_java_language_version` and
-`--tool_java_runtime_version`. Default values are `11` and `remotejdk_11`,
-respectively.
-
-#### Compiling using locally installed JDK
-
-By default Bazel compiles using a remote JDK, because it overrides the JDK's
-internals. Compilation toolchains using the locally installed JDK are
-configured, but not used.
-
-To compile using the locally installed JDK, that is, to use the compilation
-toolchains for the local JDK, add the flag `--extra_toolchains=@local_jdk//:all`.
-However, mind that this may not work with JDKs from arbitrary vendors.
-
-For more details, see
-[configuring Java toolchains](#config-java-toolchains).
-
-## Best practices
-
-In addition to [general Bazel best practices](/configure/best-practices), below are
-best practices specific to Java projects.
-
-### Directory structure
-
-Prefer Maven's standard directory layout (sources under `src/main/java`, tests
-under `src/test/java`).
-
-### BUILD files
-
-Follow these guidelines when creating your `BUILD` files:
-
-* Use one `BUILD` file per directory containing Java sources, because this
-  improves build performance.
-
-* Every `BUILD` file should contain one `java_library` rule that looks like
-  this:
-
-  ```python
-  java_library(
-      name = "directory-name",
-      srcs = glob(["*.java"]),
-      deps = [...],
-  )
-  ```
-
-* The name of the library should be the name of the directory containing the
-  `BUILD` file. This makes the label of the library shorter, that is, use
-  `"//package"` instead of `"//package:package"`.
-
-* The sources should be a non-recursive [`glob`](/reference/be/functions#glob) of
-  all Java files in the directory.
-
-* Tests should be in a matching directory under `src/test` and depend on this
-  library.
-
-## Creating new rules for advanced Java builds
-
-**Note**: Creating new rules is for advanced build and test scenarios. You do
-not need it when getting started with Bazel.
-
-The following modules, configuration fragments, and providers will help you
-[extend Bazel's capabilities](/extending/concepts) when building your Java
-projects:
-
-* Main Java module: [`java_common`](/rules/lib/toplevel/java_common)
-* Main Java provider: [`JavaInfo`](/rules/lib/providers/JavaInfo)
-* Configuration fragment: [`java`](/rules/lib/fragments/java)
-* Other modules:
-
-  * [`java_annotation_processing`](/rules/lib/builtins/java_annotation_processing)
-  * [`java_compilation_info`](/rules/lib/providers/java_compilation_info)
-  * [`java_output_jars`](/rules/lib/providers/java_output_jars)
-  * [`JavaRuntimeInfo`](/rules/lib/providers/JavaRuntimeInfo)
-  * [`JavaToolchainInfo`](/rules/lib/providers/JavaToolchainInfo)
-
-## Configuring the Java toolchains
-
-Bazel uses two types of Java toolchains:
-- execution, used to execute and test Java binaries, controlled with the
-  `--java_runtime_version` flag
-- compilation, used to compile Java sources, controlled with the
-  `--java_language_version` flag
-
-### Configuring additional execution toolchains
-
-An execution toolchain is the JVM, either local or from a repository, with some
-additional information about its version, operating system, and CPU
-architecture.
-
-Java execution toolchains may be added using the `local_java_repository` or
-`remote_java_repository` repo rules in a module extension.
Adding the rule makes
-the JVM available using a flag. When multiple definitions for the same operating
-system and CPU architecture are given, the first one is used.
-
-Example configuration of a local JVM:
-
-```python
-load("@rules_java//toolchains:local_java_repository.bzl", "local_java_repository")
-
-local_java_repository(
-  name = "additionaljdk",          # Can be used with --java_runtime_version=additionaljdk, --java_runtime_version=11 or --java_runtime_version=additionaljdk_11
-  version = 11,                    # Optional, if not set it is autodetected
-  java_home = "/usr/lib/jdk-15/",  # Path to directory containing bin/java
-)
-```
-
-Example configuration of a remote JVM:
-
-```python
-load("@rules_java//toolchains:remote_java_repository.bzl", "remote_java_repository")
-
-remote_java_repository(
-  name = "openjdk_canary_linux_arm",
-  prefix = "openjdk_canary",  # Can be used with --java_runtime_version=openjdk_canary_11
-  version = "11",             # or --java_runtime_version=11
-  target_compatible_with = [  # Specifies constraints this JVM is compatible with
-    "@platforms//cpu:arm",
-    "@platforms//os:linux",
-  ],
-  urls = ...,                 # Other parameters are from http_repository rule.
-  sha256 = ...,
-  strip_prefix = ...
-)
-```
-
-### Configuring additional compilation toolchains
-
-A compilation toolchain is composed of the JDK and multiple tools that Bazel
-uses during compilation and that provide additional features, such as Error
-Prone, strict Java dependencies, header compilation, Android desugaring,
-coverage instrumentation, and genclass handling for IDEs.
-
-JavaBuilder is a Bazel-bundled tool that executes compilation and provides the
-aforementioned features. Actual compilation is executed using the internal
-compiler of the JDK. The JDK used for compilation is specified by the
-`java_runtime` attribute of the toolchain.
-
-Bazel overrides some JDK internals. For JDK versions greater than 9, the
-`java.compiler` and `jdk.compiler` modules are patched using the JDK's
-`--patch_module` flag. For JDK version 8, the Java compiler is patched using the
-`-Xbootclasspath` flag.
-
-VanillaJavaBuilder is a second implementation of JavaBuilder,
-which does not modify the JDK's internal compiler and does not have any of the
-additional features. VanillaJavaBuilder is not used by any of the built-in
-toolchains.
-
-In addition to JavaBuilder, Bazel uses several other tools during compilation.
-
-The `ijar` tool processes `jar` files to remove everything except call
-signatures. The resulting jars are called header jars. They are used to improve
-compilation incrementality by only recompiling downstream dependents when the
-body of a function changes.
-
-The `singlejar` tool packs together multiple `jar` files into a single one.
-
-The `genclass` tool post-processes the output of a Java compilation, and produces
-a `jar` containing only the class files for sources that were generated by
-annotation processors.
-
-The `JacocoRunner` tool runs Jacoco over instrumented files and outputs results in
-LCOV format.
-
-The `TestRunner` tool executes JUnit 4 tests in a controlled environment.
-
-You can reconfigure the compilation by adding the `default_java_toolchain` macro
-to a `BUILD` file and registering it either by adding a `register_toolchains`
-call to the `MODULE.bazel` file or by using the
-[`--extra_toolchains`](/docs/user-manual#extra-toolchains) flag.
-
-The toolchain is only used when the `source_version` attribute matches the
-value specified by the `--java_language_version` flag.
-
-Example toolchain configuration:
-
-```python
-load(
-  "@rules_java//toolchains:default_java_toolchain.bzl",
-  "default_java_toolchain", "DEFAULT_TOOLCHAIN_CONFIGURATION", "BASE_JDK9_JVM_OPTS", "DEFAULT_JAVACOPTS"
-)
-
-default_java_toolchain(
-  name = "repository_default_toolchain",
-  configuration = DEFAULT_TOOLCHAIN_CONFIGURATION,        # One of predefined configurations
-  # Other parameters are from java_toolchain rule:
-  java_runtime = "@rules_java//toolchains:remote_jdk11",  # JDK to use for compilation and toolchain's tools execution
-  jvm_opts = BASE_JDK9_JVM_OPTS + ["--enable_preview"],   # Additional JDK options
-  javacopts = DEFAULT_JAVACOPTS + ["--enable_preview"],   # Additional javac options
-  source_version = "9",
-)
-```
-
-It can be used via `--extra_toolchains=//:repository_default_toolchain_definition`
-or by adding `register_toolchains("//:repository_default_toolchain_definition")`
-to the workspace.
-
-Predefined configurations:
-
-- `DEFAULT_TOOLCHAIN_CONFIGURATION`: all features, supports JDK versions >= 9
-- `VANILLA_TOOLCHAIN_CONFIGURATION`: no additional features, supports JDKs of
-  arbitrary vendors.
-- `PREBUILT_TOOLCHAIN_CONFIGURATION`: same as default, but only use prebuilt
-  tools (`ijar`, `singlejar`)
-- `NONPREBUILT_TOOLCHAIN_CONFIGURATION`: same as default, but all tools are
-  built from sources (this may be useful on an operating system with a different
-  libc)
-
-#### Configuring JVM and Java compiler flags
-
-You may configure JVM and javac flags either with flags or with
-`default_java_toolchain` attributes.
-
-The relevant flags are `--jvmopt`, `--host_jvmopt`, `--javacopt`, and
-`--host_javacopt`.
-
-The relevant `default_java_toolchain` attributes are `javacopts`, `jvm_opts`,
-`javabuilder_jvm_opts`, and `turbine_jvm_opts`.
-
-#### Package specific Java compiler flags configuration
-
-You can configure different Java compiler flags for specific source
-files using the `package_configuration` attribute of `default_java_toolchain`.
-Please refer to the example below.
-
-```python
-load("@rules_java//toolchains:default_java_toolchain.bzl", "default_java_toolchain")
-
-# This is a convenience macro that inherits values from Bazel's default java_toolchain
-default_java_toolchain(
-  name = "toolchain",
-  package_configuration = [
-    ":error_prone",
-  ],
-  visibility = ["//visibility:public"],
-)
-
-# This associates a set of javac flags with a set of packages
-java_package_configuration(
-  name = "error_prone",
-  javacopts = [
-    "-Xep:MissingOverride:ERROR",
-  ],
-  packages = ["error_prone_packages"],
-)
-
-# This is a regular package_group, which is used to specify a set of packages to apply flags to
-package_group(
-  name = "error_prone_packages",
-  packages = [
-    "//foo/...",
-    "-//foo/bar/...",  # this is an exclusion
-  ],
-)
-```
-
-#### Multiple versions of Java source code in a single repository
-
-Bazel only supports compiling a single version of Java sources in a build.
-This means that when building a Java test or an application, all
-dependencies are built against the same Java version.
-
-However, separate builds may be executed using different flags.
- -To make the task of using different flags easier, sets of flags for a specific -version may be grouped with `.bazelrc` configs: - -```python -build:java8 --java_language_version=8 -build:java8 --java_runtime_version=local_jdk_8 -build:java11 --java_language_version=11 -build:java11 --java_runtime_version=remotejdk_11 -``` - -These configs can be used with the `--config` flag, for example -`bazel test --config=java11 //:java11_test`. diff --git a/8.4.2/docs/bazel-and-javascript.mdx b/8.4.2/docs/bazel-and-javascript.mdx deleted file mode 100644 index 63d8018..0000000 --- a/8.4.2/docs/bazel-and-javascript.mdx +++ /dev/null @@ -1,24 +0,0 @@ ---- -title: 'JavaScript and Bazel' ---- - - - -This page contains resources that help you use Bazel with JavaScript projects. -It links to build rules and other information specific to building JavaScript -with Bazel. - -The following resources will help you work with Bazel on JavaScript projects: - -* [NodeJS toolchain](https://github.com/bazelbuild/rules_nodejs) -* [rules_js](https://github.com/aspect-build/rules_js) - Bazel rules for building JavaScript programs -* [rules_esbuild](https://github.com/aspect-build/rules_esbuild) - Bazel rules for [esbuild](https://esbuild.github.io) JS bundler -* [rules_terser](https://github.com/aspect-build/rules_terser) - Bazel rules for [Terser](https://terser.org) - a JavaScript minifier -* [rules_swc](https://github.com/aspect-build/rules_swc) - Bazel rules for [swc](https://swc.rs) -* [rules_ts](https://github.com/aspect-build/rules_ts) - Bazel rules for [TypeScript](http://typescriptlang.org) -* [rules_webpack](https://github.com/aspect-build/rules_webpack) - Bazel rules for [Webpack](https://webpack.js.org) -* [rules_rollup](https://github.com/aspect-build/rules_rollup) - Bazel rules for [Rollup](https://rollupjs.org) - a JavaScript bundler -* [rules_jest](https://github.com/aspect-build/rules_jest) - Bazel rules to run tests using [Jest](https://jestjs.io) -* [rules_jasmine](https://github.com/aspect-build/rules_jasmine) - Bazel rules to run tests using [Jasmine](https://jasmine.github.io/) -* [rules_cypress](https://github.com/aspect-build/rules_cypress) - Bazel rules to run tests using [Cypress](https://cypress.io) -* [rules_deno](https://github.com/aspect-build/rules_deno) - Bazel rules for [Deno](http://deno.land) diff --git a/8.4.2/docs/configurable-attributes.mdx b/8.4.2/docs/configurable-attributes.mdx deleted file mode 100644 index 3515852..0000000 --- a/8.4.2/docs/configurable-attributes.mdx +++ /dev/null @@ -1,1099 +0,0 @@ ---- -title: 'Configurable Build Attributes' ---- - - - -**_Configurable attributes_**, commonly known as [`select()`]( -/reference/be/functions#select), is a Bazel feature that lets users toggle the values -of build rule attributes at the command line. - -This can be used, for example, for a multiplatform library that automatically -chooses the appropriate implementation for the architecture, or for a -feature-configurable binary that can be customized at build time.
- -## Example - -```python -# myapp/BUILD - -cc_binary( - name = "mybinary", - srcs = ["main.cc"], - deps = select({ - ":arm_build": [":arm_lib"], - ":x86_debug_build": [":x86_dev_lib"], - "//conditions:default": [":generic_lib"], - }), -) - -config_setting( - name = "arm_build", - values = {"cpu": "arm"}, -) - -config_setting( - name = "x86_debug_build", - values = { - "cpu": "x86", - "compilation_mode": "dbg", - }, -) -``` - -This declares a `cc_binary` that "chooses" its deps based on the flags at the -command line. Specifically, `deps` becomes:
-
-| Command | `deps =` |
-| ------- | -------- |
-| `bazel build //myapp:mybinary --cpu=arm` | `[":arm_lib"]` |
-| `bazel build //myapp:mybinary -c dbg --cpu=x86` | `[":x86_dev_lib"]` |
-| `bazel build //myapp:mybinary --cpu=ppc` | `[":generic_lib"]` |
-| `bazel build //myapp:mybinary -c dbg --cpu=ppc` | `[":generic_lib"]` |
- -`select()` serves as a placeholder for a value that will be chosen based on -*configuration conditions*, which are labels referencing [`config_setting`](/reference/be/general#config_setting) -targets. By using `select()` in a configurable attribute, the attribute -effectively adopts different values when different conditions hold. - -Matches must be unambiguous: if multiple conditions match, then either -* They all resolve to the same value. For example, when running on Linux x86, - `{"@platforms//os:linux": "Hello", "@platforms//cpu:x86_64": "Hello"}` is unambiguous because both branches resolve to "Hello". -* One's `values` is a strict superset of all others'. For example, `values = {"cpu": "x86", "compilation_mode": "dbg"}` - is an unambiguous specialization of `values = {"cpu": "x86"}`. - -The built-in condition [`//conditions:default`](#default-condition) automatically matches when -nothing else does. - -While this example uses `deps`, `select()` works just as well on `srcs`, -`resources`, `cmd`, and most other attributes. Only a small number of attributes -are *non-configurable*, and these are clearly annotated. For example, -`config_setting`'s own -[`values`](/reference/be/general#config_setting.values) attribute is non-configurable. - -## `select()` and dependencies - -Certain attributes change the build parameters for all transitive dependencies -under a target. For example, `genrule`'s `tools` changes `--cpu` to the CPU of -the machine running Bazel (which, thanks to cross-compilation, may be different -from the CPU the target is built for). This is known as a -[configuration transition](/reference/glossary#transition). - -Given - -```python -# myapp/BUILD - -config_setting( - name = "arm_cpu", - values = {"cpu": "arm"}, -) - -config_setting( - name = "x86_cpu", - values = {"cpu": "x86"}, -) - -genrule( - name = "my_genrule", - srcs = select({ - ":arm_cpu": ["g_arm.src"], - ":x86_cpu": ["g_x86.src"], - }), - tools = select({ - ":arm_cpu": [":tool1"], - ":x86_cpu": [":tool2"], - }), -) - -cc_binary( - name = "tool1", - srcs = select({ - ":arm_cpu": ["armtool.cc"], - ":x86_cpu": ["x86tool.cc"], - }), -) -``` - -running - -```sh -$ bazel build //myapp:my_genrule --cpu=arm -``` - -on an `x86` developer machine binds the build to `g_arm.src`, `tool1`, and -`x86tool.cc`. Both of the `select`s attached to `my_genrule` use `my_genrule`'s -build parameters, which include `--cpu=arm`. The `tools` attribute changes -`--cpu` to `x86` for `tool1` and its transitive dependencies. The `select` on -`tool1` uses `tool1`'s build parameters, which include `--cpu=x86`. - -## Configuration conditions - -Each key in a configurable attribute is a label reference to a -[`config_setting`](/reference/be/general#config_setting) or -[`constraint_value`](/reference/be/platforms-and-toolchains#constraint_value). - -`config_setting` is just a collection of -expected command line flag settings. By encapsulating these in a target, it's -easy to maintain "standard" conditions users can reference from multiple places. - -`constraint_value` provides support for [multi-platform behavior](#platforms). - -### Built-in flags - -Flags like `--cpu` are built into Bazel: the build tool natively understands -them for all builds in all projects.
These are specified with -[`config_setting`](/reference/be/general#config_setting)'s -[`values`](/reference/be/general#config_setting.values) attribute: - -```python -config_setting( - name = "meaningful_condition_name", - values = { - "flag1": "value1", - "flag2": "value2", - ... - }, -) -``` - -`flagN` is a flag name (without `--`, so `"cpu"` instead of `"--cpu"`). `valueN` -is the expected value for that flag. `:meaningful_condition_name` matches if -*every* entry in `values` matches. Order is irrelevant. - -`valueN` is parsed as if it was set on the command line. This means: - -* `values = { "compilation_mode": "opt" }` matches `bazel build -c opt` -* `values = { "force_pic": "true" }` matches `bazel build --force_pic=1` -* `values = { "force_pic": "0" }` matches `bazel build --noforce_pic` - -`config_setting` only supports flags that affect target behavior. For example, -[`--show_progress`](/docs/user-manual#show-progress) isn't allowed because -it only affects how Bazel reports progress to the user. Targets can't use that -flag to construct their results. The exact set of supported flags isn't -documented. In practice, most flags that "make sense" work. - -### Custom flags - -You can model your own project-specific flags with -[Starlark build settings][BuildSettings]. Unlike built-in flags, these are -defined as build targets, so Bazel references them with target labels. - -These are triggered with [`config_setting`](/reference/be/general#config_setting)'s -[`flag_values`](/reference/be/general#config_setting.flag_values) -attribute: - -```python -config_setting( - name = "meaningful_condition_name", - flag_values = { - "//myflags:flag1": "value1", - "//myflags:flag2": "value2", - ... - }, -) -``` - -Behavior is the same as for [built-in flags](#built-in-flags). See [here](https://github.com/bazelbuild/examples/tree/HEAD/configurations/select_on_build_setting) -for a working example. - -[`--define`](/reference/command-line-reference#flag--define) -is an alternative legacy syntax for custom flags (for example -`--define foo=bar`). This can be expressed either in the -[values](/reference/be/general#config_setting.values) attribute -(`values = {"define": "foo=bar"}`) or the -[define_values](/reference/be/general#config_setting.define_values) attribute -(`define_values = {"foo": "bar"}`). `--define` is only supported for backwards -compatibility. Prefer Starlark build settings whenever possible. - -`values`, `flag_values`, and `define_values` evaluate independently. The -`config_setting` matches if all values across all of them match. - -## The default condition - -The built-in condition `//conditions:default` matches when no other condition -matches. - -Because of the "exactly one match" rule, a configurable attribute with no match -and no default condition emits a `"no matching conditions"` error. This can -protect against silent failures from unexpected settings: - -```python -# myapp/BUILD - -config_setting( - name = "x86_cpu", - values = {"cpu": "x86"}, -) - -cc_library( - name = "x86_only_lib", - srcs = select({ - ":x86_cpu": ["lib.cc"], - }), -) -``` - -```sh -$ bazel build //myapp:x86_only_lib --cpu=arm -ERROR: Configurable attribute "srcs" doesn't match this configuration (would -a default condition help?). -Conditions checked: - //myapp:x86_cpu -``` - -For even clearer errors, you can set custom messages with `select()`'s -[`no_match_error`](#custom-error-messages) attribute. 
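If a fallback value makes sense for the target, adding a default branch avoids the error entirely. A minimal sketch of the example above (`lib_generic.cc` is a hypothetical portable implementation):

```python
cc_library(
    name = "x86_only_lib",
    srcs = select({
        ":x86_cpu": ["lib.cc"],
        # Fall back to a portable implementation on non-x86 CPUs.
        "//conditions:default": ["lib_generic.cc"],
    }),
)
```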
- -## Platforms - -While the ability to specify multiple flags on the command line provides -flexibility, it can also be burdensome to individually set each one every time -you want to build a target. - [Platforms](/extending/platforms) -let you consolidate these into simple bundles. - -```python -# myapp/BUILD - -sh_binary( - name = "my_rocks", - srcs = select({ - ":basalt": ["pyroxene.sh"], - ":marble": ["calcite.sh"], - "//conditions:default": ["feldspar.sh"], - }), -) - -config_setting( - name = "basalt", - constraint_values = [ - ":black", - ":igneous", - ], -) - -config_setting( - name = "marble", - constraint_values = [ - ":white", - ":metamorphic", - ], -) - -# constraint_setting acts as an enum type, and constraint_value as an enum value. -constraint_setting(name = "color") -constraint_value(name = "black", constraint_setting = "color") -constraint_value(name = "white", constraint_setting = "color") -constraint_setting(name = "texture") -constraint_value(name = "smooth", constraint_setting = "texture") -constraint_setting(name = "type") -constraint_value(name = "igneous", constraint_setting = "type") -constraint_value(name = "metamorphic", constraint_setting = "type") - -platform( - name = "basalt_platform", - constraint_values = [ - ":black", - ":igneous", - ], -) - -platform( - name = "marble_platform", - constraint_values = [ - ":white", - ":smooth", - ":metamorphic", - ], -) -``` - -The platform can be specified on the command line. It activates the -`config_setting`s that contain a subset of the platform's `constraint_values`, -allowing those `config_setting`s to match in `select()` expressions. - -For example, in order to set the `srcs` attribute of `my_rocks` to `calcite.sh`, -you can simply run - -```sh -bazel build //myapp:my_rocks --platforms=//myapp:marble_platform -``` - -Without platforms, this might look something like - -```sh -bazel build //myapp:my_rocks --define color=white --define texture=smooth --define type=metamorphic -``` - -`select()` can also directly read `constraint_value`s: - -```python -constraint_setting(name = "type") -constraint_value(name = "igneous", constraint_setting = "type") -constraint_value(name = "metamorphic", constraint_setting = "type") -sh_binary( - name = "my_rocks", - srcs = select({ - ":igneous": ["igneous.sh"], - ":metamorphic": ["metamorphic.sh"], - }), -) -``` - -This saves the need for boilerplate `config_setting`s when you only need to -check against single values. - -Platforms are still under development. See the -[documentation](/concepts/platforms) for details. - -## Combining `select()`s - -`select` can appear multiple times in the same attribute: - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"] + - select({ - ":armeabi_mode": ["armeabi_src.sh"], - ":x86_mode": ["x86_src.sh"], - }) + - select({ - ":opt_mode": ["opt_extras.sh"], - ":dbg_mode": ["dbg_extras.sh"], - }), -) -``` - -Note: Some restrictions apply to what can be combined in a `select`'s values: - - Duplicate labels can appear in different paths of the same `select`. - - Duplicate labels can *not* appear within the same path of a `select`. - - Duplicate labels can *not* appear across multiple combined `select`s (no matter the path). - -`select` cannot appear inside another `select`. If you need to nest `select`s -and your attribute takes other targets as values, use an intermediate target: - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":armeabi_mode": [":armeabi_lib"], - ...
- }), -) - -sh_library( - name = "armeabi_lib", - srcs = select({ - ":opt_mode": ["armeabi_with_opt.sh"], - ... - }), -) -``` - -If you need a `select` to match when multiple conditions match, consider [AND -chaining](#and-chaining). - -## OR chaining - -Consider the following: - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1": [":standard_lib"], - ":config2": [":standard_lib"], - ":config3": [":standard_lib"], - ":config4": [":special_lib"], - }), -) -``` - -Most conditions evaluate to the same dep. But this syntax is hard to read and -maintain. It would be nice to not have to repeat `[":standard_lib"]` multiple -times. - -One option is to predefine the value as a BUILD variable: - -```python -STANDARD_DEP = [":standard_lib"] - -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1": STANDARD_DEP, - ":config2": STANDARD_DEP, - ":config3": STANDARD_DEP, - ":config4": [":special_lib"], - }), -) -``` - -This makes it easier to manage the dependency. But it still causes unnecessary -duplication. - -For more direct support, use one of the following: - -### `selects.with_or` - -The -[with_or](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectswith_or) -macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md) -module supports `OR`ing conditions directly inside a `select`: - -```python -load("@bazel_skylib//lib:selects.bzl", "selects") -``` - -```python -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = selects.with_or({ - (":config1", ":config2", ":config3"): [":standard_lib"], - ":config4": [":special_lib"], - }), -) -``` - -### `selects.config_setting_group` - - -The -[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group) -macro in [Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md) -module supports `OR`ing multiple `config_setting`s: - -```python -load("@bazel_skylib//lib:selects.bzl", "selects") -``` - - -```python -config_setting( - name = "config1", - values = {"cpu": "arm"}, -) -config_setting( - name = "config2", - values = {"compilation_mode": "dbg"}, -) -selects.config_setting_group( - name = "config1_or_2", - match_any = [":config1", ":config2"], -) -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1_or_2": [":standard_lib"], - "//conditions:default": [":other_lib"], - }), -) -``` - -Unlike `selects.with_or`, different targets can share `:config1_or_2` across -different attributes. - -It's an error for multiple conditions to match unless one is an unambiguous -"specialization" of the others or they all resolve to the same value. See [here](#configurable-build-example) for details. 
- -## AND chaining - -If you need a `select` branch to match when multiple conditions match, use the -[Skylib](https://github.com/bazelbuild/bazel-skylib) macro -[config_setting_group](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectsconfig_setting_group): - -```python -config_setting( - name = "config1", - values = {"cpu": "arm"}, -) -config_setting( - name = "config2", - values = {"compilation_mode": "dbg"}, -) -selects.config_setting_group( - name = "config1_and_2", - match_all = [":config1", ":config2"], -) -sh_binary( - name = "my_target", - srcs = ["always_include.sh"], - deps = select({ - ":config1_and_2": [":standard_lib"], - "//conditions:default": [":other_lib"], - }), -) -``` - -Unlike OR chaining, existing `config_setting`s can't be directly `AND`ed -inside a `select`. You have to explicitly wrap them in a `config_setting_group`. - -## Custom error messages - -By default, when no condition matches, the target the `select()` is attached to -fails with the error: - -```sh -ERROR: Configurable attribute "deps" doesn't match this configuration (would -a default condition help?). -Conditions checked: - //tools/cc_target_os:darwin - //tools/cc_target_os:android -``` - -This can be customized with the [`no_match_error`](/reference/be/functions#select) -attribute: - -```python -cc_library( - name = "my_lib", - deps = select( - { - "//tools/cc_target_os:android": [":android_deps"], - "//tools/cc_target_os:windows": [":windows_deps"], - }, - no_match_error = "Please build with an Android or Windows toolchain", - ), -) -``` - -```sh -$ bazel build //myapp:my_lib -ERROR: Configurable attribute "deps" doesn't match this configuration: Please -build with an Android or Windows toolchain -``` - -## Rules compatibility - -Rule implementations receive the *resolved values* of configurable -attributes. For example, given: - -```python -# myapp/BUILD - -some_rule( - name = "my_target", - some_attr = select({ - ":foo_mode": [":foo"], - ":bar_mode": [":bar"], - }), -) -``` - -```sh -$ bazel build //myapp/my_target --define mode=foo -``` - -Rule implementation code sees `ctx.attr.some_attr` as `[":foo"]`. - -Macros can accept `select()` clauses and pass them through to native -rules. But *they cannot directly manipulate them*. For example, there's no way -for a macro to convert - -```python -select({"foo": "val"}, ...) -``` - -to - -```python -select({"foo": "val_with_suffix"}, ...) -``` - -This is for two reasons. - -First, macros that need to know which path a `select` will choose *cannot work* -because macros are evaluated in Bazel's [loading phase](/run/build#loading), -which occurs before flag values are known. -This is a core Bazel design restriction that's unlikely to change any time soon. - -Second, macros that just need to iterate over *all* `select` paths, while -technically feasible, lack a coherent UI. Further design is necessary to change -this. - -## Bazel query and cquery - -Bazel [`query`](/query/guide) operates over Bazel's -[loading phase](/reference/glossary#loading-phase). -This means it doesn't know what command line flags a target uses since those -flags aren't evaluated until later in the build (in the -[analysis phase](/reference/glossary#analysis-phase)). -So it can't determine which `select()` branches are chosen. - -Bazel [`cquery`](/query/cquery) operates after Bazel's analysis phase, so it has -all this information and can accurately resolve `select()`s. 
- -Consider: - -```python -load("@bazel_skylib//rules:common_settings.bzl", "string_flag") -``` -```python -# myapp/BUILD - -string_flag( - name = "dog_type", - build_setting_default = "cat" -) - -cc_library( - name = "my_lib", - deps = select({ - ":long": [":foo_dep"], - ":short": [":bar_dep"], - }), -) - -config_setting( - name = "long", - flag_values = {":dog_type": "dachshund"}, -) - -config_setting( - name = "short", - flag_values = {":dog_type": "pug"}, -) -``` - -`query` overapproximates `:my_lib`'s dependencies: - -```sh -$ bazel query 'deps(//myapp:my_lib)' -//myapp:my_lib -//myapp:foo_dep -//myapp:bar_dep -``` - -while `cquery` shows its exact dependencies: - -```sh -$ bazel cquery 'deps(//myapp:my_lib)' --//myapp:dog_type=pug -//myapp:my_lib -//myapp:bar_dep -``` - -## FAQ - -### Why doesn't select() work in macros? - -select() *does* work in rules! See [Rules compatibility](#rules-compatibility) for -details. - -The key issue this question usually means is that select() doesn't work in -*macros*. These are different than *rules*. See the -documentation on [rules](/extending/rules) and [macros](/extending/macros) -to understand the difference. -Here's an end-to-end example: - -Define a rule and macro: - -```python -# myapp/defs.bzl - -# Rule implementation: when an attribute is read, all select()s have already -# been resolved. So it looks like a plain old attribute just like any other. -def _impl(ctx): - name = ctx.attr.name - allcaps = ctx.attr.my_config_string.upper() # This works fine on all values. - print("My name is " + name + " with custom message: " + allcaps) - -# Rule declaration: -my_custom_bazel_rule = rule( - implementation = _impl, - attrs = {"my_config_string": attr.string()}, -) - -# Macro declaration: -def my_custom_bazel_macro(name, my_config_string): - allcaps = my_config_string.upper() # This line won't work with select(s). - print("My name is " + name + " with custom message: " + allcaps) -``` - -Instantiate the rule and macro: - -```python -# myapp/BUILD - -load("//myapp:defs.bzl", "my_custom_bazel_rule") -load("//myapp:defs.bzl", "my_custom_bazel_macro") - -my_custom_bazel_rule( - name = "happy_rule", - my_config_string = select({ - "//third_party/bazel_platforms/cpu:x86_32": "first string", - "//third_party/bazel_platforms/cpu:ppc": "second string", - }), -) - -my_custom_bazel_macro( - name = "happy_macro", - my_config_string = "fixed string", -) - -my_custom_bazel_macro( - name = "sad_macro", - my_config_string = select({ - "//third_party/bazel_platforms/cpu:x86_32": "first string", - "//third_party/bazel_platforms/cpu:ppc": "other string", - }), -) -``` - -Building fails because `sad_macro` can't process the `select()`: - -```sh -$ bazel build //myapp:all -ERROR: /myworkspace/myapp/BUILD:17:1: Traceback - (most recent call last): -File "/myworkspace/myapp/BUILD", line 17 -my_custom_bazel_macro(name = "sad_macro", my_config_stri..."})) -File "/myworkspace/myapp/defs.bzl", line 4, in - my_custom_bazel_macro -my_config_string.upper() -type 'select' has no method upper(). -ERROR: error loading package 'myapp': Package 'myapp' contains errors. -``` - -Building succeeds when you comment out `sad_macro`: - -```sh -# Comment out sad_macro so it doesn't mess up the build. -$ bazel build //myapp:all -DEBUG: /myworkspace/myapp/defs.bzl:5:3: My name is happy_macro with custom message: FIXED STRING. -DEBUG: /myworkspace/myapp/hi.bzl:15:3: My name is happy_rule with custom message: FIRST STRING. 
-``` - -This is impossible to change because *by definition* macros are evaluated before -Bazel reads the build's command line flags. That means there isn't enough -information to evaluate select()s. - -Macros can, however, pass `select()`s as opaque blobs to rules: - -```python -# myapp/defs.bzl - -def my_custom_bazel_macro(name, my_config_string): - print("Invoking macro " + name) - my_custom_bazel_rule( - name = name + "_as_target", - my_config_string = my_config_string, - ) -``` - -```sh -$ bazel build //myapp:sad_macro_less_sad -DEBUG: /myworkspace/myapp/defs.bzl:23:3: Invoking macro sad_macro_less_sad. -DEBUG: /myworkspace/myapp/defs.bzl:15:3: My name is sad_macro_less_sad with custom message: FIRST STRING. -``` - -### Why does select() always return true? - -Because *macros* (but not rules) by definition -[can't evaluate `select()`s](#faq-select-macro), any attempt to do so -usually produces an error: - -```sh -ERROR: /myworkspace/myapp/BUILD:17:1: Traceback - (most recent call last): -File "/myworkspace/myapp/BUILD", line 17 -my_custom_bazel_macro(name = "sad_macro", my_config_stri..."})) -File "/myworkspace/myapp/defs.bzl", line 4, in - my_custom_bazel_macro -my_config_string.upper() -type 'select' has no method upper(). -``` - -Booleans are a special case that fails silently, so you should be particularly -vigilant with them: - -```sh -$ cat myapp/defs.bzl -def my_boolean_macro(boolval): - print("TRUE" if boolval else "FALSE") - -$ cat myapp/BUILD -load("//myapp:defs.bzl", "my_boolean_macro") -my_boolean_macro( - boolval = select({ - "//third_party/bazel_platforms/cpu:x86_32": True, - "//third_party/bazel_platforms/cpu:ppc": False, - }), -) - -$ bazel build //myapp:all --cpu=x86 -DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE. -$ bazel build //myapp:all --cpu=ppc -DEBUG: /myworkspace/myapp/defs.bzl:4:3: TRUE. -``` - -This happens because macros don't understand the contents of `select()`. -So what they're really evaluating is the `select()` object itself. According to -[Pythonic](https://docs.python.org/release/2.5.2/lib/truth.html) design -standards, all objects aside from a very small number of exceptions -automatically return true. - -### Can I read select() like a dict? - -Macros [can't](#faq-select-macro) evaluate `select()`s because macros evaluate before -Bazel knows what the build's command line parameters are. Can they at least read -the `select()`'s dictionary to, for example, add a suffix to each value? - -Conceptually this is possible, but [it isn't yet a Bazel feature](https://github.com/bazelbuild/bazel/issues/8419).
-What you *can* do today is prepare a straight dictionary, then feed it into a -`select()`: - -```sh -$ cat myapp/defs.bzl -def selecty_genrule(name, select_cmd): - for key in select_cmd.keys(): - select_cmd[key] += " WITH SUFFIX" - native.genrule( - name = name, - outs = [name + ".out"], - srcs = [], - cmd = "echo " + select(select_cmd + {"//conditions:default": "default"}) - + " > $@" - ) - -$ cat myapp/BUILD -selecty_genrule( - name = "selecty", - select_cmd = { - "//third_party/bazel_platforms/cpu:x86_32": "x86 mode", - }, -) - -$ bazel build //myapp:selecty --cpu=x86 && cat bazel-genfiles/myapp/selecty.out -x86 mode WITH SUFFIX -``` - -If you'd like to support both `select()` and native types, you can do this: - -```sh -$ cat myapp/defs.bzl -def selecty_genrule(name, select_cmd): - cmd_suffix = "" - if type(select_cmd) == "string": - cmd_suffix = select_cmd + " WITH SUFFIX" - elif type(select_cmd) == "dict": - for key in select_cmd.keys(): - select_cmd[key] += " WITH SUFFIX" - cmd_suffix = select(select_cmd + {"//conditions:default": "default"}) - - native.genrule( - name = name, - outs = [name + ".out"], - srcs = [], - cmd = "echo " + cmd_suffix + " > $@", - ) -``` - -### Why doesn't select() work with bind()? - -First of all, do not use `bind()`. It is deprecated in favor of `alias()`. - -The technical answer is that [`bind()`](/reference/be/workspace#bind) is a repo -rule, not a BUILD rule. - -Repo rules do not have a specific configuration, and aren't evaluated in -the same way as BUILD rules. Therefore, a `select()` in a `bind()` can't -actually evaluate to any specific branch. - -Instead, you should use [`alias()`](/reference/be/general#alias), with a `select()` in -the `actual` attribute, to perform this type of run-time determination. This -works correctly, since `alias()` is a BUILD rule, and is evaluated with a -specific configuration. - -You can even have a `bind()` target point to an `alias()`, if needed. - -```sh -$ cat WORKSPACE -workspace(name = "myapp") -bind(name = "openssl", actual = "//:ssl") -http_archive(name = "alternative", ...) -http_archive(name = "boringssl", ...) - -$ cat BUILD -config_setting( - name = "alt_ssl", - define_values = { - "ssl_library": "alternative", - }, -) - -alias( - name = "ssl", - actual = select({ - "//:alt_ssl": "@alternative//:ssl", - "//conditions:default": "@boringssl//:ssl", - }), -) -``` - -With this setup, you can pass `--define ssl_library=alternative`, and any target -that depends on either `//:ssl` or `//external:ssl` will see the alternative -located at `@alternative//:ssl`. - -But really, stop using `bind()`. - -### Why doesn't my select() choose what I expect? - -If `//myapp:foo` has a `select()` that doesn't choose the condition you expect, -use [cquery](/query/cquery) and `bazel config` to debug: - -If `//myapp:foo` is the top-level target you're building, run: - -```sh -$ bazel cquery //myapp:foo -//myapp:foo (12e23b9a2b534a) -``` - -If you're building some other target `//bar` that depends on -`//myapp:foo` somewhere in its subgraph, run: - -```sh -$ bazel cquery 'somepath(//bar, //myapp:foo)' -//bar:bar (3ag3193fee94a2) -//bar:intermediate_dep (12e23b9a2b534a) -//myapp:foo (12e23b9a2b534a) -``` - -The `(12e23b9a2b534a)` next to `//myapp:foo` is a *hash* of the -configuration that resolves `//myapp:foo`'s `select()`.
You can inspect its -values with `bazel config`: - -```sh -$ bazel config 12e23b9a2b534a -BuildConfigurationValue 12e23b9a2b534a -Fragment com.google.devtools.build.lib.analysis.config.CoreOptions { - cpu: darwin - compilation_mode: fastbuild - ... -} -Fragment com.google.devtools.build.lib.rules.cpp.CppOptions { - linkopt: [-Dfoo=bar] - ... -} -... -``` - -Then compare this output against the settings expected by each `config_setting`. - -`//myapp:foo` may exist in different configurations in the same build. See the -[cquery docs](/query/cquery) for guidance on using `somepath` to get the right -one. - -Caution: To prevent restarting the Bazel server, invoke `bazel config` with the -same command line flags as the `bazel cquery`. The `config` command relies on -the configuration nodes from the still-running server of the previous command. - -### Why doesn't `select()` work with platforms? - -Bazel doesn't support configurable attributes checking whether a given platform -is the target platform because the semantics are unclear. - -For example: - -```py -platform( - name = "x86_linux_platform", - constraint_values = [ - "@platforms//cpu:x86", - "@platforms//os:linux", - ], -) - -cc_library( - name = "lib", - srcs = [...], - linkopts = select({ - ":x86_linux_platform": ["--enable_x86_optimizations"], - "//conditions:default": [], - }), -) -``` - -In this `BUILD` file, which `select()` should be used if the target platform has both the -`@platforms//cpu:x86` and `@platforms//os:linux` constraints, but is **not** the -`:x86_linux_platform` defined here? The author of the `BUILD` file and the user -who defined the separate platform may have different ideas. - -#### What should I do instead? - -Instead, define a `config_setting` that matches **any** platform with -these constraints: - -```py -config_setting( - name = "is_x86_linux", - constraint_values = [ - "@platforms//cpu:x86", - "@platforms//os:linux", - ], -) - -cc_library( - name = "lib", - srcs = [...], - linkopts = select({ - ":is_x86_linux": ["--enable_x86_optimizations"], - "//conditions:default": [], - }), -) -``` - -This approach defines specific semantics, making it clearer to users what -platforms meet the desired conditions. - -#### What if I really, really want to `select` on the platform? - -If your build specifically requires checking the platform, you -can flip the value of the `--platforms` flag in a `config_setting`: - -```py -config_setting( - name = "is_specific_x86_linux_platform", - values = { - "platforms": "//package:x86_linux_platform", - }, -) - -cc_library( - name = "lib", - srcs = [...], - linkopts = select({ - ":is_specific_x86_linux_platform": ["--enable_x86_optimizations"], - "//conditions:default": [], - }), -) -``` - -The Bazel team doesn't endorse doing this; it overly constrains your build and -confuses users when the expected condition does not match. - -[BuildSettings]: /extending/config#user-defined-build-settings diff --git a/8.4.2/docs/sandboxing.mdx b/8.4.2/docs/sandboxing.mdx deleted file mode 100644 index 6869795..0000000 --- a/8.4.2/docs/sandboxing.mdx +++ /dev/null @@ -1,174 +0,0 @@ ---- -title: 'Sandboxing' ---- - - - -This article covers sandboxing in Bazel and debugging your sandboxing -environment. - -*Sandboxing* is a permission-restricting strategy that isolates processes from -each other or from resources in a system. For Bazel, this means restricting file -system access.
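If you want to see the effect of sandboxing before reading the details, one quick experiment (a sketch; the target label is hypothetical) is to force a sandboxed strategy explicitly and compare it with the unsandboxed `local` strategy:

```sh
# Run all actions in the Linux sandbox (see the strategy section below).
bazel build //foo:bar --spawn_strategy=linux-sandbox

# Run all actions unsandboxed, directly in the execroot.
bazel build //foo:bar --spawn_strategy=local
```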
- -Bazel's file system sandbox runs processes in a working directory that only -contains known inputs, such that compilers and other tools don't see source -files they should not access, unless they know the absolute paths to them. - -Sandboxing doesn't hide the host environment in any way. Processes can freely -access all files on the file system. However, on platforms that support user -namespaces, processes can't modify any files outside their working directory. -This ensures that the build graph doesn't have hidden dependencies that could -affect the reproducibility of the build. - -More specifically, Bazel constructs an `execroot/` directory for each action, -which acts as the action's work directory at execution time. `execroot/` -contains all input files to the action and serves as the container for any -generated outputs. Bazel then uses an operating-system-provided technique, -containers on Linux and `sandbox-exec` on macOS, to constrain the action within -`execroot/`. - -## Reasons for sandboxing - -- Without action sandboxing, Bazel doesn't know if a tool uses undeclared - input files (files that are not explicitly listed in the dependencies of an - action). When one of the undeclared input files changes, Bazel still - believes that the build is up-to-date and won’t rebuild the action. This can - result in an incorrect incremental build. - -- Incorrect reuse of cache entries creates problems during remote caching. A - bad cache entry in a shared cache affects every developer on the project, - and wiping the entire remote cache is not a feasible solution. - -- Sandboxing mimics the behavior of remote execution — if a build works well - with sandboxing, it will likely also work with remote execution. By making - remote execution upload all necessary files (including local tools), you can - significantly reduce maintenance costs for compile clusters compared to - having to install the tools on every machine in the cluster every time you - want to try out a new compiler or make a change to an existing tool. - -## What sandbox strategy to use - -You can choose which kind of sandboxing to use, if any, with the -[strategy flags](user-manual.html#strategy-options). Using the `sandboxed` -strategy makes Bazel pick one of the sandbox implementations listed below, -preferring an OS-specific sandbox to the less hermetic generic one. -[Persistent workers](/remote/persistent) run in a generic sandbox if you pass -the `--worker_sandboxing` flag. - -The `local` (a.k.a. `standalone`) strategy does not do any kind of sandboxing. -It simply executes the action's command line with the working directory set to -the execroot of your workspace. - -`processwrapper-sandbox` is a sandboxing strategy that does not require any -"advanced" features - it should work on any POSIX system out of the box. It -builds a sandbox directory consisting of symlinks that point to the original -source files, executes the action's command line with the working directory set -to this directory instead of the execroot, then moves the known output artifacts -out of the sandbox into the execroot and deletes the sandbox. This prevents the -action from accidentally using any input files that are not declared and from -littering the execroot with unknown output files. - -`linux-sandbox` goes one step further and builds on top of the -`processwrapper-sandbox`. Similar to what Docker does under the hood, it uses -Linux Namespaces (User, Mount, PID, Network and IPC namespaces) to isolate the -action from the host. 
That is, it makes the entire filesystem read-only except -for the sandbox directory, so the action cannot accidentally modify anything on -the host filesystem. This prevents situations like a buggy test accidentally rm --rf'ing your $HOME directory. Optionally, you can also prevent the action from -accessing the network. `linux-sandbox` uses PID namespaces to prevent the action -from seeing any other processes and to reliably kill all processes (even daemons -spawned by the action) at the end. - -`darwin-sandbox` is similar, but for macOS. It uses Apple's `sandbox-exec` tool -to achieve roughly the same as the Linux sandbox. - -Both the `linux-sandbox` and the `darwin-sandbox` do not work in a "nested" -scenario due to restrictions in the mechanisms provided by the operating -systems. Because Docker also uses Linux namespaces for its container magic, you -cannot easily run `linux-sandbox` inside a Docker container, unless you use -`docker run --privileged`. On macOS, you cannot run `sandbox-exec` inside a -process that's already being sandboxed. Thus, in these cases, Bazel -automatically falls back to using `processwrapper-sandbox`. - -If you would rather get a build error — such as to not accidentally build with a -less strict execution strategy — explicitly modify the list of execution -strategies that Bazel tries to use (for example, `bazel build ---spawn_strategy=worker,linux-sandbox`). - -Dynamic execution usually requires sandboxing for local execution. To opt out, -pass the `--experimental_local_lockfree_output` flag. Dynamic execution silently -sandboxes [persistent workers](/remote/persistent). - -## Downsides to sandboxing - -- Sandboxing incurs extra setup and teardown cost. How big this cost is - depends on many factors, including the shape of the build and the - performance of the host OS. For Linux, sandboxed builds are rarely more than - a few percent slower. Setting `--reuse_sandbox_directories` can - mitigate the setup and teardown cost. - -- Sandboxing effectively disables any cache the tool may have. You can - mitigate this by using [persistent workers](/remote/persistent), at - the cost of weaker sandbox guarantees. - -- [Multiplex workers](/remote/multiplex) require explicit worker support - to be sandboxed. Workers that do not support multiplex sandboxing run as - singleplex workers under dynamic execution, which can cost extra memory. - -## Debugging - -Follow the strategies below to debug issues with sandboxing. - -### Deactivated namespaces - -On some platforms, such as -[Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine/) -cluster nodes or Debian, user namespaces are deactivated by default due to -security concerns. If the `/proc/sys/kernel/unprivileged_userns_clone` file -exists and contains a 0, you can activate user namespaces by running: - -```posix-terminal - sudo sysctl kernel.unprivileged_userns_clone=1 -``` - -### Rule execution failures - -The sandbox may fail to execute rules because of the system setup. If you see a -message like `namespace-sandbox.c:633: execvp(argv[0], argv): No such file or -directory`, try to deactivate the sandbox with `--strategy=Genrule=local` for -genrules, and `--spawn_strategy=local` for other rules. - -### Detailed debugging for build failures - -If your build failed, use `--verbose_failures` and `--sandbox_debug` to make -Bazel show the exact command it ran when your build failed, including the part -that sets up the sandbox. 
- -Example error message: - -``` -ERROR: path/to/your/project/BUILD:1:1: compilation of rule -'//path/to/your/project:all' failed: - -Sandboxed execution failed, which may be legitimate (such as a compiler error), -or due to missing dependencies. To enter the sandbox environment for easier -debugging, run the following command in parentheses. On command failure, a bash -shell running inside the sandbox will then automatically be spawned - -namespace-sandbox failed: error executing command - (cd /some/path && \ - exec env - \ - LANG=en_US \ - PATH=/some/path/bin:/bin:/usr/bin \ - PYTHONPATH=/usr/local/some/path \ - /some/path/namespace-sandbox @/sandbox/root/path/this-sandbox-name.params -- - /some/path/to/your/some-compiler --some-params some-target) -``` - -You can now inspect the generated sandbox directory and see which files Bazel -created and run the command again to see how it behaves. - -Note that Bazel does not delete the sandbox directory when you use -`--sandbox_debug`. Unless you are actively debugging, you should disable -`--sandbox_debug` because it fills up your disk over time. diff --git a/8.4.2/extending/aspects.mdx b/8.4.2/extending/aspects.mdx deleted file mode 100644 index 4e25125..0000000 --- a/8.4.2/extending/aspects.mdx +++ /dev/null @@ -1,412 +0,0 @@ ---- -title: 'Aspects' ---- - - - -This page explains the basics and benefits of using -[aspects](/rules/lib/globals/bzl#aspect) and provides simple and advanced -examples. - -Aspects allow augmenting build dependency graphs with additional information -and actions. Some typical scenarios when aspects can be useful: - -* IDEs that integrate Bazel can use aspects to collect information about the - project. -* Code generation tools can leverage aspects to execute on their inputs in - *target-agnostic* manner. As an example, `BUILD` files can specify a hierarchy - of [protobuf](https://developers.google.com/protocol-buffers/) library - definitions, and language-specific rules can use aspects to attach - actions generating protobuf support code for a particular language. - -## Aspect basics - -`BUILD` files provide a description of a project’s source code: what source -files are part of the project, what artifacts (_targets_) should be built from -those files, what the dependencies between those files are, etc. Bazel uses -this information to perform a build, that is, it figures out the set of actions -needed to produce the artifacts (such as running compiler or linker) and -executes those actions. Bazel accomplishes this by constructing a _dependency -graph_ between targets and visiting this graph to collect those actions. - -Consider the following `BUILD` file: - -```python -java_library(name = 'W', ...) -java_library(name = 'Y', deps = [':W'], ...) -java_library(name = 'Z', deps = [':W'], ...) -java_library(name = 'Q', ...) -java_library(name = 'T', deps = [':Q'], ...) -java_library(name = 'X', deps = [':Y',':Z'], runtime_deps = [':T'], ...) -``` - -This `BUILD` file defines a dependency graph shown in the following figure: - -![Build graph](/rules/build-graph.png "Build graph") - -**Figure 1.** `BUILD` file dependency graph. - -Bazel analyzes this dependency graph by calling an implementation function of -the corresponding [rule](/extending/rules) (in this case "java_library") for every -target in the above example. 
Rule implementation functions generate actions that -build artifacts, such as `.jar` files, and pass information, such as locations -and names of those artifacts, to the reverse dependencies of those targets in -[providers](/extending/rules#providers). - -Aspects are similar to rules in that they have an implementation function that -generates actions and returns providers. However, their power comes from -the way the dependency graph is built for them. An aspect has an implementation -and a list of all attributes it propagates along. Consider an aspect A that -propagates along attributes named "deps". This aspect can be applied to -a target X, yielding an aspect application node A(X). During its application, -aspect A is applied recursively to all targets that X refers to in its "deps" -attribute (all attributes in A's propagation list). - -Thus a single act of applying aspect A to a target X yields a "shadow graph" of -the original dependency graph of targets shown in the following figure: - -![Build Graph with Aspect](/rules/build-graph-aspects.png "Build graph with aspects") - -**Figure 2.** Build graph with aspects. - -The only edges that are shadowed are the edges along the attributes in -the propagation set, thus the `runtime_deps` edge is not shadowed in this -example. An aspect implementation function is then invoked on all nodes in -the shadow graph similar to how rule implementations are invoked on the nodes -of the original graph. - -## Simple example - -This example demonstrates how to recursively print the source files for a -rule and all of its dependencies that have a `deps` attribute. It shows -an aspect implementation, an aspect definition, and how to invoke the aspect -from the Bazel command line. - -```python -def _print_aspect_impl(target, ctx): - # Make sure the rule has a srcs attribute. - if hasattr(ctx.rule.attr, 'srcs'): - # Iterate through the files that make up the sources and - # print their paths. - for src in ctx.rule.attr.srcs: - for f in src.files.to_list(): - print(f.path) - return [] - -print_aspect = aspect( - implementation = _print_aspect_impl, - attr_aspects = ['deps'], -) -``` - -Let's break the example up into its parts and examine each one individually. - -### Aspect definition - -```python -print_aspect = aspect( - implementation = _print_aspect_impl, - attr_aspects = ['deps'], -) -``` -Aspect definitions are similar to rule definitions, and defined using -the [`aspect`](/rules/lib/globals/bzl#aspect) function. - -Just like a rule, an aspect has an implementation function which in this case is -``_print_aspect_impl``. - -``attr_aspects`` is a list of rule attributes along which the aspect propagates. -In this case, the aspect will propagate along the ``deps`` attribute of the -rules that it is applied to. - -Another common argument for `attr_aspects` is `['*']` which would propagate the -aspect to all attributes of a rule. - -### Aspect implementation - -```python -def _print_aspect_impl(target, ctx): - # Make sure the rule has a srcs attribute. - if hasattr(ctx.rule.attr, 'srcs'): - # Iterate through the files that make up the sources and - # print their paths. - for src in ctx.rule.attr.srcs: - for f in src.files.to_list(): - print(f.path) - return [] -``` - -Aspect implementation functions are similar to the rule implementation -functions. 
They return [providers](/extending/rules#providers), can generate -[actions](/extending/rules#actions), and take two arguments: - -* `target`: the [target](/rules/lib/builtins/Target) the aspect is being applied to. -* `ctx`: the [`ctx`](/rules/lib/builtins/ctx) object that can be used to access attributes - and generate outputs and actions. - -The implementation function can access the attributes of the target rule via -[`ctx.rule.attr`](/rules/lib/builtins/ctx#rule). It can examine providers that are -provided by the target to which it is applied (via the `target` argument). - -Aspects are required to return a list of providers. In this example, the aspect -does not provide anything, so it returns an empty list. - -### Invoking the aspect using the command line - -The simplest way to apply an aspect is from the command line using the -[`--aspects`](/reference/command-line-reference#flag--aspects) -argument. Assuming the aspect above were defined in a file named `print.bzl`, this: - -```bash -bazel build //MyExample:example --aspects print.bzl%print_aspect -``` - -would apply the `print_aspect` to the target `example` and all of the -target rules that are accessible recursively via the `deps` attribute. - -The `--aspects` flag takes one argument, which is a specification of the aspect -in the format `<extension file label>%<aspect top-level name>`. - -## Advanced example - -The following example demonstrates using an aspect from a target rule -that counts files in targets, potentially filtering them by extension. -It shows how to use a provider to return values, how to use parameters to pass -an argument into an aspect implementation, and how to invoke an aspect from a rule. - -Note: Aspects added in rules' attributes are called *rule-propagated aspects* as -opposed to *command-line aspects* that are specified using the ``--aspects`` -flag. - -`file_count.bzl` file: - -```python -FileCountInfo = provider( - fields = { - 'count' : 'number of files' - } -) - -def _file_count_aspect_impl(target, ctx): - count = 0 - # Make sure the rule has a srcs attribute. - if hasattr(ctx.rule.attr, 'srcs'): - # Iterate through the sources counting files - for src in ctx.rule.attr.srcs: - for f in src.files.to_list(): - if ctx.attr.extension == '*' or ctx.attr.extension == f.extension: - count = count + 1 - # Get the counts from our dependencies. - for dep in ctx.rule.attr.deps: - count = count + dep[FileCountInfo].count - return [FileCountInfo(count = count)] - -file_count_aspect = aspect( - implementation = _file_count_aspect_impl, - attr_aspects = ['deps'], - attrs = { - 'extension' : attr.string(values = ['*', 'h', 'cc']), - } -) - -def _file_count_rule_impl(ctx): - for dep in ctx.attr.deps: - print(dep[FileCountInfo].count) - -file_count_rule = rule( - implementation = _file_count_rule_impl, - attrs = { - 'deps' : attr.label_list(aspects = [file_count_aspect]), - 'extension' : attr.string(default = '*'), - }, -) -``` - -`BUILD.bazel` file: - -```python -load('//:file_count.bzl', 'file_count_rule') - -cc_library( - name = 'lib', - srcs = [ - 'lib.h', - 'lib.cc', - ], -) - -cc_binary( - name = 'app', - srcs = [ - 'app.h', - 'app.cc', - 'main.cc', - ], - deps = ['lib'], -) - -file_count_rule( - name = 'file_count', - deps = ['app'], - extension = 'h', -) -``` - -### Aspect definition - -```python -file_count_aspect = aspect( - implementation = _file_count_aspect_impl, - attr_aspects = ['deps'], - attrs = { - 'extension' : attr.string(values = ['*', 'h', 'cc']), - } -) -``` - -This example shows how the aspect propagates through the ``deps`` attribute.
- -``attrs`` defines a set of attributes for an aspect. Public aspect attributes -define parameters and can only be of types ``bool``, ``int``, or ``string``. -For rule-propagated aspects, ``int`` and ``string`` parameters must have -``values`` specified on them. This example has a parameter called ``extension`` -that is allowed to have '``*``', '``h``', or '``cc``' as a value. - -For rule-propagated aspects, parameter values are taken from the rule requesting -the aspect, using the attribute of the rule that has the same name and type -(see the definition of ``file_count_rule``). - -For command-line aspects, the parameter values can be passed using the -[``--aspects_parameters``](/reference/command-line-reference#flag--aspects_parameters) -flag. The ``values`` restriction of ``int`` and ``string`` parameters may be -omitted. - -Aspects are also allowed to have private attributes of types ``label`` or -``label_list``. Private label attributes can be used to specify dependencies on -tools or libraries that are needed for actions generated by aspects. No private -attribute is defined in this example, but the following code snippet -demonstrates how you could pass in a tool to an aspect: - -```python -... - attrs = { - '_protoc' : attr.label( - default = Label('//tools:protoc'), - executable = True, - cfg = "exec" - ) - } -... -``` - -### Aspect implementation - -```python -FileCountInfo = provider( - fields = { - 'count' : 'number of files' - } -) - -def _file_count_aspect_impl(target, ctx): - count = 0 - # Make sure the rule has a srcs attribute. - if hasattr(ctx.rule.attr, 'srcs'): - # Iterate through the sources counting files - for src in ctx.rule.attr.srcs: - for f in src.files.to_list(): - if ctx.attr.extension == '*' or ctx.attr.extension == f.extension: - count = count + 1 - # Get the counts from our dependencies. - for dep in ctx.rule.attr.deps: - count = count + dep[FileCountInfo].count - return [FileCountInfo(count = count)] -``` - -Just like a rule implementation function, an aspect implementation function -returns a list of providers that are accessible to its dependencies. - -In this example, the ``FileCountInfo`` is defined as a provider that has one -field ``count``. It is best practice to explicitly define the fields of a -provider using the ``fields`` attribute. - -The set of providers for an aspect application A(X) is the union of providers -that come from the implementation of a rule for target X and from the -implementation of aspect A. The providers that a rule implementation propagates -are created and frozen before aspects are applied and cannot be modified from an -aspect. It is an error if a target and an aspect that is applied to it each -provide a provider with the same type, with the exceptions of -[`OutputGroupInfo`](/rules/lib/providers/OutputGroupInfo) -(which is merged, so long as the -rule and aspect specify different output groups) and -[`InstrumentedFilesInfo`](/rules/lib/providers/InstrumentedFilesInfo) -(which is taken from the aspect). This means that aspect implementations may -never return [`DefaultInfo`](/rules/lib/providers/DefaultInfo). - -The parameters and private attributes are passed in the attributes of the -``ctx``. This example references the ``extension`` parameter and determines -what files to count. - -For returning providers, the values of attributes along which -the aspect is propagated (from the `attr_aspects` list) are replaced with -the results of an application of the aspect to them.
For example, if target -X has Y and Z in its deps, `ctx.rule.attr.deps` for A(X) will be [A(Y), A(Z)]. -In this example, ``ctx.rule.attr.deps`` are Target objects that are the -results of applying the aspect to the 'deps' of the original target to which -the aspect has been applied. - -In the example, the aspect accesses the ``FileCountInfo`` provider from the -target's dependencies to accumulate the total transitive number of files. - -### Invoking the aspect from a rule - -```python -def _file_count_rule_impl(ctx): - for dep in ctx.attr.deps: - print(dep[FileCountInfo].count) - -file_count_rule = rule( - implementation = _file_count_rule_impl, - attrs = { - 'deps' : attr.label_list(aspects = [file_count_aspect]), - 'extension' : attr.string(default = '*'), - }, -) -``` - -The rule implementation demonstrates how to access the ``FileCountInfo`` -via the ``ctx.attr.deps``. - -The rule definition demonstrates how to define a parameter (``extension``) -and give it a default value (``*``). Note that having a default value that -was not one of '``cc``', '``h``', or '``*``' would be an error due to the -restrictions placed on the parameter in the aspect definition. - -### Invoking an aspect through a target rule - -```python -load('//:file_count.bzl', 'file_count_rule') - -cc_binary( - name = 'app', -... -) - -file_count_rule( - name = 'file_count', - deps = ['app'], - extension = 'h', -) -``` - -This demonstrates how to pass the ``extension`` parameter into the aspect -via the rule. Since the ``extension`` parameter has a default value in the -rule implementation, ``extension`` would be considered an optional parameter. - -When the ``file_count`` target is built, our aspect will be evaluated for -itself, and all of the targets accessible recursively via ``deps``. - -## References - -* [`aspect` API reference](/rules/lib/globals/bzl#aspect) diff --git a/8.4.2/extending/auto-exec-groups.mdx b/8.4.2/extending/auto-exec-groups.mdx deleted file mode 100644 index abba3d5..0000000 --- a/8.4.2/extending/auto-exec-groups.mdx +++ /dev/null @@ -1,181 +0,0 @@ ---- -title: 'Automatic Execution Groups (AEGs)' ---- - - -Automatic execution groups select an [execution platform][exec_platform] -for each toolchain type. In other words, one target can have multiple -execution platforms without defining execution groups. - -## Quick summary - -Automatic execution groups are closely connected to toolchains. If you are using -toolchains, you need to set them on the affected actions (actions which use an -executable or a tool from a toolchain) by adding `toolchain` parameter. For -example: - -```python -ctx.actions.run( - ..., - executable = ctx.toolchain['@bazel_tools//tools/jdk:toolchain_type'].tool, - ..., - toolchain = '@bazel_tools//tools/jdk:toolchain_type', -) -``` -If the action does not use a tool or executable from a toolchain, and Blaze -doesn't detect that ([the error](#first-error-message) is raised), you can set -`toolchain = None`. - -If you need to use multiple toolchains on a single execution platform (an action -uses executable or tools from two or more toolchains), you need to manually -define [exec_groups][exec_groups] (check -[When should I use a custom exec_group?][multiple_toolchains_exec_groups] -section). - -## History - -Before AEGs, the execution platform was selected on a rule level. For example: - -```python -my_rule = rule( - _impl, - toolchains = ['//tools:toolchain_type_1', '//tools:toolchain_type_2'], -) -``` - -Rule `my_rule` registers two toolchain types. 
This means that [Toolchain
-Resolution](https://bazel.build/extending/toolchains#toolchain-resolution) was used
-to find an execution platform which supports both toolchain types. The selected
-execution platform was used for each registered action inside the rule, unless
-specified differently with [exec_groups][exec_groups].
-In other words, all actions inside the rule used to have a single execution
-platform even if they used tools from different toolchains (the execution platform
-was selected once per target). This resulted in failures when there was no
-execution platform supporting all toolchains.
-
-## Current state
-
-With AEGs, the execution platform is selected for each toolchain type. The
-implementation function of the earlier example, `my_rule`, would look like:
-
-```python
-def _impl(ctx):
-    ctx.actions.run(
-        mnemonic = "First action",
-        executable = ctx.toolchains['//tools:toolchain_type_1'].tool,
-        toolchain = '//tools:toolchain_type_1',
-    )
-
-    ctx.actions.run(
-        mnemonic = "Second action",
-        executable = ctx.toolchains['//tools:toolchain_type_2'].tool,
-        toolchain = '//tools:toolchain_type_2',
-    )
-```
-
-This rule creates two actions: `First action`, which uses an executable from the
-`//tools:toolchain_type_1` toolchain, and `Second action`, which uses an executable
-from the `//tools:toolchain_type_2` toolchain. Before AEGs, both of these actions
-would be executed on a single execution platform which supports both toolchain
-types. With AEGs, by adding the `toolchain` parameter inside the actions, each
-action executes on the execution platform that provides the toolchain. The actions
-may be executed on different execution platforms.
-
-The same applies to [ctx.actions.run_shell][run_shell], where the `toolchain`
-parameter should be added when `tools` are from a toolchain.
-
-## Difference between custom exec groups and automatic exec groups
-
-As the name suggests, AEGs are exec groups created automatically for each
-toolchain type registered on a rule. There is no need to manually specify them,
-unlike the "classic" exec groups.
-
-### When should I use a custom exec_group?
-
-Custom exec_groups are needed only in cases where multiple toolchains need to
-execute on a single execution platform. In all other cases there's no need to
-define custom exec_groups. For example:
-
-```python
-def _impl(ctx):
-    ctx.actions.run(
-        ...,
-        executable = ctx.toolchains['//tools:toolchain_type_1'].tool,
-        tools = [ctx.toolchains['//tools:toolchain_type_2'].tool],
-        exec_group = 'two_toolchains',
-    )
-```
-
-```python
-my_rule = rule(
-    _impl,
-    exec_groups = {
-        "two_toolchains": exec_group(
-            toolchains = ['//tools:toolchain_type_1', '//tools:toolchain_type_2'],
-        ),
-    }
-)
-```
-
-## Migration of AEGs
-
-Internally in google3, Blaze is already using AEGs.
-Externally for Bazel, the migration is in progress. Some rules are already using
-this feature (e.g. Java and C++ rules).
-
-### Which Bazel versions support this migration?
-
-AEGs are fully supported from Bazel 7.
-
-### How to enable AEGs?
-
-Set `--incompatible_auto_exec_groups` to true. More information about the flag is
-available on [the GitHub issue][github_flag].
-
-### How to enable AEGs inside a particular rule?
-
-Set the `_use_auto_exec_groups` attribute on a rule.
-
-```python
-my_rule = rule(
-    _impl,
-    attrs = {
-        "_use_auto_exec_groups": attr.bool(default = True),
-    }
-)
-```
-
-This enables AEGs only in `my_rule`, and its actions start using the new logic
-when selecting the execution platform. The incompatible flag is overridden by this
-attribute. 
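-
-Putting the pieces together, here is a minimal, hypothetical sketch (reusing the
-`//tools:toolchain_type_1` type from the examples above, and assuming its
-toolchain exposes a `tool` field; the rule name is illustrative) of a rule that
-opts into AEGs via the attribute and routes its single action to the execution
-platform selected for that toolchain type:
-
-```python
-def _aeg_example_impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name + ".out")
-    ctx.actions.run(
-        outputs = [out],
-        # Assumed: the resolved toolchain exposes its executable as `tool`.
-        executable = ctx.toolchains['//tools:toolchain_type_1'].tool,
-        # With AEGs enabled, this routes the action to the execution
-        # platform selected for //tools:toolchain_type_1.
-        toolchain = '//tools:toolchain_type_1',
-    )
-    return [DefaultInfo(files = depset([out]))]
-
-aeg_example = rule(
-    _aeg_example_impl,
-    toolchains = ['//tools:toolchain_type_1'],
-    attrs = {
-        "_use_auto_exec_groups": attr.bool(default = True),
-    },
-)
-```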
-
-### How to disable AEGs in case of an error?
-
-Set `--incompatible_auto_exec_groups` to false to completely disable AEGs in
-your project ([flag's GitHub issue][github_flag]), or disable a particular rule
-by setting its `_use_auto_exec_groups` attribute to `False`
-([more details about the attribute](#how-enable-particular-rule)).
-
-### Error messages while migrating to AEGs
-
-#### Couldn't identify if tools are from implicit dependencies or a toolchain. Please set the toolchain parameter. If you're not using a toolchain, set it to 'None'.
-  * In this case you get the stack of calls that led to the error, so you can
-    see exactly which action needs the toolchain parameter. Check which
-    toolchain is used for the action and set it with the `toolchain` parameter.
-    If no toolchain is used inside the action for tools or executable, set it to
-    `None`.
-
-#### Action declared for non-existent toolchain '[toolchain_type]'.
-  * This means that you've set the toolchain parameter on the action but didn't
-register it on the rule. Register the toolchain or set `toolchain = None` inside
-the action.
-
-## Additional material
-
-For more information, see the design document:
-[Automatic exec groups for toolchains][aegs_design_doc].
-
-[exec_platform]: https://bazel.build/extending/platforms#:~:text=Execution%20%2D%20a%20platform%20on%20which%20build%20tools%20execute%20build%20actions%20to%20produce%20intermediate%20and%20final%20outputs.
-[exec_groups]: https://bazel.build/extending/exec-groups
-[github_flag]: https://github.com/bazelbuild/bazel/issues/17134
-[aegs_design_doc]: https://docs.google.com/document/d/1-rbP_hmKs9D639YWw5F_JyxPxL2bi6dSmmvj_WXak9M/edit#heading=h.5mcn15i0e1ch
-[run_shell]: https://bazel.build/rules/lib/builtins/actions#run_shell
-[multiple_toolchains_exec_groups]: /extending/auto-exec-groups#when-should-use-exec-groups
diff --git a/8.4.2/extending/concepts.mdx b/8.4.2/extending/concepts.mdx
deleted file mode 100644
index eb1d6b8..0000000
--- a/8.4.2/extending/concepts.mdx
+++ /dev/null
@@ -1,111 +0,0 @@
----
-title: 'Extension Overview'
----
-
-
-
-This page describes how to extend the BUILD language using macros
-and rules.
-
-Bazel extensions are files ending in `.bzl`. Use a
-[load statement](/concepts/build-files#load) to import a symbol from an extension.
-
-Before learning the more advanced concepts, first:
-
-* Read about the [Starlark language](/rules/language), used in both the
-  `BUILD` and `.bzl` files.
-
-* Learn how you can [share variables](/build/share-variables)
-  between two `BUILD` files.
-
-## Macros and rules
-
-A macro is a function that instantiates rules. Macros come in two flavors:
-[symbolic macros](/extending/macros) (new in Bazel 8) and [legacy
-macros](/extending/legacy-macros). The two flavors of macros are defined
-differently, but behave almost the same from the point of view of a user. A
-macro is useful when a `BUILD` file is getting too repetitive or too complex, as
-it lets you reuse some code. The function is evaluated as soon as the `BUILD`
-file is read. After the evaluation of the `BUILD` file, Bazel has little
-information about macros. If your macro generates a `genrule`, Bazel will
-behave *almost* as if you declared that `genrule` in the `BUILD` file. (The one
-exception is that targets declared in a symbolic macro have [special visibility
-semantics](/extending/macros#visibility): a symbolic macro can hide its internal
-targets from the rest of the package.)
-
-A [rule](/extending/rules) is more powerful than a macro. 
It can access Bazel
-internals and have full control over what is going on. It may, for example, pass
-information to other rules.
-
-If you want to reuse simple logic, start with a macro; we recommend a symbolic
-macro, unless you need to support older Bazel versions. If a macro becomes
-complex, it is often a good idea to make it a rule. Support for a new language
-is typically done with a rule. Rules are for advanced users, and most users will
-never have to write one; they will only load and call existing rules.
-
-## Evaluation model
-
-A build consists of three phases.
-
-* **Loading phase**. First, load and evaluate all extensions and all `BUILD`
-  files that are needed for the build. The execution of the `BUILD` files simply
-  instantiates rules (each time a rule is called, it gets added to a graph).
-  This is where macros are evaluated.
-
-* **Analysis phase**. The code of the rules is executed (their `implementation`
-  function), and actions are instantiated. An action describes how to generate
-  a set of outputs from a set of inputs, such as "run gcc on hello.c and get
-  hello.o". You must explicitly list which files will be generated before
-  executing the actual commands. In other words, the analysis phase takes
-  the graph generated by the loading phase and generates an action graph.
-
-* **Execution phase**. Actions are executed when at least one of their outputs is
-  required. If a file is missing or if a command fails to generate one output,
-  the build fails. Tests are also run during this phase.
-
-Bazel uses parallelism to read, parse, and evaluate the `.bzl` files and `BUILD`
-files. A file is read at most once per build, and the result of the evaluation is
-cached and reused. A file is evaluated only after all its dependencies (`load()`
-statements) have been resolved. By design, loading a `.bzl` file has no visible
-side effects; it only defines values and functions.
-
-Bazel tries to be clever: it uses dependency analysis to know which files must
-be loaded, which rules must be analyzed, and which actions must be executed. For
-example, if a rule generates actions that you don't need for the current build,
-they will not be executed.
-
-## Creating extensions
-
-* [Create your first macro](/rules/macro-tutorial) in order to reuse some code.
-  Then [learn more about macros](/extending/macros) and [using them to create
-  "custom verbs"](/rules/verbs-tutorial).
-
-* [Follow the rules tutorial](/rules/rules-tutorial) to get started with rules.
-  Next, you can read more about the [rules concepts](/extending/rules).
-
-The two links below will be very useful when writing your own extensions. Keep
-them within reach:
-
-* The [API reference](/rules/lib)
-
-* [Examples](https://github.com/bazelbuild/examples/tree/master/rules)
-
-## Going further
-
-In addition to [macros](/extending/macros) and [rules](/extending/rules), you
-may want to write [aspects](/extending/aspects) and [repository
-rules](/extending/repo).
-
-* Use [Buildifier](https://github.com/bazelbuild/buildtools)
-  consistently to format and lint your code.
-
-* Follow the [`.bzl` style guide](/rules/bzl-style).
-
-* [Test](/rules/testing) your code.
-
-* [Generate documentation](https://skydoc.bazel.build/) to help your users.
-
-* [Optimize the performance](/rules/performance) of your code.
-
-* [Deploy](/rules/deploying) your extensions to other people. 
diff --git a/8.4.2/extending/depsets.mdx b/8.4.2/extending/depsets.mdx
deleted file mode 100644
index 2aa8a1f..0000000
--- a/8.4.2/extending/depsets.mdx
+++ /dev/null
@@ -1,346 +0,0 @@
----
-title: 'Depsets'
----
-
-
-
-[Depsets](/rules/lib/builtins/depset) are a specialized data structure for efficiently
-collecting data across a target’s transitive dependencies. They are an essential
-element of rule processing.
-
-The defining feature of a depset is its time- and space-efficient union operation.
-The depset constructor accepts a list of elements ("direct") and a list of other
-depsets ("transitive"), and returns a depset representing a set containing all the
-direct elements and the union of all the transitive sets. Conceptually, the
-constructor creates a new graph node that has the direct and transitive nodes
-as its successors. Depsets have well-defined ordering semantics, based on
-traversal of this graph.
-
-Example uses of depsets include:
-
-* Storing the paths of all object files for a program’s libraries, which can
-  then be passed to a linker action through a provider.
-
-* For an interpreted language, storing the transitive source files that are
-  included in an executable's runfiles.
-
-## Description and operations
-
-Conceptually, a depset is a directed acyclic graph (DAG) that typically looks
-similar to the target graph. It is constructed from the leaves up to the root.
-Each target in a dependency chain can add its own contents on top of the
-previous without having to read or copy them.
-
-Each node in the DAG holds a list of direct elements and a list of child nodes.
-The contents of the depset are the transitive elements, that is, the direct
-elements of all the nodes. A new depset can be created using the
-[depset](/rules/lib/globals/bzl#depset) constructor: it accepts a list of direct
-elements and another list of child nodes.
-
-```python
-s = depset(["a", "b", "c"])
-t = depset(["d", "e"], transitive = [s])
-
-print(s)    # depset(["a", "b", "c"])
-print(t)    # depset(["d", "e", "a", "b", "c"])
-```
-
-To retrieve the contents of a depset, use the
-[to_list()](/rules/lib/builtins/depset#to_list) method. It returns a list of all transitive
-elements, not including duplicates. There is no way to directly inspect the
-precise structure of the DAG, although this structure does affect the order in
-which the elements are returned.
-
-```python
-s = depset(["a", "b", "c"])
-
-print("c" in s.to_list())              # True
-print(s.to_list() == ["a", "b", "c"])  # True
-```
-
-The allowed items in a depset are restricted, just as the allowed keys in
-dictionaries are restricted. In particular, depset contents may not be mutable.
-
-Depsets use reference equality: a depset is equal to itself, but unequal to any
-other depset, even if they have the same contents and same internal structure.
-
-```python
-s = depset(["a", "b", "c"])
-t = s
-print(s == t)  # True
-
-t = depset(["a", "b", "c"])
-print(s == t)  # False
-
-d = {}
-d[s] = None
-d[t] = None
-print(len(d))  # 2
-```
-
-To compare depsets by their contents, convert them to sorted lists.
-
-```python
-s = depset(["a", "b", "c"])
-t = depset(["c", "b", "a"])
-print(sorted(s.to_list()) == sorted(t.to_list()))  # True
-```
-
-There is no ability to remove elements from a depset. If this is needed, you
-must read out the entire contents of the depset, filter out the elements you want
-to remove, and reconstruct a new depset. This is not particularly efficient. 
- -```python -s = depset(["a", "b", "c"]) -t = depset(["b", "c"]) - -# Compute set difference s - t. Precompute t.to_list() so it's not done -# in a loop, and convert it to a dictionary for fast membership tests. -t_items = {e: None for e in t.to_list()} -diff_items = [x for x in s.to_list() if x not in t_items] -# Convert back to depset if it's still going to be used for union operations. -s = depset(diff_items) -print(s) # depset(["a"]) -``` - -### Order - -The `to_list` operation performs a traversal over the DAG. The kind of traversal -depends on the *order* that was specified at the time the depset was -constructed. It is useful for Bazel to support multiple orders because sometimes -tools care about the order of their inputs. For example, a linker action may -need to ensure that if `B` depends on `A`, then `A.o` comes before `B.o` on the -linker’s command line. Other tools might have the opposite requirement. - -Three traversal orders are supported: `postorder`, `preorder`, and -`topological`. The first two work exactly like [tree -traversals](https://en.wikipedia.org/wiki/Tree_traversal#Depth-first_search) -except that they operate on DAGs and skip already visited nodes. The third order -works as a topological sort from root to leaves, essentially the same as -preorder except that shared children are listed only after all of their parents. -Preorder and postorder operate as left-to-right traversals, but note that within -each node direct elements have no order relative to children. For topological -order, there is no left-to-right guarantee, and even the -all-parents-before-child guarantee does not apply in the case that there are -duplicate elements in different nodes of the DAG. - -```python -# This demonstrates different traversal orders. - -def create(order): - cd = depset(["c", "d"], order = order) - gh = depset(["g", "h"], order = order) - return depset(["a", "b", "e", "f"], transitive = [cd, gh], order = order) - -print(create("postorder").to_list()) # ["c", "d", "g", "h", "a", "b", "e", "f"] -print(create("preorder").to_list()) # ["a", "b", "e", "f", "c", "d", "g", "h"] -``` - -```python -# This demonstrates different orders on a diamond graph. - -def create(order): - a = depset(["a"], order=order) - b = depset(["b"], transitive = [a], order = order) - c = depset(["c"], transitive = [a], order = order) - d = depset(["d"], transitive = [b, c], order = order) - return d - -print(create("postorder").to_list()) # ["a", "b", "c", "d"] -print(create("preorder").to_list()) # ["d", "b", "a", "c"] -print(create("topological").to_list()) # ["d", "b", "c", "a"] -``` - -Due to how traversals are implemented, the order must be specified at the time -the depset is created with the constructor’s `order` keyword argument. If this -argument is omitted, the depset has the special `default` order, in which case -there are no guarantees about the order of any of its elements (except that it -is deterministic). - -## Full example - -This example is available at -[https://github.com/bazelbuild/examples/tree/main/rules/depsets](https://github.com/bazelbuild/examples/tree/main/rules/depsets). - -Suppose there is a hypothetical interpreted language Foo. In order to build -each `foo_binary` you need to know all the `*.foo` files that it directly or -indirectly depends on. - -```python -# //depsets:BUILD - -load(":foo.bzl", "foo_library", "foo_binary") - -# Our hypothetical Foo compiler. 
-py_binary(
-    name = "foocc",
-    srcs = ["foocc.py"],
-)
-
-foo_library(
-    name = "a",
-    srcs = ["a.foo", "a_impl.foo"],
-)
-
-foo_library(
-    name = "b",
-    srcs = ["b.foo", "b_impl.foo"],
-    deps = [":a"],
-)
-
-foo_library(
-    name = "c",
-    srcs = ["c.foo", "c_impl.foo"],
-    deps = [":a"],
-)
-
-foo_binary(
-    name = "d",
-    srcs = ["d.foo"],
-    deps = [":b", ":c"],
-)
-```
-
-```python
-# //depsets:foocc.py
-
-# "Foo compiler" that just concatenates its inputs to form its output.
-import sys
-
-if __name__ == "__main__":
-    assert len(sys.argv) >= 2
-    output = open(sys.argv[1], "wt")
-    for path in sys.argv[2:]:
-        input = open(path, "rt")
-        output.write(input.read())
-```
-
-Here, the transitive sources of the binary `d` are all of the `*.foo` files in
-the `srcs` fields of `a`, `b`, `c`, and `d`. In order for the `foo_binary`
-target to know about any file besides `d.foo`, the `foo_library` targets need to
-pass them along in a provider. Each library receives the providers from its own
-dependencies, adds its own immediate sources, and passes on a new provider with
-the augmented contents. The `foo_binary` rule does the same, except that instead
-of returning a provider, it uses the complete list of sources to construct a
-command line for an action.
-
-Here’s a complete implementation of the `foo_library` and `foo_binary` rules.
-
-```python
-# //depsets/foo.bzl
-
-# A provider with one field, transitive_sources.
-FooFiles = provider(fields = ["transitive_sources"])
-
-def get_transitive_srcs(srcs, deps):
-    """Obtain the source files for a target and its transitive dependencies.
-
-    Args:
-      srcs: a list of source files
-      deps: a list of targets that are direct dependencies
-    Returns:
-      a collection of the transitive sources
-    """
-    return depset(
-        srcs,
-        transitive = [dep[FooFiles].transitive_sources for dep in deps])
-
-def _foo_library_impl(ctx):
-    trans_srcs = get_transitive_srcs(ctx.files.srcs, ctx.attr.deps)
-    return [FooFiles(transitive_sources=trans_srcs)]
-
-foo_library = rule(
-    implementation = _foo_library_impl,
-    attrs = {
-        "srcs": attr.label_list(allow_files=True),
-        "deps": attr.label_list(),
-    },
-)
-
-def _foo_binary_impl(ctx):
-    foocc = ctx.executable._foocc
-    out = ctx.outputs.out
-    trans_srcs = get_transitive_srcs(ctx.files.srcs, ctx.attr.deps)
-    srcs_list = trans_srcs.to_list()
-    ctx.actions.run(executable = foocc,
-                    arguments = [out.path] + [src.path for src in srcs_list],
-                    inputs = srcs_list + [foocc],
-                    outputs = [out])
-
-foo_binary = rule(
-    implementation = _foo_binary_impl,
-    attrs = {
-        "srcs": attr.label_list(allow_files=True),
-        "deps": attr.label_list(),
-        "_foocc": attr.label(default=Label("//depsets:foocc"),
-                             allow_files=True, executable=True, cfg="exec")
-    },
-    outputs = {"out": "%{name}.out"},
-)
-```
-
-You can test this by copying these files into a fresh package, renaming the
-labels appropriately, creating the source `*.foo` files with dummy content, and
-building the `d` target.
-
-
-## Performance
-
-To see the motivation for using depsets, consider what would happen if
-`get_transitive_srcs()` collected its sources in a list.
-
-```python
-def get_transitive_srcs(srcs, deps):
-    trans_srcs = []
-    for dep in deps:
-        trans_srcs += dep[FooFiles].transitive_sources
-    trans_srcs += srcs
-    return trans_srcs
-```
-
-This does not take into account duplicates, so the source files for `a`
-will appear twice on the command line and twice in the contents of the output
-file. 
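-
-To make the duplication concrete, here is a small plain-Python sketch (not
-Starlark; the names mirror the `BUILD` file above, and each dependency is passed
-as an already-flattened list) that you can run outside Bazel:
-
-```python
-# List-based accumulation over the diamond d -> {b, c} -> a.
-def get_transitive_srcs(srcs, deps):
-    trans_srcs = []
-    for dep in deps:
-        trans_srcs += dep  # each dep is already a flat list of sources
-    trans_srcs += srcs
-    return trans_srcs
-
-a = get_transitive_srcs(["a.foo", "a_impl.foo"], [])
-b = get_transitive_srcs(["b.foo", "b_impl.foo"], [a])
-c = get_transitive_srcs(["c.foo", "c_impl.foo"], [a])
-d = get_transitive_srcs(["d.foo"], [b, c])
-
-print(d.count("a.foo"))  # 2: once via b and once via c
-```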
-
-An alternative is to use a general set, which can be simulated by a
-dictionary where the keys are the elements and all the keys map to `True`.
-
-```python
-def get_transitive_srcs(srcs, deps):
-    trans_srcs = {}
-    for dep in deps:
-        for file in dep[FooFiles].transitive_sources:
-            trans_srcs[file] = True
-    for file in srcs:
-        trans_srcs[file] = True
-    return trans_srcs
-```
-
-This gets rid of the duplicates, but it makes the order of the command line
-arguments (and therefore the contents of the files) unspecified, although still
-deterministic.
-
-Moreover, both approaches are asymptotically worse than the depset-based
-approach. Consider the case where there is a long chain of dependencies on
-Foo libraries. Processing every rule requires copying all of the transitive
-sources that came before it into a new data structure. This means that the
-time and space cost for analyzing an individual library or binary target
-is proportional to its own height in the chain. For a chain of length n,
-foolib_1 ← foolib_2 ← … ← foolib_n, the overall cost is effectively O(n^2).
-
-Generally speaking, depsets should be used whenever you are accumulating
-information through your transitive dependencies. This helps ensure that
-your build scales well as your target graph grows deeper.
-
-Finally, it’s important not to retrieve the contents of the depset
-unnecessarily in rule implementations. One call to `to_list()`
-at the end in a binary rule is fine, since the overall cost is just O(n). It’s
-when many non-terminal targets try to call `to_list()` that quadratic behavior
-occurs.
-
-For more information about using depsets efficiently, see the [performance](/rules/performance) page.
-
-## API Reference
-
-Please see the [depset API documentation](/rules/lib/builtins/depset) for more details.
-
diff --git a/8.4.2/extending/exec-groups.mdx b/8.4.2/extending/exec-groups.mdx
deleted file mode 100644
index ba145e5..0000000
--- a/8.4.2/extending/exec-groups.mdx
+++ /dev/null
@@ -1,167 +0,0 @@
----
-title: 'Execution Groups'
----
-
-
-
-Execution groups allow for multiple execution platforms within a single target.
-Each execution group has its own [toolchain](/extending/toolchains) dependencies and
-performs its own [toolchain resolution](/extending/toolchains#toolchain-resolution).
-
-## Background
-
-Execution groups allow the rule author to define sets of actions, each with a
-potentially different execution platform. Multiple execution platforms can allow
-actions to execute differently, for example compiling an iOS app on a remote
-(Linux) worker and then linking/code signing on a local Mac worker.
-
-Being able to define groups of actions also helps reduce reliance on action
-mnemonics as a proxy for specifying actions. Mnemonics are not guaranteed to be
-unique and can only reference a single action. This is especially helpful in
-allocating extra resources to specific memory- and processing-intensive actions,
-such as linking in C++ builds, without over-allocating to less demanding tasks.
-
-## Defining execution groups
-
-During rule definition, rule authors can
-[declare](/rules/lib/globals/bzl#exec_group)
-a set of execution groups. On each execution group, the rule author can specify
-everything needed to select an execution platform for that execution group,
-namely any constraints via `exec_compatible_with` and toolchain types via
-`toolchains`. 
-
-```python
-# foo.bzl
-my_rule = rule(
-    _impl,
-    exec_groups = {
-        "link": exec_group(
-            exec_compatible_with = ["@platforms//os:linux"],
-            toolchains = ["//foo:toolchain_type"],
-        ),
-        "test": exec_group(
-            toolchains = ["//foo_tools:toolchain_type"],
-        ),
-    },
-    attrs = {
-        "_compiler": attr.label(cfg = config.exec("link")),
-    },
-)
-```
-
-In the code snippet above, you can see that tool dependencies can also specify
-a transition for an exec group using the
-[`cfg`](/rules/lib/toplevel/attr#label)
-attribute param and the
-[`config`](/rules/lib/toplevel/config)
-module. The module exposes an `exec` function that takes a single string
-parameter: the name of the exec group for which the dependency should be
-built.
-
-As with native rules, the `test` execution group is present by default on Starlark
-test rules.
-
-## Accessing execution groups
-
-In the rule implementation, you can declare that actions should be run on the
-execution platform of an execution group. You can do this by using the `exec_group`
-param of action generating methods, specifically
-[`ctx.actions.run`](/rules/lib/builtins/actions#run) and
-[`ctx.actions.run_shell`](/rules/lib/builtins/actions#run_shell).
-
-```python
-# foo.bzl
-def _impl(ctx):
-    ctx.actions.run(
-        inputs = [ctx.executable._some_tool, ctx.files.srcs[0]],
-        exec_group = "compile",
-        # ...
-    )
-```
-
-Rule authors can also access the [resolved toolchains](/extending/toolchains#toolchain-resolution)
-of execution groups, similarly to how you
-can access the resolved toolchain of a target:
-
-```python
-# foo.bzl
-def _impl(ctx):
-    foo_info = ctx.exec_groups["link"].toolchains["//foo:toolchain_type"].fooinfo
-    ctx.actions.run(
-        inputs = [foo_info, ctx.files.srcs[0]],
-        exec_group = "link",
-        # ...
-    )
-```
-
-Note: If an action uses a toolchain from an execution group, but doesn't specify
-that execution group in the action declaration, that may potentially cause
-issues. A mismatch like this may not immediately cause failures, but is a latent
-problem.
-
-## Using execution groups to set execution properties
-
-Execution groups are integrated with the
-[`exec_properties`](/reference/be/common-definitions#common-attributes)
-attribute that exists on every rule and allows the target writer to specify a
-string dict of properties that is then passed to the execution machinery. For
-example, if you wanted to set some property, say memory, for the target and give
-certain actions a higher memory allocation, you would write an `exec_properties`
-entry with an execution-group-augmented key, such as:
-
-```python
-# BUILD
-my_rule(
-    name = 'my_target',
-    exec_properties = {
-        'mem': '12g',
-        'link.mem': '16g',
-    },
-    …
-)
-```
-
-All actions with `exec_group = "link"` would see the exec properties
-dictionary as `{"mem": "16g"}`. As you see here, execution-group-level
-settings override target-level settings.
-
-### Execution groups for native rules
-
-The following execution groups are available for actions defined by native rules:
-
-* `test`: Test runner actions.
-* `cpp_link`: C++ linking actions.
-
-### Execution groups and platform execution properties
-
-It is possible to define `exec_properties` for arbitrary execution groups on
-platform targets (unlike `exec_properties` set directly on a target, where
-properties for unknown execution groups are rejected). Targets then inherit the
-execution platform's `exec_properties` that affect the default execution group
-and any other relevant execution groups. 
-
-For example, suppose running a C++ test requires some resource to be available,
-but it isn't required for compiling and linking; this can be modelled as
-follows:
-
-```python
-constraint_setting(name = "resource")
-constraint_value(name = "has_resource", constraint_setting = ":resource")
-
-platform(
-    name = "platform_with_resource",
-    constraint_values = [":has_resource"],
-    exec_properties = {
-        "test.resource": "...",
-    },
-)
-
-cc_test(
-    name = "my_test",
-    srcs = ["my_test.cc"],
-    exec_compatible_with = [":has_resource"],
-)
-```
-
-`exec_properties` defined directly on targets take precedence over those that
-are inherited from the execution platform.
diff --git a/8.4.2/extending/platforms.mdx b/8.4.2/extending/platforms.mdx
deleted file mode 100644
index 94e6290..0000000
--- a/8.4.2/extending/platforms.mdx
+++ /dev/null
@@ -1,273 +0,0 @@
----
-title: 'Platforms'
----
-
-
-
-Bazel can build and test code on a variety of hardware, operating systems, and
-system configurations, using many different versions of build tools such as
-linkers and compilers. To help manage this complexity, Bazel has a concept of
-*constraints* and *platforms*. A constraint is a dimension in which build or
-production environments may differ, such as CPU architecture, the presence or
-absence of a GPU, or the version of a system-installed compiler. A platform is a
-named collection of choices for these constraints, representing the particular
-resources that are available in some environment.
-
-Modeling the environment as a platform helps Bazel to automatically select the
-appropriate
-[toolchains](/extending/toolchains)
-for build actions. Platforms can also be used in combination with the
-[config_setting](/reference/be/general#config_setting)
-rule to write [configurable attributes](/docs/configurable-attributes).
-
-Bazel recognizes three roles that a platform may serve:
-
-* **Host** - the platform on which Bazel itself runs.
-* **Execution** - a platform on which build tools execute build actions to
-  produce intermediate and final outputs.
-* **Target** - a platform on which a final output resides and executes.
-
-Bazel supports the following build scenarios regarding platforms:
-
-* **Single-platform builds** (default) - host, execution, and target platforms
-  are the same. For example, building a Linux executable on Ubuntu running on
-  an Intel x64 CPU.
-
-* **Cross-compilation builds** - host and execution platforms are the same, but
-  the target platform is different. For example, building an iOS app on macOS
-  running on a MacBook Pro.
-
-* **Multi-platform builds** - host, execution, and target platforms are all
-  different.
-
-Tip: for detailed instructions on migrating your project to platforms, see
-[Migrating to Platforms](/concepts/platforms).
-
-## Defining constraints and platforms
-
-The space of possible choices for platforms is defined by using the
-[`constraint_setting`][constraint_setting] and
-[`constraint_value`][constraint_value] rules within `BUILD` files.
-`constraint_setting` creates a new dimension, while
-`constraint_value` creates a new value for a given dimension; together they
-effectively define an enum and its possible values. For example, the following
-snippet of a `BUILD` file introduces a constraint for the system's glibc version
-with two possible values. 
-
-[constraint_setting]: /reference/be/platforms-and-toolchains#constraint_setting
-[constraint_value]: /reference/be/platforms-and-toolchains#constraint_value
-
-```python
-constraint_setting(name = "glibc_version")
-
-constraint_value(
-    name = "glibc_2_25",
-    constraint_setting = ":glibc_version",
-)
-
-constraint_value(
-    name = "glibc_2_26",
-    constraint_setting = ":glibc_version",
-)
-```
-
-Constraints and their values may be defined across different packages in the
-workspace. They are referenced by label and subject to the usual visibility
-controls. If visibility allows, you can extend an existing constraint setting by
-defining your own value for it.
-
-The [`platform`](/reference/be/platforms-and-toolchains#platform) rule introduces a new platform with
-certain choices of constraint values. The
-following creates a platform named `linux_x86`, and says that it describes any
-environment that runs a Linux operating system on an x86_64 architecture with a
-glibc version of 2.25. (See below for more on Bazel's built-in constraints.)
-
-```python
-platform(
-    name = "linux_x86",
-    constraint_values = [
-        "@platforms//os:linux",
-        "@platforms//cpu:x86_64",
-        ":glibc_2_25",
-    ],
-)
-```
-
-Note: It is an error for a platform to specify more than one value of the
-same constraint setting, such as `@platforms//cpu:x86_64` and
-`@platforms//cpu:arm` for `@platforms//cpu:cpu`.
-
-## Generally useful constraints and platforms
-
-To keep the ecosystem consistent, the Bazel team maintains a repository with
-constraint definitions for the most popular CPU architectures and operating
-systems. These are all located in
-[https://github.com/bazelbuild/platforms](https://github.com/bazelbuild/platforms).
-
-Bazel ships with the following special platform definition:
-`@platforms//host` (aliased as `@bazel_tools//tools:host_platform`). This is the
-autodetected host platform: it represents the platform of the system Bazel is
-running on.
-
-## Specifying a platform for a build
-
-You can specify the host and target platforms for a build using the following
-command-line flags:
-
-* `--host_platform` - defaults to `@bazel_tools//tools:host_platform`
-  * This target is aliased to `@platforms//host`, which is backed by a repo
-    rule that detects the host OS and CPU and writes the platform target.
-  * There's also `@platforms//host:constraints.bzl`, which exposes
-    a list called `HOST_CONSTRAINTS`, which can be used in other BUILD and
-    Starlark files.
-* `--platforms` - defaults to the host platform
-  * This means that when no other flags are set,
-    `@platforms//host` is the target platform.
-  * If `--host_platform` is set and not `--platforms`, the value of
-    `--host_platform` is both the host and target platform.
-
-## Skipping incompatible targets
-
-When building for a specific target platform, it is often desirable to skip
-targets that will never work on that platform. For example, your Windows device
-driver is likely going to generate lots of compiler errors when building on a
-Linux machine with `//...`. Use the
-[`target_compatible_with`](/reference/be/common-definitions#common.target_compatible_with)
-attribute to tell Bazel what target platform constraints your code has.
-
-The simplest use of this attribute restricts a target to a single platform.
-The target will not be built for any platform that doesn't satisfy all of the
-constraints. The following example restricts `win_driver_lib.cc` to 64-bit
-Windows. 
- -```python -cc_library( - name = "win_driver_lib", - srcs = ["win_driver_lib.cc"], - target_compatible_with = [ - "@platforms//cpu:x86_64", - "@platforms//os:windows", - ], -) -``` - -`:win_driver_lib` is *only* compatible for building with 64-bit Windows and -incompatible with all else. Incompatibility is transitive. Any targets -that transitively depend on an incompatible target are themselves considered -incompatible. - -### When are targets skipped? - -Targets are skipped when they are considered incompatible and included in the -build as part of a target pattern expansion. For example, the following two -invocations skip any incompatible targets found in a target pattern expansion. - -```console -$ bazel build --platforms=//:myplatform //... -``` - -```console -$ bazel build --platforms=//:myplatform //:all -``` - -Incompatible tests in a [`test_suite`](/reference/be/general#test_suite) are -similarly skipped if the `test_suite` is specified on the command line with -[`--expand_test_suites`](/reference/command-line-reference#flag--expand_test_suites). -In other words, `test_suite` targets on the command line behave like `:all` and -`...`. Using `--noexpand_test_suites` prevents expansion and causes -`test_suite` targets with incompatible tests to also be incompatible. - -Explicitly specifying an incompatible target on the command line results in an -error message and a failed build. - -```console -$ bazel build --platforms=//:myplatform //:target_incompatible_with_myplatform -... -ERROR: Target //:target_incompatible_with_myplatform is incompatible and cannot be built, but was explicitly requested. -... -FAILED: Build did NOT complete successfully -``` - -Incompatible explicit targets are silently skipped if -`--skip_incompatible_explicit_targets` is enabled. - -### More expressive constraints - -For more flexibility in expressing constraints, use the -`@platforms//:incompatible` -[`constraint_value`](/reference/be/platforms-and-toolchains#constraint_value) -that no platform satisfies. - -Use [`select()`](/reference/be/functions#select) in combination with -`@platforms//:incompatible` to express more complicated restrictions. For -example, use it to implement basic OR logic. The following marks a library -compatible with macOS and Linux, but no other platforms. - -Note: An empty constraints list is equivalent to "compatible with everything". - -```python -cc_library( - name = "unixish_lib", - srcs = ["unixish_lib.cc"], - target_compatible_with = select({ - "@platforms//os:osx": [], - "@platforms//os:linux": [], - "//conditions:default": ["@platforms//:incompatible"], - }), -) -``` - -The above can be interpreted as follows: - -1. When targeting macOS, the target has no constraints. -2. When targeting Linux, the target has no constraints. -3. Otherwise, the target has the `@platforms//:incompatible` constraint. Because - `@platforms//:incompatible` is not part of any platform, the target is - deemed incompatible. - -To make your constraints more readable, use -[skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`selects.with_or()`](https://github.com/bazelbuild/bazel-skylib/blob/main/docs/selects_doc.md#selectswith_or). - -You can express inverse compatibility in a similar way. The following example -describes a library that is compatible with everything _except_ for ARM. 
- -```python -cc_library( - name = "non_arm_lib", - srcs = ["non_arm_lib.cc"], - target_compatible_with = select({ - "@platforms//cpu:arm": ["@platforms//:incompatible"], - "//conditions:default": [], - }), -) -``` - -### Detecting incompatible targets using `bazel cquery` - -You can use the -[`IncompatiblePlatformProvider`](/rules/lib/providers/IncompatiblePlatformProvider) -in `bazel cquery`'s [Starlark output -format](/query/cquery#output-format-definition) to distinguish -incompatible targets from compatible ones. - -This can be used to filter out incompatible targets. The example below will -only print the labels for targets that are compatible. Incompatible targets are -not printed. - -```console -$ cat example.cquery - -def format(target): - if "IncompatiblePlatformProvider" not in providers(target): - return target.label - return "" - - -$ bazel cquery //... --output=starlark --starlark:file=example.cquery -``` - -### Known Issues - -Incompatible targets [ignore visibility -restrictions](https://github.com/bazelbuild/bazel/issues/16044). diff --git a/8.4.2/extending/repo.mdx b/8.4.2/extending/repo.mdx deleted file mode 100644 index b878f03..0000000 --- a/8.4.2/extending/repo.mdx +++ /dev/null @@ -1,161 +0,0 @@ ---- -title: 'Repository Rules' ---- - - - -This page covers how to define repository rules and provides examples for more -details. - -An [external repository](/external/overview#repository) is a directory tree, -containing source files usable in a Bazel build, which is generated on demand by -running its corresponding **repo rule**. Repos can be defined in a multitude of -ways, but ultimately, each repo is defined by invoking a repo rule, just as -build targets are defined by invoking build rules. They can be used to depend on -third-party libraries (such as Maven packaged libraries) but also to generate -`BUILD` files specific to the host Bazel is running on. - -## Repository rule definition - -In a `.bzl` file, use the -[repository_rule](/rules/lib/globals/bzl#repository_rule) function to define a -new repo rule and store it in a global variable. After a repo rule is defined, -it can be invoked as a function to define repos. This invocation is usually -performed from inside a [module extension](/external/extension) implementation -function. - -The two major components of a repo rule definition are its attribute schema and -implementation function. The attribute schema determines the names and types of -attributes passed to a repo rule invocation, and the implementation function is -run when the repo needs to be fetched. - -## Attributes - -Attributes are arguments passed to the repo rule invocation. The schema of -attributes accepted by a repo rule is specified using the `attrs` argument when -the repo rule is defined with a call to `repository_rule`. An example defining -`url` and `sha256` attributes as strings: - -```python -http_archive = repository_rule( - implementation=_impl, - attrs={ - "url": attr.string(mandatory=True), - "sha256": attr.string(mandatory=True), - } -) -``` - -To access an attribute within the implementation function, use -`repository_ctx.attr.`: - -```python -def _impl(repository_ctx): - url = repository_ctx.attr.url - checksum = repository_ctx.attr.sha256 -``` - -All `repository_rule`s have the implicitly defined attribute `name`. 
This is a -string attribute that behaves somewhat magically: when specified as an input to -a repo rule invocation, it takes an apparent repo name; but when read from the -repo rule's implementation function using `repository_ctx.attr.name`, it returns -the canonical repo name. - -## Implementation function - -Every repo rule requires an `implementation` function. It contains the actual -logic of the rule and is executed strictly in the Loading Phase. - -The function has exactly one input parameter, `repository_ctx`. The function -returns either `None` to signify that the rule is reproducible given the -specified parameters, or a dict with a set of parameters for that rule that -would turn that rule into a reproducible one generating the same repo. For -example, for a rule tracking a git repository that would mean returning a -specific commit identifier instead of a floating branch that was originally -specified. - -The input parameter `repository_ctx` can be used to access attribute values, and -non-hermetic functions (finding a binary, executing a binary, creating a file in -the repository or downloading a file from the Internet). See [the API -docs](/rules/lib/builtins/repository_ctx) for more context. Example: - -```python -def _impl(repository_ctx): - repository_ctx.symlink(repository_ctx.attr.path, "") - -local_repository = repository_rule( - implementation=_impl, - ...) -``` - -## When is the implementation function executed? - -The implementation function of a repo rule is executed when Bazel needs a target -from that repository, for example when another target (in another repo) depends -on it or if it is mentioned on the command line. The implementation function is -then expected to create the repo in the file system. This is called "fetching" -the repo. - -In contrast to regular targets, repos are not necessarily re-fetched when -something changes that would cause the repo to be different. This is because -there are things that Bazel either cannot detect changes to or it would cause -too much overhead on every build (for example, things that are fetched from the -network). Therefore, repos are re-fetched only if one of the following things -changes: - -* The attributes passed to the repo rule invocation. -* The Starlark code comprising the implementation of the repo rule. -* The value of any environment variable passed to `repository_ctx`'s - `getenv()` method or declared with the `environ` attribute of the - [`repository_rule`](/rules/lib/globals/bzl#repository_rule). The values of - these environment variables can be hard-wired on the command line with the - [`--repo_env`](/reference/command-line-reference#flag--repo_env) flag. -* The existence, contents, and type of any paths being - [`watch`ed](/rules/lib/builtins/repository_ctx#watch) in the implementation - function of the repo rule. - * Certain other methods of `repository_ctx` with a `watch` parameter, such - as `read()`, `execute()`, and `extract()`, can also cause paths to be - watched. - * Similarly, [`repository_ctx.watch_tree`](/rules/lib/builtins/repository_ctx#watch_tree) - and [`path.readdir`](/rules/lib/builtins/path#readdir) can cause paths - to be watched in other ways. -* When `bazel fetch --force` is executed. - -There are two parameters of `repository_rule` that control when the repositories -are re-fetched: - -* If the `configure` flag is set, the repository is re-fetched on `bazel - fetch --force --configure` (non-`configure` repositories are not - re-fetched). 
-* If the `local` flag is set, in addition to the above cases, the repo is also
-  re-fetched when the Bazel server restarts.
-
-## Forcing refetch of external repos
-
-Sometimes, an external repo can become outdated without any change to its
-definition or dependencies. For example, a repo fetching sources might follow a
-particular branch of a third-party repository, and new commits are available on
-that branch. In this case, you can ask Bazel to refetch all external repos
-unconditionally by calling `bazel fetch --force --all`.
-
-Moreover, some repo rules inspect the local machine and might become outdated if
-the local machine was upgraded. In this case, you can ask Bazel to refetch only
-those external repos whose [`repository_rule`](/rules/lib/globals#repository_rule)
-definition has the `configure` attribute set by using `bazel fetch --force
---configure`.
-
-## Examples
-
-- [C++ auto-configured
-  toolchain](https://cs.opensource.google/bazel/bazel/+/master:tools/cpp/cc_configure.bzl;drc=644b7d41748e09eff9e47cbab2be2263bb71f29a;l=176):
-  it uses a repo rule to automatically create the C++ configuration files for
-  Bazel by inspecting the local C++ compiler, the environment, and the flags
-  the C++ compiler supports.
-
-- [Go repositories](https://github.com/bazelbuild/rules_go/blob/67bc217b6210a0922d76d252472b87e9a6118fdf/go/private/go_repositories.bzl#L195)
-  uses several `repository_rule`s to define the list of dependencies needed to
-  use the Go rules.
-
-- [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external)
-  creates an external repository called `@maven` by default that generates
-  build targets for every Maven artifact in the transitive dependency tree.
diff --git a/8.4.2/extending/rules.mdx b/8.4.2/extending/rules.mdx
deleted file mode 100644
index 609d719..0000000
--- a/8.4.2/extending/rules.mdx
+++ /dev/null
@@ -1,1281 +0,0 @@
----
-title: 'Rules'
----
-
-
-
-A **rule** defines a series of [**actions**](#actions) that Bazel performs on
-inputs to produce a set of outputs, which are referenced in
-[**providers**](#providers) returned by the rule's
-[**implementation function**](#implementation_function). For example, a C++
-binary rule might:
-
-1. Take a set of `.cpp` source files (inputs).
-2. Run `g++` on the source files (action).
-3. Return the `DefaultInfo` provider with the executable output and other files
-   to make available at runtime.
-4. Return the `CcInfo` provider with C++-specific information gathered from the
-   target and its dependencies.
-
-From Bazel's perspective, `g++` and the standard C++ libraries are also inputs
-to this rule. As a rule writer, you must consider not only the user-provided
-inputs to a rule, but also all of the tools and libraries required to execute
-the actions.
-
-Before creating or modifying any rule, ensure you are familiar with Bazel's
-[build phases](/extending/concepts). It is important to understand the three
-phases of a build (loading, analysis, and execution). It is also useful to
-learn about [macros](/extending/macros) to understand the difference between rules and
-macros. To get started, first review the [Rules Tutorial](/rules/rules-tutorial).
-Then, use this page as a reference.
-
-A few rules are built into Bazel itself. These *native rules*, such as
-`genrule` and `filegroup`, provide some core support.
-By defining your own rules, you can add support for languages and tools
-that Bazel doesn't support natively. 
- -Bazel provides an extensibility model for writing rules using the -[Starlark](/rules/language) language. These rules are written in `.bzl` files, which -can be loaded directly from `BUILD` files. - -When defining your own rule, you get to decide what attributes it supports and -how it generates its outputs. - -The rule's `implementation` function defines its exact behavior during the -[analysis phase](/extending/concepts#evaluation-model). This function doesn't run any -external commands. Rather, it registers [actions](#actions) that will be used -later during the execution phase to build the rule's outputs, if they are -needed. - -## Rule creation - -In a `.bzl` file, use the [rule](/rules/lib/globals/bzl#rule) function to define a new -rule, and store the result in a global variable. The call to `rule` specifies -[attributes](#attributes) and an -[implementation function](#implementation_function): - -```python -example_library = rule( - implementation = _example_library_impl, - attrs = { - "deps": attr.label_list(), - ... - }, -) -``` - -This defines a [rule kind](/query/language#kind) named `example_library`. - -The call to `rule` also must specify if the rule creates an -[executable](#executable-rules) output (with `executable = True`), or specifically -a test executable (with `test = True`). If the latter, the rule is a *test rule*, -and the name of the rule must end in `_test`. - -## Target instantiation - -Rules can be [loaded](/concepts/build-files#load) and called in `BUILD` files: - -```python -load('//some/pkg:rules.bzl', 'example_library') - -example_library( - name = "example_target", - deps = [":another_target"], - ... -) -``` - -Each call to a build rule returns no value, but has the side effect of defining -a target. This is called *instantiating* the rule. This specifies a name for the -new target and values for the target's [attributes](#attributes). - -Rules can also be called from Starlark functions and loaded in `.bzl` files. -Starlark functions that call rules are called [Starlark macros](/extending/macros). -Starlark macros must ultimately be called from `BUILD` files, and can only be -called during the [loading phase](/extending/concepts#evaluation-model), when `BUILD` -files are evaluated to instantiate targets. - -## Attributes - -An *attribute* is a rule argument. Attributes can provide specific values to a -target's [implementation](#implementation_function), or they can refer to other -targets, creating a graph of dependencies. - -Rule-specific attributes, such as `srcs` or `deps`, are defined by passing a map -from attribute names to schemas (created using the [`attr`](/rules/lib/toplevel/attr) -module) to the `attrs` parameter of `rule`. -[Common attributes](/reference/be/common-definitions#common-attributes), such as -`name` and `visibility`, are implicitly added to all rules. Additional -attributes are implicitly added to -[executable and test rules](#executable-rules) specifically. Attributes which -are implicitly added to a rule can't be included in the dictionary passed to -`attrs`. - -### Dependency attributes - -Rules that process source code usually define the following attributes to handle -various [types of dependencies](/concepts/dependencies#types_of_dependencies): - -* `srcs` specifies source files processed by a target's actions. Often, the - attribute schema specifies which file extensions are expected for the sort - of source file the rule processes. 
Rules for languages with header files - generally specify a separate `hdrs` attribute for headers processed by a - target and its consumers. -* `deps` specifies code dependencies for a target. The attribute schema should - specify which [providers](#providers) those dependencies must provide. (For - example, `cc_library` provides `CcInfo`.) -* `data` specifies files to be made available at runtime to any executable - which depends on a target. That should allow arbitrary files to be - specified. - -```python -example_library = rule( - implementation = _example_library_impl, - attrs = { - "srcs": attr.label_list(allow_files = [".example"]), - "hdrs": attr.label_list(allow_files = [".header"]), - "deps": attr.label_list(providers = [ExampleInfo]), - "data": attr.label_list(allow_files = True), - ... - }, -) -``` - -These are examples of *dependency attributes*. Any attribute that specifies -an input label (those defined with -[`attr.label_list`](/rules/lib/toplevel/attr#label_list), -[`attr.label`](/rules/lib/toplevel/attr#label), or -[`attr.label_keyed_string_dict`](/rules/lib/toplevel/attr#label_keyed_string_dict)) -specifies dependencies of a certain type -between a target and the targets whose labels (or the corresponding -[`Label`](/rules/lib/builtins/Label) objects) are listed in that attribute when the target -is defined. The repository, and possibly the path, for these labels is resolved -relative to the defined target. - -```python -example_library( - name = "my_target", - deps = [":other_target"], -) - -example_library( - name = "other_target", - ... -) -``` - -In this example, `other_target` is a dependency of `my_target`, and therefore -`other_target` is analyzed first. It is an error if there is a cycle in the -dependency graph of targets. - - - -### Private attributes and implicit dependencies - -A dependency attribute with a default value creates an *implicit dependency*. It -is implicit because it's a part of the target graph that the user doesn't -specify it in a `BUILD` file. Implicit dependencies are useful for hard-coding a -relationship between a rule and a *tool* (a build-time dependency, such as a -compiler), since most of the time a user is not interested in specifying what -tool the rule uses. Inside the rule's implementation function, this is treated -the same as other dependencies. - -If you want to provide an implicit dependency without allowing the user to -override that value, you can make the attribute *private* by giving it a name -that begins with an underscore (`_`). Private attributes must have default -values. It generally only makes sense to use private attributes for implicit -dependencies. - -```python -example_library = rule( - implementation = _example_library_impl, - attrs = { - ... - "_compiler": attr.label( - default = Label("//tools:example_compiler"), - allow_single_file = True, - executable = True, - cfg = "exec", - ), - }, -) -``` - -In this example, every target of type `example_library` has an implicit -dependency on the compiler `//tools:example_compiler`. This allows -`example_library`'s implementation function to generate actions that invoke the -compiler, even though the user did not pass its label as an input. Since -`_compiler` is a private attribute, it follows that `ctx.attr._compiler` -will always point to `//tools:example_compiler` in all targets of this rule -type. Alternatively, you can name the attribute `compiler` without the -underscore and keep the default value. 
This allows users to substitute a -different compiler if necessary, but it requires no awareness of the compiler's -label. - -Implicit dependencies are generally used for tools that reside in the same -repository as the rule implementation. If the tool comes from the -[execution platform](/extending/platforms) or a different repository instead, the -rule should obtain that tool from a [toolchain](/extending/toolchains). - -### Output attributes - -*Output attributes*, such as [`attr.output`](/rules/lib/toplevel/attr#output) and -[`attr.output_list`](/rules/lib/toplevel/attr#output_list), declare an output file that the -target generates. These differ from dependency attributes in two ways: - -* They define output file targets instead of referring to targets defined - elsewhere. -* The output file targets depend on the instantiated rule target, instead of - the other way around. - -Typically, output attributes are only used when a rule needs to create outputs -with user-defined names which can't be based on the target name. If a rule has -one output attribute, it is typically named `out` or `outs`. - -Output attributes are the preferred way of creating *predeclared outputs*, which -can be specifically depended upon or -[requested at the command line](#requesting_output_files). - -## Implementation function - -Every rule requires an `implementation` function. These functions are executed -strictly in the [analysis phase](/extending/concepts#evaluation-model) and transform the -graph of targets generated in the loading phase into a graph of -[actions](#actions) to be performed during the execution phase. As such, -implementation functions can't actually read or write files. - -Rule implementation functions are usually private (named with a leading -underscore). Conventionally, they are named the same as their rule, but suffixed -with `_impl`. - -Implementation functions take exactly one parameter: a -[rule context](/rules/lib/builtins/ctx), conventionally named `ctx`. They return a list of -[providers](#providers). - -### Targets - -Dependencies are represented at analysis time as [`Target`](/rules/lib/builtins/Target) -objects. These objects contain the [providers](#providers) generated when the -target's implementation function was executed. - -[`ctx.attr`](/rules/lib/builtins/ctx#attr) has fields corresponding to the names of each -dependency attribute, containing `Target` objects representing each direct -dependency using that attribute. For `label_list` attributes, this is a list of -`Targets`. For `label` attributes, this is a single `Target` or `None`. - -A list of provider objects are returned by a target's implementation function: - -```python -return [ExampleInfo(headers = depset(...))] -``` - -Those can be accessed using index notation (`[]`), with the type of provider as -a key. These can be [custom providers](#custom_providers) defined in Starlark or -[providers for native rules](/rules/lib/providers) available as Starlark -global variables. - -For example, if a rule takes header files using a `hdrs` attribute and provides -them to the compilation actions of the target and its consumers, it could -collect them like so: - -```python -def _example_library_impl(ctx): - ... - transitive_headers = [hdr[ExampleInfo].headers for hdr in ctx.attr.hdrs] -``` - -There's a legacy struct style, which is strongly discouraged and rules should be -[migrated away from it](#migrating_from_legacy_providers). - -### Files - -Files are represented by [`File`](/rules/lib/builtins/File) objects. 
Since Bazel doesn't -perform file I/O during the analysis phase, these objects can't be used to -directly read or write file content. Rather, they are passed to action-emitting -functions (see [`ctx.actions`](/rules/lib/builtins/actions)) to construct pieces of the -action graph. - -A `File` can either be a source file or a generated file. Each generated file -must be an output of exactly one action. Source files can't be the output of -any action. - -For each dependency attribute, the corresponding field of -[`ctx.files`](/rules/lib/builtins/ctx#files) contains a list of the default outputs of all -dependencies using that attribute: - -```python -def _example_library_impl(ctx): - ... - headers = depset(ctx.files.hdrs, transitive = transitive_headers) - srcs = ctx.files.srcs - ... -``` - -[`ctx.file`](/rules/lib/builtins/ctx#file) contains a single `File` or `None` for -dependency attributes whose specs set `allow_single_file = True`. -[`ctx.executable`](/rules/lib/builtins/ctx#executable) behaves the same as `ctx.file`, but only -contains fields for dependency attributes whose specs set `executable = True`. - -### Declaring outputs - -During the analysis phase, a rule's implementation function can create outputs. -Since all labels have to be known during the loading phase, these additional -outputs have no labels. `File` objects for outputs can be created using -[`ctx.actions.declare_file`](/rules/lib/builtins/actions#declare_file) and -[`ctx.actions.declare_directory`](/rules/lib/builtins/actions#declare_directory). -Often, the names of outputs are based on the target's name, -[`ctx.label.name`](/rules/lib/builtins/ctx#label): - -```python -def _example_library_impl(ctx): - ... - output_file = ctx.actions.declare_file(ctx.label.name + ".output") - ... -``` - -For *predeclared outputs*, like those created for -[output attributes](#output_attributes), `File` objects instead can be retrieved -from the corresponding fields of [`ctx.outputs`](/rules/lib/builtins/ctx#outputs). - -### Actions - -An action describes how to generate a set of outputs from a set of inputs, for -example "run gcc on hello.c and get hello.o". When an action is created, Bazel -doesn't run the command immediately. It registers it in a graph of dependencies, -because an action can depend on the output of another action. For example, in C, -the linker must be called after the compiler. - -General-purpose functions that create actions are defined in -[`ctx.actions`](/rules/lib/builtins/actions): - -* [`ctx.actions.run`](/rules/lib/builtins/actions#run), to run an executable. -* [`ctx.actions.run_shell`](/rules/lib/builtins/actions#run_shell), to run a shell - command. -* [`ctx.actions.write`](/rules/lib/builtins/actions#write), to write a string to a file. -* [`ctx.actions.expand_template`](/rules/lib/builtins/actions#expand_template), to - generate a file from a template. - -[`ctx.actions.args`](/rules/lib/builtins/actions#args) can be used to efficiently -accumulate the arguments for actions. It avoids flattening depsets until -execution time: - -```python -def _example_library_impl(ctx): - ... 
- - transitive_headers = [dep[ExampleInfo].headers for dep in ctx.attr.deps] - headers = depset(ctx.files.hdrs, transitive = transitive_headers) - srcs = ctx.files.srcs - inputs = depset(srcs, transitive = [headers]) - output_file = ctx.actions.declare_file(ctx.label.name + ".output") - - args = ctx.actions.args() - args.add_joined("-h", headers, join_with = ",") - args.add_joined("-s", srcs, join_with = ",") - args.add("-o", output_file) - - ctx.actions.run( - mnemonic = "ExampleCompile", - executable = ctx.executable._compiler, - arguments = [args], - inputs = inputs, - outputs = [output_file], - ) - ... -``` - -Actions take a list or depset of input files and generate a (non-empty) list of -output files. The set of input and output files must be known during the -[analysis phase](/extending/concepts#evaluation-model). It might depend on the value of -attributes, including providers from dependencies, but it can't depend on the -result of the execution. For example, if your action runs the unzip command, you -must specify which files you expect to be inflated (before running unzip). -Actions which create a variable number of files internally can wrap those in a -single file (such as a zip, tar, or other archive format). - -Actions must list all of their inputs. Listing inputs that are not used is -permitted, but inefficient. - -Actions must create all of their outputs. They may write other files, but -anything not in outputs won't be available to consumers. All declared outputs -must be written by some action. - -Actions are comparable to pure functions: They should depend only on the -provided inputs, and avoid accessing computer information, username, clock, -network, or I/O devices (except for reading inputs and writing outputs). This is -important because the output will be cached and reused. - -Dependencies are resolved by Bazel, which decides which actions to -execute. It is an error if there is a cycle in the dependency graph. Creating -an action doesn't guarantee that it will be executed; that depends on whether -its outputs are needed for the build. - -### Providers - -Providers are pieces of information that a rule exposes to other rules that -depend on it. This data can include output files, libraries, parameters to pass -on a tool's command line, or anything else a target's consumers should know -about. - -Since a rule's implementation function can only read providers from the -instantiated target's immediate dependencies, rules need to forward any -information from a target's dependencies that needs to be known by a target's -consumers, generally by accumulating that into a [`depset`](/rules/lib/builtins/depset). - -A target's providers are specified by a list of provider objects returned by -the implementation function. - -Old implementation functions can also be written in a legacy style where the -implementation function returns a [`struct`](/rules/lib/builtins/struct) instead of a list of -provider objects. This style is strongly discouraged; rules should be -[migrated away from it](#migrating_from_legacy_providers). - -#### Default outputs - -A target's *default outputs* are the outputs that are requested by default when -the target is requested for build at the command line. For example, a -`java_library` target `//pkg:foo` has `foo.jar` as a default output, so that -will be built by the command `bazel build //pkg:foo`.
- -Default outputs are specified by the `files` parameter of -[`DefaultInfo`](/rules/lib/providers/DefaultInfo): - -```python -def _example_library_impl(ctx): - ... - return [ - DefaultInfo(files = depset([output_file]), ...), - ... - ] -``` - -If `DefaultInfo` is not returned by a rule implementation or the `files` -parameter is not specified, `DefaultInfo.files` defaults to all -*predeclared outputs* (generally, those created by [output -attributes](#output_attributes)). - -Rules that perform actions should provide default outputs, even if those outputs -are not expected to be directly used. Actions that are not in the graph of the -requested outputs are pruned. If an output is only used by a target's consumers, -those actions won't be performed when the target is built in isolation. This -makes debugging more difficult because rebuilding just the failing target won't -reproduce the failure. - -#### Runfiles - -Runfiles are a set of files used by a target at runtime (as opposed to build -time). During the [execution phase](/extending/concepts#evaluation-model), Bazel creates -a directory tree containing symlinks pointing to the runfiles. This stages the -environment for the binary so it can access the runfiles during runtime. - -Runfiles can be added manually during rule creation. -[`runfiles`](/rules/lib/builtins/runfiles) objects can be created by the `runfiles` method -on the rule context, [`ctx.runfiles`](/rules/lib/builtins/ctx#runfiles), and passed to the -`runfiles` parameter on `DefaultInfo`. The executable output of -[executable rules](#executable-rules) is implicitly added to the runfiles. - -Some rules specify attributes, generally named -[`data`](/reference/be/common-definitions#common.data), whose outputs are added to -a target's runfiles. Runfiles should also be merged in from `data`, as well as -from any attributes which might provide code for eventual execution, generally -`srcs` (which might contain `filegroup` targets with associated `data`) and -`deps`. - -```python -def _example_library_impl(ctx): - ... - runfiles = ctx.runfiles(files = ctx.files.data) - transitive_runfiles = [] - for runfiles_attr in ( - ctx.attr.srcs, - ctx.attr.hdrs, - ctx.attr.deps, - ctx.attr.data, - ): - for target in runfiles_attr: - transitive_runfiles.append(target[DefaultInfo].default_runfiles) - runfiles = runfiles.merge_all(transitive_runfiles) - return [ - DefaultInfo(..., runfiles = runfiles), - ... - ] -``` - -#### Custom providers - -Providers can be defined using the [`provider`](/rules/lib/globals/bzl#provider) -function to convey rule-specific information: - -```python -ExampleInfo = provider( - "Info needed to compile/link Example code.", - fields = { - "headers": "depset of header Files from transitive dependencies.", - "files_to_link": "depset of Files from compilation.", - }, -) -``` - -Rule implementation functions can then construct and return provider instances: - -```python -def _example_library_impl(ctx): - ... - return [ - ... - ExampleInfo( - headers = headers, - files_to_link = depset( - [output_file], - transitive = [ - dep[ExampleInfo].files_to_link for dep in ctx.attr.deps - ], - ), - ) - ] -``` - -##### Custom initialization of providers - -It's possible to guard the instantiation of a provider with custom -preprocessing and validation logic. This can be used to ensure that all -provider instances satisfy certain invariants, or to give users a cleaner API for -obtaining an instance.
- -This is done by passing an `init` callback to the -[`provider`](/rules/lib/globals/bzl.html#provider) function. If this callback is given, the -return type of `provider()` changes to be a tuple of two values: the provider -symbol that is the ordinary return value when `init` is not used, and a "raw -constructor". - -In this case, when the provider symbol is called, instead of directly returning -a new instance, it will forward the arguments along to the `init` callback. The -callback's return value must be a dict mapping field names (strings) to values; -this is used to initialize the fields of the new instance. Note that the -callback may have any signature, and if the arguments don't match the signature -an error is reported as if the callback were invoked directly. - -The raw constructor, by contrast, will bypass the `init` callback. - -The following example uses `init` to preprocess and validate its arguments: - -```python -# //pkg:exampleinfo.bzl - -_core_headers = [...] # private constant representing standard library files - -# Keyword-only arguments are preferred. -def _exampleinfo_init(*, files_to_link, headers = None, allow_empty_files_to_link = False): - if not files_to_link and not allow_empty_files_to_link: - fail("files_to_link may not be empty") - all_headers = depset(_core_headers, transitive = headers) - return {"files_to_link": files_to_link, "headers": all_headers} - -ExampleInfo, _new_exampleinfo = provider( - fields = ["files_to_link", "headers"], - init = _exampleinfo_init, -) -``` - -A rule implementation may then instantiate the provider as follows: - -```python -ExampleInfo( - files_to_link = my_files_to_link, # may not be empty - headers = my_headers, # will automatically include the core headers -) -``` - -The raw constructor can be used to define alternative public factory functions -that don't go through the `init` logic. For example, exampleinfo.bzl -could define: - -```python -def make_barebones_exampleinfo(headers): - """Returns an ExampleInfo with no files_to_link and only the specified headers.""" - return _new_exampleinfo(files_to_link = depset(), headers = headers) -``` - -Typically, the raw constructor is bound to a variable whose name begins with an -underscore (`_new_exampleinfo` above), so that user code can't load it and -generate arbitrary provider instances. - -Another use for `init` is to prevent the user from calling the provider -symbol altogether, and force them to use a factory function instead: - -```python -def _exampleinfo_init_banned(*args, **kwargs): - fail("Do not call ExampleInfo(). Use make_exampleinfo() instead.") - -ExampleInfo, _new_exampleinfo = provider( - ... - init = _exampleinfo_init_banned) - -def make_exampleinfo(...): - ... - return _new_exampleinfo(...) -``` - - - -## Executable rules and test rules - -Executable rules define targets that can be invoked by a `bazel run` command. -Test rules are a special kind of executable rule whose targets can also be -invoked by a `bazel test` command. Executable and test rules are created by -setting the respective [`executable`](/rules/lib/globals/bzl#rule.executable) or -[`test`](/rules/lib/globals/bzl#rule.test) argument to `True` in the call to `rule`: - -```python -example_binary = rule( - implementation = _example_binary_impl, - executable = True, - ... -) - -example_test = rule( - implementation = _example_binary_impl, - test = True, - ... -) -``` - -Test rules must have names that end in `_test`.
(Test *target* names also often -end in `_test` by convention, but this is not required.) Non-test rules must not -have this suffix. - -Both kinds of rules must produce an executable output file (which may or may not -be predeclared) that will be invoked by the `run` or `test` commands. To tell -Bazel which of a rule's outputs to use as this executable, pass it as the -`executable` argument of a returned [`DefaultInfo`](/rules/lib/providers/DefaultInfo) -provider. That `executable` is added to the default outputs of the rule (so you -don't need to pass that to both `executable` and `files`). It's also implicitly -added to the [runfiles](#runfiles): - -```python -def _example_binary_impl(ctx): - executable = ctx.actions.declare_file(ctx.label.name) - ... - return [ - DefaultInfo(executable = executable, ...), - ... - ] -``` - -The action that generates this file must set the executable bit on the file. For -a [`ctx.actions.run`](/rules/lib/builtins/actions#run) or -[`ctx.actions.run_shell`](/rules/lib/builtins/actions#run_shell) action this should be done -by the underlying tool that is invoked by the action. For a -[`ctx.actions.write`](/rules/lib/builtins/actions#write) action, pass `is_executable = True`. - -As [legacy behavior](#deprecated_predeclared_outputs), executable rules have a -special `ctx.outputs.executable` predeclared output. This file serves as the -default executable if you don't specify one using `DefaultInfo`; it must not be -used otherwise. This output mechanism is deprecated because it doesn't support -customizing the executable file's name at analysis time. - -See examples of an -[executable rule](https://github.com/bazelbuild/examples/blob/main/rules/executable/fortune.bzl) -and a -[test rule](https://github.com/bazelbuild/examples/blob/main/rules/test_rule/line_length.bzl). - -[Executable rules](/reference/be/common-definitions#common-attributes-binaries) and -[test rules](/reference/be/common-definitions#common-attributes-tests) have additional -attributes implicitly defined, in addition to those added for -[all rules](/reference/be/common-definitions#common-attributes). The defaults of -implicitly-added attributes can't be changed, though this can be worked around -by wrapping a private rule in a [Starlark macro](/extending/macros) which alters the -default: - -```python -def example_test(size = "small", **kwargs): - _example_test(size = size, **kwargs) - -_example_test = rule( - ... -) -``` - -### Runfiles location - -When an executable target is run with `bazel run` (or `test`), the root of the -runfiles directory is adjacent to the executable. The paths relate as follows: - -```python -# Given launcher_path and runfile_file: -runfiles_root = launcher_path.path + ".runfiles" -workspace_name = ctx.workspace_name -runfile_path = runfile_file.short_path -execution_root_relative_path = "%s/%s/%s" % ( - runfiles_root, workspace_name, runfile_path) -``` - -The path to a `File` under the runfiles directory corresponds to -[`File.short_path`](/rules/lib/builtins/File#short_path). - -The binary executed directly by `bazel` is adjacent to the root of the -`runfiles` directory. However, binaries called *from* the runfiles can't make -the same assumption. To mitigate this, each binary should provide a way to -accept its runfiles root as a parameter using an environment variable, command -line argument, or flag. This allows a binary to pass the correct canonical -runfiles root to the binaries it calls. If that's not set, a binary can guess -that it was the first binary called and look for an adjacent runfiles -directory.
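For illustration, here is a minimal runtime-side sketch of that lookup in Python (the environment variable name `MY_TOOL_RUNFILES` is hypothetical; a real tool would use whatever convention its callers agree on):

```python
import os
import sys

def find_runfiles_root():
    # Prefer an explicitly provided runfiles root, as recommended above.
    explicit = os.environ.get("MY_TOOL_RUNFILES")
    if explicit:
        return explicit
    # Otherwise, guess that this was the first binary called and look for
    # an adjacent "<binary>.runfiles" directory.
    candidate = os.path.abspath(sys.argv[0]) + ".runfiles"
    if os.path.isdir(candidate):
        return candidate
    return None
```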
- -## Advanced topics - -### Requesting output files - -A single target can have several output files. When a `bazel build` command is -run, some of the outputs of the targets given to the command are considered to -be *requested*. Bazel only builds these requested files and the files that they -directly or indirectly depend on. (In terms of the action graph, Bazel only -executes the actions that are reachable as transitive dependencies of the -requested files.) - -In addition to [default outputs](#default_outputs), any *predeclared output* can -be explicitly requested on the command line. Rules can specify predeclared -outputs using [output attributes](#output_attributes). In that case, the user -explicitly chooses labels for outputs when they instantiate the rule. To obtain -[`File`](/rules/lib/builtins/File) objects for output attributes, use the corresponding -attribute of [`ctx.outputs`](/rules/lib/builtins/ctx#outputs). Rules can -[implicitly define predeclared outputs](#deprecated_predeclared_outputs) based -on the target name as well, but this feature is deprecated. - -In addition to default outputs, there are *output groups*, which are collections -of output files that may be requested together. These can be requested with -[`--output_groups`](/reference/command-line-reference#flag--output_groups). For -example, if a target `//pkg:mytarget` is of a rule type that has a `debug_files` -output group, these files can be built by running `bazel build //pkg:mytarget ---output_groups=debug_files`. Since non-predeclared outputs don't have labels, -they can only be requested by appearing in the default outputs or an output -group. - -Output groups can be specified with the -[`OutputGroupInfo`](/rules/lib/providers/OutputGroupInfo) provider. Note that unlike many -built-in providers, `OutputGroupInfo` can take parameters with arbitrary names -to define output groups with those names: - -```python -def _example_library_impl(ctx): - ... - debug_file = ctx.actions.declare_file(ctx.label.name + ".pdb") - ... - return [ - DefaultInfo(files = depset([output_file]), ...), - OutputGroupInfo( - debug_files = depset([debug_file]), - all_files = depset([output_file, debug_file]), - ), - ... - ] -``` - -Also unlike most providers, `OutputGroupInfo` can be returned by both an -[aspect](/extending/aspects) and the rule target to which that aspect is applied, as -long as they don't define the same output groups. In that case, the resulting -providers are merged. - -Note that `OutputGroupInfo` generally shouldn't be used to convey specific sorts -of files from a target to the actions of its consumers. Define -[rule-specific providers](#custom_providers) for that instead. - -### Configurations - -Imagine that you want to build a C++ binary for a different architecture. The -build can be complex and involve multiple steps. Some of the intermediate -binaries, like compilers and code generators, have to run on -[the execution platform](/extending/platforms#overview) (which could be your host, -or a remote executor). Other binaries, like the final output, must be built for the -target architecture. - -For this reason, Bazel has a concept of "configurations" and transitions. The -topmost targets (the ones requested on the command line) are built in the -"target" configuration, while tools that should run on the execution platform -are built in an "exec" configuration. Rules may generate different actions based -on the configuration, for instance to change the CPU architecture that is passed -to the compiler. In some cases, the same library may be needed for different -configurations. If this happens, it will be analyzed and potentially built -multiple times.
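As a minimal sketch of the `cfg` guidance discussed below (a hypothetical `my_rule` with an assumed implementation function; the attribute names are made up), a tool that runs during the build transitions to the exec configuration, while a binary invoked at runtime stays in the target configuration:

```python
my_rule = rule(
    implementation = _my_rule_impl,  # assumed to be defined elsewhere
    attrs = {
        # A code generator executed while building: built for the execution
        # platform.
        "generator": attr.label(executable = True, cfg = "exec"),
        # A helper binary executed when the built artifact runs (for example,
        # as part of a test): built for the target platform.
        "runtime_helper": attr.label(executable = True, cfg = "target"),
    },
)
```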
- -By default, Bazel builds a target's dependencies in the same configuration as -the target itself, in other words without transitions. When a dependency is a -tool that's needed to help build the target, the corresponding attribute should -specify a transition to an exec configuration. This causes the tool and all its -dependencies to build for the execution platform. - -For each dependency attribute, you can use `cfg` to decide if dependencies -should build in the same configuration or transition to an exec configuration. -If a dependency attribute has the flag `executable = True`, `cfg` must be set -explicitly. This is to guard against accidentally building a tool for the wrong -configuration. -[See example](https://github.com/bazelbuild/examples/blob/main/rules/actions_run/execute.bzl) - -In general, sources, dependent libraries, and executables that will be needed at -runtime can use the same configuration. - -Tools that are executed as part of the build (such as compilers or code generators) -should be built for an exec configuration. In this case, specify `cfg = "exec"` in -the attribute. - -Otherwise, executables that are used at runtime (such as part of a test) should -be built for the target configuration. In this case, specify `cfg = "target"` in -the attribute. - -`cfg = "target"` doesn't actually do anything: it's purely a convenience value to -help rule designers be explicit about their intentions. When `executable = False`, -`cfg` is optional; only set it when it truly helps readability. - -You can also use `cfg = my_transition` to use -[user-defined transitions](/extending/config#user-defined-transitions), which allow -rule authors a great deal of flexibility in changing configurations, with the -drawback of -[making the build graph larger and less comprehensible](/extending/config#memory-and-performance-considerations). - -**Note**: Historically, Bazel didn't have the concept of execution platforms, -and instead all build actions were considered to run on the host machine. Bazel -versions before 6.0 created a distinct "host" configuration to represent this. -If you see references to "host" in code or old documentation, that's what this -refers to. We recommend using Bazel 6.0 or newer to avoid this extra conceptual -overhead. - - - -### Configuration fragments - -Rules may access -[configuration fragments](/rules/lib/fragments) such as -`cpp` and `java`. However, all required fragments must be declared in -order to avoid access errors: - -```python -def _impl(ctx): - # Using ctx.fragments.cpp leads to an error since it was not declared. - x = ctx.fragments.java - ... - -my_rule = rule( - implementation = _impl, - fragments = ["java"], # Required fragments of the target configuration - ... -) -``` - -### Runfiles symlinks - -Normally, the relative path of a file in the runfiles tree is the same as the -relative path of that file in the source tree or generated output tree. If these -need to be different for some reason, you can specify the `root_symlinks` or -`symlinks` arguments. `root_symlinks` is a dictionary mapping paths to -files, where the paths are relative to the root of the runfiles directory.
The -`symlinks` dictionary is the same, but paths are implicitly prefixed with the -name of the main workspace (*not* the name of the repository containing the -current target). - -```python - ... - runfiles = ctx.runfiles( - root_symlinks = {"some/path/here.foo": ctx.file.some_data_file2}, - symlinks = {"some/path/here.bar": ctx.file.some_data_file3}, - ) - # Creates something like: - # sometarget.runfiles/ - # some/ - # path/ - # here.foo -> some_data_file2 - # <workspace_name>/ - # some/ - # path/ - # here.bar -> some_data_file3 -``` - -If `symlinks` or `root_symlinks` is used, be careful not to map two different -files to the same path in the runfiles tree. This will cause the build to fail -with an error describing the conflict. To fix, you will need to modify your -`ctx.runfiles` arguments to remove the collision. This checking will be done for -any targets using your rule, as well as targets of any kind that depend on those -targets. This is especially risky if your tool is likely to be used transitively -by another tool; symlink names must be unique across the runfiles of a tool and -all of its dependencies. - -### Code coverage - -When the [`coverage`](/reference/command-line-reference#coverage) command is run, -the build may need to add coverage instrumentation for certain targets. The -build also gathers the list of source files that are instrumented. The subset of -targets that are considered is controlled by the flag -[`--instrumentation_filter`](/reference/command-line-reference#flag--instrumentation_filter). -Test targets are excluded, unless -[`--instrument_test_targets`](/reference/command-line-reference#flag--instrument_test_targets) -is specified. - -If a rule implementation adds coverage instrumentation at build time, it needs -to account for that in its implementation function. -[ctx.coverage_instrumented](/rules/lib/builtins/ctx#coverage_instrumented) returns -`True` in coverage mode if a target's sources should be instrumented: - -```python -# Are this rule's sources instrumented? -if ctx.coverage_instrumented(): - # Do something to turn on coverage for this compile action -``` - -Logic that always needs to be on in coverage mode (whether a target's sources -specifically are instrumented or not) can be conditioned on -[ctx.configuration.coverage_enabled](/rules/lib/builtins/configuration#coverage_enabled). - -If the rule directly includes sources from its dependencies before compilation -(such as header files), it may also need to turn on compile-time instrumentation if -the dependencies' sources should be instrumented: - -```python -# Are this rule's sources or any of the sources for its direct dependencies -# in deps instrumented? -if (ctx.configuration.coverage_enabled and - (ctx.coverage_instrumented() or - any([ctx.coverage_instrumented(dep) for dep in ctx.attr.deps]))): - # Do something to turn on coverage for this compile action -``` - -Rules should also provide information about which attributes are relevant for -coverage with the `InstrumentedFilesInfo` provider, constructed using -[`coverage_common.instrumented_files_info`](/rules/lib/toplevel/coverage_common#instrumented_files_info). -The `dependency_attributes` parameter of `instrumented_files_info` should list -all runtime dependency attributes, including code dependencies like `deps` and -data dependencies like `data`. The `source_attributes` parameter should list the -rule's source files attributes if coverage instrumentation might be added: - -```python -def _example_library_impl(ctx): - ... - return [ - ...
- coverage_common.instrumented_files_info( - ctx, - dependency_attributes = ["deps", "data"], - # Omitted if coverage is not supported for this rule: - source_attributes = ["srcs", "hdrs"], - ) - ... - ] -``` - -If `InstrumentedFilesInfo` is not returned, a default one is created with each -non-tool [dependency attribute](#dependency_attributes) that doesn't set -[`cfg`](#configuration) to `"exec"` in the attribute schema in -`dependency_attributes`. (This isn't ideal behavior, since it puts attributes -like `srcs` in `dependency_attributes` instead of `source_attributes`, but it -avoids the need for explicit coverage configuration for all rules in the -dependency chain.) - -#### Test rules - -Test rules require additional setup to generate coverage reports. The rule -itself has to add the following implicit attributes: - -```python -my_test = rule( - ..., - attrs = { - ..., - # Implicit dependencies used by Bazel to generate coverage reports. - "_lcov_merger": attr.label( - default = configuration_field(fragment = "coverage", name = "output_generator"), - executable = True, - cfg = config.exec(exec_group = "test"), - ), - "_collect_cc_coverage": attr.label( - default = "@bazel_tools//tools/test:collect_cc_coverage", - executable = True, - cfg = config.exec(exec_group = "test"), - ) - }, - test = True, -) -``` - -By using `configuration_field`, the dependency on the Java LCOV merger tool can -be avoided as long as coverage is not requested. - -When the test is run, it should emit coverage information in the form of one or -more [LCOV files](https://manpages.debian.org/unstable/lcov/geninfo.1.en.html#TRACEFILE_FORMAT) -with unique names into the directory specified by the `COVERAGE_DIR` environment -variable. Bazel will then merge these files into a single LCOV file using the -`_lcov_merger` tool. If present, it will also collect C/C++ coverage using the -`_collect_cc_coverage` tool. - -### Validation Actions - -Sometimes you need to validate something about the build, and the -information required to do that validation is available only in artifacts -(source files or generated files). Because this information is in artifacts, -rules can't do this validation at analysis time, because rules can't read -files. Instead, actions must do this validation at execution time. When -validation fails, the action will fail, and hence so will the build. - -Examples of validations that might be run are static analysis, linting, -dependency and consistency checks, and style checks. - -Validation actions can also help to improve build performance by moving parts -of actions that are not required for building artifacts into separate actions. -For example, if a single action that does compilation and linting can be -separated into a compilation action and a linting action, then the linting -action can be run as a validation action and run in parallel with other actions. - -These "validation actions" often don't produce anything that is used elsewhere -in the build, since they only need to assert things about their inputs. This -presents a problem though: If a validation action doesn't produce anything that -is used elsewhere in the build, how does a rule get the action to run? -Historically, the approach was to have the validation action output an empty -file, and artificially add that output to the inputs of some other important -action in the build: - - - -This works, because Bazel will always run the validation action when the compile -action is run, but this has significant drawbacks: - -1.
The validation action is in the critical path of the build. Because Bazel -thinks the empty output is required to run the compile action, it will run the -validation action first, even though the compile action will ignore the input. -This reduces parallelism and slows down builds. - -2. If other actions in the build might run instead of the -compile action, then the empty outputs of validation actions need to be added to -those actions as well (`java_library`'s source jar output, for example). This is -also a problem if new actions that might run instead of the compile action are -added later, and the empty validation output is accidentally left off. - -The solution to these problems is to use the Validations Output Group. - -#### Validations Output Group - -The Validations Output Group is an output group designed to hold the otherwise -unused outputs of validation actions, so that they don't need to be artificially -added to the inputs of other actions. - -This group is special in that its outputs are always requested, regardless of -the value of the `--output_groups` flag, and regardless of how the target is -depended upon (for example, on the command line, as a dependency, or through -implicit outputs of the target). Note that normal caching and incrementality -still apply: if the inputs to the validation action have not changed and the -validation action previously succeeded, then the validation action won't be -run. - - - -Using this output group still requires that validation actions output some file, -even an empty one. This might require wrapping some tools that normally don't -create outputs so that a file is created. - -A target's validation actions are not run in three cases: - -* When the target is depended upon as a tool -* When the target is depended upon as an implicit dependency (for example, an - attribute that starts with "_") -* When the target is built in the exec configuration. - -It is assumed that these targets have their own -separate builds and tests that would uncover any validation failures. - -#### Using the Validations Output Group - -The Validations Output Group is named `_validation` and is used like any other -output group: - -```python -def _rule_with_validation_impl(ctx): - - ctx.actions.write(ctx.outputs.main, "main output\n") - ctx.actions.write(ctx.outputs.implicit, "implicit output\n") - - validation_output = ctx.actions.declare_file(ctx.attr.name + ".validation") - ctx.actions.run( - outputs = [validation_output], - executable = ctx.executable._validation_tool, - arguments = [validation_output.path], - ) - - return [ - DefaultInfo(files = depset([ctx.outputs.main])), - OutputGroupInfo(_validation = depset([validation_output])), - ] - - -rule_with_validation = rule( - implementation = _rule_with_validation_impl, - outputs = { - "main": "%{name}.main", - "implicit": "%{name}.implicit", - }, - attrs = { - "_validation_tool": attr.label( - default = Label("//validation_actions:validation_tool"), - executable = True, - cfg = "exec" - ), - } -) -``` - -Notice that the validation output file is not added to the `DefaultInfo` or the -inputs to any other action. The validation action for a target of this rule kind -will still run if the target is depended upon by label, or any of the target's -implicit outputs are directly or indirectly depended upon. - -It is usually important that the outputs of validation actions only go into the -validation output group, and are not added to the inputs of other actions, as -this could defeat parallelism gains. 
Note however that Bazel doesn't -have any special checking to enforce this. Therefore, you should test -that validation action outputs are not added to the inputs of any actions in the -tests for Starlark rules. For example: - -```python -load("@bazel_skylib//lib:unittest.bzl", "analysistest") - -def _validation_outputs_test_impl(ctx): - env = analysistest.begin(ctx) - - actions = analysistest.target_actions(env) - target = analysistest.target_under_test(env) - validation_outputs = target.output_groups._validation.to_list() - for action in actions: - for validation_output in validation_outputs: - if validation_output in action.inputs.to_list(): - analysistest.fail(env, - "%s is a validation action output, but is an input to action %s" % ( - validation_output, action)) - - return analysistest.end(env) - -validation_outputs_test = analysistest.make(_validation_outputs_test_impl) -``` - -#### Validation Actions Flag - -Running validation actions is controlled by the `--run_validations` command line -flag, which defaults to true. - -## Deprecated features - -### Deprecated predeclared outputs - -There are two **deprecated** ways of using predeclared outputs: - -* The [`outputs`](/rules/lib/globals/bzl#rule.outputs) parameter of `rule` specifies - a mapping between output attribute names and string templates for generating - predeclared output labels. Prefer using non-predeclared outputs and - explicitly adding outputs to `DefaultInfo.files`. Use the rule target's - label as input for rules which consume the output instead of a predeclared - output's label. - -* For [executable rules](#executable-rules), `ctx.outputs.executable` refers - to a predeclared executable output with the same name as the rule target. - Prefer declaring the output explicitly, for example with - `ctx.actions.declare_file(ctx.label.name)`, and ensure that the command that - generates the executable sets its permissions to allow execution. Explicitly - pass the executable output to the `executable` parameter of `DefaultInfo`. - -### Runfiles features to avoid - -[`ctx.runfiles`](/rules/lib/builtins/ctx#runfiles) and the [`runfiles`](/rules/lib/builtins/runfiles) -type have a complex set of features, many of which are kept for legacy reasons. -The following recommendations help reduce complexity: - -* **Avoid** use of the `collect_data` and `collect_default` modes of - [`ctx.runfiles`](/rules/lib/builtins/ctx#runfiles). These modes implicitly collect - runfiles across certain hardcoded dependency edges in confusing ways. - Instead, add files using the `files` or `transitive_files` parameters of - `ctx.runfiles`, or by merging in runfiles from dependencies with - `runfiles = runfiles.merge(dep[DefaultInfo].default_runfiles)`. - -* **Avoid** use of the `data_runfiles` and `default_runfiles` of the - `DefaultInfo` constructor. Specify `DefaultInfo(runfiles = ...)` instead. - The distinction between "default" and "data" runfiles is maintained for - legacy reasons. For example, some rules put their default outputs in - `data_runfiles`, but not `default_runfiles`. Instead of using - `data_runfiles`, rules should *both* include default outputs and merge in - `default_runfiles` from attributes which provide runfiles (often - [`data`](/reference/be/common-definitions#common-attributes.data)). - -* When retrieving `runfiles` from `DefaultInfo` (generally only for merging - runfiles between the current rule and its dependencies), use - `DefaultInfo.default_runfiles`, **not** `DefaultInfo.data_runfiles`. 
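To make the preceding recommendations concrete, here is a minimal sketch (the attribute names `deps` and `data` and the rest of the rule are assumed) that builds a single runfiles set by listing files explicitly and merging in dependencies' default runfiles:

```python
def _my_binary_impl(ctx):
    # Add files explicitly instead of using collect_data/collect_default.
    runfiles = ctx.runfiles(files = ctx.files.data)
    # Merge in default_runfiles from attributes that provide runfiles.
    for dep in ctx.attr.deps + ctx.attr.data:
        runfiles = runfiles.merge(dep[DefaultInfo].default_runfiles)
    # Return a single runfiles set; avoid data_runfiles/default_runfiles.
    return [DefaultInfo(runfiles = runfiles)]
```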
- -### Migrating from legacy providers - -Historically, Bazel providers were simple fields on the `Target` object. They -were accessed using the dot operator, and they were created by putting the field -in a [`struct`](/rules/lib/builtins/struct) returned by the rule's -implementation function instead of a list of provider objects: - -```python -return struct(example_info = struct(headers = depset(...))) -``` - -Such providers can be retrieved from the corresponding field of the `Target` object: - -```python -transitive_headers = [hdr.example_info.headers for hdr in ctx.attr.hdrs] -``` - -*This style is deprecated and should not be used in new code;* see the following for -information that may help you migrate. The new provider mechanism avoids name -clashes. It also supports data hiding, by requiring any code accessing a -provider instance to retrieve it using the provider symbol. - -For the moment, legacy providers are still supported. A rule can return both -legacy and modern providers as follows: - -```python -def _old_rule_impl(ctx): - ... - legacy_data = struct(x = "foo", ...) - modern_data = MyInfo(y = "bar", ...) - # When any legacy providers are returned, the top-level returned value is a - # struct. - return struct( - # One key = value entry for each legacy provider. - legacy_info = legacy_data, - ... - # Additional modern providers: - providers = [modern_data, ...]) -``` - -If `dep` is the resulting `Target` object for an instance of this rule, the -providers and their contents can be retrieved as `dep.legacy_info.x` and -`dep[MyInfo].y`. - -In addition to `providers`, the returned struct can also take several other -fields that have special meaning (and thus don't create a corresponding legacy -provider): - -* The fields `files`, `runfiles`, `data_runfiles`, `default_runfiles`, and - `executable` correspond to the same-named fields of - [`DefaultInfo`](/rules/lib/providers/DefaultInfo). It is not allowed to specify any of - these fields while also returning a `DefaultInfo` provider. - -* The field `output_groups` takes a struct value and corresponds to an - [`OutputGroupInfo`](/rules/lib/providers/OutputGroupInfo). - -In [`provides`](/rules/lib/globals/bzl#rule.provides) declarations of rules, and in -[`providers`](/rules/lib/toplevel/attr#label_list.providers) declarations of dependency -attributes, legacy providers are passed in as strings and modern providers are -passed in by their `Info` symbol. Be sure to change from strings to symbols -when migrating. For complex or large rule sets where it is difficult to update -all rules atomically, you may have an easier time if you follow this sequence of -steps: - -1. Modify the rules that produce the legacy provider to produce both the legacy - and modern providers, using the preceding syntax. For rules that declare they - return the legacy provider, update that declaration to include both the - legacy and modern providers. - -2. Modify the rules that consume the legacy provider to instead consume the - modern provider. If any attribute declarations require the legacy provider, - also update them to instead require the modern provider. Optionally, you can - interleave this work with step 1 by having consumers accept or require either - provider: Test for the presence of the legacy provider using - `hasattr(target, 'foo')`, or the new provider using `FooInfo in target` (see - the sketch after this list). - -3. Fully remove the legacy provider from all rules.
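Here is a minimal sketch of that interleaving in step 2, assuming a hypothetical legacy provider field `foo` and a modern `FooInfo` symbol; the consumer accepts whichever form a dependency currently returns:

```python
def _consumer_impl(ctx):
    for dep in ctx.attr.deps:
        if FooInfo in dep:
            # Modern provider, returned by already-migrated rules.
            info = dep[FooInfo]
        elif hasattr(dep, "foo"):
            # Legacy struct provider, returned by not-yet-migrated rules.
            info = dep.foo
        else:
            fail("dependency %s provides neither FooInfo nor foo" % dep.label)
        ...
```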
diff --git a/8.4.2/extending/toolchains.mdx b/8.4.2/extending/toolchains.mdx deleted file mode 100644 index b904cbe..0000000 --- a/8.4.2/extending/toolchains.mdx +++ /dev/null @@ -1,600 +0,0 @@ ---- -title: 'Toolchains' ---- - - - -This page describes the toolchain framework, which is a way for rule authors to -decouple their rule logic from platform-based selection of tools. It is -recommended to read the [rules](/extending/rules) and [platforms](/extending/platforms) -pages before continuing. This page covers why toolchains are needed, how to -define and use them, and how Bazel selects an appropriate toolchain based on -platform constraints. - -## Motivation - -Let's first look at the problem toolchains are designed to solve. Suppose you -are writing rules to support the "bar" programming language. Your `bar_binary` -rule would compile `*.bar` files using the `barc` compiler, a tool that itself -is built as another target in your workspace. Since users who write `bar_binary` -targets shouldn't have to specify a dependency on the compiler, you make it an -implicit dependency by adding it to the rule definition as a private attribute. - -```python -bar_binary = rule( - implementation = _bar_binary_impl, - attrs = { - "srcs": attr.label_list(allow_files = True), - ... - "_compiler": attr.label( - default = "//bar_tools:barc_linux", # the compiler running on linux - providers = [BarcInfo], - ), - }, -) -``` - -`//bar_tools:barc_linux` is now a dependency of every `bar_binary` target, so -it'll be built before any `bar_binary` target. It can be accessed by the rule's -implementation function just like any other attribute: - -```python -BarcInfo = provider( - doc = "Information about how to invoke the barc compiler.", - # In the real world, compiler_path and system_lib might hold File objects, - # but for simplicity they are strings for this example. arch_flags is a list - # of strings. - fields = ["compiler_path", "system_lib", "arch_flags"], -) - -def _bar_binary_impl(ctx): - ... - info = ctx.attr._compiler[BarcInfo] - command = "%s -l %s %s" % ( - info.compiler_path, - info.system_lib, - " ".join(info.arch_flags), - ) - ... -``` - -The issue here is that the compiler's label is hardcoded into `bar_binary`, yet -different targets may need different compilers depending on what platform they -are being built for and what platform they are being built on -- called the -*target platform* and *execution platform*, respectively. Furthermore, the rule -author does not necessarily even know all the available tools and platforms, so -it is not feasible to hardcode them in the rule's definition. - -A less-than-ideal solution would be to shift the burden onto users, by making -the `_compiler` attribute non-private. Then individual targets could be -hardcoded to build for one platform or another. 
- -```python -bar_binary( - name = "myprog_on_linux", - srcs = ["mysrc.bar"], - compiler = "//bar_tools:barc_linux", -) - -bar_binary( - name = "myprog_on_windows", - srcs = ["mysrc.bar"], - compiler = "//bar_tools:barc_windows", -) -``` - -You can improve on this solution by using `select` to choose the `compiler` -[based on the platform](/docs/configurable-attributes): - -```python -config_setting( - name = "on_linux", - constraint_values = [ - "@platforms//os:linux", - ], -) - -config_setting( - name = "on_windows", - constraint_values = [ - "@platforms//os:windows", - ], -) - -bar_binary( - name = "myprog", - srcs = ["mysrc.bar"], - compiler = select({ - ":on_linux": "//bar_tools:barc_linux", - ":on_windows": "//bar_tools:barc_windows", - }), -) -``` - -But this is tedious and a bit much to ask of every single `bar_binary` user. -If this style is not used consistently throughout the workspace, it leads to -builds that work fine on a single platform but fail when extended to -multi-platform scenarios. It also does not address the problem of adding support -for new platforms and compilers without modifying existing rules or targets. - -The toolchain framework solves this problem by adding an extra level of -indirection. Essentially, you declare that your rule has an abstract dependency -on *some* member of a family of targets (a toolchain type), and Bazel -automatically resolves this to a particular target (a toolchain) based on the -applicable platform constraints. Neither the rule author nor the target author -need know the complete set of available platforms and toolchains. - -## Writing rules that use toolchains - -Under the toolchain framework, instead of having rules depend directly on tools, -they instead depend on *toolchain types*. A toolchain type is a simple target -that represents a class of tools that serve the same role for different -platforms. For instance, you can declare a type that represents the bar -compiler: - -```python -# By convention, toolchain_type targets are named "toolchain_type" and -# distinguished by their package path. So the full path for this would be -# //bar_tools:toolchain_type. -toolchain_type(name = "toolchain_type") -``` - -The rule definition in the previous section is modified so that instead of -taking in the compiler as an attribute, it declares that it consumes a -`//bar_tools:toolchain_type` toolchain. - -```python -bar_binary = rule( - implementation = _bar_binary_impl, - attrs = { - "srcs": attr.label_list(allow_files = True), - ... - # No `_compiler` attribute anymore. - }, - toolchains = ["//bar_tools:toolchain_type"], -) -``` - -The implementation function now accesses this dependency under `ctx.toolchains` -instead of `ctx.attr`, using the toolchain type as the key. - -```python -def _bar_binary_impl(ctx): - ... - info = ctx.toolchains["//bar_tools:toolchain_type"].barcinfo - # The rest is unchanged. - command = "%s -l %s %s" % ( - info.compiler_path, - info.system_lib, - " ".join(info.arch_flags), - ) - ... -``` - -`ctx.toolchains["//bar_tools:toolchain_type"]` returns the -[`ToolchainInfo` provider](/rules/lib/toplevel/platform_common#ToolchainInfo) -of whatever target Bazel resolved the toolchain dependency to. The fields of the -`ToolchainInfo` object are set by the underlying tool's rule; in the next -section, this rule is defined such that there is a `barcinfo` field that wraps -a `BarcInfo` object. - -Bazel's procedure for resolving toolchains to targets is described -[below](#toolchain-resolution). 
Only the resolved toolchain target is actually -made a dependency of the `bar_binary` target, not the whole space of candidate -toolchains. - -### Mandatory and Optional Toolchains - -By default, when a rule expresses a toolchain type dependency using a bare label -(as shown above), the toolchain type is considered to be **mandatory**. If Bazel -is unable to find a matching toolchain (see -[Toolchain resolution](#toolchain-resolution) below) for a mandatory toolchain -type, this is an error and analysis halts. - -It is possible instead to declare an **optional** toolchain type dependency, as -follows: - -```python -bar_binary = rule( - ... - toolchains = [ - config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = False), - ], -) -``` - -When an optional toolchain type cannot be resolved, analysis continues, and the -result of `ctx.toolchains["//bar_tools:toolchain_type"]` is `None`. - -The [`config_common.toolchain_type`](/rules/lib/toplevel/config_common#toolchain_type) -function defaults to mandatory. - -The following forms can be used: - -- Mandatory toolchain types: - - `toolchains = ["//bar_tools:toolchain_type"]` - - `toolchains = [config_common.toolchain_type("//bar_tools:toolchain_type")]` - - `toolchains = [config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = True)]` -- Optional toolchain types: - - `toolchains = [config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = False)]` - -```python -bar_binary = rule( - ... - toolchains = [ - "//foo_tools:toolchain_type", - config_common.toolchain_type("//bar_tools:toolchain_type", mandatory = False), - ], -) -``` - -You can also mix and match forms in the same rule. However, if the same -toolchain type is listed multiple times, the most strict version is used, -where mandatory is stricter than optional. - -### Writing aspects that use toolchains - -Aspects have access to the same toolchain API as rules: you can define required -toolchain types, access toolchains via the context, and use them to generate new -actions using the toolchain. - -```py -bar_aspect = aspect( - implementation = _bar_aspect_impl, - attrs = {}, - toolchains = ['//bar_tools:toolchain_type'], -) - -def _bar_aspect_impl(target, ctx): - toolchain = ctx.toolchains['//bar_tools:toolchain_type'] - # Use the toolchain provider like in a rule. - return [] -``` - -## Defining toolchains - -To define some toolchains for a given toolchain type, you need three things: - -1. A language-specific rule representing the kind of tool or tool suite. By - convention this rule's name is suffixed with "\_toolchain". - - 1. **Note:** The `\_toolchain` rule cannot create any build actions. - Rather, it collects artifacts from other rules and forwards them to the - rule that uses the toolchain. That rule is responsible for creating all - build actions. - -2. Several targets of this rule type, representing versions of the tool or tool - suite for different platforms. - -3. For each such target, an associated target of the generic - [`toolchain`](/reference/be/platforms-and-toolchains#toolchain) - rule, to provide metadata used by the toolchain framework. This `toolchain` - target also refers to the `toolchain_type` associated with this toolchain. - This means that a given `_toolchain` rule could be associated with any - `toolchain_type`, and that it's only in a `toolchain` instance that uses - this `_toolchain` rule that the rule is associated with a `toolchain_type`.
- -For our running example, here's a definition for a `bar_toolchain` rule. Our -example has only a compiler, but other tools such as a linker could also be -grouped underneath it. - -```python -def _bar_toolchain_impl(ctx): - toolchain_info = platform_common.ToolchainInfo( - barcinfo = BarcInfo( - compiler_path = ctx.attr.compiler_path, - system_lib = ctx.attr.system_lib, - arch_flags = ctx.attr.arch_flags, - ), - ) - return [toolchain_info] - -bar_toolchain = rule( - implementation = _bar_toolchain_impl, - attrs = { - "compiler_path": attr.string(), - "system_lib": attr.string(), - "arch_flags": attr.string_list(), - }, -) -``` - -The rule must return a `ToolchainInfo` provider, which becomes the object that -the consuming rule retrieves using `ctx.toolchains` and the label of the -toolchain type. `ToolchainInfo`, like `struct`, can hold arbitrary field-value -pairs. The specification of exactly what fields are added to the `ToolchainInfo` -should be clearly documented at the toolchain type. In this example, the values -are returned wrapped in a `BarcInfo` object to reuse the schema defined above; this -style may be useful for validation and code reuse. - -Now you can define targets for specific `barc` compilers. - -```python -bar_toolchain( - name = "barc_linux", - arch_flags = [ - "--arch=Linux", - "--debug_everything", - ], - compiler_path = "/path/to/barc/on/linux", - system_lib = "/usr/lib/libbarc.so", -) - -bar_toolchain( - name = "barc_windows", - arch_flags = [ - "--arch=Windows", - # Different flags, no debug support on windows. - ], - compiler_path = "C:\\path\\on\\windows\\barc.exe", - system_lib = "C:\\path\\on\\windows\\barclib.dll", -) -``` - -Finally, you create `toolchain` definitions for the two `bar_toolchain` targets. -These definitions link the language-specific targets to the toolchain type and -provide the constraint information that tells Bazel when the toolchain is -appropriate for a given platform. - -```python -toolchain( - name = "barc_linux_toolchain", - exec_compatible_with = [ - "@platforms//os:linux", - "@platforms//cpu:x86_64", - ], - target_compatible_with = [ - "@platforms//os:linux", - "@platforms//cpu:x86_64", - ], - toolchain = ":barc_linux", - toolchain_type = ":toolchain_type", -) - -toolchain( - name = "barc_windows_toolchain", - exec_compatible_with = [ - "@platforms//os:windows", - "@platforms//cpu:x86_64", - ], - target_compatible_with = [ - "@platforms//os:windows", - "@platforms//cpu:x86_64", - ], - toolchain = ":barc_windows", - toolchain_type = ":toolchain_type", -) -``` - -The use of relative path syntax above suggests these definitions are all in the -same package, but there's no reason the toolchain type, language-specific -toolchain targets, and `toolchain` definition targets can't all be in separate -packages. - -See the [`go_toolchain`](https://github.com/bazelbuild/rules_go/blob/master/go/private/go_toolchain.bzl) -for a real-world example. - -### Toolchains and configurations - -An important question for rule authors is, when a `bar_toolchain` target is -analyzed, what [configuration](/reference/glossary#configuration) does it see, and what transitions -should be used for dependencies? The example above uses string attributes, but -what would happen for a more complicated toolchain that depends on other targets -in the Bazel repository? - -Let's see a more complex version of `bar_toolchain`: - -```python -def _bar_toolchain_impl(ctx): - # The implementation is mostly the same as above, so skipping.
- pass - -bar_toolchain = rule( - implementation = _bar_toolchain_impl, - attrs = { - "compiler": attr.label( - executable = True, - mandatory = True, - cfg = "exec", - ), - "system_lib": attr.label( - mandatory = True, - cfg = "target", - ), - "arch_flags": attr.string_list(), - }, -) -``` - -The use of [`attr.label`](/rules/lib/toplevel/attr#label) is the same as for a standard rule, -but the meaning of the `cfg` parameter is slightly different. - -The dependency from a target (called the "parent") to a toolchain via toolchain -resolution uses a special configuration transition called the "toolchain -transition". The toolchain transition keeps the configuration the same, except -that it forces the execution platform to be the same for the toolchain as for -the parent (otherwise, toolchain resolution for the toolchain could pick any -execution platform, and wouldn't necessarily be the same as for parent). This -allows any `exec` dependencies of the toolchain to also be executable for the -parent's build actions. Any of the toolchain's dependencies which use `cfg = -"target"` (or which don't specify `cfg`, since "target" is the default) are -built for the same target platform as the parent. This allows toolchain rules to -contribute both libraries (the `system_lib` attribute above) and tools (the -`compiler` attribute) to the build rules which need them. The system libraries -are linked into the final artifact, and so need to be built for the same -platform, whereas the compiler is a tool invoked during the build, and needs to -be able to run on the execution platform. - -## Registering and building with toolchains - -At this point all the building blocks are assembled, and you just need to make -the toolchains available to Bazel's resolution procedure. This is done by -registering the toolchain, either in a `MODULE.bazel` file using -`register_toolchains()`, or by passing the toolchains' labels on the command -line using the `--extra_toolchains` flag. - -```python -register_toolchains( - "//bar_tools:barc_linux_toolchain", - "//bar_tools:barc_windows_toolchain", - # Target patterns are also permitted, so you could have also written: - # "//bar_tools:all", - # or even - # "//bar_tools/...", -) -``` - -When using target patterns to register toolchains, the order in which the -individual toolchains are registered is determined by the following rules: - -* The toolchains defined in a subpackage of a package are registered before the - toolchains defined in the package itself. -* Within a package, toolchains are registered in the lexicographical order of - their names. - -Now when you build a target that depends on a toolchain type, an appropriate -toolchain will be selected based on the target and execution platforms. - -```python -# my_pkg/BUILD - -platform( - name = "my_target_platform", - constraint_values = [ - "@platforms//os:linux", - ], -) - -bar_binary( - name = "my_bar_binary", - ... -) -``` - -```sh -bazel build //my_pkg:my_bar_binary --platforms=//my_pkg:my_target_platform -``` - -Bazel will see that `//my_pkg:my_bar_binary` is being built with a platform that -has `@platforms//os:linux` and therefore resolve the -`//bar_tools:toolchain_type` reference to `//bar_tools:barc_linux_toolchain`. -This will end up building `//bar_tools:barc_linux` but not -`//bar_tools:barc_windows`. - -## Toolchain resolution - -Note: [Some Bazel rules](/concepts/platforms#status) do not yet support -toolchain resolution. 
- -For each target that uses toolchains, Bazel's toolchain resolution procedure -determines the target's concrete toolchain dependencies. The procedure takes as -input a set of required toolchain types, the target platform, the list of -available execution platforms, and the list of available toolchains. Its outputs -are a selected toolchain for each toolchain type as well as a selected execution -platform for the current target. - -The available execution platforms and toolchains are gathered from the -external dependency graph via -[`register_execution_platforms`](/rules/lib/globals/module#register_execution_platforms) -and -[`register_toolchains`](/rules/lib/globals/module#register_toolchains) calls in -`MODULE.bazel` files. -Additional execution platforms and toolchains may also be specified on the -command line via -[`--extra_execution_platforms`](/reference/command-line-reference#flag--extra_execution_platforms) -and -[`--extra_toolchains`](/reference/command-line-reference#flag--extra_toolchains). -The host platform is automatically included as an available execution platform. -Available platforms and toolchains are tracked as ordered lists for determinism, -with preference given to earlier items in the list. - -The set of available toolchains, in priority order, is created from -`--extra_toolchains` and `register_toolchains`: - -1. Toolchains registered using `--extra_toolchains` are added first. (Within - these, the **last** toolchain has highest priority.) -2. Toolchains registered using `register_toolchains` in the transitive external - dependency graph, in the following order: (Within these, the **first** - mentioned toolchain has highest priority.) - 1. Toolchains registered by the root module (as in, the `MODULE.bazel` at the - workspace root); - 2. Toolchains registered in the user's `WORKSPACE` file, including in any - macros invoked from there; - 3. Toolchains registered by non-root modules (as in, dependencies specified by - the root module, and their dependencies, and so forth); - 4. Toolchains registered in the "WORKSPACE suffix"; this is only used by - certain native rules bundled with the Bazel installation. - -**NOTE:** [Pseudo-targets like `:all`, `:*`, and -`/...`](/run/build#specifying-build-targets) are ordered by Bazel's package -loading mechanism, which uses a lexicographic ordering. - -The resolution steps are as follows. - -1. A `target_compatible_with` or `exec_compatible_with` clause *matches* a - platform if, for each `constraint_value` in its list, the platform also has - that `constraint_value` (either explicitly or as a default). - - If the platform has `constraint_value`s from `constraint_setting`s not - referenced by the clause, these do not affect matching. - -1. If the target being built specifies the - [`exec_compatible_with` attribute](/reference/be/common-definitions#common.exec_compatible_with) - (or its rule definition specifies the - [`exec_compatible_with` argument](/rules/lib/globals/bzl#rule.exec_compatible_with)), - the list of available execution platforms is filtered to remove - any that do not match the execution constraints. - -1. The list of available toolchains is filtered to remove any toolchains - specifying `target_settings` that don't match the current configuration. - -1. For each available execution platform, you associate each toolchain type with - the first available toolchain, if any, that is compatible with this execution - platform and the target platform. - -1. 
Any execution platform that failed to find a compatible mandatory toolchain
-   for one of its toolchain types is ruled out. Of the remaining platforms, the
-   first one becomes the current target's execution platform, and its associated
-   toolchains (if any) become dependencies of the target.
-
-The chosen execution platform is used to run all actions that the target
-generates.
-
-In cases where the same target can be built in multiple configurations (such as
-for different CPUs) within the same build, the resolution procedure is applied
-independently to each version of the target.
-
-If the rule uses [execution groups](/extending/exec-groups), each execution
-group performs toolchain resolution separately, and each has its own execution
-platform and toolchains.
-
-## Debugging toolchains
-
-If you are adding toolchain support to an existing rule, use the
-`--toolchain_resolution_debug=regex` flag. During toolchain resolution, the flag
-provides verbose output for toolchain types or target names that match the
-regex. You can use `.*` to output all information. Bazel will output names of
-toolchains it checks and skips during the resolution process.
-
-If you'd like to see which [`cquery`](/query/cquery) dependencies are from toolchain
-resolution, use `cquery`'s [`--transitions`](/query/cquery#transitions) flag:
-
-```
-# Find all direct dependencies of //cc:my_cc_lib. This includes explicitly
-# declared dependencies, implicit dependencies, and toolchain dependencies.
-$ bazel cquery 'deps(//cc:my_cc_lib, 1)'
-//cc:my_cc_lib (96d6638)
-@bazel_tools//tools/cpp:toolchain (96d6638)
-@bazel_tools//tools/def_parser:def_parser (HOST)
-//cc:my_cc_dep (96d6638)
-@local_config_platform//:host (96d6638)
-@bazel_tools//tools/cpp:toolchain_type (96d6638)
-//:default_host_platform (96d6638)
-@local_config_cc//:cc-compiler-k8 (HOST)
-//cc:my_cc_lib.cc (null)
-@bazel_tools//tools/cpp:grep-includes (HOST)
-
-# Which of these are from toolchain resolution?
-$ bazel cquery 'deps(//cc:my_cc_lib, 1)' --transitions=lite | grep "toolchain dependency"
-  [toolchain dependency]#@local_config_cc//:cc-compiler-k8#HostTransition -> b6df211
-```
diff --git a/8.4.2/external/advanced.mdx b/8.4.2/external/advanced.mdx
deleted file mode 100644
index 26ece4d..0000000
--- a/8.4.2/external/advanced.mdx
+++ /dev/null
@@ -1,183 +0,0 @@
----
-title: 'Advanced topics on external dependencies'
----
-
-
-
-## Shadowing dependencies in WORKSPACE
-
-Note: This section applies to the [WORKSPACE
-system](/external/overview#workspace-system) only. For
-[Bzlmod](/external/overview#bzlmod), use a [multiple-version
-override](/external/module#multiple-version_override).
-
-Whenever possible, have a single version policy in your project, which is
-required for dependencies that you compile against and end up in your final
-binary. For other cases, you can shadow dependencies:
-
-myproject/WORKSPACE
-
-```python
-workspace(name = "myproject")
-
-local_repository(
-    name = "A",
-    path = "../A",
-)
-local_repository(
-    name = "B",
-    path = "../B",
-)
-```
-
-A/WORKSPACE
-
-```python
-workspace(name = "A")
-
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-http_archive(
-    name = "testrunner",
-    urls = ["https://github.com/testrunner/v1.zip"],
-    sha256 = "...",
-)
-```
-
-B/WORKSPACE
-
-```python
-workspace(name = "B")
-
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-http_archive(
-    name = "testrunner",
-    urls = ["https://github.com/testrunner/v2.zip"],
-    sha256 = "...",
-)
-```
-
-Both dependencies `A` and `B` depend on different versions of `testrunner`.
-Include both in `myproject` without conflict by giving them distinct names in
-`myproject/WORKSPACE`:
-
-```python
-workspace(name = "myproject")
-
-load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
-http_archive(
-    name = "testrunner-v1",
-    urls = ["https://github.com/testrunner/v1.zip"],
-    sha256 = "...",
-)
-http_archive(
-    name = "testrunner-v2",
-    urls = ["https://github.com/testrunner/v2.zip"],
-    sha256 = "...",
-)
-local_repository(
-    name = "A",
-    path = "../A",
-    repo_mapping = {"@testrunner": "@testrunner-v1"},
-)
-local_repository(
-    name = "B",
-    path = "../B",
-    repo_mapping = {"@testrunner": "@testrunner-v2"},
-)
-```
-
-You can also use this mechanism to join diamonds. For example, if `A` and `B`
-have the same dependency but call it by different names, join those dependencies
-in `myproject/WORKSPACE`.
-
-## Overriding repositories from the command line
-
-To override a declared repository with a local repository from the command line,
-use the
-[`--override_repository`](/reference/command-line-reference#flag--override_repository)
-flag. Using this flag changes the contents of external repositories without
-changing your source code.
-
-For example, to override `@foo` to the local directory `/path/to/local/foo`,
-pass the `--override_repository=foo=/path/to/local/foo` flag.
-
-Use cases include:
-
-* Debugging issues. For example, to override an `http_archive` repository to a
-  local directory where you can make changes more easily.
-* Vendoring. If you are in an environment where you cannot make network calls,
-  override the network-based repository rules to point to local directories
-  instead.
-
-Note: With [Bzlmod](/external/overview#bzlmod), remember to use canonical repo
-names here. Alternatively, use the
-[`--override_module`](/reference/command-line-reference#flag--override_module)
-flag to override a module to a local directory, similar to the
-[`local_path_override`](/rules/lib/globals/module#local_path_override) directive in
-`MODULE.bazel`.
-
-## Using proxies
-
-Bazel picks up proxy addresses from the `HTTPS_PROXY` and `HTTP_PROXY`
-environment variables and uses these to download `HTTP` and `HTTPS` files (if
-specified).
-
-## Support for IPv6
-
-On IPv6-only machines, Bazel can download dependencies with no changes. However,
-on dual-stack IPv4/IPv6 machines Bazel follows the same convention as Java,
-preferring IPv4 if enabled. In some situations, for example when the IPv4
-network cannot resolve/reach external addresses, this can cause `Network
-unreachable` exceptions and build failures. In these cases, you can override
-Bazel's behavior to prefer IPv6 by using the
-[`java.net.preferIPv6Addresses=true` system
-property](https://docs.oracle.com/javase/8/docs/api/java/net/doc-files/net-properties.html).
-Specifically:
-
-* Use the `--host_jvm_args=-Djava.net.preferIPv6Addresses=true` [startup
-  option](/docs/user-manual#startup-options), for example by adding the
-  following line in your [`.bazelrc` file](/run/bazelrc):
-
-  `startup --host_jvm_args=-Djava.net.preferIPv6Addresses=true`
-
-* When running Java build targets that need to connect to the internet (such
-  as for integration tests), use the
-  `--jvmopt=-Djava.net.preferIPv6Addresses=true` [tool
-  flag](/docs/user-manual#jvmopt). For example, include in your [`.bazelrc`
-  file](/run/bazelrc):
-
-  `build --jvmopt=-Djava.net.preferIPv6Addresses=true`
-
-* If you are using [`rules_jvm_external`](https://github.com/bazelbuild/rules_jvm_external)
-  for dependency version resolution, also add
-  `-Djava.net.preferIPv6Addresses=true` to the `COURSIER_OPTS` environment
-  variable to [provide JVM options for
-  Coursier](https://github.com/bazelbuild/rules_jvm_external#provide-jvm-options-for-coursier-with-coursier_opts).
-
-## Offline builds
-
-Sometimes you may wish to run a build offline, such as when traveling on an
-airplane. For such simple use cases, prefetch the needed repositories with
-`bazel fetch` or `bazel sync`. To disable fetching further repositories during
-the build, use the option `--nofetch`.
-
-For true offline builds, where a different entity supplies all needed files,
-Bazel supports the option `--distdir`. This flag tells Bazel to look first into
-the directories specified by that option when a repository rule asks Bazel to
-fetch a file with [`ctx.download`](/rules/lib/builtins/repository_ctx#download) or
-[`ctx.download_and_extract`](/rules/lib/builtins/repository_ctx#download_and_extract). If
-a hash sum of the needed file is provided, Bazel looks for a file matching the
-basename of the first URL and uses the local copy if the hash matches.
-
-Bazel itself uses this technique to bootstrap offline from the [distribution
-artifact](https://github.com/bazelbuild/bazel-website/blob/master/designs/_posts/2016-10-11-distribution-artifact.md).
-It does so by [collecting all the needed external
-dependencies](https://github.com/bazelbuild/bazel/blob/5cfa0303d6ac3b5bd031ff60272ce80a704af8c2/WORKSPACE#L116)
-in an internal
-[`distdir_tar`](https://github.com/bazelbuild/bazel/blob/5cfa0303d6ac3b5bd031ff60272ce80a704af8c2/distdir.bzl#L44).
-
-Bazel allows execution of arbitrary commands in repository rules without knowing
-if they call out to the network, and so cannot enforce fully offline builds. To
-test if a build works correctly offline, manually block off the network (as
-Bazel does in its [bootstrap
-test](https://cs.opensource.google/bazel/bazel/+/master:src/test/shell/bazel/BUILD;l=1073;drc=88c426e73cc0eb0a41c0d7995e36acd94e7c9a48)).
diff --git a/8.4.2/external/lockfile.mdx b/8.4.2/external/lockfile.mdx
deleted file mode 100644
index af13c56..0000000
--- a/8.4.2/external/lockfile.mdx
+++ /dev/null
@@ -1,287 +0,0 @@
----
-keywords: product:Bazel,lockfile,Bzlmod
-title: 'Bazel Lockfile'
----
-
-
-The lockfile feature in Bazel enables the recording of specific versions or
-dependencies of software libraries or packages required by a project. It
-achieves this by storing the result of module resolution and extension
-evaluation. The lockfile promotes reproducible builds, ensuring consistent
-development environments. Additionally, it enhances build efficiency by allowing
-Bazel to skip the parts of the resolution process that are unaffected by changes
-in project dependencies. Furthermore, the lockfile improves stability by
-preventing unexpected updates or breaking changes in external libraries, thereby
-reducing the risk of introducing bugs.
-
-## Lockfile Generation
-
-The lockfile is generated under the workspace root with the name
-`MODULE.bazel.lock`. It is created or updated during the build process,
-specifically after module resolution and extension evaluation. Importantly, it
-only includes dependencies that are included in the current invocation of the
-build.
-
-When changes occur in the project that affect its dependencies, the lockfile is
-automatically updated to reflect the new state. This ensures that the lockfile
-remains focused on the specific set of dependencies required for the current
-build, providing an accurate representation of the project's resolved
-dependencies.
-
-## Lockfile Usage
-
-The lockfile can be controlled by the flag
-[`--lockfile_mode`](/reference/command-line-reference#flag--lockfile_mode) to
-customize the behavior of Bazel when the project state differs from the
-lockfile. The available modes are:
-
-* `update` (Default): Use the information that is present in the lockfile to
-  skip downloads of known registry files and to avoid re-evaluating extensions
-  whose results are still up-to-date. If information is missing, it will
-  be added to the lockfile. In this mode, Bazel also avoids refreshing
-  mutable information, such as yanked versions, for dependencies that haven't
-  changed.
-* `refresh`: Like `update`, but mutable information is always refreshed when
-  switching to this mode and roughly every hour while in this mode.
-* `error`: Like `update`, but if any information is missing or out-of-date,
-  Bazel will fail with an error. This mode never changes the lockfile or
-  performs network requests during resolution. Module extensions that marked
-  themselves as `reproducible` may still perform network requests, but are
-  expected to always produce the same result.
-* `off`: The lockfile is neither checked nor updated.
-
-## Lockfile Benefits
-
-The lockfile offers several benefits and can be utilized in various ways:
-
-- **Reproducible builds.** By capturing the specific versions or dependencies
-  of software libraries, the lockfile ensures that builds are reproducible
-  across different environments and over time. Developers can rely on
-  consistent and predictable results when building their projects.
-
-- **Fast incremental resolutions.** The lockfile enables Bazel to avoid
-  downloading registry files that were already used in a previous build.
-  This significantly improves build efficiency, especially in scenarios where
-  resolution can be time-consuming.
-
-- **Stability and risk reduction.** The lockfile helps maintain stability by
-  preventing unexpected updates or breaking changes in external libraries. By
-  locking the dependencies to specific versions, the risk of introducing bugs
-  due to incompatible or untested updates is reduced.
-
-### Hidden lockfile
-
-Bazel also maintains another lockfile at
-`"$(bazel info output_base)"/MODULE.bazel.lock`. The format and contents of this
-lockfile are explicitly unspecified. It is only used as a performance
-optimization. While it can be deleted together with the output base via
-`bazel clean --expunge`, any need to do so is a bug in either Bazel itself or a
-module extension.
-
-## Lockfile Contents
-
-The lockfile contains all the necessary information to determine whether the
-project state has changed. It also includes the result of building the project
-in the current state. The lockfile consists of two main parts:
-
-1. Hashes of all remote files that are inputs to module resolution.
-2. For each module extension, the lockfile includes inputs that affect it,
-   represented by `bzlTransitiveDigest`, `usagesDigest` and other fields, as
-   well as the output of running that extension, referred to as
-   `generatedRepoSpecs`.
-
-Here is an example that demonstrates the structure of the lockfile, along with
-explanations for each section:
-
-```json
-{
-  "lockFileVersion": 10,
-  "registryFileHashes": {
-    "https://bcr.bazel.build/bazel_registry.json": "8a28e4af...5d5b3497",
-    "https://bcr.bazel.build/modules/foo/1.0/MODULE.bazel": "7cd0312e...5c96ace2",
-    "https://bcr.bazel.build/modules/foo/2.0/MODULE.bazel": "70390338...9fc57589",
-    "https://bcr.bazel.build/modules/foo/2.0/source.json": "7e3a9adf...170d94ad",
-    "https://registry.mycorp.com/modules/foo/1.0/MODULE.bazel": "not found",
-    ...
-  },
-  "selectedYankedVersions": {
-    "foo@2.0": "Yanked for demo purposes"
-  },
-  "moduleExtensions": {
-    "//:extension.bzl%lockfile_ext": {
-      "general": {
-        "bzlTransitiveDigest": "oWDzxG/aLnyY6Ubrfy....+Jp6maQvEPxn0pBM=",
-        "usagesDigest": "aLmqbvowmHkkBPve05yyDNGN7oh7QE9kBADr3QIZTZs=",
-        ...,
-        "generatedRepoSpecs": {
-          "hello": {
-            "bzlFile": "@@//:extension.bzl",
-            ...
-          }
-        }
-      }
-    },
-    "//:extension.bzl%lockfile_ext2": {
-      "os:macos": {
-        "bzlTransitiveDigest": "oWDzxG/aLnyY6Ubrfy....+Jp6maQvEPxn0pBM=",
-        "usagesDigest": "aLmqbvowmHkkBPve05y....yDNGN7oh7r3QIZTZs=",
-        ...,
-        "generatedRepoSpecs": {
-          "hello": {
-            "bzlFile": "@@//:extension.bzl",
-            ...
-          }
-        }
-      },
-      "os:linux": {
-        "bzlTransitiveDigest": "eWDzxG/aLsyY3Ubrto....+Jp4maQvEPxn0pLK=",
-        "usagesDigest": "aLmqbvowmHkkBPve05y....yDNGN7oh7r3QIZTZs=",
-        ...,
-        "generatedRepoSpecs": {
-          "hello": {
-            "bzlFile": "@@//:extension.bzl",
-            ...
-          }
-        }
-      }
-    }
-  }
-}
-```
-
-### Registry File Hashes
-
-The `registryFileHashes` section contains the hashes of all files from
-remote registries accessed during module resolution. Since the resolution
-algorithm is fully deterministic when given the same inputs and all remote
-inputs are hashed, this ensures a fully reproducible resolution result while
-avoiding excessive duplication of remote information in the lockfile. Note that
-this also requires recording when a particular registry didn't contain a certain
-module, but a registry with lower precedence did (see the "not found" entry in
-the example). This inherently mutable information can be updated via
-`bazel mod deps --lockfile_mode=refresh`.
-
-Bazel uses the hashes from the lockfile to look up registry files in the
-repository cache before downloading them, which speeds up subsequent
-resolutions.
-
-### Selected Yanked Versions
-
-The `selectedYankedVersions` section contains the yanked versions of modules
-that were selected by module resolution. Since this usually results in an error
-when trying to build, this section is only non-empty when yanked versions are
-explicitly allowed via `--allow_yanked_versions` or
-`BZLMOD_ALLOW_YANKED_VERSIONS`.
-
-This field is needed since, compared to module files, yanked version information
-is inherently mutable and thus can't be referenced by a hash. This information
-can be updated via `bazel mod deps --lockfile_mode=refresh`.
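-
-Before looking at the `moduleExtensions` entries in detail (next section), it
-may help to see what produces them. The following is a minimal, hypothetical
-sketch of what an extension like `//:extension.bzl%lockfile_ext` from the
-example above might contain (the repository contents are illustrative):
-
-```python
-# extension.bzl
-def _hello_repo_impl(repository_ctx):
-    # Create a trivial repository with one data file and a BUILD file.
-    repository_ctx.file("BUILD", 'exports_files(["data.txt"])')
-    repository_ctx.file("data.txt", "hello\n")
-
-_hello_repo = repository_rule(implementation = _hello_repo_impl)
-
-def _lockfile_ext_impl(module_ctx):
-    # Evaluating this extension generates the repo "hello", which is what
-    # shows up under "generatedRepoSpecs" in the lockfile above.
-    _hello_repo(name = "hello")
-
-lockfile_ext = module_extension(implementation = _lockfile_ext_impl)
-```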
-
-### Module Extensions
-
-The `moduleExtensions` section is a map that includes only the extensions used
-in the current or a previous invocation, while excluding any extensions that
-are no longer utilized. In other words, if an extension is not being used
-anymore across the dependency graph, it is removed from the `moduleExtensions`
-map.
-
-If an extension is independent of the operating system or architecture type,
-this section features only a single "general" entry. Otherwise, multiple
-entries are included, named after the OS, architecture, or both, with each
-corresponding to the result of evaluating the extension on those specifics.
-
-Each entry in the extension map corresponds to a used extension and is
-identified by its containing file and name. The corresponding value for each
-entry contains the relevant information associated with that extension:
-
-1. The `bzlTransitiveDigest` is the digest of the extension implementation
-   and the .bzl files transitively loaded by it.
-2. The `usagesDigest` is the digest of the _usages_ of the extension in the
-   dependency graph, which includes all tags.
-3. Further unspecified fields that track other inputs to the extension,
-   such as contents of files or directories it reads or environment
-   variables it uses.
-4. The `generatedRepoSpecs` encode the repositories created by the
-   extension with the current input.
-5. The optional `moduleExtensionMetadata` field contains metadata provided by
-   the extension such as whether certain repositories it created should be
-   imported via `use_repo` by the root module. This information powers the
-   `bazel mod tidy` command.
-
-Module extensions can opt out of being included in the lockfile by setting
-`reproducible = True` in the metadata they return. By doing so, they promise
-that they will always create the same repositories when given the same inputs.
-
-## Best Practices
-
-To maximize the benefits of the lockfile feature, consider the following best
-practices:
-
-* Regularly update the lockfile to reflect changes in project dependencies or
-  configuration. This ensures that subsequent builds are based on the most
-  up-to-date and accurate set of dependencies. To lock down all extensions
-  at once, run `bazel mod deps --lockfile_mode=update`.
-
-* Include the lockfile in version control to facilitate collaboration and
-  ensure that all team members have access to the same lockfile, promoting
-  consistent development environments across the project.
-
-* Use [`bazelisk`](/install/bazelisk) to run Bazel, and include a
-  `.bazelversion` file in version control that specifies the Bazel version
-  corresponding to the lockfile. Because Bazel itself is a dependency of
-  your build, the lockfile is specific to the Bazel version, and will
-  change even between [backwards compatible](/release/backward-compatibility)
-  Bazel releases. Using `bazelisk` ensures that all developers are using
-  a Bazel version that matches the lockfile.
-
-By following these best practices, you can effectively utilize the lockfile
-feature in Bazel, leading to more efficient, reliable, and collaborative
-software development workflows.
-
-## Merge Conflicts
-
-The lockfile format is designed to minimize merge conflicts, but they can still
-happen.
-
-### Automatic Resolution
-
-Bazel provides a custom
-[git merge driver](https://git-scm.com/docs/gitattributes#_defining_a_custom_merge_driver)
-to help resolve these conflicts automatically.
-
-Set up the driver by adding this line to a `.gitattributes` file in the root of
-your git repository:
-
-```gitattributes
-# A custom merge driver for the Bazel lockfile.
-# https://bazel.build/external/lockfile#automatic-resolution
-MODULE.bazel.lock merge=bazel-lockfile-merge
-```
-
-Then each developer who wants to use the driver has to register it once by
-following these steps:
-
-1. Install [jq](https://jqlang.github.io/jq/download/) (1.5 or higher).
-2. Run the following commands:
-
-```bash
-jq_script=$(curl https://raw.githubusercontent.com/bazelbuild/bazel/master/scripts/bazel-lockfile-merge.jq)
-printf '%s\n' "${jq_script}" | less # to optionally inspect the jq script
-git config --global merge.bazel-lockfile-merge.name "Merge driver for the Bazel lockfile (MODULE.bazel.lock)"
-git config --global merge.bazel-lockfile-merge.driver "jq -s '${jq_script}' -- %O %A %B > %A.jq_tmp && mv %A.jq_tmp %A"
-```
-
-### Manual Resolution
-
-Simple merge conflicts in the `registryFileHashes` and `selectedYankedVersions`
-fields can be safely resolved by keeping all the entries from both sides of the
-conflict.
-
-Other types of merge conflicts should not be resolved manually. Instead:
-
-1. Restore the previous state of the lockfile
-   via `git reset MODULE.bazel.lock && git checkout MODULE.bazel.lock`.
-2. Resolve any conflicts in the `MODULE.bazel` file.
-3. Run `bazel mod deps` to update the lockfile.
diff --git a/8.4.2/external/module.mdx b/8.4.2/external/module.mdx
deleted file mode 100644
index 6a9cf13..0000000
--- a/8.4.2/external/module.mdx
+++ /dev/null
@@ -1,223 +0,0 @@
----
-title: 'Bazel modules'
----
-
-
-
-A Bazel **module** is a Bazel project that can have multiple versions, each of
-which publishes metadata about other modules that it depends on. This is
-analogous to familiar concepts in other dependency management systems, such as a
-Maven *artifact*, an npm *package*, a Go *module*, or a Cargo *crate*.
-
-A module must have a `MODULE.bazel` file at its repo root. This file is the
-module's manifest, declaring its name, version, list of direct dependencies, and
-other information. For a basic example:
-
-```python
-module(name = "my-module", version = "1.0")
-
-bazel_dep(name = "rules_cc", version = "0.0.1")
-bazel_dep(name = "protobuf", version = "3.19.0")
-```
-
-See the [full list](/rules/lib/globals/module) of directives available in
-`MODULE.bazel` files.
-
-To perform module resolution, Bazel starts by reading the root module's
-`MODULE.bazel` file, and then repeatedly requests any dependency's
-`MODULE.bazel` file from a [Bazel registry](/external/registry) until it
-discovers the entire dependency graph.

-By default, Bazel then [selects](#version-selection) one version of each module
-to use. Bazel represents each module with a repo, and consults the registry
-again to learn how to define each of the repos.
-
-## Version format
-
-Bazel has a diverse ecosystem and projects use various versioning schemes. The
-most popular by far is [SemVer](https://semver.org), but there are
-also prominent projects using different schemes such as
-[Abseil](https://github.com/abseil/abseil-cpp/releases), whose
-versions are date-based (for example, `20210324.2`).
-
-For this reason, Bzlmod adopts a more relaxed version of the SemVer spec. The
-differences include:
-
-* SemVer prescribes that the "release" part of the version must consist of 3
-  segments: `MAJOR.MINOR.PATCH`. In Bazel, this requirement is loosened so
-  that any number of segments is allowed.
-* In SemVer, each of the segments in the "release" part must be digits only. - In Bazel, this is loosened to allow letters too, and the comparison - semantics match the "identifiers" in the "prerelease" part. -* Additionally, the semantics of major, minor, and patch version increases are - not enforced. However, see [compatibility level](#compatibility_level) for - details on how we denote backwards compatibility. - -Any valid SemVer version is a valid Bazel module version. Additionally, two -SemVer versions `a` and `b` compare `a < b` if and only if the same holds when -they're compared as Bazel module versions. - -## Version selection - -Consider the diamond dependency problem, a staple in the versioned dependency -management space. Suppose you have the dependency graph: - -``` - A 1.0 - / \ - B 1.0 C 1.1 - | | - D 1.0 D 1.1 -``` - -Which version of `D` should be used? To resolve this question, Bzlmod uses the -[Minimal Version Selection](https://research.swtch.com/vgo-mvs) -(MVS) algorithm introduced in the Go module system. MVS assumes that all new -versions of a module are backwards compatible, and so picks the highest version -specified by any dependent (`D 1.1` in our example). It's called "minimal" -because `D 1.1` is the earliest version that could satisfy our requirements — -even if `D 1.2` or newer exists, we don't select them. Using MVS creates a -version selection process that is *high-fidelity* and *reproducible*. - -### Yanked versions - -The registry can declare certain versions as *yanked* if they should be avoided -(such as for security vulnerabilities). Bazel throws an error when selecting a -yanked version of a module. To fix this error, either upgrade to a newer, -non-yanked version, or use the -[`--allow_yanked_versions`](/reference/command-line-reference#flag--allow_yanked_versions) -flag to explicitly allow the yanked version. - -## Compatibility level - -In Go, MVS's assumption about backwards compatibility works because it treats -backwards incompatible versions of a module as a separate module. In terms of -SemVer, that means `A 1.x` and `A 2.x` are considered distinct modules, and can -coexist in the resolved dependency graph. This is, in turn, made possible by -encoding the major version in the package path in Go, so there aren't any -compile-time or linking-time conflicts. - -Bazel, however, cannot provide such guarantees, so it needs the "major version" -number in order to detect backwards incompatible versions. This number is called -the *compatibility level*, and is specified by each module version in its -`module()` directive. With this information, Bazel can throw an error when it -detects that versions of the same module with different compatibility levels -exist in the resolved dependency graph. - -## Overrides - -Specify overrides in the `MODULE.bazel` file to alter the behavior of Bazel -module resolution. Only the root module's overrides take effect — if a module is -used as a dependency, its overrides are ignored. - -Each override is specified for a certain module name, affecting all of its -versions in the dependency graph. Although only the root module's overrides take -effect, they can be for transitive dependencies that the root module does not -directly depend on. 
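-
-For example, the root module might pin a dependency as follows (a hypothetical
-sketch using the `single_version_override` directive described below; the
-module name and version are illustrative):
-
-```python
-# MODULE.bazel of the root module.
-bazel_dep(name = "protobuf", version = "3.19.0")
-
-# Force every use of protobuf in the dependency graph to resolve to 3.19.0,
-# regardless of which versions other modules request.
-single_version_override(
    module_name = "protobuf",
    version = "3.19.0",
)
-```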
- -### Single-version override - -The [`single_version_override`](/rules/lib/globals/module#single_version_override) -serves multiple purposes: - -* With the `version` attribute, you can pin a dependency to a specific - version, regardless of which versions of the dependency are requested in the - dependency graph. -* With the `registry` attribute, you can force this dependency to come from a - specific registry, instead of following the normal [registry - selection](/external/registry#selecting_registries) process. -* With the `patch*` attributes, you can specify a set of patches to apply to - the downloaded module. - -These attributes are all optional and can be mixed and matched with each other. - -### Multiple-version override - -A [`multiple_version_override`](/rules/lib/globals/module#multiple_version_override) -can be specified to allow multiple versions of the same module to coexist in the -resolved dependency graph. - -You can specify an explicit list of allowed versions for the module, which must -all be present in the dependency graph before resolution — there must exist -*some* transitive dependency depending on each allowed version. After -resolution, only the allowed versions of the module remain, while Bazel upgrades -other versions of the module to the nearest higher allowed version at the same -compatibility level. If no higher allowed version at the same compatibility -level exists, Bazel throws an error. - -For example, if versions `1.1`, `1.3`, `1.5`, `1.7`, and `2.0` exist in the -dependency graph before resolution and the major version is the compatibility -level: - -* A multiple-version override allowing `1.3`, `1.7`, and `2.0` results in - `1.1` being upgraded to `1.3`, `1.5` being upgraded to `1.7`, and other - versions remaining the same. -* A multiple-version override allowing `1.5` and `2.0` results in an error, as - `1.7` has no higher version at the same compatibility level to upgrade to. -* A multiple-version override allowing `1.9` and `2.0` results in an error, as - `1.9` is not present in the dependency graph before resolution. - -Additionally, users can also override the registry using the `registry` -attribute, similarly to single-version overrides. - -### Non-registry overrides - -Non-registry overrides completely remove a module from version resolution. Bazel -does not request these `MODULE.bazel` files from a registry, but instead from -the repo itself. - -Bazel supports the following non-registry overrides: - -* [`archive_override`](/rules/lib/globals/module#archive_override) -* [`git_override`](/rules/lib/globals/module#git_override) -* [`local_path_override`](/rules/lib/globals/module#local_path_override) - -## Define repos that don't represent Bazel modules - -With `bazel_dep`, you can define repos that represent other Bazel modules. -Sometimes there is a need to define a repo that does _not_ represent a Bazel -module; for example, one that contains a plain JSON file to be read as data. - -In this case, you could use the [`use_repo_rule` -directive](/rules/lib/globals/module#use_repo_rule) to directly define a repo -by invoking a repo rule. This repo will only be visible to the module it's -defined in. - -Under the hood, this is implemented using the same mechanism as [module -extensions](/external/extension), which lets you define repos with more -flexibility. 
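-
-For example, a `MODULE.bazel` file might fetch such a plain data file with the
-`http_file` repo rule (a sketch; the URL, hash, and repo name are
-illustrative):
-
-```python
-# MODULE.bazel
-http_file = use_repo_rule("@bazel_tools//tools/build_defs/repo:http.bzl", "http_file")
-
-# Defines the repo @config_json, visible only to this module.
-http_file(
-    name = "config_json",
-    urls = ["https://example.com/config.json"],
-    sha256 = "...",
-)
-```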
-
-## Repository names and strict deps
-
-The [apparent name](/external/overview#apparent-repo-name) of a repo backing a
-module to its direct dependents defaults to its module name, unless the
-`repo_name` attribute of the [`bazel_dep`](/rules/lib/globals/module#bazel_dep)
-directive says otherwise. Note that this means a module can only find its direct
-dependencies. This helps prevent accidental breakages due to changes in
-transitive dependencies.
-
-The [canonical name](/external/overview#canonical-repo-name) of a repo backing a
-module is either `module_name+version` (for example, `bazel_skylib+1.0.3`) or
-`module_name+` (for example, `bazel_features+`), depending on whether there are
-multiple versions of the module in the entire dependency graph (see
-[`multiple_version_override`](/rules/lib/globals/module#multiple_version_override)).
-Note that **the canonical name format** is not an API you should depend on and
-**is subject to change at any time**. Instead of hard-coding the canonical name,
-use a supported way to get it directly from Bazel:
-
-* In BUILD and `.bzl` files, use
-  [`Label.repo_name`](/rules/lib/builtins/Label#repo_name) on a `Label` instance
-  constructed from a label string given by the apparent name of the repo, e.g.,
-  `Label("@bazel_skylib").repo_name`.
-* When looking up runfiles, use
-  [`$(rlocationpath ...)`](https://bazel.build/reference/be/make-variables#predefined_label_variables)
-  or one of the runfiles libraries in
-  `@bazel_tools//tools/{bash,cpp,java}/runfiles` or, for a ruleset `rules_foo`,
-  in `@rules_foo//foo/runfiles`.
-* When interacting with Bazel from an external tool such as an IDE or language
-  server, use the `bazel mod dump_repo_mapping` command to get the mapping from
-  apparent names to canonical names for a given set of repositories.
-
-[Module extensions](/external/extension) can also introduce additional repos
-into the visible scope of a module.
diff --git a/8.4.2/help.mdx b/8.4.2/help.mdx
deleted file mode 100644
index b2976e6..0000000
--- a/8.4.2/help.mdx
+++ /dev/null
@@ -1,53 +0,0 @@
----
-title: 'Getting Help'
----
-
-
-
-This page lists Bazel resources beyond the documentation and covers how to get
-support from the Bazel team and community.
- -## Search existing material - -In addition to the documentation, you can find helpful information by searching: - -* [Bazel user group](https://groups.google.com/g/bazel-discuss) -* [Bazel GitHub Discussions](https://github.com/bazelbuild/bazel/discussions) -* [Bazel blog](https://blog.bazel.build/) -* [Stack Overflow](https://stackoverflow.com/questions/tagged/bazel) -* [`awesome-bazel` resources](https://github.com/jin/awesome-bazel) - -## Watch videos - -There are recordings of Bazel talks at various conferences, such as: - -* Bazel’s annual conference, BazelCon: - * [BazelCon 2023](https://www.youtube.com/playlist?list=PLxNYxgaZ8Rsefrwb_ySGRi_bvQejpO_Tj) - * [BazelCon 2022](https://youtube.com/playlist?list=PLxNYxgaZ8RsdH4GCIZ69dzxQCOPyuNlpF) - * [BazelCon 2021](https://www.youtube.com/playlist?list=PLxNYxgaZ8Rsc3auKhtfIB4qXAYf7whEux) - * [BazelCon 2020](https://www.youtube.com/playlist?list=PLxNYxgaZ8RseRybXNbopHRv6-wGmFr04n) - * [BazelCon 2019](https://youtu.be/eymphDN7No4?t=PLxNYxgaZ8Rsf-7g43Z8LyXct9ax6egdSj) - * [BazelCon 2018](https://youtu.be/DVYRg6b2UBo?t=PLxNYxgaZ8Rsd3Nmvl1W1B4I6nK1674ezp) - * [BazelCon 2017](https://youtu.be/3eFllvz8_0k?t=PLxNYxgaZ8RseY0KmkXQSt0StE71E7yizG) -* Bazel day on [Google Open Source Live](https://opensourcelive.withgoogle.com/events/bazel) - - -## Ask the Bazel community - -If there are no existing answers, you can ask the community by: - -* Emailing the [Bazel user group](https://groups.google.com/g/bazel-discuss) -* Starting a discussion on [GitHub](https://github.com/bazelbuild/bazel/discussions) -* Asking a question on [Stack Overflow](https://stackoverflow.com/questions/tagged/bazel) -* Chatting with other Bazel contributors on [Slack](https://slack.bazel.build/) -* Consulting a [Bazel community expert](/community/experts) - -## Understand Bazel's support level - -Please read the [release page](/release) to understand Bazel's release model and -what level of support Bazel provides. - -## File a bug - -If you encounter a bug or want to request a feature, file a [GitHub -Issue](https://github.com/bazelbuild/bazel/issues). diff --git a/8.4.2/install/bazelisk.mdx b/8.4.2/install/bazelisk.mdx deleted file mode 100644 index a3189cb..0000000 --- a/8.4.2/install/bazelisk.mdx +++ /dev/null @@ -1,59 +0,0 @@ ---- -title: 'Installing / Updating Bazel using Bazelisk' ---- - - - -## Installing Bazel - -[Bazelisk](https://github.com/bazelbuild/bazelisk) is the -recommended way to install Bazel on Ubuntu, Windows, and macOS. It automatically -downloads and installs the appropriate version of Bazel. Use Bazelisk if you -need to switch between different versions of Bazel depending on the current -working directory, or to always keep Bazel updated to the latest release. - -For more details, see -[the official README](https://github.com/bazelbuild/bazelisk/blob/master/README.md). - -## Updating Bazel - -Bazel has a [backward compatibility policy](/release/backward-compatibility) -(see [guidance for rolling out incompatible -changes](/contribute/breaking-changes) if you -are the author of one). That page summarizes best practices on how to test and -migrate your project with upcoming incompatible changes and how to provide -feedback to the incompatible change authors. - -### Managing Bazel versions with Bazelisk - -[Bazelisk](https://github.com/bazelbuild/bazelisk) helps you manage -Bazel versions. - -Bazelisk can: - -* Auto-update Bazel to the latest LTS or rolling release. -* Build the project with a Bazel version specified in the .bazelversion - file. 
Check that file into your version control to ensure reproducibility
-  of your builds.
-* Help migrate your project for incompatible changes (see above).
-* Easily try release candidates.
-
-### Recommended migration process
-
-Within minor updates to any LTS release, any
-project can be prepared for the next release without breaking
-compatibility with the current release. However, there may be
-backward-incompatible changes between major LTS versions.
-
-Follow this process to migrate from one major version to another:
-
-1. Read the release notes to get advice on how to migrate to the next version.
-1. Major incompatible changes should have an associated `--incompatible_*` flag
-   and a corresponding GitHub issue:
-    * Migration guidance is available in the associated GitHub issue.
-    * Tooling is available for migrating some incompatible changes. For
-      example, [buildifier](https://github.com/bazelbuild/buildtools/releases).
-    * Report migration problems by commenting on the associated GitHub issue.
-
-After migration, you can continue to build your projects without worrying about
-backward-compatibility until the next major release.
diff --git a/8.4.2/install/compile-source.mdx b/8.4.2/install/compile-source.mdx
deleted file mode 100644
index a228b22..0000000
--- a/8.4.2/install/compile-source.mdx
+++ /dev/null
@@ -1,299 +0,0 @@
----
-title: 'Compiling Bazel from Source'
----
-
-
-
-This page describes how to install Bazel from source and provides
-troubleshooting tips for common issues.
-
-To build Bazel from source, you can do one of the following:
-
-* Build it [using an existing Bazel binary](#build-bazel-using-bazel)
-
-* Build it [without an existing Bazel binary](#bootstrap-bazel) which is known
-  as _bootstrapping_.
-
-## Build Bazel using Bazel
-
-### Summary
-
-1. Get the latest Bazel release from the
-   [GitHub release page](https://github.com/bazelbuild/bazel/releases) or with
-   [Bazelisk](https://github.com/bazelbuild/bazelisk).
-
-2. [Download Bazel's sources from GitHub](https://github.com/bazelbuild/bazel/archive/master.zip)
-   and extract somewhere.
-   Alternatively you can git clone the source tree from https://github.com/bazelbuild/bazel
-
-3. Install the same prerequisites as for bootstrapping (see
-   [for Unix-like systems](#bootstrap-unix-prereq) or
-   [for Windows](#bootstrap-windows-prereq))
-
-4. Build a development build of Bazel using Bazel:
-   `bazel build //src:bazel-dev` (or `bazel build //src:bazel-dev.exe` on
-   Windows)
-
-5. The resulting binary is at `bazel-bin/src/bazel-dev`
-   (or `bazel-bin\src\bazel-dev.exe` on Windows). You can copy it wherever you
-   like and use it immediately without further installation.
-
-Detailed instructions follow below.
-
-### Step 1: Get the latest Bazel release
-
-**Goal**: Install or download a release version of Bazel. Make sure you can run
-it by typing `bazel` in a terminal.
-
-**Reason**: To build Bazel from a GitHub source tree, you need a pre-existing
-Bazel binary. You can install one from a package manager or download one from
-GitHub. See [Installing Bazel](/install). (Or you can [build from
-scratch (bootstrap)](#bootstrap-bazel).)
-
-**Troubleshooting**:
-
-* If you cannot run Bazel by typing `bazel` in a terminal:
-
-    * Maybe your Bazel binary's directory is not on the PATH.
-
-      This is not a big problem. Instead of typing `bazel`, you will need to
-      type the full path.
-
-    * Maybe the Bazel binary itself is not called `bazel` (on Unixes) or
-      `bazel.exe` (on Windows).
-
-      This is not a big problem.
You can either rename the binary, or type the - binary's name instead of `bazel`. - - * Maybe the binary is not executable (on Unixes). - - You must make the binary executable by running `chmod +x /path/to/bazel`. - -### Step 2: Download Bazel's sources from GitHub - -If you are familiar with Git, then just git clone https://github.com/bazelbuild/bazel - -Otherwise: - -1. Download the - [latest sources as a zip file](https://github.com/bazelbuild/bazel/archive/master.zip). - -2. Extract the contents somewhere. - - For example create a `bazel-src` directory under your home directory and - extract there. - -### Step 3: Install prerequisites - -Install the same prerequisites as for bootstrapping (see below) -- JDK, C++ -compiler, MSYS2 (if you are building on Windows), etc. - -### Step 4a: Build Bazel on Ubuntu Linux, macOS, and other Unix-like systems - -For instructions for Windows, see [Build Bazel on Windows](#build-bazel-on-windows). - -**Goal**: Run Bazel to build a custom Bazel binary (`bazel-bin/src/bazel-dev`). - -**Instructions**: - -1. Start a Bash terminal - -2. `cd` into the directory where you extracted (or cloned) Bazel's sources. - - For example if you extracted the sources under your home directory, run: - - cd ~/bazel-src - -3. Build Bazel from source: - - bazel build //src:bazel-dev - - Alternatively you can run `bazel build //src:bazel --compilation_mode=opt` - to yield a smaller binary but it's slower to build. - - You can build with `--stamp --embed_label=X.Y.Z` flag to embed a Bazel - version for the binary so that `bazel --version` outputs the given version. - -4. The output will be at `bazel-bin/src/bazel-dev` (or `bazel-bin/src/bazel`). - -### Step 4b: Build Bazel on Windows - -For instructions for Unix-like systems, see -[Ubuntu Linux, macOS, and other Unix-like systems](#build-bazel-on-unixes). - -**Goal**: Run Bazel to build a custom Bazel binary -(`bazel-bin\src\bazel-dev.exe`). - -**Instructions**: - -1. Start Command Prompt (Start Menu > Run > "cmd.exe") - -2. `cd` into the directory where you extracted (or cloned) Bazel's sources. - - For example if you extracted the sources under your home directory, run: - - cd %USERPROFILE%\bazel-src - -3. Build Bazel from source: - - bazel build //src:bazel-dev.exe - - Alternatively you can run `bazel build //src:bazel.exe - --compilation_mode=opt` to yield a smaller binary but it's slower to build. - - You can build with `--stamp --embed_label=X.Y.Z` flag to embed a Bazel - version for the binary so that `bazel --version` outputs the given version. - -4. The output will be at `bazel-bin\src\bazel-dev.exe` (or - `bazel-bin\src\bazel.exe`). - -### Step 5: Install the built binary - -Actually, there's nothing to install. - -The output of the previous step is a self-contained Bazel binary. You can copy -it to any directory and use immediately. (It's useful if that directory is on -your PATH so that you can run "bazel" everywhere.) - ---- - -## Build Bazel from scratch (bootstrapping) - -You can also build Bazel from scratch, without using an existing Bazel binary. - -### Step 1: Download Bazel's sources (distribution archive) - -(This step is the same for all platforms.) - -1. Download `bazel--dist.zip` from - [GitHub](https://github.com/bazelbuild/bazel/releases), for example - `bazel-0.28.1-dist.zip`. - - **Attention**: - - - There is a **single, architecture-independent** distribution archive. - There are no architecture-specific or OS-specific distribution archives. 
-
-    - These sources are **not the same as the GitHub source tree**. You
-      have to use the distribution archive to bootstrap Bazel. You cannot
-      use a source tree cloned from GitHub. (The distribution archive contains
-      generated source files that are required for bootstrapping and are not part
-      of the normal Git source tree.)
-
-2. Unpack the distribution archive somewhere on disk.
-
-   You should verify the signature made by Bazel's
-   [release key](https://bazel.build/bazel-release.pub.gpg) 3D5919B448457EE0.
-
-### Step 2a: Bootstrap Bazel on Ubuntu Linux, macOS, and other Unix-like systems
-
-For instructions for Windows, see [Bootstrap Bazel on Windows](#bootstrap-windows).
-
-#### 2.1. Install the prerequisites
-
-* **Bash**
-
-* **zip, unzip**
-
-* **C++ build toolchain**
-
-* **JDK.** Version 21 is required.
-
-* **Python**. Versions 2 and 3 are supported, installing one of them is
-  enough.
-
-For example on Ubuntu Linux you can install these requirements using the
-following command:
-
-```sh
-sudo apt-get install build-essential openjdk-21-jdk python zip unzip
-```
-
-#### 2.2. Bootstrap Bazel on Unix
-
-1. Open a shell or Terminal window.
-
-2. `cd` to the directory where you unpacked the distribution archive.
-
-3. Run the compilation script: `env EXTRA_BAZEL_ARGS="--tool_java_runtime_version=local_jdk" bash ./compile.sh`.
-
-The compiled output is placed into `output/bazel`. This is a self-contained
-Bazel binary, without an embedded JDK. You can copy it anywhere or use it
-in-place. For convenience, copy this binary to a directory that's on your
-`PATH` (such as `/usr/local/bin` on Linux).
-
-To build the `bazel` binary in a reproducible way, also set
-[`SOURCE_DATE_EPOCH`](https://reproducible-builds.org/specs/source-date-epoch/)
-in the "Run the compilation script" step.
-
-### Step 2b: Bootstrap Bazel on Windows
-
-For instructions for Unix-like systems, see
-[Bootstrap Bazel on Ubuntu Linux, macOS, and other Unix-like systems](#bootstrap-unix).
-
-#### 2.1. Install the prerequisites
-
-* [MSYS2 shell](https://msys2.github.io/)
-
-* **The MSYS2 packages for zip and unzip.** Run the following command in the MSYS2 shell:
-
-  ```
-  pacman -S zip unzip patch
-  ```
-
-* **The Visual C++ compiler.** Install the Visual C++ compiler either as part
-  of Visual Studio 2015 or newer, or by installing the latest [Build Tools
-  for Visual Studio 2017](https://aka.ms/BuildTools).
-
-* **JDK.** Version 21 is required.
-
-* **Python**. Versions 2 and 3 are supported, installing one of them is
-  enough. You need the Windows-native version (downloadable from
-  [https://www.python.org](https://www.python.org)). Versions installed via
-  pacman in MSYS2 will not work.
-
-#### 2.2. Bootstrap Bazel on Windows
-
-1. Open the MSYS2 shell.
-
-2. Set the following environment variables:
-    * Either `BAZEL_VS` or `BAZEL_VC` (they are *not* the same): Set to the
-      path to the Visual Studio directory (BAZEL\_VS) or to the Visual
-      C++ directory (BAZEL\_VC). Setting one of them is enough.
-    * `BAZEL_SH`: Path of the MSYS2 `bash.exe`. See the command in the
-      examples below.
-
-      Do not set this to `C:\Windows\System32\bash.exe`. (You have that file
-      if you installed Windows Subsystem for Linux.) Bazel does not support
-      this version of `bash.exe`.
-    * `PATH`: Add the Python directory.
-    * `JAVA_HOME`: Set to the JDK directory.
- - **Example** (using BAZEL\_VS): - - export BAZEL_VS="C:/Program Files (x86)/Microsoft Visual Studio/2017/BuildTools" - export BAZEL_SH="$(cygpath -m $(realpath $(which bash)))" - export PATH="/c/python27:$PATH" - export JAVA_HOME="C:/Program Files/Java/jdk-21" - - or (using BAZEL\_VC): - - export BAZEL_VC="C:/Program Files (x86)/Microsoft Visual Studio/2017/BuildTools/VC" - export BAZEL_SH="$(cygpath -m $(realpath $(which bash)))" - export PATH="/c/python27:$PATH" - export JAVA_HOME="C:/Program Files/Java/jdk-21" - -3. `cd` to the directory where you unpacked the distribution archive. - -4. Run the compilation script: `env EXTRA_BAZEL_ARGS="--tool_java_runtime_version=local_jdk" ./compile.sh` - -The compiled output is placed into `output/bazel.exe`. This is a self-contained -Bazel binary, without an embedded JDK. You can copy it anywhere or use it -in-place. For convenience, copy this binary to a directory that's on -your `PATH`. - -To build the `bazel.exe` binary in a reproducible way, also set -[`SOURCE_DATE_EPOCH`](https://reproducible-builds.org/specs/source-date-epoch/) -in the "Run the compilation script" step. - -You don't need to run Bazel from the MSYS2 shell. You can run Bazel from the -Command Prompt (`cmd.exe`) or PowerShell. diff --git a/8.4.2/install/completion.mdx b/8.4.2/install/completion.mdx deleted file mode 100644 index 1d1d1b7..0000000 --- a/8.4.2/install/completion.mdx +++ /dev/null @@ -1,126 +0,0 @@ ---- -title: 'Command-Line Completion' ---- - - - -You can enable command-line completion (also known as tab-completion) in Bash -and Zsh. This lets you tab-complete command names, flags names and flag values, -and target names. - -## Bash - -Bazel comes with a Bash completion script. - -If you installed Bazel: - -* From the APT repository, then you're done -- the Bash completion script is - already installed in `/etc/bash_completion.d`. - -* From Homebrew, then you're done -- the Bash completion script is - already installed in `$(brew --prefix)/etc/bash_completion.d`. - -* From the installer downloaded from GitHub, then: - 1. Locate the absolute path of the completion file. The installer copied it - to the `bin` directory. - - Example: if you ran the installer with `--user`, this will be - `$HOME/.bazel/bin`. If you ran the installer as root, this will be - `/usr/local/lib/bazel/bin`. - 2. Do one of the following: - * Either copy this file to your completion directory (if you have - one). - - Example: on Ubuntu this is the `/etc/bash_completion.d` directory. - * Or source the completion file from Bash's RC file. - - Add a line similar to the one below to your `~/.bashrc` (on Ubuntu) - or `~/.bash_profile` (on macOS), using the path to your completion - file's absolute path: - - ``` - source /path/to/bazel-complete.bash - ``` - -* Via [bootstrapping](/install/compile-source), then: - 1. Emit the completion script into a file: - - ``` - bazel help completion bash > bazel-complete.bash - ``` - 2. Do one of the following: - * Copy this file to your completion directory, if you have - one. - - Example: on Ubuntu this is the `/etc/bash_completion.d` directory - * Copy it somewhere on your local disk, such as to `$HOME`, and - source the completion file from Bash's RC file. - - Add a line similar to the one below to your `~/.bashrc` (on Ubuntu) - or `~/.bash_profile` (on macOS), using the path to your completion - file's absolute path: - - ``` - source /path/to/bazel-complete.bash - ``` - -## Zsh - -Bazel comes with a Zsh completion script. 
- -If you installed Bazel: - -* From the APT repository, then you're done -- the Zsh completion script is - already installed in `/usr/share/zsh/vendor-completions`. - - > If you have a heavily customized `.zshrc` and the autocomplete - > does not function, try one of the following solutions: - > - > Add the following to your `.zshrc`: - > - > ``` - > zstyle :compinstall filename '/home/tradical/.zshrc' - > - > autoload -Uz compinit - > compinit - > ``` - > - > or - > - > Follow the instructions - > [here](https://stackoverflow.com/questions/58331977/bazel-tab-auto-complete-in-zsh-not-working) - > - > If you are using `oh-my-zsh`, you may want to install and enable - > the `zsh-autocomplete` plugin. If you'd prefer not to, use one of the - > solutions described above. - -* From Homebrew, then you're done -- the Zsh completion script is - already installed in `$(brew --prefix)/share/zsh/site-functions`. - -* From the installer downloaded from GitHub, then: - 1. Locate the absolute path of the completion file. The installer copied it - to the `bin` directory. - - Example: if you ran the installer with `--user`, this will be - `$HOME/.bazel/bin`. If you ran the installer as root, this will be - `/usr/local/lib/bazel/bin`. - - 2. Add this script to a directory on your `$fpath`: - - ``` - fpath[1,0]=~/.zsh/completion/ - mkdir -p ~/.zsh/completion/ - cp /path/from/above/step/_bazel ~/.zsh/completion - ``` - - You may have to call `rm -f ~/.zcompdump; compinit` - the first time to make it work. - - 3. Optionally, add the following to your .zshrc. - - ``` - # This way the completion script does not have to parse Bazel's options - # repeatedly. The directory in cache-path must be created manually. - zstyle ':completion:*' use-cache on - zstyle ':completion:*' cache-path ~/.zsh/cache - ``` diff --git a/8.4.2/install/docker-container.mdx b/8.4.2/install/docker-container.mdx deleted file mode 100644 index 3a5d017..0000000 --- a/8.4.2/install/docker-container.mdx +++ /dev/null @@ -1,135 +0,0 @@ ---- -title: 'Getting Started with Bazel Docker Container' ---- - - - -This page provides details on the contents of the Bazel container, how to build -the [abseil-cpp](https://github.com/abseil/abseil-cpp) project using Bazel -inside the Bazel container, and how to build this project directly -from the host machine using the Bazel container with directory mounting. - -## Build Abseil project from your host machine with directory mounting - -The instructions in this section allow you to build using the Bazel container -with the sources checked out in your host environment. A container is started up -for each build command you execute. Build results are cached in your host -environment so they can be reused across builds. - -Clone the project to a directory in your host machine. - -```posix-terminal -git clone --depth 1 --branch 20220623.1 https://github.com/abseil/abseil-cpp.git /src/workspace -``` - -Create a folder that will have cached results to be shared across builds. - -```posix-terminal -mkdir -p /tmp/build_output/ -``` - -Use the Bazel container to build the project and make the build -outputs available in the output folder in your host machine. - -```posix-terminal -docker run \ - -e USER="$(id -u)" \ - -u="$(id -u)" \ - -v /src/workspace:/src/workspace \ - -v /tmp/build_output:/tmp/build_output \ - -w /src/workspace \ - gcr.io/bazel-public/bazel:latest \ - --output_user_root=/tmp/build_output \ - build //absl/... 
-```
-
-Build the project with sanitizers by adding the `--config=asan|tsan|msan` build
-flag to select AddressSanitizer (asan), ThreadSanitizer (tsan) or
-MemorySanitizer (msan) accordingly.
-
-```posix-terminal
-docker run \
-  -e USER="$(id -u)" \
-  -u="$(id -u)" \
-  -v /src/workspace:/src/workspace \
-  -v /tmp/build_output:/tmp/build_output \
-  -w /src/workspace \
-  gcr.io/bazel-public/bazel:latest \
-  --output_user_root=/tmp/build_output \
-  build --config={asan | tsan | msan} -- //absl/... -//absl/types:variant_test
-```
-
-## Build Abseil project from inside the container
-
-The instructions in this section allow you to build using the Bazel container
-with the sources inside the container. By starting a container at the beginning
-of your development workflow and making changes in the workspace within the
-container, build results are cached across builds.
-
-Start a shell in the Bazel container:
-
-```posix-terminal
-docker run --interactive --entrypoint=/bin/bash gcr.io/bazel-public/bazel:latest
-```
-
-Each container ID is unique. The instructions below use 5a99103747c6 as an
-example.
-
-Clone the project.
-
-```posix-terminal
-ubuntu@5a99103747c6:~$ git clone --depth 1 --branch 20220623.1 https://github.com/abseil/abseil-cpp.git && cd abseil-cpp/
-```
-
-Do a regular build.
-
-```posix-terminal
-ubuntu@5a99103747c6:~/abseil-cpp$ bazel build //absl/...
-```
-
-Build the project with sanitizers by adding the `--config=asan|tsan|msan`
-build flag to select AddressSanitizer (asan), ThreadSanitizer (tsan) or
-MemorySanitizer (msan) accordingly.
-
-```posix-terminal
-ubuntu@5a99103747c6:~/abseil-cpp$ bazel build --config={asan | tsan | msan} -- //absl/... -//absl/types:variant_test
-```
-
-## Explore the Bazel container
-
-If you haven't already, start an interactive shell inside the Bazel container.
-
-```posix-terminal
-docker run -it --entrypoint=/bin/bash gcr.io/bazel-public/bazel:latest
-ubuntu@5a99103747c6:~$
-```
-
-Explore the container contents.
-
-```posix-terminal
-ubuntu@5a99103747c6:~$ gcc --version
-gcc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0
-Copyright (C) 2019 Free Software Foundation, Inc.
-This is free software; see the source for copying conditions.  There is NO
-warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-
-ubuntu@5a99103747c6:~$ java -version
-openjdk version "1.8.0_362"
-OpenJDK Runtime Environment (build 1.8.0_362-8u372-ga~us1-0ubuntu1~20.04-b09)
-OpenJDK 64-Bit Server VM (build 25.362-b09, mixed mode)
-
-ubuntu@5a99103747c6:~$ python -V
-Python 3.8.10
-
-ubuntu@5a99103747c6:~$ bazel version
-WARNING: Invoking Bazel in batch mode since it is not invoked from within a workspace (below a directory having a WORKSPACE file).
-Extracting Bazel installation...
-Build label: 6.2.1
-Build target: bazel-out/k8-opt/bin/src/main/java/com/google/devtools/build/lib/bazel/BazelServer_deploy.jar
-Build time: Fri Jun 2 16:59:58 2023 (1685725198)
-Build timestamp: 1685725198
-Build timestamp as int: 1685725198
-```
-
-## Explore the Bazel Dockerfile
-
-If you want to check how the Bazel Docker image is built, you can find its
-Dockerfile at
-[bazelbuild/continuous-integration/bazel/oci](https://github.com/bazelbuild/continuous-integration/tree/master/bazel/oci).
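-
-The same mounting pattern should work for other Bazel commands as well. As a
-minimal sketch (assuming the same workspace checkout and cache mounts as in the
-build example above; some tests may need additional flags), you could run the
-test suite from the host like this:
-
-```posix-terminal
-docker run \
-  -e USER="$(id -u)" \
-  -u="$(id -u)" \
-  -v /src/workspace:/src/workspace \
-  -v /tmp/build_output:/tmp/build_output \
-  -w /src/workspace \
-  gcr.io/bazel-public/bazel:latest \
-  --output_user_root=/tmp/build_output \
-  test //absl/...
-```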
diff --git a/8.4.2/install/ide.mdx b/8.4.2/install/ide.mdx
deleted file mode 100644
index f70919b..0000000
--- a/8.4.2/install/ide.mdx
+++ /dev/null
@@ -1,122 +0,0 @@
----
-title: 'Integrating Bazel with IDEs'
----
-
-
-
-This page covers how to integrate Bazel with IDEs, such as IntelliJ, Android
-Studio, and CLion (or build your own IDE plugin). It also includes links to
-installation and plugin details.
-
-IDEs integrate with Bazel in a variety of ways, from features that allow Bazel
-executions from within the IDE, to awareness of Bazel structures such as syntax
-highlighting of `BUILD` files.
-
-If you are interested in developing an editor or IDE plugin for Bazel, please
-join the `#ide` channel on the [Bazel Slack](https://slack.bazel.build) or start
-a discussion on [GitHub](https://github.com/bazelbuild/bazel/discussions).
-
-## IDEs and editors
-
-### IntelliJ, Android Studio, and CLion
-
-[Official plugin](http://ij.bazel.build) for IntelliJ, Android Studio, and
-CLion. The plugin is [open source](https://github.com/bazelbuild/intellij).
-
-This is the open source version of the plugin used internally at Google.
-
-Features:
-
-* Interop with language-specific plugins. Supported languages include Java,
-  Scala, and Python.
-* Import `BUILD` files into the IDE with semantic awareness of Bazel targets.
-* Make your IDE aware of Starlark, the language used for Bazel's `BUILD` and
-  `.bzl` files.
-* Build, test, and execute binaries directly from the IDE.
-* Create configurations for debugging and running binaries.
-
-To install, go to the IDE's plugin browser and search for `Bazel`.
-
-To manually install older versions, download the zip files from JetBrains'
-Plugin Repository and install the zip file from the IDE's plugin browser:
-
-* [Android Studio
-  plugin](https://plugins.jetbrains.com/plugin/9185-android-studio-with-bazel)
-* [IntelliJ
-  plugin](https://plugins.jetbrains.com/plugin/8609-intellij-with-bazel)
-* [CLion plugin](https://plugins.jetbrains.com/plugin/9554-clion-with-bazel)
-
-### Xcode
-
-[rules_xcodeproj](https://github.com/buildbuddy-io/rules_xcodeproj),
-[Tulsi](https://tulsi.bazel.build), and
-[XCHammer](https://github.com/pinterest/xchammer) generate Xcode
-projects from Bazel `BUILD` files.
-
-### Visual Studio Code
-
-Official plugin for VS Code.
-
-Features:
-
-* Bazel Build Targets tree
-* Starlark debugger for `.bzl` files during a build (set breakpoints, step
-  through code, inspect variables, and so on)
-
-Find [the plugin on the Visual Studio
-marketplace](https://marketplace.visualstudio.com/items?itemName=BazelBuild.vscode-bazel).
-The plugin is [open source](https://github.com/bazelbuild/vscode-bazel).
-
-See also: [Autocomplete for Source Code](#autocomplete-for-source-code)
-
-### Atom
-
-Find the [`language-bazel` package](https://atom.io/packages/language-bazel)
-on the Atom package manager.
-
-See also: [Autocomplete for Source Code](#autocomplete-for-source-code)
-
-### Vim
-
-See [`bazelbuild/vim-bazel` on GitHub](https://github.com/bazelbuild/vim-bazel).
-
-See also: [Autocomplete for Source Code](#autocomplete-for-source-code)
-
-### Emacs
-
-See [`bazelbuild/emacs-bazel-mode` on
-GitHub](https://github.com/bazelbuild/emacs-bazel-mode).
-
-See also: [Autocomplete for Source Code](#autocomplete-for-source-code)
-
-### Visual Studio
-
-[Lavender](https://github.com/tmandry/lavender) is an experimental project for
-generating Visual Studio projects that use Bazel for building.
- -### Eclipse - -[Bazel Eclipse Feature](https://github.com/salesforce/bazel-eclipse) -is a set of plugins for importing Bazel packages into an Eclipse workspace as -Eclipse projects. - -## Autocomplete for Source Code - -### C Language Family (C++, C, Objective-C, and Objective-C++) - -[`hedronvision/bazel-compile-commands-extractor`](https://github.com/hedronvision/bazel-compile-commands-extractor) enables autocomplete, smart navigation, quick fixes, and more in a wide variety of extensible editors, including VSCode, Vim, Emacs, Atom, and Sublime. It lets language servers, like clangd and ccls, and other types of tooling, draw upon Bazel's understanding of how `cc` and `objc` code will be compiled, including how it configures cross-compilation for other platforms. - -### Java - -[`georgewfraser/java-language-server`](https://github.com/georgewfraser/java-language-server) - Java Language Server (LSP) with support for Bazel-built projects - -## Automatically run build and test on file change - -[Bazel watcher](https://github.com/bazelbuild/bazel-watcher) is a -tool for building Bazel targets when source files change. - -## Building your own IDE plugin - -Read the [**IDE support** blog -post](https://blog.bazel.build/2016/06/10/ide-support.html) to learn more about -the Bazel APIs to use when building an IDE plugin. diff --git a/8.4.2/install/index.mdx b/8.4.2/install/index.mdx deleted file mode 100644 index 10f53c4..0000000 --- a/8.4.2/install/index.mdx +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: 'Installing Bazel' ---- - - - -This page describes the various platforms supported by Bazel and links -to the packages for more details. - -[Bazelisk](/install/bazelisk) is the recommended way to install Bazel on [Ubuntu Linux](/install/ubuntu), [macOS](/install/os-x), and [Windows](/install/windows). - -You can find available Bazel releases on our [release page](/release). - -## Community-supported packages - -Bazel community members maintain these packages. The Bazel team doesn't -officially support them. Contact the package maintainers for support. - -* [Arch Linux][arch] -* [CentOS 6](https://github.com/sub-mod/bazel-builds) -* [Debian](https://qa.debian.org/developer.php?email=team%2Bbazel%40tracker.debian.org) -* [FreeBSD](https://www.freshports.org/devel/bazel) -* [Gentoo](https://packages.gentoo.org/packages/dev-util/bazel) -* [Homebrew](https://formulae.brew.sh/formula/bazel) -* [Nixpkgs](https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/tools/build-managers/bazel) -* [openSUSE](/install/suse) -* [Parabola](https://www.parabola.nu/packages/?q=bazel) -* [Scoop](https://github.com/scoopinstaller/scoop-main/blob/master/bucket/bazel.json) -* [Raspberry Pi](https://github.com/koenvervloesem/bazel-on-arm/blob/master/README.md) - -## Community-supported architectures - -* [ppc64el](https://ftp2.osuosl.org/pub/ppc64el/bazel/) - -For other platforms, you can try to [compile from source](/install/compile-source). - -[arch]: https://archlinux.org/packages/extra/x86_64/bazel/ diff --git a/8.4.2/install/os-x.mdx b/8.4.2/install/os-x.mdx deleted file mode 100644 index 9a0f3f8..0000000 --- a/8.4.2/install/os-x.mdx +++ /dev/null @@ -1,141 +0,0 @@ ---- -title: 'Installing Bazel on macOS' ---- - - - -This page describes how to install Bazel on macOS and set up your environment. 
-
-You can install Bazel on macOS using one of the following methods:
-
-* *Recommended*: [Use Bazelisk](/install/bazelisk)
-* [Use Homebrew](#install-on-mac-os-x-homebrew)
-* [Use the binary installer](#install-with-installer-mac-os-x)
-* [Compile Bazel from source](/install/compile-source)
-
-Bazel comes with two completion scripts. After installing Bazel, you can:
-
-* Access the [bash completion script](/install/completion#bash)
-* Install the [zsh completion script](/install/completion#zsh)
-
-<h2 id="install-on-mac-os-x-homebrew">Installing using Homebrew</h2>
-
-### Step 1: Install Homebrew on macOS
-
-Install [Homebrew](https://brew.sh/) (a one-time step):
-
-```posix-terminal
-/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
-```
-
-### Step 2: Install Bazel via Homebrew
-
-Install the Bazel package via Homebrew as follows:
-
-```posix-terminal
-brew install bazel
-```
-
-All set! You can confirm Bazel is installed successfully by running the
-following command:
-
-```posix-terminal
-bazel --version
-```
-
-Once installed, you can upgrade to a newer version of Bazel using the
-following command:
-
-```posix-terminal
-brew upgrade bazel
-```
-
-<h2 id="install-with-installer-mac-os-x">Installing using the binary installer</h2>
-
-The binary installers are on Bazel's
-[GitHub releases page](https://github.com/bazelbuild/bazel/releases).
-
-The installer contains the Bazel binary. Some additional libraries
-must also be installed for Bazel to work.
-
-### Step 1: Install Xcode command line tools
-
-If you don't intend to use `ios_*` rules, it is sufficient to install the Xcode
-command line tools package by using `xcode-select`:
-
-```posix-terminal
-xcode-select --install
-```
-
-Otherwise, for `ios_*` rule support, you must have Xcode 6.1 or later with iOS
-SDK 8.1 installed on your system.
-
-Download Xcode from the
-[App Store](https://apps.apple.com/us/app/xcode/id497799835) or the
-[Apple Developer site](https://developer.apple.com/download/more/?=xcode).
-
-Once Xcode is installed, accept the license agreement for all users with the
-following command:
-
-```posix-terminal
-sudo xcodebuild -license accept
-```
-
-### Step 2: Download the Bazel installer
-
-Next, download the Bazel binary installer named
-`bazel-<version>-installer-darwin-x86_64.sh` from the
-[Bazel releases page on GitHub](https://github.com/bazelbuild/bazel/releases).
-
-**On macOS Catalina or newer (macOS >= 10.15)**, due to Apple's new app signing
-requirements, you need to download the installer from the terminal using `curl`,
-replacing the version variable with the Bazel version you want to download:
-
-```posix-terminal
-export BAZEL_VERSION=5.2.0
-
-curl -fLO "https://github.com/bazelbuild/bazel/releases/download/$BAZEL_VERSION/bazel-$BAZEL_VERSION-installer-darwin-x86_64.sh"
-```
-
-This is a temporary workaround until the macOS release flow supports
-signing ([#9304](https://github.com/bazelbuild/bazel/issues/9304)).
-
-### Step 3: Run the installer
-
-Run the Bazel installer as follows:
-
-```posix-terminal
-chmod +x "bazel-$BAZEL_VERSION-installer-darwin-x86_64.sh"
-
-./bazel-$BAZEL_VERSION-installer-darwin-x86_64.sh --user
-```
-
-The `--user` flag installs Bazel to the `$HOME/bin` directory on your system and
-sets the `.bazelrc` path to `$HOME/.bazelrc`. Use the `--help` command to see
-additional installation options.
-
-If you are **on macOS Catalina or newer (macOS >= 10.15)** and get an error that
-_**“bazel-real” cannot be opened because the developer cannot be verified**_,
-you need to re-download the installer from the terminal using `curl` as a
-workaround; see Step 2 above.
-
-### Step 4: Set up your environment
-
-If you ran the Bazel installer with the `--user` flag as above, the Bazel
-executable is installed in your `$HOME/bin` directory.
-It's a good idea to add this directory to your default paths, as follows:
-
-```posix-terminal
-export PATH="$PATH:$HOME/bin"
-```
-
-You can also add this command to your `~/.bashrc`, `~/.zshrc`, or `~/.profile`
-file.
-
-All set! You can confirm Bazel is installed successfully by running the
-following command:
-
-```posix-terminal
-bazel --version
-```
-To update to a newer release of Bazel, download and install the desired version.
-
diff --git a/8.4.2/install/suse.mdx b/8.4.2/install/suse.mdx
deleted file mode 100644
index a4d2e9e..0000000
--- a/8.4.2/install/suse.mdx
+++ /dev/null
@@ -1,35 +0,0 @@
----
-title: 'Installing Bazel on openSUSE Tumbleweed & Leap'
----
-
-
-
-This page describes how to install Bazel on openSUSE Tumbleweed and Leap.
-
-Note: The Bazel team does not officially maintain openSUSE support.
-For issues using Bazel on openSUSE, please file a ticket at
-[bugzilla.opensuse.org](https://bugzilla.opensuse.org/).
-
-Packages are provided for openSUSE Tumbleweed and Leap. You can find all
-available Bazel versions via openSUSE's [software search](https://software.opensuse.org/search?utf8=%E2%9C%93&baseproject=ALL&q=bazel).
-
-The commands below must be run either via `sudo` or while logged in as `root`.
-
-## Installing Bazel on openSUSE
-
-Run the following commands to install the package. If you need a specific
-version, you can install it via the specific `bazelXXX` package; otherwise,
-just `bazel` is enough.
-
-To install the latest version of Bazel, run:
-
-```posix-terminal
-zypper install bazel
-```
-
-You can also install a specific version of Bazel by specifying the package
-version with `bazel<version>`. For example, to install
-Bazel 4.2, run:
-
-```posix-terminal
-zypper install bazel4.2
-```
diff --git a/8.4.2/install/ubuntu.mdx b/8.4.2/install/ubuntu.mdx
deleted file mode 100644
index a31bd2f..0000000
--- a/8.4.2/install/ubuntu.mdx
+++ /dev/null
@@ -1,166 +0,0 @@
----
-title: 'Installing Bazel on Ubuntu'
----
-
-
-
-This page describes the options for installing Bazel on Ubuntu.
-It also provides links to the Bazel completion scripts and the binary installer,
-if needed as a backup option (for example, if you don't have admin access).
-
-Supported Ubuntu Linux platforms:
-
-* 22.04 (LTS)
-* 20.04 (LTS)
-* 18.04 (LTS)
-
-Bazel should be compatible with other Ubuntu releases and Debian
-"stretch" and above, but this is untested and not guaranteed to work.
-
-Install Bazel on Ubuntu using one of the following methods:
-
-* *Recommended*: [Use Bazelisk](/install/bazelisk)
-* [Use our custom APT repository](#install-on-ubuntu)
-* [Use the binary installer](#binary-installer)
-* [Use the Bazel Docker container](#docker-container)
-* [Compile Bazel from source](/install/compile-source)
-
-**Note:** For Arm-based systems, the APT repository does not contain an `arm64`
-release, and there is no binary installer available. Either use Bazelisk or
-compile from source.
-
-Bazel comes with two completion scripts. After installing Bazel, you can:
-
-* Access the [bash completion script](/install/completion#bash)
-* Install the [zsh completion script](/install/completion#zsh)
-
-## Using Bazel's apt repository
-
-### Step 1: Add Bazel distribution URI as a package source
-
-**Note:** This is a one-time setup step.
-
-```posix-terminal
-sudo apt install apt-transport-https curl gnupg -y
-
-curl -fsSL https://bazel.build/bazel-release.pub.gpg | gpg --dearmor >bazel-archive-keyring.gpg
-
-sudo mv bazel-archive-keyring.gpg /usr/share/keyrings
-
-echo "deb [arch=amd64 signed-by=/usr/share/keyrings/bazel-archive-keyring.gpg] https://storage.googleapis.com/bazel-apt stable jdk1.8" | sudo tee /etc/apt/sources.list.d/bazel.list
-```
-
-The component name "jdk1.8" is kept only for legacy reasons and doesn't relate
-to supported or included JDK versions. Bazel releases are Java-version agnostic.
-Changing the "jdk1.8" component name would break existing users of the repo.
-
-### Step 2: Install and update Bazel
-
-```posix-terminal
-sudo apt update && sudo apt install bazel
-```
-
-Once installed, you can upgrade to a newer version of Bazel as part of your normal system updates:
-
-```posix-terminal
-sudo apt update && sudo apt full-upgrade
-```
-
-The `bazel` package always installs the latest stable version of Bazel.
-You can install specific, older versions of Bazel in addition to the latest
-one, for example:
-
-```posix-terminal
-sudo apt install bazel-1.0.0
-```
-
-This installs Bazel 1.0.0 as `/usr/bin/bazel-1.0.0` on your system. This
-can be useful if you need a specific version of Bazel to build a project, for
-example because it uses a `.bazelversion` file to explicitly state with which
-Bazel version it should be built.
-
-Optionally, you can set `bazel` to a specific version by creating a symlink:
-
-```posix-terminal
-sudo ln -s /usr/bin/bazel-1.0.0 /usr/bin/bazel
-
-bazel --version  # 1.0.0
-```
-
-### Step 3: Install a JDK (optional)
-
-Bazel includes a private, bundled JRE as its runtime and doesn't require you to
-install any specific version of Java.
-
-However, if you want to build Java code using Bazel, you have to install a JDK.
-
-```posix-terminal
-sudo apt install default-jdk
-```
-
-## Using the binary installer
-
-Generally, you should use the apt repository, but the binary installer
-can be useful if you don't have admin permissions on your machine or
-can't add custom repositories.
-
-The binary installers can be downloaded from Bazel's [GitHub releases page](https://github.com/bazelbuild/bazel/releases).
-
-The installer contains the Bazel binary and extracts it into your `$HOME/bin`
-folder. Some additional libraries must be installed manually for Bazel to work.
-
-### Step 1: Install required packages
-
-Bazel needs a C++ compiler and unzip / zip in order to work:
-
-```posix-terminal
-sudo apt install g++ unzip zip
-```
-
-If you want to build Java code using Bazel, install a JDK:
-
-```posix-terminal
-sudo apt-get install default-jdk
-```
-
-### Step 2: Run the installer
-
-Next, download the Bazel binary installer named `bazel-<version>-installer-linux-x86_64.sh`
-from the [Bazel releases page on GitHub](https://github.com/bazelbuild/bazel/releases).
-
-Run it as follows:
-
-```posix-terminal
-chmod +x bazel-<version>-installer-linux-x86_64.sh
-
-./bazel-<version>-installer-linux-x86_64.sh --user
-```
-
-The `--user` flag installs Bazel to the `$HOME/bin` directory on your system and
-sets the `.bazelrc` path to `$HOME/.bazelrc`. Use the `--help` command to see
-additional installation options.
-
-### Step 3: Set up your environment
-
-If you ran the Bazel installer with the `--user` flag as above, the Bazel
-executable is installed in your `$HOME/bin` directory.
-It's a good idea to add this directory to your default paths, as follows:
-
-```posix-terminal
-export PATH="$PATH:$HOME/bin"
-```
-
-You can also add this command to your `~/.bashrc` or `~/.zshrc` file to make it
-permanent.
-
-## Using the Bazel Docker container
-
-We publish a Docker container with Bazel installed for each Bazel version at
-`gcr.io/bazel-public/bazel`. You can use the Docker container as follows:
-
-```
-$ docker pull gcr.io/bazel-public/bazel:<version>
-```
-
-The Docker container is built by [these steps](https://github.com/bazelbuild/continuous-integration/tree/master/bazel/oci).
-
diff --git a/8.4.2/migrate/index.mdx b/8.4.2/migrate/index.mdx
deleted file mode 100644
index 5d96c4a..0000000
--- a/8.4.2/migrate/index.mdx
+++ /dev/null
@@ -1,11 +0,0 @@
----
-title: 'Migrating to Bazel'
----
-
-
-
-This page links to migration guides for Bazel.
- -* [Maven](/migrate/maven) -* [Xcode](/migrate/xcode) -* [CocoaPods](/migrate/cocoapods) diff --git a/8.4.2/migrate/maven.mdx b/8.4.2/migrate/maven.mdx deleted file mode 100644 index 38aaffc..0000000 --- a/8.4.2/migrate/maven.mdx +++ /dev/null @@ -1,241 +0,0 @@ ---- -title: 'Migrating from Maven to Bazel' ---- - - - -This page describes how to migrate from Maven to Bazel, including the -prerequisites and installation steps. It describes the differences between Maven -and Bazel, and provides a migration example using the Guava project. - -When migrating from any build tool to Bazel, it's best to have both build tools -running in parallel until you have fully migrated your development team, CI -system, and any other relevant systems. You can run Maven and Bazel in the same -repository. - -Note: While Bazel supports downloading and publishing Maven artifacts with -[rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external) -, it does not directly support Maven-based plugins. Maven plugins can't be -directly run by Bazel since there's no Maven compatibility layer. - -## Before you begin - -* [Install Bazel](/install) if it's not yet installed. -* If you're new to Bazel, go through the tutorial [Introduction to Bazel: - Build Java](/start/java) before you start migrating. The tutorial explains - Bazel's concepts, structure, and label syntax. - -## Differences between Maven and Bazel - -* Maven uses top-level `pom.xml` file(s). Bazel supports multiple build files - and multiple targets per `BUILD` file, allowing for builds that are more - incremental than Maven's. -* Maven takes charge of steps for the deployment process. Bazel does not - automate deployment. -* Bazel enables you to express dependencies between languages. -* As you add new sections to the project, with Bazel you may need to add new - `BUILD` files. Best practice is to add a `BUILD` file to each new Java - package. - -## Migrate from Maven to Bazel - -The steps below describe how to migrate your project to Bazel: - -1. [Create the MODULE.bazel file](#1-build) -2. [Create one BUILD file](#2-build) -3. [Create more BUILD files](#3-build) -4. [Build using Bazel](#4-build) - -Examples below come from a migration of the [Guava -project](https://github.com/google/guava) from Maven to Bazel. The -Guava project used is release `v31.1`. The examples using Guava do not walk -through each step in the migration, but they do show the files and contents that -are generated or added manually for the migration. - -``` -$ git clone https://github.com/google/guava.git && cd guava -$ git checkout v31.1 -``` - -### 1. Create the MODULE.bazel file - -Create a file named `MODULE.bazel` at the root of your project. If your project -has no external dependencies, this file can be empty. - -If your project depends on files or packages that are not in one of the -project's directories, specify these external dependencies in the MODULE.bazel -file. You can use `rules_jvm_external` to manage dependencies from Maven. For -instructions about using this ruleset, see [the -README](https://github.com/bazelbuild/rules_jvm_external/#rules_jvm_external) -. - -#### Guava project example: external dependencies - -You can list the external dependencies of the [Guava -project](https://github.com/google/guava) with the -[`rules_jvm_external`](https://github.com/bazelbuild/rules_jvm_external) -ruleset. 
- -Add the following snippet to the `MODULE.bazel` file: - -```python -bazel_dep(name = "rules_jvm_external", version = "6.2") -maven = use_extension("@rules_jvm_external//:extensions.bzl", "maven") -maven.install( - artifacts = [ - "com.google.code.findbugs:jsr305:3.0.2", - "com.google.errorprone:error_prone_annotations:2.11.0", - "com.google.j2objc:j2objc-annotations:1.3", - "org.codehaus.mojo:animal-sniffer-annotations:1.20", - "org.checkerframework:checker-qual:3.12.0", - ], - repositories = [ - "https://repo1.maven.org/maven2", - ], -) -use_repo(maven, "maven") -``` - -### 2. Create one BUILD file - -Now that you have your workspace defined and external dependencies (if -applicable) listed, you need to create `BUILD` files to describe how your -project should be built. Unlike Maven with its one `pom.xml` file, Bazel can use -many `BUILD` files to build a project. These files specify multiple build -targets, which allow Bazel to produce incremental builds. - -Add `BUILD` files in stages. Start with adding one `BUILD` file at the root of -your project and using it to do an initial build using Bazel. Then, you refine -your build by adding more `BUILD` files with more granular targets. - -1. In the same directory as your `MODULE.bazel` file, create a text file and - name it `BUILD`. - -2. In this `BUILD` file, use the appropriate rule to create one target to build - your project. Here are some tips: - - * Use the appropriate rule: - * To build projects with a single Maven module, use the - `java_library` rule as follows: - - ```python - java_library( - name = "everything", - srcs = glob(["src/main/java/**/*.java"]), - resources = glob(["src/main/resources/**"]), - deps = ["//:all-external-targets"], - ) - ``` - - * To build projects with multiple Maven modules, use the - `java_library` rule as follows: - - ```python - java_library( - name = "everything", - srcs = glob([ - "Module1/src/main/java/**/*.java", - "Module2/src/main/java/**/*.java", - ... - ]), - resources = glob([ - "Module1/src/main/resources/**", - "Module2/src/main/resources/**", - ... - ]), - deps = ["//:all-external-targets"], - ) - ``` - - * To build binaries, use the `java_binary` rule: - - ```python - java_binary( - name = "everything", - srcs = glob(["src/main/java/**/*.java"]), - resources = glob(["src/main/resources/**"]), - deps = ["//:all-external-targets"], - main_class = "com.example.Main" - ) - ``` - - * Specify the attributes: - * `name`: Give the target a meaningful name. In the examples - above, the target is called "everything." - * `srcs`: Use globbing to list all .java files in your project. - * `resources`: Use globbing to list all resources in your project. - * `deps`: You need to determine which external dependencies your - project needs. - * Take a look at the [example below of this top-level BUILD - file](#guava-2) from the migration of the Guava project. - -3. Now that you have a `BUILD` file at the root of your project, build your - project to ensure that it works. On the command line, from your workspace - directory, use `bazel build //:everything` to build your project with Bazel. - - The project has now been successfully built with Bazel. You will need to add - more `BUILD` files to allow incremental builds of the project. - -#### Guava project example: start with one BUILD file - -When migrating the Guava project to Bazel, initially one `BUILD` file is used to -build the entire project. 
-Here are the contents of this initial `BUILD` file in
-the workspace directory:
-
-```python
-java_library(
-    name = "everything",
-    srcs = glob([
-        "guava/src/**/*.java",
-        "futures/failureaccess/src/**/*.java",
-    ]),
-    javacopts = ["-XepDisableAllChecks"],
-    deps = [
-        "@maven//:com_google_code_findbugs_jsr305",
-        "@maven//:com_google_errorprone_error_prone_annotations",
-        "@maven//:com_google_j2objc_j2objc_annotations",
-        "@maven//:org_checkerframework_checker_qual",
-        "@maven//:org_codehaus_mojo_animal_sniffer_annotations",
-    ],
-)
-```
-
-### 3. Create more BUILD files (optional)
-
-Bazel does work with just one `BUILD` file, as you saw after completing your
-first build. You should still consider breaking the build into smaller chunks by
-adding more `BUILD` files with granular targets.
-
-Multiple `BUILD` files with multiple targets will give the build increased
-granularity, allowing:
-
-* increased incremental builds of the project,
-* increased parallel execution of the build,
-* better maintainability of the build for future users, and
-* control over visibility of targets between packages, which can prevent
-  issues such as libraries containing implementation details leaking into
-  public APIs.
-
-Tips for adding more `BUILD` files:
-
-* You can start by adding a `BUILD` file to each Java package. Start with Java
-  packages that have the fewest dependencies and work your way up to packages
-  with the most dependencies.
-* As you add `BUILD` files and specify targets, add these new targets to the
-  `deps` sections of targets that depend on them. Note that the `glob()`
-  function does not cross package boundaries, so as the number of packages
-  grows the files matched by `glob()` will shrink.
-* Any time you add a `BUILD` file to a `main` directory, ensure that you add a
-  `BUILD` file to the corresponding `test` directory.
-* Take care to limit visibility properly between packages.
-* To simplify troubleshooting errors in your setup of `BUILD` files, ensure
-  that the project continues to build with Bazel as you add each build file.
-  Run `bazel build //...` to ensure all of your targets still build.
-
-### 4. Build using Bazel
-
-You've been building using Bazel as you add `BUILD` files to validate the setup
-of the build.
-
-When you have `BUILD` files at the desired granularity, you can use Bazel to
-produce all of your builds.
diff --git a/8.4.2/migrate/xcode.mdx b/8.4.2/migrate/xcode.mdx
deleted file mode 100644
index 986cd11..0000000
--- a/8.4.2/migrate/xcode.mdx
+++ /dev/null
@@ -1,280 +0,0 @@
----
-title: 'Migrating from Xcode to Bazel'
----
-
-
-
-This page describes how to build or test an Xcode project with Bazel. It
-describes the differences between Xcode and Bazel, and provides the steps for
-converting an Xcode project to a Bazel project. It also provides troubleshooting
-solutions to address common errors.
-
-## Differences between Xcode and Bazel
-
-* Bazel requires you to explicitly specify every build target and its
-  dependencies, plus the corresponding build settings via build rules.
-
-* Bazel requires all files on which the project depends to be present within
-  the workspace directory or specified as dependencies in the `MODULE.bazel`
-  file.
-
-* When building Xcode projects with Bazel, the `BUILD` file(s) become the
-  source of truth.
-If you work on the project in Xcode, you must generate a
-  new version of the Xcode project that matches the `BUILD` files using
-  [rules_xcodeproj](https://github.com/buildbuddy-io/rules_xcodeproj/)
-  whenever you update the `BUILD` files. Certain changes to the `BUILD` files,
-  such as adding dependencies to a target, don't require regenerating the
-  project, which can speed up development. If you're not using Xcode, the
-  `bazel build` and `bazel test` commands provide build and test capabilities
-  with certain limitations described later in this guide.
-
-## Before you begin
-
-Before you begin, do the following:
-
-1. [Install Bazel](/install) if you have not already done so.
-
-2. If you're not familiar with Bazel and its concepts, complete the [iOS app
-   tutorial](/start/ios-app). You should understand the Bazel workspace,
-   including the `MODULE.bazel` and `BUILD` files, as well as the concepts of
-   targets, build rules, and Bazel packages.
-
-3. Analyze and understand the project's dependencies.
-
-### Analyze project dependencies
-
-Unlike Xcode, Bazel requires you to explicitly declare all dependencies for
-every target in the `BUILD` file.
-
-For more information on external dependencies, see [Working with external
-dependencies](/docs/external).
-
-## Build or test an Xcode project with Bazel
-
-To build or test an Xcode project with Bazel, do the following:
-
-1. [Create the `MODULE.bazel` file](#create-workspace)
-
-2. [(Experimental) Integrate SwiftPM dependencies](#integrate-swiftpm)
-
-3. [Create a `BUILD` file:](#create-build-file)
-
-   a. [Add the application target](#add-app-target)
-
-   b. [(Optional) Add the test target(s)](#add-test-target)
-
-   c. [Add the library target(s)](#add-library-target)
-
-4. [(Optional) Granularize the build](#granularize-build)
-
-5. [Run the build](#run-build)
-
-6. [Generate the Xcode project with rules_xcodeproj](#generate-the-xcode-project-with-rules_xcodeproj)
-
-### Step 1: Create the `MODULE.bazel` file
-
-Create a `MODULE.bazel` file in a new directory. This directory becomes the
-Bazel workspace root. If the project uses no external dependencies, this file
-can be empty. If the project depends on files or packages that are not in one of
-the project's directories, specify these external dependencies in the
-`MODULE.bazel` file.
-
-Note: Place the project source code within the directory tree containing the
-`MODULE.bazel` file.
-
-### Step 2: (Experimental) Integrate SwiftPM dependencies
-
-To integrate SwiftPM dependencies into the Bazel workspace with
-[swift_bazel](https://github.com/cgrindel/swift_bazel), you must
-convert them into Bazel packages as described in the [following
-tutorial](https://chuckgrindel.com/swift-packages-in-bazel-using-swift_bazel/).
-
-Note: SwiftPM support is a manual process with many variables. SwiftPM
-integration with Bazel has not been fully verified and is not officially
-supported.
-
-### Step 3: Create a `BUILD` file
-
-Once you have defined the workspace and external dependencies, you need to
-create a `BUILD` file that tells Bazel how the project is structured.
-Create the
-`BUILD` file at the root of the Bazel workspace and configure it to do an
-initial build of the project as follows:
-
-* [Step 3a: Add the application target](#step-3a-add-the-application-target)
-* [Step 3b: (Optional) Add the test target(s)](#step-3b-optional-add-the-test-target-s)
-* [Step 3c: Add the library target(s)](#step-3c-add-the-library-target-s)
-
-**Tip:** To learn more about packages and other Bazel concepts, see [Workspaces,
-packages, and targets](/concepts/build-ref).
-
-#### Step 3a: Add the application target
-
-Add a
-[`macos_application`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-macos.md#macos_application)
-or an
-[`ios_application`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-ios.md#ios_application)
-rule target. This target builds a macOS or iOS application bundle, respectively.
-In the target, specify the following at the minimum:
-
-* `bundle_id` - the bundle ID (reverse-DNS path followed by app name) of the
-  binary.
-
-* `provisioning_profile` - provisioning profile from your Apple Developer
-  account (if building for an iOS device).
-
-* `families` (iOS only) - whether to build the application for iPhone, iPad,
-  or both.
-
-* `infoplists` - list of .plist files to merge into the final Info.plist file.
-
-* `minimum_os_version` - the minimum version of macOS or iOS that the
-  application supports. This ensures Bazel builds the application with the
-  correct API levels.
-
-#### Step 3b: (Optional) Add the test target(s)
-
-Bazel's [Apple build
-rules](https://github.com/bazelbuild/rules_apple) support running
-unit and UI tests on all Apple platforms. Add test targets as follows:
-
-* [`macos_unit_test`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-macos.md#macos_unit_test)
-  to run library-based and application-based unit tests on macOS.
-
-* [`ios_unit_test`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-ios.md#ios_unit_test)
-  to build and run library-based unit tests on iOS.
-
-* [`ios_ui_test`](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-ios.md#ios_ui_test)
-  to build and run user interface tests in the iOS simulator.
-
-* Similar test rules exist for
-  [tvOS](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-tvos.md),
-  [watchOS](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-watchos.md)
-  and
-  [visionOS](https://github.com/bazelbuild/rules_apple/blob/master/doc/rules-visionos.md).
-
-At the minimum, specify a value for the `minimum_os_version` attribute. While
-other packaging attributes, such as `bundle_identifier` and `infoplists`,
-default to most commonly used values, ensure that those defaults are compatible
-with the project and adjust them as necessary. For tests that require the iOS
-simulator, also specify the `ios_application` target name as the value of the
-`test_host` attribute.
-
-#### Step 3c: Add the library target(s)
-
-Add an [`objc_library`](/reference/be/objective-c#objc_library) target for each
-Objective-C library and a
-[`swift_library`](https://github.com/bazelbuild/rules_swift/blob/master/doc/rules.md#swift_library)
-target for each Swift library on which the application and/or tests depend.
-
-Add the library targets as follows:
-
-* Add the application library targets as dependencies to the application
-  targets.
-
-* Add the test library targets as dependencies to the test targets.
-
-* List the implementation sources in the `srcs` attribute.
-
-* List the headers in the `hdrs` attribute.
-
-Note: You can use the [`glob`](/reference/be/functions#glob) function to include
-all sources and/or headers of a certain type. Use it carefully as it might
-include files you do not want Bazel to build.
-
-You can browse existing examples for various types of applications directly in
-the [rules_apple examples
-directory](https://github.com/bazelbuild/rules_apple/tree/master/examples/). For
-example:
-
-* [macOS application targets](https://github.com/bazelbuild/rules_apple/tree/master/examples/macos)
-
-* [iOS application targets](https://github.com/bazelbuild/rules_apple/tree/master/examples/ios)
-
-* [Multi platform applications (macOS, iOS, watchOS, tvOS)](https://github.com/bazelbuild/rules_apple/tree/master/examples/multi_platform)
-
-For more information on build rules, see [Apple Rules for
-Bazel](https://github.com/bazelbuild/rules_apple).
-
-At this point, it is a good idea to test the build:
-
-`bazel build //:<application_target>`
-
-### Step 4: (Optional) Granularize the build
-
-If the project is large, or as it grows, consider chunking it into multiple
-Bazel packages. This increased granularity provides:
-
-* Increased incrementality of builds,
-
-* Increased parallelization of build tasks,
-
-* Better maintainability for future users,
-
-* Better control over source code visibility across targets and packages. This
-  prevents issues such as libraries containing implementation details leaking
-  into public APIs.
-
-Tips for granularizing the project:
-
-* Put each library in its own Bazel package. Start with those requiring the
-  fewest dependencies and work your way up the dependency tree.
-
-* As you add `BUILD` files and specify targets, add these new targets to the
-  `deps` attributes of targets that depend on them.
-
-* The `glob()` function does not cross package boundaries, so as the number of
-  packages grows the files matched by `glob()` will shrink.
-
-* When adding a `BUILD` file to a `main` directory, also add a `BUILD` file to
-  the corresponding `test` directory.
-
-* Enforce healthy visibility limits across packages.
-
-* Build the project after each major change to the `BUILD` files and fix build
-  errors as you encounter them.
-
-### Step 5: Run the build
-
-Run the fully migrated build to ensure it completes with no errors or warnings.
-Run every application and test target individually to more easily find sources
-of any errors that occur.
-
-For example:
-
-```posix-terminal
-bazel build //:my-target
-```
-
-### Step 6: Generate the Xcode project with rules_xcodeproj
-
-When building with Bazel, the `MODULE.bazel` and `BUILD` files become the source
-of truth about the build. To make Xcode aware of this, you must generate a
-Bazel-compatible Xcode project using
-[rules_xcodeproj](https://github.com/buildbuddy-io/rules_xcodeproj#features).
-
-### Troubleshooting
-
-Bazel errors can arise when it gets out of sync with the selected Xcode version,
-like when you apply an update. Here are some things to try if you're
-experiencing errors with Xcode, for example "Xcode version must be specified to
-use an Apple CROSSTOOL".
-
-* Manually run Xcode and accept any terms and conditions.
-
-* Use `xcode-select` to indicate the correct version, accept the license, and
-  clear Bazel's state.
-
-```posix-terminal
-  sudo xcode-select -s /Applications/Xcode.app/Contents/Developer
-
-  sudo xcodebuild -license
-
-  bazel sync --configure
-```
-
-* If this does not work, you may also try running `bazel clean --expunge`.
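-
-As a quick sanity check (a small addition to the steps above), you can also
-print the developer directory Bazel will pick up; `xcode-select -p` shows the
-currently selected path:
-
-```posix-terminal
-xcode-select -p
-```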
-
-Note: If you've saved your Xcode to a different path, you can use
-`xcode-select -s` to point to that path.
diff --git a/8.4.2/query/aquery.mdx b/8.4.2/query/aquery.mdx
deleted file mode 100644
index 2176ff6..0000000
--- a/8.4.2/query/aquery.mdx
+++ /dev/null
@@ -1,385 +0,0 @@
----
-title: 'Action Graph Query (aquery)'
----
-
-
-
-The `aquery` command allows you to query for actions in your build graph.
-It operates on the post-analysis Configured Target Graph and exposes
-information about **Actions, Artifacts and their relationships.**
-
-`aquery` is useful when you are interested in the properties of the Actions/Artifacts
-generated from the Configured Target Graph. For example, the actual commands run
-and their inputs/outputs/mnemonics.
-
-The tool accepts several command-line [options](#command-options).
-Notably, the aquery command runs on top of a regular Bazel build and inherits
-the set of options available during a build.
-
-It supports the same set of functions that is available to traditional
-`query`, except `siblings`, `buildfiles` and
-`tests`.
-
-An example `aquery` output (without specific details):
-
-```
-$ bazel aquery 'deps(//some:label)'
-action 'Writing file some_file_name'
-  Mnemonic: ...
-  Target: ...
-  Configuration: ...
-  ActionKey: ...
-  Inputs: [...]
-  Outputs: [...]
-```
-
-## Basic syntax
-
-A simple example of the syntax for `aquery` is as follows:
-
-`bazel aquery "aquery_function(function(//target))"`
-
-The query expression (in quotes) consists of the following:
-
-* `aquery_function(...)`: functions specific to `aquery`.
-  More details [below](#using-aquery-functions).
-* `function(...)`: the standard [functions](/query/language#functions)
-  as traditional `query`.
-* `//target` is the label of the target of interest.
-
-```
-# aquery examples:
-# Get the action graph generated while building //src/target_a
-$ bazel aquery '//src/target_a'
-
-# Get the action graph generated while building all dependencies of //src/target_a
-$ bazel aquery 'deps(//src/target_a)'
-
-# Get the action graph generated while building all dependencies of //src/target_a
-# whose input filenames match the regex ".*cpp".
-$ bazel aquery 'inputs(".*cpp", deps(//src/target_a))'
-```
-
-## Using aquery functions
-
-There are three `aquery` functions:
-
-* `inputs`: filter actions by inputs.
-* `outputs`: filter actions by outputs.
-* `mnemonic`: filter actions by mnemonic.
-
-`expr ::= inputs(word, expr)`
-
-  The `inputs` operator returns the actions generated from building `expr`,
-  whose input filenames match the regex provided by `word`.
-
-`$ bazel aquery 'inputs(".*cpp", deps(//src/target_a))'`
-
-`outputs` and `mnemonic` functions share a similar syntax.
-
-You can also combine functions to achieve the AND operation. For example:
-
-```
-  $ bazel aquery 'mnemonic("Cpp.*", (inputs(".*cpp", inputs("foo.*", //src/target_a))))'
-```
-
-  The above command would find all actions involved in building `//src/target_a`,
-  whose mnemonics match `"Cpp.*"` and inputs match the patterns
-  `".*cpp"` and `"foo.*"`.
-
-Important: aquery functions can't be nested inside non-aquery functions.
-Conceptually, this makes sense since the output of aquery functions is Actions,
-not Configured Targets.
-
-An example of the syntax error produced:
-
-```
-  $ bazel aquery 'deps(inputs(".*cpp", //src/target_a))'
-  ERROR: aquery filter functions (inputs, outputs, mnemonic) produce actions,
-  and therefore can't be the input of other function types: deps
-  deps(inputs(".*cpp", //src/target_a))
-```
-
-## Options
-
-### Build options
-
-`aquery` runs on top of a regular Bazel build and thus inherits the set of
-[options](/reference/command-line-reference#build-options)
-available during a build.
-
-### Aquery options
-
-#### `--output=(text|summary|proto|jsonproto|textproto), default=text`
-
-The default output format (`text`) is human-readable;
-use `proto`, `textproto`, or `jsonproto` for a machine-readable format.
-The proto message is `analysis.ActionGraphContainer`.
-
-#### `--include_commandline, default=true`
-
-Includes the content of the action command lines in the output (potentially large).
-
-#### `--include_artifacts, default=true`
-
-Includes names of the action inputs and outputs in the output (potentially large).
-
-#### `--include_aspects, default=true`
-
-Whether to include Aspect-generated actions in the output.
-
-#### `--include_param_files, default=false`
-
-Include the content of the param files used in the command (potentially large).
-
-Warning: Enabling this flag will automatically enable the `--include_commandline` flag.
-
-#### `--include_file_write_contents, default=false`
-
-Include the file contents for the `actions.write()` action and the contents of
-the manifest file for the `SourceSymlinkManifest` action. The file contents are
-returned in the `file_contents` field with the proto-based `--output` formats.
-With `--output=text`, the output contains a `FileWriteContents: []` line.
-
-#### `--skyframe_state, default=false`
-
-Without performing extra analysis, dump the Action Graph from Skyframe.
-
-Note: Specifying a target with `--skyframe_state` is currently not supported.
-This flag is only available with `--output=proto` or `--output=textproto`.
-
-## Other tools and features
-
-### Querying against the state of Skyframe
-
-[Skyframe](/reference/skyframe) is the evaluation and
-incrementality model of Bazel. On each Bazel server instance, Skyframe stores
-the dependency graph constructed from the previous runs of the
-[Analysis phase](/run/build#analysis).
-
-In some cases, it is useful to query the Action Graph on Skyframe.
-An example use case would be:
-
-1. Run `bazel build //target_a`
-2. Run `bazel build //target_b`
-3. File `foo.out` was generated.
-
-_As a Bazel user, I want to determine if `foo.out` was generated from building
-`//target_a` or `//target_b`_.
-
-One could run `bazel aquery 'outputs("foo.out", //target_a)'` and
-`bazel aquery 'outputs("foo.out", //target_b)'` to figure out the action responsible
-for creating `foo.out`, and in turn the target. However, the number of different
-targets previously built can be larger than 2, which makes running multiple `aquery`
-commands a hassle.
-
-As an alternative, the `--skyframe_state` flag can be used:
-
-```
-  # List all actions on Skyframe's action graph
-  $ bazel aquery --output=proto --skyframe_state
-
-  # or
-
-  # List all actions on Skyframe's action graph, whose output matches "foo.out"
-  $ bazel aquery --output=proto --skyframe_state 'outputs("foo.out")'
-```
-
-With `--skyframe_state` mode, `aquery` takes the content of the Action Graph
-that Skyframe keeps on the Bazel server instance, (optionally) performs
-filtering on it, and outputs the content, without re-running the analysis phase.
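-
-As a minimal sketch of this workflow (using the hypothetical targets
-`//target_a` and `//target_b` from the example above; `--skyframe_state`
-requires a proto-based output format):
-
-```
-  # Build the candidate targets, then ask Skyframe which cached action
-  # produces foo.out -- no extra analysis is run.
-  $ bazel build //target_a //target_b
-  $ bazel aquery --output=textproto --skyframe_state 'outputs("foo.out")'
-```
-
-The owning target reported for the matching action then tells you which build
-generated the file.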
-
-#### Special considerations
-
-##### Output format
-
-`--skyframe_state` is currently only available for `--output=proto`
-and `--output=textproto`.
-
-##### Non-inclusion of target labels in the query expression
-
-Currently, `--skyframe_state` queries the whole action graph that exists on Skyframe,
-regardless of the targets. Having the target label specified in the query together with
-`--skyframe_state` is considered a syntax error:
-
-```
-  # WRONG: Target Included
-  $ bazel aquery --output=proto --skyframe_state //target_a
-  ERROR: Error while parsing '//target_a)': Specifying build target(s) [//target_a] with --skyframe_state is currently not supported.
-
-  # WRONG: Target Included
-  $ bazel aquery --output=proto --skyframe_state 'inputs(".*.java", //target_a)'
-  ERROR: Error while parsing '//target_a)': Specifying build target(s) [//target_a] with --skyframe_state is currently not supported.
-
-  # CORRECT: Without Target
-  $ bazel aquery --output=proto --skyframe_state
-  $ bazel aquery --output=proto --skyframe_state 'inputs(".*.java")'
-```
-
-### Comparing aquery outputs
-
-You can compare the outputs of two different aquery invocations using the `aquery_differ` tool.
-For instance, when you make some changes to your rule definition and want to verify that the
-command lines being run did not change, `aquery_differ` is the tool for that.
-
-The tool is available in the [bazelbuild/bazel](https://github.com/bazelbuild/bazel/tree/master/tools/aquery_differ) repository.
-To use it, clone the repository to your local machine. An example usage:
-
-```
-  $ bazel run //tools/aquery_differ -- \
-  --before=/path/to/before.proto \
-  --after=/path/to/after.proto \
-  --input_type=proto \
-  --attrs=cmdline \
-  --attrs=inputs
-```
-
-The above command returns the difference between the `before` and `after` aquery
-outputs (which actions were present in one but not the other, which actions have
-different command lines or inputs in each aquery output, and so on). The result
-of running the above command would be:
-
-```
-  Aquery output 'after' change contains an action that generates the following outputs that aquery output 'before' change doesn't:
-  ...
-  /list of output files/
-  ...
-
-  [cmdline]
-  Difference in the action that generates the following output(s):
-    /path/to/abc.out
-  --- /path/to/before.proto
-  +++ /path/to/after.proto
-  @@ -1,3 +1,3 @@
-    ...
-    /cmdline diff, in unified diff format/
-    ...
-```
-
-#### Command options
-
-`--before, --after`: The aquery output files to be compared
-
-`--input_type=(proto|text_proto), default=proto`: the format of the input
-files. Support is provided for `proto` and `textproto` aquery output.
-
-`--attrs=(cmdline|inputs), default=cmdline`: the attributes of actions
-to be compared.
-
-### Aspect-on-aspect
-
-It is possible for [Aspects](/extending/aspects)
-to be applied on top of each other. The aquery output of the action generated by
-these Aspects would then include the _Aspect path_, which is the sequence of
-Aspects applied to the target which generated the action.
-
-An example of Aspect-on-Aspect:
-
-```
-  t0
-  ^
-  | <- a1
-  t1
-  ^
-  | <- a2
-  t2
-```
-
-Let t<sub>i</sub> be a target of rule r<sub>i</sub>, which applies an Aspect
-a<sub>i</sub> to its dependencies.
-
-Assume that a2 generates an action X when applied to target t0. The text output of
-`bazel aquery --include_aspects 'deps(//t2)'` for action X would be:
-
-```
-  action ...
-    Mnemonic: ...
-    Target: //my_pkg:t0
-    Configuration: ...
-    AspectDescriptors: [//my_pkg:rule.bzl%a2(foo=...)
-      -> //my_pkg:rule.bzl%a1(bar=...)]
-    ...
-```
-
-This means that action `X` was generated by Aspect `a2` applied onto
-`a1(t0)`, where `a1(t0)` is the result of Aspect `a1` applied
-onto target `t0`.
-
-Each `AspectDescriptor` has the following format:
-
-```
-  AspectClass([param=value,...])
-```
-
-`AspectClass` could be the name of the Aspect class (for native Aspects) or
-`bzl_file%aspect_name` (for Starlark Aspects). `AspectDescriptor`s are
-sorted in topological order of the
-[dependency graph](/extending/aspects#aspect_basics).
-
-### Linking with the JSON profile
-
-While aquery provides information about the actions being run in a build (why they're being run,
-their inputs/outputs), the [JSON profile](/rules/performance#performance-profiling)
-tells us the timing and duration of their execution.
-It is possible to combine these two sets of information via a common denominator: an action's primary output.
-
-To include actions' outputs in the JSON profile, generate the profile with
-`--experimental_include_primary_output --noslim_profile`.
-Slim profiles are incompatible with the inclusion of primary outputs. An action's primary output
-is included by default by aquery.
-
-We don't currently provide a canonical tool to combine these two data sources, but you should be
-able to build your own script with the above information.
-
-## Known issues
-
-### Handling shared actions
-
-Sometimes actions are
-[shared](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/actions/Actions.java;l=59;drc=146d51aa1ec9dcb721a7483479ef0b1ac21d39f1)
-between configured targets.
-
-In the execution phase, those shared actions are
-[simply considered as one](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/actions/Actions.java;l=241;drc=003b8734036a07b496012730964ac220f486b61f) and only executed once.
-However, aquery operates on the pre-execution, post-analysis action graph, and hence treats these
-like separate actions whose output Artifacts have the exact same `execPath`. As a result,
-equivalent Artifacts appear duplicated.
-
-The list of aquery issues/planned features can be found on
-[GitHub](https://github.com/bazelbuild/bazel/labels/team-Performance).
-
-## FAQs
-
-### The ActionKey remains the same even though the content of an input file changed.
-
-In the context of aquery, the `ActionKey` refers to the string returned by
-[ActionAnalysisMetadata#getKey](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/actions/ActionAnalysisMetadata.java;l=89;drc=8b856f5484f0117b2aebc302f849c2a15f273310):
-
-```
-  Returns a string encoding all of the significant behaviour of this Action that might affect the
-  output. The general contract of `getKey` is this: if the work to be performed by the
-  execution of this action changes, the key must change.
-
-  ...
-
-  Examples of changes that should affect the key are:
-
-  - Changes to the BUILD file that materially affect the rule which gave rise to this Action.
-  - Changes to the command-line options, environment, or other global configuration resources
-    which affect the behaviour of this kind of Action (other than changes to the names of the
-    input/output files, which are handled externally).
-  - An upgrade to the build tools which changes the program logic of this kind of Action
-    (typically this is achieved by incorporating a UUID into the key, which is changed each
-    time the program logic of this action changes).
-  Note the following exception: for actions that discover inputs, the key must change if any
-  input names change or else action validation may falsely validate.
-```
-
-This excludes the changes to the content of the input files, and is not to be confused with
-[RemoteCacheClient#ActionKey](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/remote/common/RemoteCacheClient.java;l=38;drc=21577f202eb90ce94a337ebd2ede824d609537b6).
-
-## Updates
-
-For any issues/feature requests, please file an issue [here](https://github.com/bazelbuild/bazel/issues/new).
diff --git a/8.4.2/query/cquery.mdx b/8.4.2/query/cquery.mdx
deleted file mode 100644
index bd829c8..0000000
--- a/8.4.2/query/cquery.mdx
+++ /dev/null
@@ -1,646 +0,0 @@
----
-title: 'Configurable Query (cquery)'
----
-
-
-
-`cquery` is a variant of [`query`](/query/language) that correctly handles
-[`select()`](/docs/configurable-attributes) and build options' effects on the build
-graph.
-
-It achieves this by running over the results of Bazel's [analysis
-phase](/extending/concepts#evaluation-model),
-which integrates these effects. `query`, by contrast, runs over the results of
-Bazel's loading phase, before options are evaluated.
-
-For example:
-
-```
-$ cat > tree/BUILD <<EOF
-sh_library(
-    name = "ash",
-    deps = select({
-        ":excelsior": [":manna-ash"],
-        ":americana": [":white-ash"],
-        "//conditions:default": [":common-ash"],
-    }),
-)
-sh_library(name = "manna-ash")
-sh_library(name = "white-ash")
-sh_library(name = "common-ash")
-config_setting(
-    name = "excelsior",
-    values = {"define": "species=excelsior"},
-)
-config_setting(
-    name = "americana",
-    values = {"define": "species=americana"},
-)
-EOF
-```
-
-```
-# Traditional query: query doesn't know which select() branch you will choose,
-# so it conservatively lists all possible choices, including all used config_settings.
-$ bazel query "deps(//tree:ash)" --noimplicit_deps
-//tree:americana
-//tree:ash
-//tree:common-ash
-//tree:excelsior
-//tree:manna-ash
-//tree:white-ash
-
-# cquery: cquery lets you set build options at the command line and chooses
-# the exact dependencies they imply (and also the config_setting targets).
-$ bazel cquery "deps(//tree:ash)" --define species=excelsior --noimplicit_deps
-//tree:ash (9f87702)
-//tree:manna-ash (9f87702)
-//tree:americana (9f87702)
-//tree:excelsior (9f87702)
-```
-
-Each result includes a [unique identifier](#configurations) `(9f87702)` of
-the [configuration](/reference/glossary#configuration) the
-target is built with.
-
-Since `cquery` runs over the configured target graph, it doesn't have insight
-into artifacts like build actions, nor access to [`test_suite`](/reference/be/general#test_suite)
-rules, as they are not configured targets. For the former, see [`aquery`](/query/aquery).
-
-## Basic syntax
-
-A simple `cquery` call looks like:
-
-`bazel cquery "function(//target)"`
-
-The query expression `"function(//target)"` consists of the following:
-
-* **`function(...)`** is the function to run on the target. `cquery`
-  supports most
-  of `query`'s [functions](/query/language#functions), plus a
-  few new ones.
-* **`//target`** is the expression fed to the function. In this example, the
-  expression is a simple target. But the query language also allows nesting of functions.
-  See the [Query guide](/query/guide) for examples.
-
-`cquery` requires a target to run through the [loading and analysis](/extending/concepts#evaluation-model)
-phases.
Unless otherwise specified, `cquery` parses the target(s) listed in the -query expression. See [`--universe_scope`](#universe-scope) -for querying dependencies of top-level build targets. - -## Configurations - -The line: - -``` -//tree:ash (9f87702) -``` - -means `//tree:ash` was built in a configuration with ID `9f87702`. For most -targets, this is an opaque hash of the build option values defining the -configuration. - -To see the configuration's complete contents, run: - -``` -$ bazel config 9f87702 -``` - -`9f87702` is a prefix of the complete ID. This is because complete IDs are -SHA-256 hashes, which are long and hard to follow. `cquery` understands any valid -prefix of a complete ID, similar to -[Git short hashes](https://git-scm.com/book/en/v2/Git-Tools-Revision-Selection#_revision_selection). - To see complete IDs, run `$ bazel config`. - -## Target pattern evaluation - -`//foo` has a different meaning for `cquery` than for `query`. This is because -`cquery` evaluates _configured_ targets and the build graph may have multiple -configured versions of `//foo`. - -For `cquery`, a target pattern in the query expression evaluates -to every configured target with a label that matches that pattern. Output is -deterministic, but `cquery` makes no ordering guarantee beyond the -[core query ordering contract](/query/language#graph-order). - -This produces subtler results for query expressions than with `query`. -For example, the following can produce multiple results: - -``` -# Analyzes //foo in the target configuration, but also analyzes -# //genrule_with_foo_as_tool which depends on an exec-configured -# //foo. So there are two configured target instances of //foo in -# the build graph. -$ bazel cquery //foo --universe_scope=//foo,//genrule_with_foo_as_tool -//foo (9f87702) -//foo (exec) -``` - -If you want to precisely declare which instance to query over, use -the [`config`](#config) function. - -See `query`'s [target pattern -documentation](/query/language#target-patterns) for more information on target patterns. - -## Functions - -Of the [set of functions](/query/language#functions "list of query functions") -supported by `query`, `cquery` supports all but -[`allrdeps`](/query/language#allrdeps), -[`buildfiles`](/query/language#buildfiles), -[`rbuildfiles`](/query/language#rbuildfiles), -[`siblings`](/query/language#siblings), [`tests`](/query/language#tests), and -[`visible`](/query/language#visible). - -`cquery` also introduces the following new functions: - -### config - -`expr ::= config(expr, word)` - -The `config` operator attempts to find the configured target for -the label denoted by the first argument and configuration specified by the -second argument. - -Valid values for the second argument are `null` or a -[custom configuration hash](#configurations). Hashes can be retrieved from `$ -bazel config` or a previous `cquery`'s output. - -Examples: - -``` -$ bazel cquery "config(//bar, 3732cc8)" --universe_scope=//foo -``` - -``` -$ bazel cquery "deps(//foo)" -//bar (exec) -//baz (exec) - -$ bazel cquery "config(//baz, 3732cc8)" -``` - -If not all results of the first argument can be found in the specified -configuration, only those that can be found are returned. If no results -can be found in the specified configuration, the query fails. - -## Options - -### Build options - -`cquery` runs over a regular Bazel build and thus inherits the set of -[options](/reference/command-line-reference#build-options) available during a build. 
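-
-For example (a sketch reusing the `//tree:ash` target from the introduction;
-any regular build flag works the same way), appending build options to the
-`cquery` invocation changes which `select()` branches appear in the results:
-
-```
-# --define species=americana selects the :white-ash branch of //tree:ash.
-$ bazel cquery "deps(//tree:ash)" --define species=americana --noimplicit_deps
-```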
-
-### Using cquery options
-
-#### `--universe_scope` (comma-separated list)
-
-Often, the dependencies of configured targets go through
-[transitions](/extending/rules#configurations),
-which causes their configuration to differ from their dependents'. This flag
-allows you to query a target as if it were built as a dependency or a transitive
-dependency of another target. For example:
-
-```
-# x/BUILD
-genrule(
-    name = "my_gen",
-    srcs = ["x.in"],
-    outs = ["x.cc"],
-    cmd = "$(locations :tool) $< >$@",
-    tools = [":tool"],
-)
-cc_binary(
-    name = "tool",
-    srcs = ["tool.cpp"],
-)
-```
-
-Genrules configure their tools in the
-[exec configuration](/extending/rules#configurations),
-so the following queries would produce the following outputs:
-
-| Query                                                   | Target Built | Output                     |
-| ------------------------------------------------------- | ------------ | -------------------------- |
-| `bazel cquery "//x:tool"`                                | `//x:tool`   | `//x:tool (target config)` |
-| `bazel cquery "//x:tool" --universe_scope="//x:my_gen"`  | `//x:my_gen` | `//x:tool (exec config)`   |
-
-If this flag is set, its contents are built. _If it's not set, all targets
-mentioned in the query expression are built_ instead. The transitive closure of the
-built targets is used as the universe of the query. Either way, the targets to
-be built must be buildable at the top level (that is, compatible with top-level
-options). `cquery` returns results in the transitive closure of these
-top-level targets.
-
-Even if it's possible to build all targets in a query expression at the top
-level, it may be beneficial not to do so. For example, explicitly setting
-`--universe_scope` could prevent building targets multiple times in
-configurations you don't care about. It could also help specify which configuration version of a
-target you're looking for (since it's not currently possible
-to fully specify this any other way). You should set this flag
-if your query expression is more complex than `deps(//foo)`.
-
-#### `--implicit_deps` (boolean, default=True)
-
-Setting this flag to false filters out all results that aren't explicitly set in
-the BUILD file and are instead set elsewhere by Bazel. This includes filtering resolved
-toolchains.
-
-#### `--tool_deps` (boolean, default=True)
-
-Setting this flag to false filters out all configured targets for which the
-path from the queried target to them crosses a transition between the target
-configuration and the
-[non-target configurations](/extending/rules#configurations).
-If the queried target is in the target configuration, setting `--notool_deps` will
-only return targets that are also in the target configuration. If the queried
-target is in a non-target configuration, setting `--notool_deps` will only return
-targets also in non-target configurations. This setting generally does not affect filtering
-of resolved toolchains.
-
-#### `--include_aspects` (boolean, default=True)
-
-Include dependencies added by [aspects](/extending/aspects).
-
-If this flag is disabled, `cquery somepath(X, Y)` and
-`cquery deps(X) | grep 'Y'` omit Y if X only depends on it through an aspect.
-
-## Output formats
-
-By default, cquery outputs results in a dependency-ordered list of label and configuration pairs.
-There are other options for exposing the results as well.
-
-### Transitions
-
-```
---transitions=lite
---transitions=full
-```
-
-Configuration [transitions](/extending/rules#configurations)
-are used to build targets underneath the top-level targets in different
-configurations than the top-level targets.
-
-For example, a target might impose a transition to the exec configuration on all
-dependencies in its `tools` attribute. These are known as attribute
-transitions. Rules can also impose transitions on their own configurations,
-known as rule class transitions. This output format outputs information about
-these transitions, such as what type they are and the effect they have on build
-options.
-
-This output format is triggered by the `--transitions` flag, which by default is
-set to `NONE`. It can be set to `FULL` or `LITE` mode. `FULL` mode outputs
-information about rule class transitions and attribute transitions, including a
-detailed diff of the options before and after the transition. `LITE` mode
-outputs the same information without the options diff.
-
-### Protocol message output
-
-```
---output=proto
-```
-
-This option causes the resulting targets to be printed in a binary protocol
-buffer form. 
The definition of the protocol buffer can be found at
-[src/main/protobuf/analysis_v2.proto](https://github.com/bazelbuild/bazel/blob/master/src/main/protobuf/analysis_v2.proto).
-
-`CqueryResult` is the top-level message containing the results of the cquery. It
-has a list of `ConfiguredTarget` messages and a list of `Configuration`
-messages. Each `ConfiguredTarget` has a `configuration_id` whose value is equal
-to that of the `id` field from the corresponding `Configuration` message.
-
-#### --[no]proto:include_configurations
-
-By default, cquery results return configuration information as part of each
-configured target. If you'd like to omit this information and get proto output
-that is formatted exactly like query's proto output, set this flag to false.
-
-See [query's proto output documentation](/query/language#output-formats)
-for more proto output-related options.
-
-Note: While selects are resolved both at the top level of returned
-targets and within attributes, all possible inputs for selects are still
-included as `rule_input` fields.
-
-### Graph output
-
-```
---output=graph
-```
-
-This option generates output as a Graphviz-compatible .dot file. See `query`'s
-[graph output documentation](/query/language#display-result-graph) for details. `cquery`
-also supports [`--graph:node_limit`](/query/language#graph-nodelimit) and
-[`--graph:factored`](/query/language#graph-factored).
-
-### Files output
-
-```
---output=files
-```
-
-This option prints a list of the output files produced by each target matched
-by the query, similar to the list printed at the end of a `bazel build`
-invocation. The output contains only the files advertised in the requested
-output groups as determined by the
-[`--output_groups`](/reference/command-line-reference#flag--output_groups) flag.
-It does include source files.
-
-All paths emitted by this output format are relative to the
-[execroot](https://bazel.build/remote/output-directories), which can be obtained
-via `bazel info execution_root`. If the `bazel-out` convenience symlink exists,
-paths to files in the main repository also resolve relative to the workspace
-directory.
-
-Note: The output of `bazel cquery --output=files //pkg:foo` contains the output
-files of `//pkg:foo` in *all* configurations that occur in the build (also see
-the [section on target pattern evaluation](#target-pattern-evaluation)). If that
-is not desired, wrap your query in [`config(..., target)`](#config).
-
-### Defining the output format using Starlark
-
-```
---output=starlark
-```
-
-This output format calls a [Starlark](/rules/language)
-function for each configured target in the query result, and prints the value
-returned by the call. The `--starlark:file` flag specifies the location of a
-Starlark file that defines a function named `format` with a single parameter,
-`target`. This function is called for each [Target](/rules/lib/builtins/Target)
-in the query result. Alternatively, for convenience, you may specify just the
-body of a function declared as `def format(target): return expr` by using the
-`--starlark:expr` flag.
-
-#### 'cquery' Starlark dialect
-
-The cquery Starlark environment differs from a BUILD or .bzl file. It includes
-all core Starlark
-[built-in constants and functions](https://github.com/bazelbuild/starlark/blob/master/spec.md#built-in-constants-and-functions),
-plus a few cquery-specific ones described below, but not (for example) `glob`,
-`native`, or `rule`, and it does not support load statements. 
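-
-For instance, the following minimal sketch (assuming a buildable target named
-`//foo` in your workspace) uses only core built-ins plus the `target` parameter:
-
-```
-  bazel cquery //foo --output=starlark \
-    --starlark:expr="str(target.label) + ' has ' + str(len(target.files.to_list())) + ' default output(s)'"
-```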
-
-##### build_options(target)
-
-`build_options(target)` returns a map whose keys are build option identifiers (see
-[Configurations](/extending/config))
-and whose values are their Starlark values. Build options whose values are not legal Starlark
-values are omitted from this map.
-
-If the target is an input file, `build_options(target)` returns None, as input file
-targets have a null configuration.
-
-##### providers(target)
-
-`providers(target)` returns a map whose keys are names of
-[providers](/extending/rules#providers)
-(for example, `"DefaultInfo"`) and whose values are their Starlark values. Providers
-whose values are not legal Starlark values are omitted from this map.
-
-#### Examples
-
-Print a space-separated list of the base names of all files produced by `//foo`:
-
-```
-  bazel cquery //foo --output=starlark \
-    --starlark:expr="' '.join([f.basename for f in target.files.to_list()])"
-```
-
-Print a space-separated list of the paths of all files produced by **rule** targets in
-`//bar` and its subpackages:
-
-```
-  bazel cquery 'kind(rule, //bar/...)' --output=starlark \
-    --starlark:expr="' '.join([f.path for f in target.files.to_list()])"
-```
-
-Print a list of the mnemonics of all actions registered by `//foo`:
-
-```
-  bazel cquery //foo --output=starlark \
-    --starlark:expr="[a.mnemonic for a in target.actions]"
-```
-
-Print a list of compilation outputs registered by a `cc_library` `//baz`:
-
-```
-  bazel cquery //baz --output=starlark \
-    --starlark:expr="[f.path for f in target.output_groups.compilation_outputs.to_list()]"
-```
-
-Print the value of the command line option `--javacopt` when building `//foo`:
-
-```
-  bazel cquery //foo --output=starlark \
-    --starlark:expr="build_options(target)['//command_line_option:javacopt']"
-```
-
-Print the label of each target with exactly one output. This example uses
-Starlark functions defined in a file.
-
-```
-  $ cat example.cquery
-
-  def has_one_output(target):
-    return len(target.files.to_list()) == 1
-
-  def format(target):
-    if has_one_output(target):
-      return target.label
-    else:
-      return ""
-
-  $ bazel cquery //baz --output=starlark --starlark:file=example.cquery
-```
-
-Print the label of each target which is strictly Python 3. This example uses
-Starlark functions defined in a file.
-
-```
-  $ cat example.cquery
-
-  def format(target):
-    p = providers(target)
-    py_info = p.get("PyInfo")
-    if py_info and py_info.has_py3_only_sources:
-      return target.label
-    else:
-      return ""
-
-  $ bazel cquery //baz --output=starlark --starlark:file=example.cquery
-```
-
-Extract a value from a user-defined provider:
-
-```
-  $ cat some_package/my_rule.bzl
-
-  MyRuleInfo = provider(fields={"color": "the name of a color"})
-
-  def _my_rule_impl(ctx):
-    ...
-    return [MyRuleInfo(color="red")]
-
-  my_rule = rule(
-      implementation = _my_rule_impl,
-      attrs = {...},
-  )
-
-  $ cat example.cquery
-
-  def format(target):
-    p = providers(target)
-    my_rule_info = p.get("//some_package:my_rule.bzl%MyRuleInfo")
-    if my_rule_info:
-      return my_rule_info.color
-    return ""
-
-  $ bazel cquery //baz --output=starlark --starlark:file=example.cquery
-```
-
-## cquery vs. query
-
-`cquery` and `query` complement each other and excel in
-different niches. Consider the following to decide which is right for you:
-
-* `cquery` follows specific `select()` branches to
-  model the exact graph you build. `query` doesn't know which
-  branch the build chooses, so it overapproximates by including all branches. 
-
-* `cquery`'s precision requires building more of the graph than
-  `query` does. Specifically, `cquery`
-  evaluates _configured targets_ while `query` only
-  evaluates _targets_. This takes more time and uses more memory.
-* `cquery`'s interpretation of
-  the [query language](/query/language) introduces ambiguity
-  that `query` avoids. For example,
-  if `"//foo"` exists in two configurations, which one
-  should `cquery "deps(//foo)"` use?
-  The [`config`](#config) function can help with this.
-* As a newer tool, `cquery` lacks support for certain use
-  cases. See [Known issues](#known-issues) for details.
-
-## Known issues
-
-**All targets that `cquery` "builds" must have the same configuration.**
-
-Before evaluating queries, `cquery` triggers a build up to just
-before the point where build actions would execute. The targets it
-"builds" are by default selected from all labels that appear in the query
-expression (this can be overridden
-with [`--universe_scope`](#universe-scope)). These
-must have the same configuration.
-
-While these generally share the top-level "target" configuration,
-rules can change their own configuration with
-[incoming edge transitions](/extending/config#incoming-edge-transitions).
-This is where `cquery` falls short.
-
-Workaround: If possible, set `--universe_scope` to a stricter
-scope. For example:
-
-```
-# This command attempts to build the transitive closures of both //foo and
-# //bar. //bar uses an incoming edge transition to change its --cpu flag.
-$ bazel cquery 'somepath(//foo, //bar)'
-ERROR: Error doing post analysis query: Top-level targets //foo and //bar
-have different configurations (top-level targets with different
-configurations is not supported)
-
-# This command only builds the transitive closure of //foo, under which
-# //bar should exist in the correct configuration.
-$ bazel cquery 'somepath(//foo, //bar)' --universe_scope=//foo
-```
-
-**No support for [`--output=xml`](/query/language#xml).**
-
-**Non-deterministic output.**
-
-`cquery` does not automatically wipe the build graph from
-previous commands and is therefore prone to picking up results from past
-queries. For example, `genrule` exerts an exec transition on
-its `tools` attribute - that is, it configures its tools in the
-[exec configuration](/extending/rules#configurations).
-
-You can see the lingering effects of that transition below.
-
-```
-$ cat > foo/BUILD <<EOF
-genrule(
-    name = "my_gen",
-    srcs = ["x.in"],
-    outs = ["x.cc"],
-    cmd = "$(locations :tool) $< >$@",
-    tools = [":tool"],
-)
-cc_library(
-    name = "tool",
-)
-EOF

-$ bazel cquery "//foo:tool"
-tool (target_config)

-$ bazel cquery "deps(//foo:my_gen)"
-my_gen (target_config)
-tool (exec_config)
-...

-$ bazel cquery "//foo:tool"
-tool (exec_config)
-```
-
-Workaround: change any startup option to force re-analysis of configured targets.
-For example, add `--test_arg=` to your build command.
-
-## Troubleshooting
-
-### Recursive target patterns (`/...`)
-
-If you encounter:
-
-```
-$ bazel cquery --universe_scope=//foo:app "somepath(//foo:app, //foo/...)"
-ERROR: Error doing post analysis query: Evaluation failed: Unable to load package '[foo]'
-because package is not in scope. Check that all target patterns in query expression are within the
---universe_scope of this query.
-```
-
-this incorrectly suggests package `//foo` isn't in scope even though
-`--universe_scope=//foo:app` includes it. This is due to design limitations in
-`cquery`. 
As a workaround, explicitly include `//foo/...` in the universe
-scope:
-
-```
-$ bazel cquery --universe_scope=//foo:app,//foo/... "somepath(//foo:app, //foo/...)"
-```
-
-If that doesn't work (for example, because some target in `//foo/...` can't
-build with the chosen build flags), manually unwrap the pattern into its
-constituent packages with a pre-processing query:
-
-```
-# Replace "//foo/..." with a subshell query call (not cquery!) outputting each package, piped into
-# a sed call converting "<pkg>" to "//<pkg>:*", piped into a "+"-delimited line merge.
-# Output looks like "//foo:*+//foo/bar:*+//foo/baz:*".
-#
-$ bazel cquery --universe_scope=//foo:app "somepath(//foo:app, $(bazel query //foo/...
---output=package | sed -e 's/^/\/\//' -e 's/$/:*/' | paste -sd "+" -))"
-```
diff --git a/8.4.2/reference/glossary.mdx b/8.4.2/reference/glossary.mdx
deleted file mode 100644
index 3b0b497..0000000
--- a/8.4.2/reference/glossary.mdx
+++ /dev/null
@@ -1,715 +0,0 @@
----
-title: 'Bazel Glossary'
----
-
-
-
-### Action
-
-A command to run during the build, for example, a call to a compiler that takes
-[artifacts](#artifact) as inputs and produces other artifacts as outputs.
-Includes metadata like the command line arguments, action key, environment
-variables, and declared input/output artifacts.
-
-**See also:** [Rules documentation](/extending/rules#actions)
-
-### Action cache
-
-An on-disk cache that stores a mapping of executed [actions](#action) to the
-outputs they created. The cache key is known as the [action key](#action-key). A
-core component of Bazel's incrementality model. The cache is stored in the
-output base directory and thus survives Bazel server restarts.
-
-### Action graph
-
-An in-memory graph of [actions](#action) and the [artifacts](#artifact) that
-these actions read and generate. The graph might include artifacts that exist as
-source files (for example, in the file system) as well as generated
-intermediate/final artifacts that are not mentioned in `BUILD` files. Produced
-during the [analysis phase](#analysis-phase) and used during the [execution
-phase](#execution-phase).
-
-### Action graph query (aquery)
-
-A [query](#query-concept) tool that can query over build [actions](#action).
-This provides the ability to analyze how [build rules](#rule) translate into the
-actual work builds do.
-
-### Action key
-
-The cache key of an [action](#action). Computed based on action metadata, which
-might include the command to be executed in the action, compiler flags, library
-locations, or system headers, depending on the action. Enables Bazel to cache or
-invalidate individual actions deterministically.
-
-### Analysis phase
-
-The second phase of a build. Processes the [target graph](#target-graph)
-specified in [`BUILD` files](#build-file) to produce an in-memory [action
-graph](#action-graph) that determines the order of actions to run during the
-[execution phase](#execution-phase). This is the phase in which rule
-implementations are evaluated.
-
-### Artifact
-
-A source file or a generated file. Can also be a directory of files, known as
-[tree artifacts](#tree-artifact).
-
-An artifact may be an input to multiple actions, but must be generated by
-at most one action.
-
-An artifact that corresponds to a [file target](#target) can be addressed by a
-label.
-
-### Aspect
-
-A mechanism for rules to create additional [actions](#action) in their
-dependencies. 
For example, if target A depends on B, one can apply an aspect on -A that traverses *up* a dependency edge to B, and runs additional actions in B -to generate and collect additional output files. These additional actions are -cached and reused between targets requiring the same aspect. Created with the -`aspect()` Starlark Build API function. Can be used, for example, to generate -metadata for IDEs, and create actions for linting. - -**See also:** [Aspects documentation](/extending/aspects) - -### Aspect-on-aspect - -A composition mechanism whereby aspects can be applied to the results -of other aspects. For example, an aspect that generates information for use by -IDEs can be applied on top of an aspect that generates `.java` files from a -proto. - -For an aspect `A` to apply on top of aspect `B`, the [providers](#provider) that -`B` advertises in its [`provides`](/rules/lib/globals#aspect.provides) attribute -must match what `A` declares it wants in its [`required_aspect_providers`](/rules/lib/globals#aspect.required_aspect_providers) -attribute. - -### Attribute - -A parameter to a [rule](#rule), used to express per-target build information. -Examples include `srcs`, `deps`, and `copts`, which respectively declare a -target's source files, dependencies, and custom compiler options. The particular -attributes available for a given target depend on its rule type. - -### .bazelrc - -Bazel’s configuration file used to change the default values for [startup -flags](#startup-flags) and [command flags](#command-flags), and to define common -groups of options that can then be set together on the Bazel command line using -a `--config` flag. Bazel can combine settings from multiple bazelrc files -(systemwide, per-workspace, per-user, or from a custom location), and a -`bazelrc` file may also import settings from other `bazelrc` files. - -### Blaze - -The Google-internal version of Bazel. Google’s main build system for its -mono-repository. - -### BUILD File - -A `BUILD` file is the main configuration file that tells Bazel what software -outputs to build, what their dependencies are, and how to build them. Bazel -takes a `BUILD` file as input and uses the file to create a graph of dependencies -and to derive the actions that must be completed to build intermediate and final -software outputs. A `BUILD` file marks a directory and any sub-directories not -containing a `BUILD` file as a [package](#package), and can contain -[targets](#target) created by [rules](#rule). The file can also be named -`BUILD.bazel`. - -### BUILD.bazel File - -See [`BUILD` File](#build-file). Takes precedence over a `BUILD` file in the same -directory. - -### .bzl File - -A file that defines rules, [macros](#macro), and constants written in -[Starlark](#starlark). These can then be imported into [`BUILD` -files](#build-file) using the `load()` function. - -// TODO: ### Build event protocol - -// TODO: ### Build flag - -### Build graph - -The dependency graph that Bazel constructs and traverses to perform a build. -Includes nodes like [targets](#target), [configured -targets](#configured-target), [actions](#action), and [artifacts](#artifact). A -build is considered complete when all [artifacts](#artifact) on which a set of -requested targets depend are verified as up-to-date. - -### Build setting - -A Starlark-defined piece of [configuration](#configuration). -[Transitions](#transition) can set build settings to change a subgraph's -configuration. 
If exposed to the user as a [command-line flag](#command-flags),
-also known as a build flag.
-
-### Clean build
-
-A build that doesn't use the results of earlier builds. This is generally slower
-than an [incremental build](#incremental-build) but commonly considered to be
-more [correct](#correctness). Bazel guarantees both clean and incremental builds
-are always correct.
-
-### Client-server model
-
-The `bazel` command-line client automatically starts a background server on the
-local machine to execute Bazel [commands](#command). The server persists across
-commands but automatically stops after a period of inactivity (or explicitly via
-bazel shutdown). Splitting Bazel into a server and client helps amortize JVM
-startup time and supports faster [incremental builds](#incremental-build)
-because the [action graph](#action-graph) remains in memory across commands.
-
-### Command
-
-Used on the command line to invoke different Bazel functions, like `bazel
-build`, `bazel test`, `bazel run`, and `bazel query`.
-
-### Command flags
-
-A set of flags specific to a [command](#command). Command flags are specified
-*after* the command (`bazel build <command flags>`). Flags can be applicable to
-one or more commands. For example, `--configure` is a flag exclusively for the
-`bazel sync` command, but `--keep_going` is applicable to `sync`, `build`,
-`test` and more. Flags are often used for [configuration](#configuration)
-purposes, so changes in flag values can cause Bazel to invalidate in-memory
-graphs and restart the [analysis phase](#analysis-phase).
-
-### Configuration
-
-Information outside of [rule](#rule) definitions that impacts how rules generate
-[actions](#action). Every build has at least one configuration specifying the
-target platform, action environment variables, and command-line [build
-flags](#command-flags). [Transitions](#transition) may create additional
-configurations, such as for host tools or cross-compilation.
-
-**See also:** [Configurations](/extending/rules#configurations)
-
-// TODO: ### Configuration fragment
-
-### Configuration trimming
-
-The process of only including the pieces of [configuration](#configuration) a
-target actually needs. For example, if you build Java binary `//:j` with C++
-dependency `//:c`, it's wasteful to include the value of `--javacopt` in the
-configuration of `//:c` because changing `--javacopt` unnecessarily breaks C++
-build cacheability.
-
-### Configured query (cquery)
-
-A [query](#query-concept) tool that queries over [configured
-targets](#configured-target) (after the [analysis phase](#analysis-phase)
-completes). This means `select()` and [build flags](#command-flags) (such as
-`--platforms`) are accurately reflected in the results.
-
-**See also:** [cquery documentation](/query/cquery)
-
-### Configured target
-
-The result of evaluating a [target](#target) with a
-[configuration](#configuration). The [analysis phase](#analysis-phase) produces
-this by combining the build's options with the targets that need to be built.
-For example, if `//:foo` builds for two different architectures in the same
-build, it has two configured targets, one for each architecture.
-
-### Correctness
-
-A build is correct when its output faithfully reflects the state of its
-transitive inputs. To achieve correct builds, Bazel strives to be
-[hermetic](#hermeticity) and reproducible, and to make [build
-analysis](#analysis-phase) and [action execution](#execution-phase)
-deterministic.
-
-### Dependency
-
-A directed edge between two [targets](#target). 
A target `//:foo` has a *target
-dependency* on target `//:bar` if `//:foo`'s attribute values contain a
-reference to `//:bar`. `//:foo` has an *action dependency* on `//:bar` if an
-action in `//:foo` depends on an input [artifact](#artifact) created by an
-action in `//:bar`.
-
-In certain contexts, it could also refer to an _external dependency_; see
-[modules](#module).
-
-### Depset
-
-A data structure for collecting data on transitive dependencies. Optimized so
-that merging depsets is time and space efficient, because it’s common to have
-very large depsets (hundreds of thousands of files). Implemented to
-recursively refer to other depsets for space efficiency reasons. [Rule](#rule)
-implementations should not "flatten" depsets by converting them to lists unless
-the rule is at the top level of the build graph. Flattening large depsets incurs
-huge memory consumption. Also known as *nested sets* in Bazel's internal
-implementation.
-
-**See also:** [Depset documentation](/extending/depsets)
-
-### Disk cache
-
-A local on-disk blob store for the remote caching feature. Can be used in
-conjunction with an actual remote blob store.
-
-### Distdir
-
-A read-only directory containing files that Bazel would otherwise fetch from the
-internet using repository rules. Enables builds to run fully offline.
-
-### Dynamic execution
-
-An execution strategy that selects between local and remote execution based on
-various heuristics, and uses the execution results of the faster successful
-method. Certain [actions](#action) are executed faster locally (for example,
-linking) and others are faster remotely (for example, highly parallelizable
-compilation). A dynamic execution strategy can provide the best possible
-incremental and clean build times.
-
-### Execution phase
-
-The third phase of a build. Executes the [actions](#action) in the [action
-graph](#action-graph) created during the [analysis phase](#analysis-phase).
-These actions invoke executables (compilers, scripts) to read and write
-[artifacts](#artifact). *Spawn strategies* control how these actions are
-executed: locally, remotely, dynamically, sandboxed, docker, and so on.
-
-### Execution root
-
-A directory in the [workspace](#workspace)’s [output base](#output-base)
-directory where local [actions](#action) are executed in
-non-[sandboxed](#sandboxing) builds. The directory contents are mostly symlinks
-of input [artifacts](#artifact) from the workspace. The execution root also
-contains symlinks to external repositories as other inputs and the `bazel-out`
-directory to store outputs. Prepared during the [loading phase](#loading-phase)
-by creating a *symlink forest* of the directories that represent the transitive
-closure of packages on which a build depends. Accessible with `bazel info
-execution_root` on the command line.
-
-### File
-
-See [Artifact](#artifact).
-
-### Hermeticity
-
-A build is hermetic if there are no external influences on its build and test
-operations, which helps to make sure that results are deterministic and
-[correct](#correctness). For example, hermetic builds typically disallow network
-access to actions, restrict access to declared inputs, use fixed timestamps and
-timezones, restrict access to environment variables, and use fixed seeds for
-random number generators.
-
-### Incremental build
-
-An incremental build reuses the results of earlier builds to reduce build time
-and resource usage. Dependency checking and caching aim to produce correct
-results for this type of build. 
An incremental build is the opposite of a clean -build. - -// TODO: ### Install base - -### Label - -An identifier for a [target](#target). Generally has the form -`@repo//path/to/package:target`, where `repo` is the (apparent) name of the -[repository](#repository) containing the target, `path/to/package` is the path -to the directory that contains the [`BUILD` file](#build-file) declaring the -target (this directory is also known as the [package](#package)), and `target` -is the name of the target itself. Depending on the situation, parts of this -syntax may be omitted. - -**See also**: [Labels](/concepts/labels) - -### Loading phase - -The first phase of a build where Bazel executes [`BUILD` files](#build-file) to -create [packages](#package). [Macros](#macro) and certain functions like -`glob()` are evaluated in this phase. Interleaved with the second phase of the -build, the [analysis phase](#analysis-phase), to build up a [target -graph](#target-graph). - -### Legacy macro - -A flavor of [macro](#macro) which is declared as an ordinary -[Starlark](#starlark) function, and which runs as a side effect of executing a -`BUILD` file. - -Legacy macros can do anything a function can. This means they can be convenient, -but they can also be harder to read, write, and use. A legacy macro might -unexpectedly mutate its arguments or fail when given a `select()` or ill-typed -argument. - -Contrast with [symbolic macros](#symbolic-macro). - -**See also:** [Legacy macro documentation](/extending/legacy-macros) - -### Macro - -A mechanism to compose multiple [rule](#rule) target declarations together under -a single [Starlark](#starlark) callable. Enables reusing common rule declaration -patterns across `BUILD` files. Expanded to the underlying rule target -declarations during the [loading phase](#loading-phase). - -Comes in two flavors: [symbolic macros](#symbolic-macro) (since Bazel 8) and -[legacy macros](#legacy-macro). - -### Mnemonic - -A short, human-readable string selected by a rule author to quickly understand -what an [action](#action) in the rule is doing. Mnemonics can be used as -identifiers for *spawn strategy* selections. Some examples of action mnemonics -are `Javac` from Java rules, `CppCompile` from C++ rules, and -`AndroidManifestMerger` from Android rules. - -### Module - -A Bazel project that can have multiple versions, each of which can have -dependencies on other modules. This is analogous to familiar concepts in other -dependency management systems, such as a Maven _artifact_, an npm _package_, a -Go _module_, or a Cargo _crate_. Modules form the backbone of Bazel's external -dependency management system. - -Each module is backed by a [repo](#repository) with a `MODULE.bazel` file at its -root. This file contains metadata about the module itself (such as its name and -version), its direct dependencies, and various other data including toolchain -registrations and [module extension](#module-extension) input. - -Module metadata is hosted in Bazel registries. - -**See also:** [Bazel modules](/external/module) - -### Module Extension - -A piece of logic that can be run to generate [repos](#repository) by reading -inputs from across the [module](#module) dependency graph and invoking [repo -rules](#repository-rule). Module extensions have capabilities similar to repo -rules, allowing them to access the internet, perform file I/O, and so on. 
-
-**See also:** [Module extensions](/external/extension)
-
-### Native rules
-
-[Rules](#rule) that are built into Bazel and implemented in Java. Such rules
-appear in [`.bzl` files](#bzl-file) as functions in the native module (for
-example, `native.cc_library` or `native.java_library`). User-defined rules
-(non-native) are created using [Starlark](#starlark).
-
-### Output base
-
-A [workspace](#workspace)-specific directory to store Bazel output files. Used
-to separate outputs from the *workspace*'s source tree (the [main
-repo](#repository)). Located in the [output user root](#output-user-root).
-
-### Output groups
-
-A group of files that is expected to be built when Bazel finishes building a
-target. [Rules](#rule) put their usual outputs in the "default output group"
-(e.g. the `.jar` file of a `java_library`, `.a` and `.so` for `cc_library`
-targets). The default output group is the output group whose
-[artifacts](#artifact) are built when a target is requested on the command line.
-Rules can define more named output groups that can be explicitly specified in
-[`BUILD` files](#build-file) (`filegroup` rule) or the command line
-(`--output_groups` flag).
-
-### Output user root
-
-A user-specific directory to store Bazel's outputs. The directory name is
-derived from the user's system username. Prevents output file collisions if
-multiple users are building the same project on the system at the same time.
-Contains subdirectories corresponding to build outputs of individual workspaces,
-also known as [output bases](#output-base).
-
-### Package
-
-The set of [targets](#target) defined by a [`BUILD` file](#build-file). A
-package's name is the `BUILD` file's path relative to the [repo](#repository)
-root. A package can contain subpackages, or subdirectories containing `BUILD`
-files, thus forming a package hierarchy.
-
-### Package group
-
-A [target](#target) representing a set of packages. Often used in `visibility`
-attribute values.
-
-### Platform
-
-A "machine type" involved in a build. This includes the machine Bazel runs on
-(the "host" platform), the machines build tools execute on ("exec" platforms),
-and the machines targets are built for ("target platforms").
-
-### Provider
-
-A schema describing a unit of information to pass between
-[rule targets](#rule-target) along dependency relationships. Typically this
-contains information like compiler options, transitive source or output files,
-and build metadata. Frequently used in conjunction with [depsets](#depset) to
-efficiently store accumulated transitive data. An example of a built-in provider
-is `DefaultInfo`.
-
-Note: The object holding specific data for a given rule target is
-referred to as a "provider instance", although sometimes this is conflated with
-"provider".
-
-**See also:** [Provider documentation](/extending/rules#providers)
-
-### Query (concept)
-
-The process of analyzing a [build graph](#build-graph) to understand
-[target](#target) properties and dependency structures. Bazel supports three
-query variants: [query](#query-command), [cquery](#configured-query), and
-[aquery](#action-graph-query).
-
-### query (command)
-
-A [query](#query-concept) tool that operates over the build's post-[loading
-phase](#loading-phase) [target graph](#target-graph). This is relatively fast,
-but can't analyze the effects of `select()`, [build flags](#command-flags),
-[artifacts](#artifact), or build [actions](#action). 
- -**See also:** [Query how-to](/query/guide), [Query reference](/query/language) - -### Repository - -A directory tree with a boundary marker file at its root, containing source -files that can be used in a Bazel build. Often shortened to just **repo**. - -A repo boundary marker file can be `MODULE.bazel` (signaling that this repo -represents a Bazel module), `REPO.bazel`, or in legacy contexts, `WORKSPACE` or -`WORKSPACE.bazel`. Any repo boundary marker file will signify the boundary of a -repo; multiple such files can coexist in a directory. - -The *main repo* is the repo in which the current Bazel command is being run. - -*External repos* are defined by specifying [modules](#module) in `MODULE.bazel` -files, or invoking [repo rules](#repository-rule) in [module -extensions](#module-extension). They can be fetched on demand to a predetermined -"magical" location on disk. - -Each repo has a unique, constant *canonical* name, and potentially different -*apparent* names when viewed from other repos. - -**See also**: [External dependencies overview](/external/overview) - -### Repository cache - -A shared content-addressable cache of files downloaded by Bazel for builds, -shareable across [workspaces](#workspace). Enables offline builds after the -initial download. Commonly used to cache files downloaded through [repository -rules](#repository-rule) like `http_archive` and repository rule APIs like -`repository_ctx.download`. Files are cached only if their SHA-256 checksums are -specified for the download. - -### Repository rule - -A schema for repository definitions that tells Bazel how to materialize (or -"fetch") a [repository](#repository). Often shortened to just **repo rule**. -Repo rules are invoked by Bazel internally to define repos backed by -[modules](#module), or can be invoked by [module extensions](#module-extension). -Repo rules can access the internet or perform file I/O; the most common repo -rule is `http_archive` to download an archive containing source files from the -internet. - -**See also:** [Repo rule documentation](/extending/repo) - -### Reproducibility - -The property of a build or test that a set of inputs to the build or test will -always produce the same set of outputs every time, regardless of time, method, -or environment. Note that this does not necessarily imply that the outputs are -[correct](#correctness) or the desired outputs. - -### Rule - -A schema for defining [rule targets](#rule-target) in a `BUILD` file, such as -`cc_library`. From the perspective of a `BUILD` file author, a rule consists of -a set of [attributes](#attributes) and black box logic. The logic tells the -rule target how to produce output [artifacts](#artifact) and pass information to -other rule targets. From the perspective of `.bzl` authors, rules are the -primary way to extend Bazel to support new programming languages and -environments. - -Rules are instantiated to produce rule targets in the -[loading phase](#loading-phase). In the [analysis phase](#analysis-phase) rule -targets communicate information to their downstream dependencies in the form of -[providers](#provider), and register [actions](#action) describing how to -generate their output artifacts. These actions are run in the [execution -phase](#execution-phase). - -Note: Historically the term "rule" has been used to refer to a rule target. -This usage was inherited from tools like Make, but causes confusion and should -be avoided for Bazel. 
-
-**See also:** [Rules documentation](/extending/rules)
-
-### Rule target
-
-A [target](#target) that is an instance of a rule. Contrasts with file targets
-and package groups. Not to be confused with [rule](#rule).
-
-### Runfiles
-
-The runtime dependencies of an executable [target](#target). Most commonly, the
-executable is the executable output of a test rule, and the runfiles are runtime
-data dependencies of the test. Before the invocation of the executable (during
-bazel test), Bazel prepares the tree of runfiles alongside the test executable
-according to their source directory structure.
-
-**See also:** [Runfiles documentation](/extending/rules#runfiles)
-
-### Sandboxing
-
-A technique to isolate a running [action](#action) inside a restricted and
-temporary [execution root](#execution-root), helping to ensure that it doesn’t
-read undeclared inputs or write undeclared outputs. Sandboxing greatly improves
-[hermeticity](#hermeticity), but usually has a performance cost, and requires
-support from the operating system. The performance cost depends on the platform.
-On Linux, it's not significant, but on macOS it can make sandboxing unusable.
-
-### Skyframe
-
-[Skyframe](/reference/skyframe) is the core parallel, functional, and incremental evaluation framework of Bazel.
-
-// TODO: ### Spawn strategy
-
-### Stamping
-
-A feature to embed additional information into Bazel-built
-[artifacts](#artifact). For example, this can be used for source control, build
-time and other workspace or environment-related information for release builds.
-Enabled through the `--workspace_status_command` flag and [rules](/extending/rules) that
-support the stamp attribute.
-
-### Starlark
-
-The extension language for writing [rules](/extending/rules) and [macros](#macro). A
-restricted subset of Python (syntactically and grammatically) aimed at the
-purpose of configuration, and for better performance. Uses the [`.bzl`
-file](#bzl-file) extension. [`BUILD` files](#build-file) use an even more
-restricted version of Starlark (such as no `def` function definitions).
-Starlark was formerly known as Skylark.
-
-**See also:** [Starlark language documentation](/rules/language)
-
-// TODO: ### Starlark rules
-
-// TODO: ### Starlark rule sandwich
-
-### Startup flags
-
-The set of flags specified between `bazel` and the [command](#query-command),
-for example, `bazel --host_jvm_debug build`. These flags modify the
-[configuration](#configuration) of the Bazel server, so any modification to
-startup flags causes a server restart. Startup flags are not specific to any
-command.
-
-### Symbolic macro
-
-A flavor of [macro](#macro) which is declared with a [rule](#rule)-like
-[attribute](#attribute) schema, allows hiding internal declared
-[targets](#target) from their own package, and enforces a predictable naming
-pattern on the targets that the macro declares. Designed to avoid some of the
-problems seen in large [legacy macro](#legacy-macro) codebases.
-
-**See also:** [Symbolic macro documentation](/extending/macros)
-
-### Target
-
-An object that is defined in a [`BUILD` file](#build-file) and identified by a
-[label](#label). Targets represent the buildable units of a workspace from
-the perspective of the end user.
-
-A target that is declared by instantiating a [rule](#rule) is called a [rule
-target](#rule-target). Depending on the rule, these may be runnable (like
-`cc_binary`) or testable (like `cc_test`). 
Rule targets typically depend on
-other targets via their [attributes](#attribute) (such as `deps`); these
-dependencies form the basis of the [target graph](#target-graph).
-
-Aside from rule targets, there are also file targets and [package group](#package-group)
-targets. File targets correspond to [artifacts](#artifact) that are referenced
-within a `BUILD` file. As a special case, the `BUILD` file of any package is
-always considered a source file target in that package.
-
-Targets are discovered during the [loading phase](#loading-phase). During the
-[analysis phase](#analysis-phase), targets are associated with [build
-configurations](#configuration) to form [configured
-targets](#configured-target).
-
-### Target graph
-
-An in-memory graph of [targets](#target) and their dependencies. Produced during
-the [loading phase](#loading-phase) and used as an input to the [analysis
-phase](#analysis-phase).
-
-### Target pattern
-
-A way to specify a group of [targets](#target) on the command line. Commonly
-used patterns are `:all` (all rule targets), `:*` (all rule + file targets),
-`...` (current [package](#package) and all subpackages recursively). Can be used
-in combination, for example, `//...:*` means all rule and file targets in all
-packages recursively from the root of the [workspace](#workspace).
-
-### Tests
-
-Rule [targets](#target) instantiated from test rules, which therefore contain a
-test executable. A return code of zero from the completion of the executable
-indicates test success. The exact contract between Bazel and tests (such as test
-environment variables, test result collection methods) is specified in the [Test
-Encyclopedia](/reference/test-encyclopedia).
-
-### Toolchain
-
-A set of tools to build outputs for a language. Typically, a toolchain includes
-compilers, linkers, interpreters, and/or linters. A toolchain can also vary by
-platform, that is, a Unix compiler toolchain's components may differ for the
-Windows variant, even though the toolchain is for the same language. Selecting
-the right toolchain for the platform is known as toolchain resolution.
-
-### Top-level target
-
-A build [target](#target) is top-level if it’s requested on the Bazel command
-line. For example, if `//:foo` depends on `//:bar`, and `bazel build //:foo` is
-called, then for this build, `//:foo` is top-level, and `//:bar` isn’t
-top-level, although both targets will need to be built. An important difference
-between top-level and non-top-level targets is that [command
-flags](#command-flags) set on the Bazel command line (or via
-[.bazelrc](#bazelrc)) will set the [configuration](#configuration) for top-level
-targets, but might be modified by a [transition](#transition) for non-top-level
-targets.
-
-### Transition
-
-A mapping of [configuration](#configuration) state from one value to another.
-Enables [targets](#target) in the [build graph](#build-graph) to have different
-configurations, even if they were instantiated from the same [rule](#rule). A
-common usage of transitions is with *split* transitions, where certain parts of
-the [target graph](#target-graph) are forked with distinct configurations for
-each fork. For example, one can build an Android APK with native binaries
-compiled for ARM and x86 using split transitions in a single build.
-
-**See also:** [User-defined transitions](/extending/config#user-defined-transitions)
-
-### Tree artifact
-
-An [artifact](#artifact) that represents a collection of files. 
Since these -files are not themselves artifacts, an [action](#action) operating on them must -instead register the tree artifact as its input or output. - -### Visibility - -One of two mechanisms for preventing unwanted dependencies in the build system: -*target visibility* for controlling whether a [target](#target) can be depended -upon by other targets; and *load visibility* for controlling whether a `BUILD` -or `.bzl` file may load a given `.bzl` file. Without context, usually -"visibility" refers to target visibility. - -**See also:** [Visibility documentation](/concepts/visibility) - -### Workspace - -The environment shared by all Bazel commands run from the same [main -repository](#repository). - -Note that historically the concepts of "repository" and "workspace" have been -conflated; the term "workspace" has often been used to refer to the main -repository, and sometimes even used as a synonym of "repository". Such usage -should be avoided for clarity. diff --git a/8.4.2/reference/skyframe.mdx b/8.4.2/reference/skyframe.mdx deleted file mode 100644 index ba9149f..0000000 --- a/8.4.2/reference/skyframe.mdx +++ /dev/null @@ -1,198 +0,0 @@ ---- -title: 'Skyframe' ---- - - - -The parallel evaluation and incrementality model of Bazel. - -## Data model - -The data model consists of the following items: - -* `SkyValue`. Also called nodes. `SkyValues` are immutable objects that - contain all the data built over the course of the build and the inputs of - the build. Examples are: input files, output files, targets and configured - targets. -* `SkyKey`. A short immutable name to reference a `SkyValue`, for example, - `FILECONTENTS:/tmp/foo` or `PACKAGE://foo`. -* `SkyFunction`. Builds nodes based on their keys and dependent nodes. -* Node graph. A data structure containing the dependency relationship between - nodes. -* `Skyframe`. Code name for the incremental evaluation framework Bazel is - based on. - -## Evaluation - -A build is achieved by evaluating the node that represents the build request. - -First, Bazel finds the `SkyFunction` corresponding to the key of the top-level -`SkyKey`. The function then requests the evaluation of the nodes it needs to -evaluate the top-level node, which in turn result in other `SkyFunction` calls, -until the leaf nodes are reached. Leaf nodes are usually ones that represent -input files in the file system. Finally, Bazel ends up with the value of the -top-level `SkyValue`, some side effects (such as output files in the file -system) and a directed acyclic graph of the dependencies between the nodes -involved in the build. - -A `SkyFunction` can request `SkyKeys` in multiple passes if it cannot tell in -advance all of the nodes it needs to do its job. A simple example is evaluating -an input file node that turns out to be a symlink: the function tries to read -the file, realizes that it is a symlink, and thus fetches the file system node -representing the target of the symlink. But that itself can be a symlink, in -which case the original function will need to fetch its target, too. - -The functions are represented in the code by the interface `SkyFunction` and the -services provided to it by an interface called `SkyFunction.Environment`. These -are the things functions can do: - -* Request the evaluation of another node by way of calling `env.getValue`. If - the node is available, its value is returned, otherwise, `null` is returned - and the function itself is expected to return `null`. 
In the latter case,
-  the dependent node is evaluated, and then the original node builder is
-  invoked again, but this time the same `env.getValue` call will return a
-  non-`null` value.
-* Request the evaluation of multiple other nodes by calling `env.getValues()`.
-  This does essentially the same thing, except that the dependent nodes are
-  evaluated in parallel.
-* Do computation during their invocation.
-* Have side effects, for example, writing files to the file system. Care needs
-  to be taken that two different functions avoid stepping on each other's
-  toes. In general, write side effects (where data flows outwards from Bazel)
-  are okay, read side effects (where data flows inwards into Bazel without a
-  registered dependency) are not, because they are an unregistered dependency
-  and as such, can cause incorrect incremental builds.
-
-Well-behaved `SkyFunction` implementations avoid accessing data in any other way
-than requesting dependencies (such as by directly reading the file system),
-because that results in Bazel not registering the data dependency on the file
-that was read, thus resulting in incorrect incremental builds.
-
-Once a function has enough data to do its job, it should return a non-`null`
-value indicating completion.
-
-This evaluation strategy has a number of benefits:
-
-* Hermeticity. If functions only request input data by way of depending on
-  other nodes, Bazel can guarantee that if the input state is the same, the
-  same data is returned. If all sky functions are deterministic, this means
-  that the whole build will also be deterministic.
-* Correct and perfect incrementality. If all the input data of all functions
-  is recorded, Bazel can invalidate only the exact set of nodes that need to
-  be invalidated when the input data changes.
-* Parallelism. Since functions can only interact with each other by way of
-  requesting dependencies, functions that don't depend on each other can be
-  run in parallel and Bazel can guarantee that the result is the same as if
-  they were run sequentially.
-
-## Incrementality
-
-Since functions can only access input data by depending on other nodes, Bazel
-can build up a complete data flow graph from the input files to the output
-files, and use this information to only rebuild those nodes that actually need
-to be rebuilt: the reverse transitive closure of the set of changed input files.
-
-In particular, two possible incrementality strategies exist: the bottom-up one
-and the top-down one. Which one is optimal depends on what the dependency graph
-looks like.
-
-* During bottom-up invalidation, after a graph is built and the set of changed
-  inputs is known, all the nodes are invalidated that transitively depend on
-  changed files. This is optimal if the same top-level node will be built
-  again. Note that bottom-up invalidation requires running `stat()` on all
-  input files of the previous build to determine if they were changed. This
-  can be improved by using `inotify` or a similar mechanism to learn about
-  changed files.
-
-* During top-down invalidation, the transitive closure of the top-level node
-  is checked and only those nodes are kept whose transitive closure is clean.
-  This is better if the node graph is large, but the next build only needs a
-  small subset of it: bottom-up invalidation would invalidate the larger graph
-  of the first build, unlike top-down invalidation, which just walks the small
-  graph of the second build.
-
-Bazel only does bottom-up invalidation. 
-
-To get further incrementality, Bazel uses _change pruning_: if a node is
-invalidated, but upon rebuild, it is discovered that its new value is the same
-as its old value, the nodes that were invalidated due to a change in this node
-are "resurrected".
-
-This is useful, for example, if one changes a comment in a C++ file: then the
-`.o` file generated from it will be the same, thus, it is unnecessary to call
-the linker again.
-
-## Incremental Linking / Compilation
-
-The main limitation of this model is that the invalidation of a node is an
-all-or-nothing affair: when a dependency changes, the dependent node is always
-rebuilt from scratch, even if a better algorithm would exist that would mutate
-the old value of the node based on the changes. A few examples where this would
-be useful:
-
-* Incremental linking
-* When a single class file changes in a JAR file, it is possible to
-  modify the JAR file in-place instead of building it from scratch again.
-
-The reason why Bazel does not support these things in a principled way
-is twofold:
-
-* There were limited performance gains.
-* It is difficult to validate that the result of the mutation is the same as
-  that of a clean rebuild, and Google values builds that are bit-for-bit
-  repeatable.
-
-Until now, it was possible to achieve good enough performance by decomposing an
-expensive build step and achieving partial re-evaluation that way. For example,
-in an Android app, you can split all the classes into multiple groups and dex
-them separately. This way, if classes in a group are unchanged, the dexing does
-not have to be redone.
-
-## Mapping to Bazel concepts
-
-This is a high-level summary of the key `SkyFunction` and `SkyValue`
-implementations Bazel uses to perform a build:
-
-* **FileStateValue**. The result of an `lstat()`. For existent files, the
-  function also computes additional information in order to detect changes to
-  the file. This is the lowest level node in the Skyframe graph and has no
-  dependencies.
-* **FileValue**. Used by anything that cares about the actual contents or
-  resolved path of a file. Depends on the corresponding `FileStateValue` and
-  any symlinks that need to be resolved (such as the `FileValue` for `a/b`
-  needs the resolved path of `a` and the resolved path of `a/b`). The
-  distinction between `FileValue` and `FileStateValue` is important because
-  the latter can be used in cases where the contents of the file are not
-  actually needed. For example, the file contents are irrelevant when
-  evaluating file system globs (such as `srcs=glob(["*/*.java"])`).
-* **DirectoryListingStateValue**. The result of `readdir()`. Like
-  `FileStateValue`, this is the lowest level node and has no dependencies.
-* **DirectoryListingValue**. Used by anything that cares about the entries of
-  a directory. Depends on the corresponding `DirectoryListingStateValue`, as
-  well as the associated `FileValue` of the directory.
-* **PackageValue**. Represents the parsed version of a BUILD file. Depends on
-  the `FileValue` of the associated `BUILD` file, and also transitively on any
-  `DirectoryListingValue` that is used to resolve the globs in the package
-  (the data structure representing the contents of a `BUILD` file internally).
-* **ConfiguredTargetValue**. Represents a configured target, which is a tuple
-  of the set of actions generated during the analysis of a target and
-  information provided to dependent configured targets. 
Depends on the - `PackageValue` the corresponding target is in, the `ConfiguredTargetValues` - of direct dependencies, and a special node representing the build - configuration. -* **ArtifactValue**. Represents a file in the build, be it a source or an - output artifact. Artifacts are almost equivalent to files, and are used to - refer to files during the actual execution of build steps. Source files - depend on the `FileValue` of the associated node, and output artifacts - depend on the `ActionExecutionValue` of whatever action generates the - artifact. -* **ActionExecutionValue**. Represents the execution of an action. Depends on - the `ArtifactValues` of its input files. The action it executes is contained - within its SkyKey, which is contrary to the concept that SkyKeys should be - small. Note that `ActionExecutionValue` and `ArtifactValue` are unused if - the execution phase does not run. - -As a visual aid, this diagram shows the relationships between -SkyFunction implementations after a build of Bazel itself: - -![A graph of SkyFunction implementation relationships](/reference/skyframe.png) diff --git a/8.4.2/release/backward-compatibility.mdx b/8.4.2/release/backward-compatibility.mdx deleted file mode 100644 index af653cc..0000000 --- a/8.4.2/release/backward-compatibility.mdx +++ /dev/null @@ -1,76 +0,0 @@ ---- -title: 'Backward Compatibility' ---- - - - -This page provides information about how to handle backward compatibility, -including migrating from one release to another and how to communicate -incompatible changes. - -Bazel is evolving. Minor versions released as part of an [LTS major -version](/release#bazel-versioning) are fully backward-compatible. New major LTS -releases may contain incompatible changes that require some migration effort. -For more information about Bazel's release model, please check out the [Release -Model](/release) page. - -## Summary - -1. It is recommended to use `--incompatible_*` flags for breaking changes. -1. For every `--incompatible_*` flag, a GitHub issue explains the change in - behavior and aims to provide a migration recipe. -1. Incompatible flags are recommended to be back-ported to the latest LTS - release without enabling the flag by default. -1. APIs and behavior guarded by an `--experimental_*` flag can change at any - time. -1. Never run production builds with `--experimental_*` or `--incompatible_*` - flags. - -## How to follow this policy - -* [For Bazel users - how to update Bazel](/install/bazelisk) -* [For contributors - best practices for incompatible changes](/contribute/breaking-changes) -* [For release managers - how to update issue labels and release](https://github.com/bazelbuild/continuous-integration/tree/master/docs/release-playbook.md) - -## What is stable functionality? - -In general, APIs or behaviors without `--experimental_...` flags are considered -stable, supported features in Bazel. - -This includes: - -* Starlark language and APIs -* Rules bundled with Bazel -* Bazel APIs such as Remote Execution APIs or Build Event Protocol -* Flags and their semantics - -## Incompatible changes and migration recipes - -For every incompatible change in a new release, the Bazel team aims to provide a -_migration recipe_ that helps you update your code (`BUILD` and `.bzl` files, as -well as any Bazel usage in scripts, usage of Bazel API, and so on). - -Incompatible changes should have an associated `--incompatible_*` flag and a -corresponding GitHub issue.
- -The incompatible flag and relevant changes are recommended to be back-ported to -the latest LTS release without enabling the flag by default. This allows users -to migrate for the incompatible changes before the next LTS release is -available. - -## Communicating incompatible changes - -The primary source of information about incompatible changes are GitHub issues -marked with an ["incompatible-change" -label](https://github.com/bazelbuild/bazel/issues?q=label%3Aincompatible-change). - -For every incompatible change, the issue specifies the following: - -* Name of the flag controlling the incompatible change -* Description of the changed functionality -* Migration recipe - -When an incompatible change is ready for migration with Bazel at HEAD -(therefore, also with the next Bazel rolling release), it should be marked with -the `migration-ready` label. The incompatible change issue is closed when the -incompatible flag is flipped at HEAD. diff --git a/8.4.2/release/index.mdx b/8.4.2/release/index.mdx deleted file mode 100644 index a3cc526..0000000 --- a/8.4.2/release/index.mdx +++ /dev/null @@ -1,216 +0,0 @@ ---- -title: 'Release Model' ---- - - - -As announced in [the original blog -post](https://blog.bazel.build/2020/11/10/long-term-support-release.html), Bazel -4.0 and higher versions provide support for two release tracks: rolling -releases and long term support (LTS) releases. This page covers the latest -information about Bazel's release model. - -## Support matrix - -| LTS release | Support stage | Latest version | End of support | -| ----------- | ------------- | -------------- | -------------- | -| Bazel 9 | Rolling | [Check rolling release page](/release/rolling) | N/A | -| Bazel 8 | Active | [8.0.0](https://github.com/bazelbuild/bazel/releases/tag/8.0.0) | December 2027 | -| Bazel 7 | Maintenance | [7.4.1](https://github.com/bazelbuild/bazel/releases/tag/7.4.1) | Dec 2026 | -| Bazel 6 | Maintenance | [6.5.0](https://github.com/bazelbuild/bazel/releases/tag/6.5.0) | Dec 2025 | -| Bazel 5 | Maintenance | [5.4.1](https://github.com/bazelbuild/bazel/releases/tag/5.4.1) | Jan 2025 | -| Bazel 4 | Deprecated | [4.2.4](https://github.com/bazelbuild/bazel/releases/tag/4.2.4) | Jan 2024 | - -All Bazel LTS releases can be found on the [release -page](https://github.com/bazelbuild/bazel/releases) on GitHub. - -Note: Bazel versions older than Bazel 5 are no longer supported. Bazel users are -recommended to upgrade to the latest LTS release, or to use rolling releases if -they want to keep up with the latest changes at HEAD. - -## Release versioning - -Bazel uses a _major.minor.patch_ [Semantic -Versioning](https://semver.org/) scheme. - -* A _major release_ contains features that are not backward compatible with - the previous release. Each major Bazel version is an LTS release. -* A _minor release_ contains backward-compatible bug fixes and features - back-ported from the main branch. -* A _patch release_ contains critical bug fixes. - -Additionally, pre-release versions are indicated by appending a hyphen and a -date suffix to the next major version number. - -For example, a new release of each type would result in these version numbers: - -* Major: 6.0.0 -* Minor: 6.1.0 -* Patch: 6.1.2 -* Pre-release: 7.0.0-pre.20230502.1 - -## Support stages - -For each major Bazel version, there are four support stages: - -* **Rolling**: This major version is still in pre-release; the Bazel team - publishes rolling releases from HEAD. -* **Active**: This major version is the current active LTS release.
The Bazel - team backports important features and bug fixes into its minor releases. -* **Maintenance**: This major version is an old LTS release in maintenance - mode. The Bazel team only promises to backport critical bug fixes for - security issues and OS-compatibility issues into this LTS release. -* **Deprecated**: The Bazel team no longer provides support for this major - version; all users should migrate to newer Bazel LTS releases. - -## Release cadence - -Bazel regularly publishes releases for two release tracks. - -### Rolling releases - -* Rolling releases are coordinated with the Google Blaze release and are released - from HEAD around every two weeks. They are a preview of the next Bazel LTS - release. -* Rolling releases can ship incompatible changes. Incompatible flags are - recommended for major breaking changes; rolling out incompatible changes - should follow our [backward compatibility - policy](/release/backward-compatibility). - -### LTS releases - -* _Major release_: A new LTS release is expected to be cut from HEAD roughly - every 12 months. Once a new LTS release is out, it immediately enters the Active - stage, and the previous LTS release enters the Maintenance stage. -* _Minor release_: New minor versions on the Active LTS track are expected to - be released once every 2 months. -* _Patch release_: New patch versions for LTS releases in Active and - Maintenance stages are expected to be released on demand for critical bug - fixes. -* A Bazel LTS release enters the Deprecated stage after being in the - Maintenance stage for 2 years. - -For planned releases, please check our [release -issues](https://github.com/bazelbuild/bazel/issues?q=is%3Aopen+is%3Aissue+label%3Arelease) -on GitHub. - -## Release procedure & policies - -For rolling releases, the process is straightforward: about every two weeks, a -new release is created, aligning with the same baseline as the Google internal -Blaze release. Due to the rapid release schedule, we don't backport any changes -to rolling releases. - -For LTS releases, the procedure and policies below are followed: - -1. Determine a baseline commit for the release. - * For a new major LTS release, the baseline commit is the HEAD of the main - branch. - * For a minor or patch release, the baseline commit is the HEAD of the - current latest version of the same LTS release. -1. Create a release branch in the name of `release-` from the baseline - commit. -1. Backport changes via PRs to the release branch. - * The community can suggest certain commits to be back-ported by replying - "`@bazel-io flag`" on relevant GitHub issues or PRs to mark them as potential - release blockers; the Bazel team triages them and decides whether to - back-port the commits. - * Only backward-compatible commits on the main branch can be back-ported; - additional minor changes to resolve merge conflicts are acceptable. -1. Backport changes using Cherry-Pick Request Issue for Bazel maintainers. - * Bazel maintainers can request to cherry-pick specific commit(s) - to a release branch. This process is initiated by creating a - cherry-pick request on GitHub. Here's how to do it. - 1. Open the [cherry-pick request](https://github.com/bazelbuild/bazel/issues/new?assignees=&labels=&projects=&template=cherry_pick_request.yml) - 2. Fill in the request details - * Title: Provide a concise and descriptive title for the request. - * Commit ID(s): Enter the ID(s) of the commit(s) you want to - cherry-pick. If there are multiple commits, then separate - them with commas.
- * Category: Specify the category of the request. - * Reviewer(s): For multiple reviewers, separate their GitHub - IDs with commas. - 3. Set the milestone - * Find the "Milestone" section and click the setting. - * Select the appropriate X.Y.Z release blockers. This action - triggers the cherry-pick bot to process your request - for the "release-X.Y.Z" branch. - 4. Submit the Issue - * Once all details are filled in and the milestone is set, - submit the issue. - - * The cherry-pick bot will process the request and notify you - if the commit(s) are eligible for cherry-picking. If - the commits are cherry-pickable, which means there's no - merge conflict while cherry-picking the commit, then - the bot will create a new pull request. When the pull - request is approved by a member of the Bazel team, the - commits are cherry-picked and merged to the release branch. - For a visual example of a completed cherry-pick request, - refer to this - [example](https://github.com/bazelbuild/bazel/issues/20230). - -1. Identify release blockers and fix issues found on the release branch. - * The release branch is tested with the same test suite in - [postsubmit](https://buildkite.com/bazel/bazel-bazel) and the - [downstream test pipeline](https://buildkite.com/bazel/bazel-at-head-plus-downstream) - on Bazel CI. The Bazel team monitors testing results of the release - branch and fixes any regressions found. -1. Create a new release candidate from the release branch when all known - release blockers are resolved. - * The release candidate is announced on - [bazel-discuss](https://groups.google.com/g/bazel-discuss); - the Bazel team monitors community bug reports for the candidate. - * If new release blockers are identified, go back to the last step and - create a new release candidate after resolving all the issues. - * New features are not allowed to be added to the release branch after the - first release candidate is created; cherry-picks are limited to critical - fixes only. If a cherry-pick is needed, the requester must answer the - following questions: Why is this change critical, and what benefits does - it provide? What is the likelihood of this change introducing a - regression? -1. Push the release candidate as the official release if no further release - blockers are found. - * For patch releases, push the release at least two business days after - the last release candidate is out. - * For major and minor releases, push the release two business days after - the last release candidate is out, but not earlier than one week after - the first release candidate is out. - * The release is only pushed on a day when the next day is a business - day. - * The release is announced on - [bazel-discuss](https://groups.google.com/g/bazel-discuss); - the Bazel team monitors and addresses community bug reports for the new - release. - -## Report regressions - -If a user finds a regression in a new Bazel release, release candidate or even -Bazel at HEAD, please file a bug on -[GitHub](https://github.com/bazelbuild/bazel/issues). You can use -Bazelisk to bisect the culprit commit and include this information in the bug -report. - -For example, if your build succeeds with Bazel 6.1.0 but fails with the second -release candidate of 6.2.0, you can bisect via - -```bash -bazelisk --bisect=6.1.0..release-6.2.0rc2 build //foo:bar -``` - -You can set the `BAZELISK_SHUTDOWN` or `BAZELISK_CLEAN` environment variables to run -the corresponding bazel commands to reset the build state if it's needed to -reproduce the issue.
For more details, check out the documentation about Bazelisk's -[bisect feature](https://github.com/bazelbuild/bazelisk#--bisect). - -Remember to upgrade Bazelisk to the latest version to use the bisect -feature. - -## Rule compatibility - -If you are a rule author and want to maintain compatibility with different -Bazel versions, please check out the [Rule -Compatibility](/release/rule-compatibility) page. diff --git a/8.4.2/release/rule-compatibility.mdx b/8.4.2/release/rule-compatibility.mdx deleted file mode 100644 index 05a8a95..0000000 --- a/8.4.2/release/rule-compatibility.mdx +++ /dev/null @@ -1,90 +0,0 @@ ---- -title: 'Rule Compatibility' ---- - - - -Bazel Starlark rules can break compatibility with Bazel LTS releases in the -following two scenarios: - -1. The rule breaks compatibility with future LTS releases because a feature it - depends on is removed from Bazel at HEAD. -1. The rule breaks compatibility with the current or older LTS releases because - a feature it depends on is only available in newer Bazel LTS releases. - -Meanwhile, the rule itself can ship incompatible changes for its users as -well. When combined with breaking changes in Bazel, upgrading the rule version -and Bazel version can often be a source of frustration for Bazel users. This -page covers how rules authors should maintain rule compatibility with Bazel to -make it easier for users to upgrade Bazel and rules. - -## Manageable migration process - -While it's obviously not feasible to guarantee compatibility between every -version of Bazel and every version of the rule, our aim is to ensure that the -migration process remains manageable for Bazel users. A manageable migration -process is defined as a process where **users are not forced to upgrade the -rule's major version and Bazel's major version simultaneously**, thereby -allowing users to handle incompatible changes from one source at a time. - -For example, with the following compatibility matrix: - -* Migrating from rules_foo 1.x + Bazel 4.x to rules_foo 2.x + Bazel 5.x is not - considered manageable, as the users need to upgrade the major version of - rules_foo and Bazel at the same time. -* Migrating from rules_foo 2.x + Bazel 5.x to rules_foo 3.x + Bazel 6.x is - considered manageable, as the users can first upgrade rules_foo from 2.x to - 3.x without changing the major Bazel version, then upgrade Bazel from 5.x to - 6.x. - -| | rules_foo 1.x | rules_foo 2.x | rules_foo 3.x | HEAD | -| --- | --- | --- | --- | --- | -| Bazel 4.x | ✅ | ❌ | ❌ | ❌ | -| Bazel 5.x | ❌ | ✅ | ✅ | ❌ | -| Bazel 6.x | ❌ | ❌ | ✅ | ✅ | -| HEAD | ❌ | ❌ | ❌ | ✅ | - -❌: No version of the rule's major version is compatible with the Bazel LTS -release. - -✅: At least one version of the rule is compatible with the latest version of the -Bazel LTS release. - -## Best practices - -As Bazel rules authors, you can ensure a manageable migration process for users -by following these best practices: - -1. The rule should follow [Semantic - Versioning](https://semver.org/): minor versions of the same - major version are backward compatible. -1. The rule at HEAD should be compatible with the latest Bazel LTS release. -1. The rule at HEAD should be compatible with Bazel at HEAD.
To achieve this, - you can: - * Set up your own CI testing with Bazel at HEAD - * Add your project to [Bazel downstream - testing](https://github.com/bazelbuild/continuous-integration/blob/master/docs/downstream-testing.md); - the Bazel team files issues against your project if breaking changes in Bazel - affect your project, and you must follow our [downstream project - policies](https://github.com/bazelbuild/continuous-integration/blob/master/docs/downstream-testing.md#downstream-project-policies) - to address issues in a timely manner. -1. The latest major version of the rule must be compatible with the latest - Bazel LTS release. -1. A new major version of the rule should be compatible with the last Bazel LTS - release supported by the previous major version of the rule. - -Achieving points 2 and 3 is the most important task, since it naturally leads to -achieving points 4 and 5. - -To make it easier to keep compatibility with both Bazel at HEAD and the latest -Bazel LTS release, rules authors can: - -* Request backward-compatible features to be back-ported to the latest LTS - release; check out the [release process](/release#release-procedure-policies) - for more details. -* Use [bazel_features](https://github.com/bazel-contrib/bazel_features) - to do Bazel feature detection. - -In general, with the recommended approaches, rules should be able to migrate for -Bazel's incompatible changes and make use of new Bazel features at HEAD without -dropping compatibility with the latest Bazel LTS release. diff --git a/8.4.2/remote/bep-examples.mdx b/8.4.2/remote/bep-examples.mdx deleted file mode 100644 index faf11bf..0000000 --- a/8.4.2/remote/bep-examples.mdx +++ /dev/null @@ -1,200 +0,0 @@ ---- -title: 'Build Event Protocol Examples' ---- - - - -The full specification of the Build Event Protocol can be found in its protocol -buffer definition. However, it might be helpful to build up some intuition -before looking at the specification. - -Consider a simple Bazel workspace that consists of two empty shell scripts -`foo.sh` and `foo_test.sh` and the following `BUILD` file: - -```python -sh_library( - name = "foo_lib", - srcs = ["foo.sh"], -) - -sh_test( - name = "foo_test", - srcs = ["foo_test.sh"], - deps = [":foo_lib"], -) -``` - -When running `bazel test ...` on this project the build graph of the generated -build events will resemble the graph below. The arrows indicate the -aforementioned parent and child relationship. Note that some build events and -most fields have been omitted for brevity. - -![bep-graph](/docs/images/bep-graph.png "BEP graph") - -**Figure 1.** BEP graph. - -Initially, a `BuildStarted` event is published. The event informs us that the -build was invoked through the `bazel test` command and announces child events: - -* `OptionsParsed` -* `WorkspaceStatus` -* `CommandLine` -* `UnstructuredCommandLine` -* `BuildMetadata` -* `BuildFinished` -* `PatternExpanded` -* `Progress` - -The first three events provide information about how Bazel was invoked. - -The `PatternExpanded` build event provides insight -into which specific targets the `...` pattern expanded to: -`//foo:foo_lib` and `//foo:foo_test`. It does so by declaring two -`TargetConfigured` events as children. Note that the `TargetConfigured` event -declares the `Configuration` event as a child event, even though `Configuration` -has been posted before the `TargetConfigured` event. - -Besides the parent and child relationship, events may also refer to each other -using their build event identifiers.
For example, in the above graph the -`TargetComplete` event refers to the `NamedSetOfFiles` event in its `fileSets` -field. - -Build events that refer to files don’t usually embed the file -names and paths in the event. Instead, they contain the build event identifier -of a `NamedSetOfFiles` event, which will then contain the actual file names and -paths. The `NamedSetOfFiles` event allows a set of files to be reported once and -referred to by many targets. This structure is necessary because otherwise in -some cases the Build Event Protocol output size would grow quadratically with -the number of files. A `NamedSetOfFiles` event may also not have all its files -embedded, but instead refer to other `NamedSetOfFiles` events through their -build event identifiers. - -Below is an instance of the `TargetComplete` event for the `//foo:foo_lib` -target from the above graph, printed in protocol buffer’s JSON representation. -The build event identifier contains the target as an opaque string and refers to -the `Configuration` event using its build event identifier. The event does not -announce any child events. The payload contains information about whether the -target was built successfully, the set of output files, and the kind of target -built. - -```json -{ - "id": { - "targetCompleted": { - "label": "//foo:foo_lib", - "configuration": { - "id": "544e39a7f0abdb3efdd29d675a48bc6a" - } - } - }, - "completed": { - "success": true, - "outputGroup": [{ - "name": "default", - "fileSets": [{ - "id": "0" - }] - }], - "targetKind": "sh_library rule" - } -} -``` - -## Aspect Results in BEP - -Ordinary builds evaluate actions associated with `(target, configuration)` -pairs. When building with [aspects](/extending/aspects) enabled, Bazel -additionally evaluates targets associated with `(target, configuration, -aspect)` triples, for each target affected by a given enabled aspect. - -Evaluation results for aspects are available in BEP despite the absence of -aspect-specific event types. For each `(target, configuration)` pair with an -applicable aspect, Bazel publishes an additional `TargetConfigured` and -`TargetComplete` event bearing the result from applying the aspect to the -target. For example, if `//:foo_lib` is built with -`--aspects=aspects/myaspect.bzl%custom_aspect`, this event would also appear in -the BEP: - -```json -{ - "id": { - "targetCompleted": { - "label": "//foo:foo_lib", - "configuration": { - "id": "544e39a7f0abdb3efdd29d675a48bc6a" - }, - "aspect": "aspects/myaspect.bzl%custom_aspect" - } - }, - "completed": { - "success": true, - "outputGroup": [{ - "name": "default", - "fileSets": [{ - "id": "1" - }] - }] - } -} -``` - -Note: The only difference between the IDs is the presence of the `aspect` -field. A tool that does not check the `aspect` ID field and accumulates output -files by target may conflate target outputs with aspect outputs. - -## Consuming `NamedSetOfFiles` - -Determining the artifacts produced by a given target (or aspect) is a common -BEP use-case that can be done efficiently with some preparation. This section -discusses the recursive, shared structure offered by the `NamedSetOfFiles` -event, which matches the structure of a Starlark [Depset](/extending/depsets). - -Consumers must take care to avoid quadratic algorithms when processing -`NamedSetOfFiles` events because large builds can contain tens of thousands of -such events, requiring hundreds of millions of operations in a traversal with -quadratic complexity.
- -![namedsetoffiles-bep-graph](/docs/images/namedsetoffiles-bep-graph.png "NamedSetOfFiles BEP graph") - -**Figure 2.** `NamedSetOfFiles` BEP graph. - -A `NamedSetOfFiles` event always appears in the BEP stream *before* a -`TargetComplete` or `NamedSetOfFiles` event that references it. This is the -inverse of the "parent-child" event relationship, where all but the first event -appears after at least one event announcing it. A `NamedSetOfFiles` event is -announced by a `Progress` event with no semantics. - -Given these ordering and sharing constraints, a typical consumer must buffer all -`NamedSetOfFiles` events until the BEP stream is exhausted. The following Python -code demonstrates how to populate a map from -target/aspect to built artifacts in the "default" output group, and how to -process the outputs for a subset of built targets/aspects: - -```python -named_sets = {}  # type: dict[str, NamedSetOfFiles] -outputs = {}  # type: dict[tuple[str, str, str], dict[str, set[str]]] - -for event in stream: -  kind = event.id.WhichOneof("id") -  if kind == "named_set": -    named_sets[event.id.named_set.id] = event.named_set_of_files -  elif kind == "target_completed": -    tc = event.id.target_completed -    target_id = (tc.label, tc.configuration.id, tc.aspect) -    outputs[target_id] = {} -    for group in event.completed.output_group: -      outputs[target_id][group.name] = {fs.id for fs in group.file_sets} - -for result_id in relevant_subset(outputs.keys()): -  # Copy the stored set so the traversal does not mutate `outputs`, and -  # default to an empty set when the "default" output group is absent. -  visit = set(outputs[result_id].get("default", set())) -  seen_sets = set(visit) -  while visit: -    set_name = visit.pop() -    s = named_sets[set_name] -    for f in s.files: -      process_file(result_id, f) -    for fs in s.file_sets: -      if fs.id not in seen_sets: -        visit.add(fs.id) -        seen_sets.add(fs.id) -``` diff --git a/8.4.2/remote/bep-glossary.mdx b/8.4.2/remote/bep-glossary.mdx deleted file mode 100644 index 3bd11ee..0000000 --- a/8.4.2/remote/bep-glossary.mdx +++ /dev/null @@ -1,416 +0,0 @@ ---- -title: 'Build Event Protocol Glossary' ---- - - - -Each BEP event type has its own semantics, minimally documented in -[build\_event\_stream.proto](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto). -The following glossary describes each event type. - -## Aborted - -Unlike other events, `Aborted` does not have a corresponding ID type, because -the `Aborted` event *replaces* events of other types. This event indicates that -the build terminated early and the event ID it appears under was not produced -normally. `Aborted` contains an enum and human-friendly description to explain -why the build did not complete. - -For example, if a build is evaluating a target when the user interrupts Bazel, -BEP contains an event like the following: - -```json -{ - "id": { - "targetCompleted": { - "label": "//:foo", - "configuration": { - "id": "544e39a7f0abdb3efdd29d675a48bc6a" - } - } - }, - "aborted": { - "reason": "USER_INTERRUPTED" - } -} -``` - -## ActionExecuted - -Provides details about the execution of a specific -[Action](/rules/lib/actions) in a build. By default, this event is -included in the BEP only for failed actions, to support identifying the root cause -of build failures. Users may set the `--build_event_publish_all_actions` flag -to include all `ActionExecuted` events. - -## BuildFinished - -A single `BuildFinished` event is sent after the command is complete and -includes the exit code for the command. This event provides authoritative -success/failure information.
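For instance, a consumer of the JSON BEP file (produced with `--build_event_json_file`) might extract this authoritative result as sketched below. This is a minimal sketch, not an official tool: the file path is hypothetical, and the `finished`/`exitCode` keys follow the JSON form of the `BuildFinished` protocol buffer message described above.

```python
import json

# Hypothetical path; produced with --build_event_json_file=/tmp/bep.json.
# The file contains one JSON-encoded build event per line.
with open("/tmp/bep.json") as f:
    for line in f:
        event = json.loads(line)
        if "finished" in event:  # payload of the BuildFinished event
            exit_code = event["finished"].get("exitCode", {})
            # proto3 JSON omits zero-valued fields, so a missing "code" means 0.
            print("exit code:", exit_code.get("name"), exit_code.get("code", 0))
            break
```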
- -## BuildMetadata - -Contains the parsed contents of the `--build_metadata` flag. This event exists -to support Bazel integration with other tooling by plumbing external data (such as -identifiers). - -## BuildMetrics - -A single `BuildMetrics` event is sent at the end of every command and includes -counters/gauges useful for quantifying the build tool's behavior during the -command. These metrics indicate work actually done and do not count cached -work that is reused. - -Note that `memory_metrics` may not be populated if there was no Java garbage -collection during the command's execution. Users may set the -`--memory_profile=/dev/null` option which forces the garbage -collector to run at the end of the command to populate `memory_metrics`. - -```json -{ - "id": { - "buildMetrics": {} - }, - "buildMetrics": { - "actionSummary": { - "actionsExecuted": "1" - }, - "memoryMetrics": {}, - "targetMetrics": { - "targetsLoaded": "9", - "targetsConfigured": "19" - }, - "packageMetrics": { - "packagesLoaded": "5" - }, - "timingMetrics": { - "cpuTimeInMs": "1590", - "wallTimeInMs": "359" - } - } -} -``` - -## BuildStarted - -The first event in a BEP stream, `BuildStarted` includes metadata describing the -command before any meaningful work begins. - -## BuildToolLogs - -A single `BuildToolLogs` event is sent at the end of a command, including URIs -of files generated by the build tool that may aid in understanding or debugging -build tool behavior. Some information may be included inline. - -```json -{ - "id": { - "buildToolLogs": {} - }, - "lastMessage": true, - "buildToolLogs": { - "log": [ - { - "name": "elapsed time", - "contents": "MC4xMjEwMDA=" - }, - { - "name": "process stats", - "contents": "MSBwcm9jZXNzOiAxIGludGVybmFsLg==" - }, - { - "name": "command.profile.gz", - "uri": "file:///tmp/.cache/bazel/_bazel_foo/cde87985ad0bfef34eacae575224b8d1/command.profile.gz" - } - ] - } -} -``` - -## CommandLine - -The BEP contains multiple `CommandLine` events containing representations of all -command-line arguments (including options and uninterpreted arguments). -Each `CommandLine` event has a label in its `StructuredCommandLineId` that -indicates which representation it conveys; three such events appear in the BEP: - -* `"original"`: Reconstructed commandline as Bazel received it from the Bazel - client, without startup options sourced from .rc files. -* `"canonical"`: The effective commandline with .rc files expanded and - invocation policy applied. -* `"tool"`: Populated from the `--experimental_tool_command_line` option. This - is useful to convey the command-line of a tool wrapping Bazel through the BEP. - This could be a base64-encoded `CommandLine` binary protocol buffer message - which is used directly, or a string which is parsed but not interpreted (as - the tool's options may differ from Bazel's). - -## Configuration - -A `Configuration` event is sent for every [`configuration`](/extending/config) -used in the top-level targets in a build. At least one configuration event is -always present. The `id` is reused by the `TargetConfigured` and -`TargetComplete` event IDs and is necessary to disambiguate those events in -multi-configuration builds.
- -```json -{ - "id": { - "configuration": { - "id": "a5d130b0966b4a9ca2d32725aa5baf40e215bcfc4d5cdcdc60f5cc5b4918903b" - } - }, - "configuration": { - "mnemonic": "k8-fastbuild", - "platformName": "k8", - "cpu": "k8", - "makeVariable": { - "COMPILATION_MODE": "fastbuild", - "TARGET_CPU": "k8", - "GENDIR": "bazel-out/k8-fastbuild/bin", - "BINDIR": "bazel-out/k8-fastbuild/bin" - } - } -} -``` - -## ConvenienceSymlinksIdentified - -**Experimental.** If the `--experimental_convenience_symlinks_bep_event` -option is set, a single `ConvenienceSymlinksIdentified` event is produced by -`build` commands to indicate how symlinks in the workspace should be managed. -This enables building tools that invoke Bazel remotely and then arrange the local -workspace as if Bazel had been run locally. - -```json -{ - "id": { - "convenienceSymlinksIdentified":{} - }, - "convenienceSymlinksIdentified": { - "convenienceSymlinks": [ - { - "path": "bazel-bin", - "action": "CREATE", - "target": "execroot/google3/bazel-out/k8-fastbuild/bin" - }, - { - "path": "bazel-genfiles", - "action": "CREATE", - "target": "execroot/google3/bazel-out/k8-fastbuild/genfiles" - }, - { - "path": "bazel-out", - "action": "CREATE", - "target": "execroot/google3/bazel-out" - } - ] - } -} -``` - -## Fetch - -Indicates that a Fetch operation occurred as a part of the command execution. -Unlike other events, if a cached fetch result is re-used, this event does not -appear in the BEP stream. - -## NamedSetOfFiles - -`NamedSetOfFiles` events report a structure matching a -[`depset`](/extending/depsets) of files produced during command evaluation. -Transitively included depsets are identified by `NamedSetOfFilesId`. - -For more information on interpreting a stream's `NamedSetOfFiles` events, see the -[BEP examples page](/remote/bep-examples#consuming-namedsetoffiles). - -## OptionsParsed - -A single `OptionsParsed` event lists all options applied to the command, -separating startup options from command options. It also includes the -[InvocationPolicy](/reference/command-line-reference#flag--invocation_policy), if any. - -```json -{ - "id": { - "optionsParsed": {} - }, - "optionsParsed": { - "startupOptions": [ - "--max_idle_secs=10800", - "--noshutdown_on_low_sys_mem", - "--connect_timeout_secs=30", - "--output_user_root=/tmp/.cache/bazel/_bazel_foo", - "--output_base=/tmp/.cache/bazel/_bazel_foo/a61fd0fbee3f9d6c1e30d54b68655d35", - "--deep_execroot", - "--idle_server_tasks", - "--write_command_log", - "--nowatchfs", - "--nofatal_event_bus_exceptions", - "--nowindows_enable_symlinks", - "--noclient_debug" - ], - "cmdLine": [ - "--enable_platform_specific_config", - "--build_event_json_file=/tmp/bep.json" - ], - "explicitCmdLine": [ - "--build_event_json_file=/tmp/bep.json" - ], - "invocationPolicy": {} - } -} -``` - -## PatternExpanded - -`PatternExpanded` events indicate the set of all targets that match the patterns -supplied on the commandline. For successful commands, a single event is present -with all patterns in the `PatternExpandedId` and all targets in the -`PatternExpanded` event's *children*. If the pattern expands to any -`test_suite`s, the children also include the set of test targets included by the -`test_suite`. For each -pattern that fails to resolve, BEP contains an additional [`Aborted`](#aborted) -event with a `PatternExpandedId` identifying the pattern.
- -```json -{ - "id": { - "pattern": { - "pattern":["//base:all"] - } - }, - "children": [ - {"targetConfigured":{"label":"//base:foo"}}, - {"targetConfigured":{"label":"//base:foobar"}} - ], - "expanded": { - "testSuiteExpansions": { - "suiteLabel": "//base:suite", - "testLabels": "//base:foo_test" - } - } -} -``` - -## Progress - -Progress events contain the standard output and standard error produced by Bazel -during command execution. These events are also auto-generated as needed to -announce events that have not been announced by a logical "parent" event (in -particular, [NamedSetOfFiles](#namedsetoffiles).) - -## TargetComplete - -For each `(target, configuration, aspect)` combination that completes the -execution phase, a `TargetComplete` event is included in BEP. The event contains -the target's success/failure and the target's requested output groups. - -```json -{ - "id": { - "targetCompleted": { - "label": "//examples/py:bep", - "configuration": { - "id": "a5d130b0966b4a9ca2d32725aa5baf40e215bcfc4d5cdcdc60f5cc5b4918903b" - } - } - }, - "completed": { - "success": true, - "outputGroup": [ - { - "name": "default", - "fileSets": [ - { - "id": "0" - } - ] - } - ] - } -} -``` - -## TargetConfigured - -For each Target that completes the analysis phase, a `TargetConfigured` event is -included in BEP. This is the authoritative source for a target's "rule kind" -attribute. The configuration(s) applied to the target appear in the announced -*children* of the event. - -For example, building with the `--experimental_multi_cpu` options may produce -the following `TargetConfigured` event for a single target with two -configurations: - -```json -{ - "id": { - "targetConfigured": { - "label": "//starlark_configurations/multi_arch_binary:foo" - } - }, - "children": [ - { - "targetCompleted": { - "label": "//starlark_configurations/multi_arch_binary:foo", - "configuration": { - "id": "c62b30c8ab7b9fc51a05848af9276529842a11a7655c71327ade26d7c894c818" - } - } - }, - { - "targetCompleted": { - "label": "//starlark_configurations/multi_arch_binary:foo", - "configuration": { - "id": "eae0379b65abce68d54e0924c0ebcbf3d3df26c6e84ef7b2be51e8dc5b513c99" - } - } - } - ], - "configured": { - "targetKind": "foo_binary rule" - } -} -``` - -## TargetSummary - -For each `(target, configuration)` pair that is executed, a `TargetSummary` -event is included with an aggregate success result encompassing the configured -target's execution and all aspects applied to that configured target. - -## TestResult - -If testing is requested, a `TestResult` event is sent for each test attempt, -shard, and run per test. This allows BEP consumers to identify precisely which -test actions failed their tests and identify the test outputs (such as logs, -test.xml files) for each test action. - -## TestSummary - -If testing is requested, a `TestSummary` event is sent for each test `(target, -configuration)`, containing information necessary to interpret the test's -results. The number of attempts, shards and runs per test are included to enable -BEP consumers to differentiate artifacts across these dimensions. The attempts -and runs per test are considered while producing the aggregate `TestStatus` to -differentiate `FLAKY` tests from `FAILED` tests. - -## UnstructuredCommandLine - -Unlike [CommandLine](#commandline), this event carries the unparsed commandline -flags in string form as encountered by the build tool after expanding all -[`.bazelrc`](/run/bazelrc) files and -considering the `--config` flag. 
- -The `UnstructuredCommandLine` event may be relied upon to precisely reproduce a -given command execution. - -## WorkspaceConfig - -A single `WorkspaceConfig` event contains configuration information regarding the -workspace, such as the execution root. - -## WorkspaceStatus - -A single `WorkspaceStatus` event contains the result of the [workspace status -command](/docs/user-manual#workspace-status). diff --git a/8.4.2/remote/bep.mdx b/8.4.2/remote/bep.mdx deleted file mode 100644 index bafdaa9..0000000 --- a/8.4.2/remote/bep.mdx +++ /dev/null @@ -1,148 +0,0 @@ ---- -title: 'Build Event Protocol' ---- - - - -The [Build Event -Protocol](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto) -(BEP) allows third-party programs to gain insight into a Bazel invocation. For -example, you could use the BEP to gather information for an IDE -plugin or a dashboard that displays build results. - -The protocol is a set of [protocol -buffer](https://developers.google.com/protocol-buffers/) messages with some -semantics defined on top of it. It includes information about build and test -results, build progress, the build configuration and much more. The BEP is -intended to be consumed programmatically and makes parsing Bazel’s -command line output a thing of the past. - -The Build Event Protocol represents information about a build as events. A -build event is a protocol buffer message consisting of a build event identifier, -a set of child event identifiers, and a payload. - -* __Build Event Identifier:__ Depending on the kind of build event, it might be -an [opaque -string](https://github.com/bazelbuild/bazel/blob/7.1.0/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto#L131-L140) -or [structured -information](https://github.com/bazelbuild/bazel/blob/7.1.0/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto#L194-L205) -revealing more about the build event. A build event identifier is unique within -a build. - -* __Children:__ A build event may announce other build events, by including -their build event identifiers in its [children -field](https://github.com/bazelbuild/bazel/blob/7.1.0/src/main/java/com/google/devtools/build/lib/buildeventstream/proto/build_event_stream.proto#L1276). -For example, the `PatternExpanded` build event announces the targets it expands -to as children. The protocol guarantees that all events, except for the first -event, are announced by a previous event. - -* __Payload:__ The payload contains structured information about a build event, -encoded as a protocol buffer message specific to that event. Note that the -payload might not be the expected type, but could be an `Aborted` message -if the build aborted prematurely. - -### Build event graph - -All build events form a directed acyclic graph through their parent and child -relationship. Every build event except for the initial build event has one or -more parent events. Please note that not all parent events of a child event must -necessarily be posted before it. When a build is complete (succeeded or failed) -all announced events will have been posted. In case of a Bazel crash or a failed -network transport, some announced build events may never be posted. - -The event graph's structure reflects the lifecycle of a command. Every BEP -graph has the following characteristic shape: - -1. 
The root event is always a [`BuildStarted`](/remote/bep-glossary#buildstarted) - event. All other events are its descendants. -1. Immediate children of the BuildStarted event contain metadata about the - command. -1. Events containing data produced by the command, such as files built and test - results, appear before the [`BuildFinished`](/remote/bep-glossary#buildfinished) - event. -1. The [`BuildFinished`](/remote/bep-glossary#buildfinished) event *may* be followed - by events containing summary information about the build (for example, metric - or profiling data). - -## Consuming Build Event Protocol - -### Consume in binary format - -To consume the BEP in a binary format: - -1. Have Bazel serialize the protocol buffer messages to a file by specifying the - option `--build_event_binary_file=/path/to/file`. The file will contain - serialized protocol buffer messages with each message being length delimited. - Each message is prefixed with its length encoded as a variable length integer. - This format can be read using the protocol buffer library’s - [`parseDelimitedFrom(InputStream)`](https://developers.google.com/protocol-buffers/docs/reference/java/com/google/protobuf/AbstractParser#parseDelimitedFrom-java.io.InputStream-) - method. - -2. Then, write a program that extracts the relevant information from the - serialized protocol buffer message. - -### Consume in text or JSON formats - -The following Bazel command line flags will output the BEP in -human-readable formats, such as text and JSON: - -``` ---build_event_text_file ---build_event_json_file -``` - -## Build Event Service - -The [Build Event -Service](https://github.com/googleapis/googleapis/blob/master/google/devtools/build/v1/publish_build_event.proto) -Protocol is a generic [gRPC](https://www.grpc.io) service for publishing build events. The Build Event -Service protocol is independent of the BEP and treats BEP events as opaque bytes. -Bazel ships with a gRPC client implementation of the Build Event Service protocol that -publishes Build Event Protocol events. One can specify the endpoint to send the -events to using the `--bes_backend=HOST:PORT` flag. If your backend uses gRPC, -you must prefix the address with the appropriate scheme: `grpc://` for plaintext -gRPC and `grpcs://` for gRPC with TLS enabled. - -### Build Event Service flags - -Bazel has several flags related to the Build Event Service protocol, including: - -* `--bes_backend` -* `--[no]bes_lifecycle_events` -* `--bes_results_url` -* `--bes_timeout` -* `--bes_instance_name` - -For a description of each of these flags, see the -[Command-Line Reference](/reference/command-line-reference). - -### Authentication and security - -Bazel’s Build Event Service implementation also supports authentication and TLS. -These settings can be controlled using the below flags. Please note that these -flags are also used for Bazel’s Remote Execution. This implies that the Build -Event Service and Remote Execution Endpoints need to share the same -authentication and TLS infrastructure. - -* `--[no]google_default_credentials` -* `--google_credentials` -* `--google_auth_scopes` -* `--tls_certificate` -* `--[no]tls_enabled` - -For a description of each of these flags, see the -[Command-Line Reference](/reference/command-line-reference). - -### Build Event Service and remote caching - -The BEP typically contains many references to log files (test.log, test.xml, -etc. ) stored on the machine where Bazel is running. 
A remote BES server -typically can't access these files as they are on different machines. A way to -work around this issue is to use Bazel with [remote -caching](/remote/caching). -Bazel will upload all output files to the remote cache (including files -referenced in the BEP) and the BES server can then fetch the referenced files -from the cache. - -See [GitHub issue 3689](https://github.com/bazelbuild/bazel/issues/3689) for -more details. diff --git a/8.4.2/remote/cache-local.mdx b/8.4.2/remote/cache-local.mdx deleted file mode 100644 index e6dc0c0..0000000 --- a/8.4.2/remote/cache-local.mdx +++ /dev/null @@ -1,91 +0,0 @@ ---- -title: 'Debugging Remote Cache Hits for Local Execution' ---- - - - -This page describes how to investigate cache misses in the context of local -execution. - -This page assumes that you have a build and/or test that successfully builds -locally and is set up to utilize remote caching, and that you want to ensure -that the remote cache is being effectively utilized. - -For tips on how to check your cache hit rate and how to compare the execution -logs between two Bazel invocations, see -[Debugging Remote Cache Hits for Remote Execution](/remote/cache-remote). -Everything presented in that guide also applies to remote caching with local -execution. However, local execution presents some additional challenges. - -## Checking your cache hit rate - -Successful remote cache hits will show up in the status line, similar to -[Cache Hits rate with Remote -Execution](/remote/cache-remote#check-cache-hits). - -In the standard output of your Bazel run, you will see something like the -following: - -```none {:.devsite-disable-click-to-copy} - INFO: 7 processes: 3 remote cache hit, 4 linux-sandbox. -``` - -This means that out of 7 attempted actions, 3 got a remote cache hit and 4 -actions did not have cache hits and were executed locally using `linux-sandbox` -strategy. Local cache hits are not included in this summary. If you are getting -0 processes (or a number lower than expected), run `bazel clean` followed by -your build/test command. - -## Troubleshooting cache hits - -If you are not getting the cache hit rate you are expecting, do the following: - -### Ensure successful communication with the remote endpoint - -To ensure your build is successfully communicating with the remote cache, follow -the steps in this section. - -1. Check your output for warnings - - With remote execution, a failure to talk to the remote endpoint would fail - your build. On the other hand, a cacheable local build would not fail if it - cannot cache. Check the output of your Bazel invocation for warnings, such - as: - - ```none {:.devsite-disable-click-to-copy} - WARNING: Error reading from the remote cache: - ``` - - - or - - ```none {:.devsite-disable-click-to-copy} - WARNING: Error writing to the remote cache: - ``` - - - Such warnings will be followed by the error message detailing the connection - problem that should help you debug: for example, mistyped endpoint name or - incorrectly set credentials. Find and address any such errors. If the error - message you see does not give you enough information, try adding - `--verbose_failures`. - -2. Follow the steps from [Troubleshooting cache hits for remote - execution](/remote/cache-remote#troubleshooting_cache_hits) to - ensure that your cache-writing Bazel invocations are able to get cache hits - on the same machine and across machines. - -3. Ensure your cache-reading Bazel invocations can get cache hits. - - a. 
Since cache-reading Bazel invocations will have a different command-line set - up, take additional care to ensure that they are properly set up to - communicate with the remote cache. Ensure the `--remote_cache` flag is set - and there are no warnings in the output. - - b. Ensure your cache-reading Bazel invocations build the same targets as the - cache-writing Bazel invocations. - - c. Follow the same steps as to [ensure caching across - machines](/remote/cache-remote#caching-across-machines), - to ensure caching from your cache-writing Bazel invocation to your - cache-reading Bazel invocation. diff --git a/8.4.2/remote/cache-remote.mdx b/8.4.2/remote/cache-remote.mdx deleted file mode 100644 index a614f4f..0000000 --- a/8.4.2/remote/cache-remote.mdx +++ /dev/null @@ -1,179 +0,0 @@ ---- -title: 'Debugging Remote Cache Hits for Remote Execution' ---- - - - -This page describes how to check your cache hit rate and how to investigate -cache misses in the context of remote execution. - -This page assumes that you have a build and/or test that successfully -utilizes remote execution, and you want to ensure that you are effectively -utilizing remote cache. - -## Checking your cache hit rate - -In the standard output of your Bazel run, look at the `INFO` line that lists -processes, which roughly correspond to Bazel actions. That line details -where the action was run. Look for the `remote` label, which indicates an action -executed remotely, `linux-sandbox` for actions executed in a local sandbox, -and other values for other execution strategies. An action whose result came -from a remote cache is displayed as `remote cache hit`. - -For example: - -```none {:.devsite-disable-click-to-copy} -INFO: 11 processes: 6 remote cache hit, 3 internal, 2 remote. -``` - -In this example there were 6 remote cache hits, and 2 actions did not have -cache hits and were executed remotely. The 3 internal part can be ignored. -It is typically tiny internal actions, such as creating symbolic links. Local -cache hits are not included in this summary. If you are getting 0 processes -(or a number lower than expected), run `bazel clean` followed by your build/test -command. - -## Troubleshooting cache hits - -If you are not getting the cache hit rate you are expecting, do the following: - -### Ensure re-running the same build/test command produces cache hits - -1. Run the build(s) and/or test(s) that you expect to populate the cache. The - first time a new build is run on a particular stack, you can expect no remote - cache hits. As part of remote execution, action results are stored in the - cache and a subsequent run should pick them up. - -2. Run `bazel clean`. This command cleans your local cache, which allows - you to investigate remote cache hits without the results being masked by - local cache hits. - -3. Run the build(s) and test(s) that you are investigating again (on the same - machine). - -4. Check the `INFO` line for cache hit rate. If you see no processes except - `remote cache hit` and `internal`, then your cache is being correctly populated and - accessed. In that case, skip to the next section. - -5. A likely source of discrepancy is something non-hermetic in the build causing - the actions to receive different action keys across the two runs. To find - those actions, do the following: - - a. 
Re-run the build(s) or test(s) in question to obtain execution logs: - - ```posix-terminal - bazel clean - - bazel <optional-flags> build //<your:target> --execution_log_compact_file=/tmp/exec1.log - ``` - - b. [Compare the execution logs](#comparing-the-execution-logs) between the - two runs. Ensure that the actions are identical across the two log files. - Discrepancies provide a clue about the changes that occurred between the - runs. Update your build to eliminate those discrepancies. - - If you are able to resolve the caching problems and now the repeated run - produces all cache hits, skip to the next section. - - If your action IDs are identical but there are no cache hits, then something - in your configuration is preventing caching. Continue with this section to - check for common problems. - -6. Check that all actions in the execution log have `cacheable` set to true. If - `cacheable` does not appear in the execution log for a given action, that - means that the corresponding rule may have a `no-cache` tag in its - definition in the `BUILD` file. Look at the `mnemonic` and `target_label` - fields in the execution log to help determine where the action is coming - from. - -7. If the actions are identical and `cacheable` but there are no cache hits, it - is possible that your command line includes `--noremote_accept_cached` which - would disable cache lookups for a build. - - If figuring out the actual command line is difficult, use the canonical - command line from the - [Build Event Protocol](/remote/bep) - as follows: - - a. Add `--build_event_text_file=/tmp/bep.txt` to your Bazel command to get - the text version of the log. - - b. Open the text version of the log and search for the - `structured_command_line` message with `command_line_label: "canonical"`. - It will list all the options after expansion. - - c. Search for `remote_accept_cached` and check whether it's set to `false`. - - d. If `remote_accept_cached` is `false`, determine where it is being - set to `false`: either at the command line or in a - [bazelrc](/run/bazelrc#bazelrc-file-locations) file. - -### Ensure caching across machines - -After cache hits are happening as expected on the same machine, run the -same build(s)/test(s) on a different machine. If you suspect that caching is -not happening across machines, do the following: - -1. Make a small modification to your build to avoid hitting existing caches. - -2. Run the build on the first machine: - - ```posix-terminal - bazel clean - - bazel ... build ... --execution_log_compact_file=/tmp/exec1.log - ``` - -3. Run the build on the second machine, ensuring the modification from step 1 - is included: - - ```posix-terminal - bazel clean - - bazel ... build ... --execution_log_compact_file=/tmp/exec2.log - ``` - -4. [Compare the execution logs](#comparing-the-execution-logs) for the two - runs. If the logs are not identical, investigate your build configurations - for discrepancies as well as properties from the host environment leaking - into either of the builds. - -## Comparing the execution logs - -The execution log contains records of actions executed during the build. -Each record describes both the inputs (not only files, but also command line -arguments, environment variables, etc) and the outputs of the action. Thus, -examination of the log can reveal why an action was reexecuted.
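Once you have converted both logs to text with the parser tool described below, any standard diff works for the final comparison. As a minimal sketch (assuming the hypothetical paths `/tmp/exec1.log.txt` and `/tmp/exec2.log.txt` from the steps below), Python's standard `difflib` can surface the first differing action records:

```python
import difflib

# Hypothetical paths: the text files produced by //src/tools/execlog:parser
# in the conversion steps described below.
with open("/tmp/exec1.log.txt") as f1, open("/tmp/exec2.log.txt") as f2:
    lines1, lines2 = f1.readlines(), f2.readlines()

# Print a unified diff; each hunk points at an action whose inputs
# (files, arguments, environment variables) changed between the builds.
for line in difflib.unified_diff(lines1, lines2,
                                 fromfile="exec1", tofile="exec2", n=3):
    print(line, end="")
```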
- -The execution log can be produced in one of three formats: -compact (`--execution_log_compact_file`), -binary (`--execution_log_binary_file`) or JSON (`--execution_log_json_file`). -The compact format is recommended, as it produces much smaller files with very -little runtime overhead. The following instructions work for any format. You -can also convert between them using the `//src/tools/execlog:converter` tool. - -To compare logs for two builds that are not sharing cache hits as expected, -do the following: - -1. Get the execution logs from each build and store them as `/tmp/exec1.log` and - `/tmp/exec2.log`. - -2. Download the Bazel source code and build the `//src/tools/execlog:parser` - tool: - - git clone https://github.com/bazelbuild/bazel.git - cd bazel - bazel build //src/tools/execlog:parser - -3. Use the `//src/tools/execlog:parser` tool to convert the logs into a - human-readable text format. In this format, the actions in the second log are - sorted to match the order in the first log, making a comparison easier. - - bazel-bin/src/tools/execlog/parser \ - --log_path=/tmp/exec1.log \ - --log_path=/tmp/exec2.log \ - --output_path=/tmp/exec1.log.txt \ - --output_path=/tmp/exec2.log.txt - -4. Use your favourite text differ to diff `/tmp/exec1.log.txt` and - `/tmp/exec2.log.txt`. diff --git a/8.4.2/remote/caching.mdx b/8.4.2/remote/caching.mdx deleted file mode 100644 index 8fd6adc..0000000 --- a/8.4.2/remote/caching.mdx +++ /dev/null @@ -1,380 +0,0 @@ ---- -title: 'Remote Caching' ---- - - - -This page covers remote caching, setting up a server to host the cache, and -running builds using the remote cache. - -A remote cache is used by a team of developers and/or a continuous integration -(CI) system to share build outputs. If your build is reproducible, the -outputs from one machine can be safely reused on another machine, which can -make builds significantly faster. - -## Overview - -Bazel breaks a build into discrete steps, which are called actions. Each action -has inputs, output names, a command line, and environment variables. Required -inputs and expected outputs are declared explicitly for each action. - -You can set up a server to be a remote cache for build outputs, which are these -action outputs. These outputs consist of a list of output file names and the -hashes of their contents. With a remote cache, you can reuse build outputs -from another user's build rather than building each new output locally. - -To use remote caching: - -* Set up a server as the cache's backend -* Configure the Bazel build to use the remote cache -* Use Bazel version 0.10.0 or later - -The remote cache stores two types of data: - -* The action cache, which is a map of action hashes to action result metadata. -* A content-addressable store (CAS) of output files. - -Note that the remote cache additionally stores the stdout and stderr for every -action. Inspecting the stdout/stderr of Bazel thus is not a good signal for -[estimating cache hits](/remote/cache-local). - -### How a build uses remote caching - -Once a server is set up as the remote cache, you use the cache in multiple -ways: - -* Read and write to the remote cache -* Read and/or write to the remote cache except for specific targets -* Only read from the remote cache -* Not use the remote cache at all - -When you run a Bazel build that can read and write to the remote cache, -the build follows these steps: - -1. Bazel creates the graph of targets that need to be built, and then creates -a list of required actions. 
Each of these actions has declared inputs -and output filenames. -2. Bazel checks your local machine for existing build outputs and reuses any -that it finds. -3. Bazel checks the cache for existing build outputs. If the output is found, -Bazel retrieves the output. This is a cache hit. -4. For required actions where the outputs were not found, Bazel executes the -actions locally and creates the required build outputs. -5. New build outputs are uploaded to the remote cache. - -## Setting up a server as the cache's backend - -You need to set up a server to act as the cache's backend. A HTTP/1.1 -server can treat Bazel's data as opaque bytes and so many existing servers -can be used as a remote caching backend. Bazel's -[HTTP Caching Protocol](#http-caching) is what supports remote -caching. - -You are responsible for choosing, setting up, and maintaining the backend -server that will store the cached outputs. When choosing a server, consider: - -* Networking speed. For example, if your team is in the same office, you may -want to run your own local server. -* Security. The remote cache will have your binaries and so needs to be secure. -* Ease of management. For example, Google Cloud Storage is a fully managed service. - -There are many backends that can be used for a remote cache. Some options -include: - -* [nginx](#nginx) -* [bazel-remote](#bazel-remote) -* [Google Cloud Storage](#cloud-storage) - -### nginx - -nginx is an open source web server. With its [WebDAV module], it can be -used as a remote cache for Bazel. On Debian and Ubuntu you can install the -`nginx-extras` package. On macOS nginx is available via Homebrew: - -```posix-terminal -brew tap denji/nginx - -brew install nginx-full --with-webdav -``` - -Below is an example configuration for nginx. Note that you will need to -change `/path/to/cache/dir` to a valid directory where nginx has permission -to write and read. You may need to change `client_max_body_size` option to a -larger value if you have larger output files. The server will require other -configuration such as authentication. - - -Example configuration for `server` section in `nginx.conf`: - -```nginx -location /cache/ { - # The path to the directory where nginx should store the cache contents. - root /path/to/cache/dir; - # Allow PUT - dav_methods PUT; - # Allow nginx to create the /ac and /cas subdirectories. - create_full_put_path on; - # The maximum size of a single file. - client_max_body_size 1G; - allow all; -} -``` - -### bazel-remote - -bazel-remote is an open source remote build cache that you can use on -your infrastructure. It has been successfully used in production at -several companies since early 2018. Note that the Bazel project does -not provide technical support for bazel-remote. - -This cache stores contents on disk and also provides garbage collection -to enforce an upper storage limit and clean unused artifacts. The cache is -available as a [docker image] and its code is available on -[GitHub](https://github.com/buchgr/bazel-remote/). -Both the REST and gRPC remote cache APIs are supported. - -Refer to the [GitHub](https://github.com/buchgr/bazel-remote/) -page for instructions on how to use it. - -### Google Cloud Storage - -[Google Cloud Storage] is a fully managed object store which provides an -HTTP API that is compatible with Bazel's remote caching protocol. It requires -that you have a Google Cloud account with billing enabled. - -To use Cloud Storage as the cache: - -1. 
[Create a storage bucket](https://cloud.google.com/storage/docs/creating-buckets).
-Ensure that you select a bucket location that's closest to you, as network bandwidth
-is important for the remote cache.
-
-2. Create a service account for Bazel to authenticate to Cloud Storage. See
-[Creating a service account](https://cloud.google.com/iam/docs/creating-managing-service-accounts#creating_a_service_account).
-
-3. Generate a secret JSON key and then pass it to Bazel for authentication. Store
-the key securely, as anyone with the key can read and write arbitrary data
-to/from your GCS bucket.
-
-4. Connect to Cloud Storage by adding the following flags to your Bazel command:
-   * Pass the following URL to Bazel by using the flag:
-     `--remote_cache=https://storage.googleapis.com{{ '' }}/bucket-name{{ '' }}` where `bucket-name` is the name of your storage bucket.
-   * Pass the authentication key using the flag: `--google_credentials={{ '' }}/path/to/your/secret-key{{ '' }}.json`, or
-     `--google_default_credentials` to use [Application Authentication](https://cloud.google.com/docs/authentication/production).
-
-5. You can configure Cloud Storage to automatically delete old files. To do so, see
-[Managing Object Lifecycles](https://cloud.google.com/storage/docs/managing-lifecycles).
-
-### Other servers
-
-You can set up any HTTP/1.1 server that supports PUT and GET as the cache's
-backend. Users have reported success with caching backends such as [Hazelcast](https://hazelcast.com),
-[Apache httpd](http://httpd.apache.org), and [AWS S3](https://aws.amazon.com/s3).
-
-## Authentication
-
-As of version 0.11.0, Bazel supports HTTP Basic Authentication.
-You can pass a username and password to Bazel via the remote cache URL. The
-syntax is `https://username:password@hostname.com:port/path`. Note that
-HTTP Basic Authentication transmits the username and password in plaintext over the
-network, so it's critical to always use it with HTTPS.
-
-## HTTP caching protocol
-
-Bazel supports remote caching via HTTP/1.1. The protocol is conceptually simple:
-binary data (a BLOB) is uploaded via PUT requests and downloaded via GET requests.
-Action result metadata is stored under the path `/ac/` and output files are stored
-under the path `/cas/`.
-
-For example, consider a remote cache running under `http://localhost:8080/cache`.
-A Bazel request to download action result metadata for an action with the SHA256
-hash `01ba4719...` will look as follows:
-
-```http
-GET /cache/ac/01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b HTTP/1.1
-Host: localhost:8080
-Accept: */*
-Connection: Keep-Alive
-```
-
-A Bazel request to upload an output file with the SHA256 hash `15e2b0d3...` to
-the CAS will look as follows:
-
-```http
-PUT /cache/cas/15e2b0d3c33891ebb0f1ef609ec419420c20e320ce94c65fbc8c3312448eb225 HTTP/1.1
-Host: localhost:8080
-Accept: */*
-Content-Length: 9
-Connection: Keep-Alive
-
-0x310x320x330x340x350x360x370x380x39
-```
-
-## Run Bazel using the remote cache
-
-Once a server is set up as the remote cache, to use the remote cache you
-need to add flags to your Bazel command. See the list of configurations and
-their flags below.
-
-You may also need to configure authentication, which is specific to your
-chosen server.
-
-You may want to add these flags in a `.bazelrc` file so that you don't
-need to specify them every time you run Bazel.
Depending on your project and -team dynamics, you can add flags to a `.bazelrc` file that is: - -* On your local machine -* In your project's workspace, shared with the team -* On the CI system - -### Read from and write to the remote cache - -Take care in who has the ability to write to the remote cache. You may want -only your CI system to be able to write to the remote cache. - -Use the following flag to read from and write to the remote cache: - -```posix-terminal -build --remote_cache=http://{{ '' }}your.host:port{{ '' }} -``` - -Besides `HTTP`, the following protocols are also supported: `HTTPS`, `grpc`, `grpcs`. - -Use the following flag in addition to the one above to only read from the -remote cache: - -```posix-terminal -build --remote_upload_local_results=false -``` - -### Exclude specific targets from using the remote cache - -To exclude specific targets from using the remote cache, tag the target with -`no-remote-cache`. For example: - -```starlark -java_library( - name = "target", - tags = ["no-remote-cache"], -) -``` - -### Delete content from the remote cache - -Deleting content from the remote cache is part of managing your server. -How you delete content from the remote cache depends on the server you have -set up as the cache. When deleting outputs, either delete the entire cache, -or delete old outputs. - -The cached outputs are stored as a set of names and hashes. When deleting -content, there's no way to distinguish which output belongs to a specific -build. - -You may want to delete content from the cache to: - -* Create a clean cache after a cache was poisoned -* Reduce the amount of storage used by deleting old outputs - -### Unix sockets - -The remote HTTP cache supports connecting over unix domain sockets. The behavior -is similar to curl's `--unix-socket` flag. Use the following to configure unix -domain socket: - -```posix-terminal - build --remote_cache=http://{{ '' }}your.host:port{{ '' }} - build --remote_proxy=unix:/{{ '' }}path/to/socket{{ '' }} -``` - -This feature is unsupported on Windows. - -## Disk cache - -Bazel can use a directory on the file system as a remote cache. This is -useful for sharing build artifacts when switching branches and/or working -on multiple workspaces of the same project, such as multiple checkouts. -Enable the disk cache as follows: - -```posix-terminal -build --disk_cache={{ '' }}path/to/build/cache{{ '' }} -``` - -You can pass a user-specific path to the `--disk_cache` flag using the `~` alias -(Bazel will substitute the current user's home directory). This comes in handy -when enabling the disk cache for all developers of a project via the project's -checked in `.bazelrc` file. - -### Garbage collection - -Starting with Bazel 7.4, you can use `--experimental_disk_cache_gc_max_size` and -`--experimental_disk_cache_gc_max_age` to set a maximum size for the disk cache -or for the age of individual cache entries. Bazel will automatically garbage -collect the disk cache while idling between builds; the idle timer can be set -with `--experimental_disk_cache_gc_idle_delay` (defaulting to 5 minutes). - -As an alternative to automatic garbage collection, we also provide a [tool]( -https://github.com/bazelbuild/bazel/tree/master/src/tools/diskcache) to run a -garbage collection on demand. - -## Known issues - -**Input file modification during a build** - -When an input file is modified during a build, Bazel might upload invalid -results to the remote cache. 
You can enable change detection with
-the `--experimental_guard_against_concurrent_changes` flag. This feature has
-no known issues and will be enabled by default in a future release.
-See [issue #3360](https://github.com/bazelbuild/bazel/issues/3360) for updates. Generally, avoid modifying source files during a
-build.
-
-**Environment variables leaking into an action**
-
-An action definition contains environment variables. This can be a problem for
-sharing remote cache hits across machines. For example, environments with
-different `$PATH` variables won't share cache hits. Only environment variables
-explicitly whitelisted via `--action_env` are included in an action
-definition. Bazel's Debian/Ubuntu package used to install `/etc/bazel.bazelrc`
-with a whitelist of environment variables including `$PATH`. If you are getting
-fewer cache hits than expected, check that your environment doesn't have an old
-`/etc/bazel.bazelrc` file.
-
-**Bazel does not track tools outside a workspace**
-
-Bazel currently does not track tools outside a workspace. This can be a
-problem if, for example, an action uses a compiler from `/usr/bin/`. Then,
-two users with different compilers installed will wrongly share cache hits
-because the outputs are different even though they have the same action hash. See
-[issue #4558](https://github.com/bazelbuild/bazel/issues/4558) for updates.
-
-**Incremental in-memory state is lost when running builds inside Docker containers**
-
-Bazel uses a server/client architecture even when running in a single Docker container.
-On the server side, Bazel maintains in-memory state which speeds up builds.
-When running builds inside Docker containers, such as in CI, the in-memory state is lost
-and Bazel must rebuild it before using the remote cache.
-
-## External links
-
-* **Your Build in a Datacenter:** The Bazel team gave a [talk](https://fosdem.org/2018/schedule/event/datacenter_build/) about remote caching and execution at FOSDEM 2018.
-
-* **Faster Bazel builds with remote caching: a benchmark:** Nicolò Valigi wrote a [blog post](https://nicolovaligi.com/faster-bazel-remote-caching-benchmark.html)
-in which he benchmarks remote caching in Bazel.
- -* [Adapting Rules for Remote Execution](/remote/rules) -* [Troubleshooting Remote Execution](/remote/sandbox) -* [WebDAV module](https://nginx.org/en/docs/http/ngx_http_dav_module.html) -* [Docker image](https://hub.docker.com/r/buchgr/bazel-remote-cache/) -* [bazel-remote](https://github.com/buchgr/bazel-remote/) -* [Google Cloud Storage](https://cloud.google.com/storage) -* [Google Cloud Console](https://cloud.google.com/console) -* [Bucket locations](https://cloud.google.com/storage/docs/bucket-locations) -* [Hazelcast](https://hazelcast.com) -* [Apache httpd](http://httpd.apache.org) -* [AWS S3](https://aws.amazon.com/s3) -* [issue #3360](https://github.com/bazelbuild/bazel/issues/3360) -* [gRPC](https://grpc.io/) -* [gRPC protocol](https://github.com/bazelbuild/remote-apis/blob/main/build/bazel/remote/execution/v2/remote_execution.proto) -* [Buildbarn](https://github.com/buildbarn) -* [Buildfarm](https://github.com/bazelbuild/bazel-buildfarm) -* [BuildGrid](https://gitlab.com/BuildGrid/buildgrid) -* [issue #4558](https://github.com/bazelbuild/bazel/issues/4558) -* [Application Authentication](https://cloud.google.com/docs/authentication/production) -* [NativeLink](https://github.com/TraceMachina/nativelink) diff --git a/8.4.2/remote/creating.mdx b/8.4.2/remote/creating.mdx deleted file mode 100644 index 0e46a07..0000000 --- a/8.4.2/remote/creating.mdx +++ /dev/null @@ -1,261 +0,0 @@ ---- -title: 'Creating Persistent Workers' ---- - - - -[Persistent workers](/remote/persistent) can make your build faster. If -you have repeated actions in your build that have a high startup cost or would -benefit from cross-action caching, you may want to implement your own persistent -worker to perform these actions. - -The Bazel server communicates with the worker using `stdin`/`stdout`. It -supports the use of protocol buffers or JSON strings. - -The worker implementation has two parts: - -* The [worker](#making-worker). -* The [rule that uses the worker](#rule-uses-worker). - -## Making the worker - -A persistent worker upholds a few requirements: - -* It reads - [WorkRequests](https://github.com/bazelbuild/bazel/blob/54a547f30fd582933889b961df1d6e37a3e33d85/src/main/protobuf/worker_protocol.proto#L36) - from its `stdin`. -* It writes - [WorkResponses](https://github.com/bazelbuild/bazel/blob/54a547f30fd582933889b961df1d6e37a3e33d85/src/main/protobuf/worker_protocol.proto#L77) - (and only `WorkResponse`s) to its `stdout`. -* It accepts the `--persistent_worker` flag. The wrapper must recognize the - `--persistent_worker` command-line flag and only make itself persistent if - that flag is passed, otherwise it must do a one-shot compilation and exit. - -If your program upholds these requirements, it can be used as a persistent -worker! - -### Work requests - -A `WorkRequest` contains a list of arguments to the worker, a list of -path-digest pairs representing the inputs the worker can access (this isn’t -enforced, but you can use this info for caching), and a request id, which is 0 -for singleplex workers. - -NOTE: While the protocol buffer specification uses "snake case" (`request_id`), -the JSON protocol uses "camel case" (`requestId`). This document uses camel case -in the JSON examples, but snake case when talking about the field regardless of -protocol. 
-
-```json
-{
-  "arguments" : ["--some_argument"],
-  "inputs" : [
-    { "path": "/path/to/my/file/1", "digest": "fdk3e2ml23d"},
-    { "path": "/path/to/my/file/2", "digest": "1fwqd4qdd" }
-  ],
-  "requestId" : 12
-}
-```
-
-The optional `verbosity` field can be used to request extra debugging output
-from the worker. What to output, and how, is entirely up to the worker. Higher
-values indicate more verbose output. Passing the `--worker_verbose` flag to
-Bazel sets the `verbosity` field to 10, but smaller or larger values can be used
-manually for different amounts of output.
-
-The optional `sandbox_dir` field is used only by workers that support
-[multiplex sandboxing](/remote/multiplex).
-
-### Work responses
-
-A `WorkResponse` contains a request id, a zero or nonzero exit code, and an
-output message describing any errors encountered in processing or executing
-the request. A worker should capture the `stdout` and `stderr` of any tool it
-calls and report them through the `WorkResponse`. Writing it to the `stdout` of
-the worker process is unsafe, as it will interfere with the worker protocol.
-Writing it to the `stderr` of the worker process is safe, but the result is
-collected in a per-worker log file instead of ascribed to individual actions.
-
-```json
-{
-  "exitCode" : 1,
-  "output" : "Action failed with the following message:\nCould not find input file \"/path/to/my/file/1\"",
-  "requestId" : 12
-}
-```
-
-As per the norm for protobufs, all fields are optional. However, Bazel requires
-the `WorkRequest` and the corresponding `WorkResponse` to have the same request
-id, so the request id must be specified if it is nonzero. For example, this is a
-valid `WorkResponse`:
-
-```json
-{
-  "requestId" : 12
-}
-```
-
-A `request_id` of 0 indicates a "singleplex" request, used when this request
-cannot be processed in parallel with other requests. The server guarantees that
-a given worker receives requests with either only `request_id` 0 or only
-`request_id` greater than zero. Singleplex requests are sent in serial: the
-server doesn't send another request until it has received a
-response (except for cancel requests, see below).
-
-**Notes**
-
-* Each protocol buffer is preceded by its length in `varint` format (see
-  [`MessageLite.writeDelimitedTo()`](https://developers.google.com/protocol-buffers/docs/reference/java/com/google/protobuf/MessageLite.html#writeDelimitedTo-java.io.OutputStream-)).
-* JSON requests and responses are not preceded by a size indicator.
-* JSON requests uphold the same structure as the protobuf, but use standard
-  JSON and camel case for all field names.
-* In order to maintain the same backward and forward compatibility properties
-  as protobuf, JSON workers must tolerate unknown fields in these messages,
-  and use the protobuf defaults for missing values.
-* Bazel stores requests as protobufs and converts them to JSON using
-  [protobuf's JSON format](https://cs.opensource.google/protobuf/protobuf/+/master:java/util/src/main/java/com/google/protobuf/util/JsonFormat.java).
-
-### Cancellation
-
-Workers can optionally allow work requests to be cancelled before they finish.
-This is particularly useful in connection with dynamic execution, where local
-execution can regularly be interrupted by a faster remote execution. To allow
-cancellation, add `supports-worker-cancellation: 1` to the
-`execution-requirements` field (see below) and set the
-`--experimental_worker_cancellation` flag.
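-
-For example, a rule that wants its worker actions to be cancellable might set
-the following execution requirements (a sketch using the tags named above; the
-rest of the action definition is elided):
-
-```python
-execution_requirements = {
-    "supports-workers": "1",
-    # Opt in to receiving cancel requests from the server.
-    "supports-worker-cancellation": "1",
-}
-```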
-
-A **cancel request** is a `WorkRequest` with the `cancel` field set (and
-similarly a **cancel response** is a `WorkResponse` with the `was_cancelled`
-field set). The only other field that must be in a cancel request or cancel
-response is `request_id`, indicating which request to cancel. The `request_id`
-field will be 0 for singleplex workers or the non-0 `request_id` of a previously
-sent `WorkRequest` for multiplex workers. The server may send cancel requests
-for requests that the worker has already responded to, in which case the cancel
-request must be ignored.
-
-Each non-cancel `WorkRequest` message must be answered exactly once, whether or
-not it was cancelled. Once the server has sent a cancel request, the worker may
-respond with a `WorkResponse` with the `request_id` set and the `was_cancelled`
-field set to true. Sending a regular `WorkResponse` is also accepted, but the
-`output` and `exit_code` fields will be ignored.
-
-Once a response has been sent for a `WorkRequest`, the worker must not touch the
-files in its working directory. The server is free to clean up the files,
-including temporary files.
-
-## Making the rule that uses the worker
-
-You'll also need to create a rule that generates actions to be performed by the
-worker. Making a Starlark rule that uses a worker is just like
-[creating any other rule](https://github.com/bazelbuild/examples/tree/master/rules).
-
-In addition, the rule needs to contain a reference to the worker itself, and
-there are some requirements for the actions it produces.
-
-### Referring to the worker
-
-The rule that uses the worker needs to contain a field that refers to the worker
-itself, so you'll need to create an instance of a `*_binary` rule to define
-your worker. If your worker is called `MyWorker.Java`, this might be the
-associated rule:
-
-```python
-java_binary(
-    name = "worker",
-    srcs = ["MyWorker.Java"],
-)
-```
-
-This creates the "worker" label, which refers to the worker binary. You'll then
-define a rule that *uses* the worker. This rule should define an attribute that
-refers to the worker binary.
-
-If the worker binary you built is in a package named "work", which is at the top
-level of the build, this might be the attribute definition:
-
-```python
-"worker": attr.label(
-    default = Label("//work:worker"),
-    executable = True,
-    cfg = "exec",
-)
-```
-
-`cfg = "exec"` indicates that the worker should be built to run on your
-execution platform rather than on the target platform (i.e., the worker is used
-as a tool during the build).
-
-### Work action requirements
-
-The rule that uses the worker creates actions for the worker to perform. These
-actions have a couple of requirements.
-
-* The *"arguments"* field. This takes a list of strings, all but the last of
-  which are arguments passed to the worker upon startup. The last element in
-  the "arguments" list is a `flag-file` (@-preceded) argument. Workers read
-  the arguments from the specified flagfile on a per-WorkRequest basis. Your
-  rule can write non-startup arguments for the worker to this flagfile.
-
-* The *"execution-requirements"* field, which takes a dictionary containing
-  `"supports-workers" : "1"`, `"supports-multiplex-workers" : "1"`, or both.
-
-  The "arguments" and "execution-requirements" fields are required for all
-  actions sent to workers. Additionally, actions that should be executed by
-  JSON workers need to include `"requires-worker-protocol" : "json"` in the
-  execution requirements field.
`"requires-worker-protocol" : "proto"` is also - a valid execution requirement, though it’s not required for proto workers, - since they are the default. - - You can also set a `worker-key-mnemonic` in the execution requirements. This - may be useful if you're reusing the executable for multiple action types and - want to distinguish actions by this worker. - -* Temporary files generated in the course of the action should be saved to the - worker's directory. This enables sandboxing. - -Note: To pass an argument starting with a literal `@`, start the argument with -`@@` instead. If an argument is also an external repository label, it will not -be considered a flagfile argument. - -Assuming a rule definition with "worker" attribute described above, in addition -to a "srcs" attribute representing the inputs, an "output" attribute -representing the outputs, and an "args" attribute representing the worker -startup args, the call to `ctx.actions.run` might be: - -```python -ctx.actions.run( - inputs=ctx.files.srcs, - outputs=[ctx.outputs.output], - executable=ctx.executable.worker, - mnemonic="someMnemonic", - execution_requirements={ - "supports-workers" : "1", - "requires-worker-protocol" : "json"}, - arguments=ctx.attr.args + ["@flagfile"] - ) -``` - -For another example, see -[Implementing persistent workers](/remote/persistent#implementation). - -## Examples - -The Bazel code base uses -[Java compiler workers](https://github.com/bazelbuild/bazel/blob/a4251eab6988d6cf4f5e35681fbe2c1b0abe48ef/src/java_tools/buildjar/java/com/google/devtools/build/buildjar/BazelJavaBuilder.java), -in addition to an -[example JSON worker](https://github.com/bazelbuild/bazel/blob/c65f768fec9889bbf1ee934c61d0dc061ea54ca2/src/test/java/com/google/devtools/build/lib/worker/ExampleWorker.java) -that is used in our integration tests. - -You can use their -[scaffolding](https://github.com/bazelbuild/bazel/blob/a4251eab6988d6cf4f5e35681fbe2c1b0abe48ef/src/main/java/com/google/devtools/build/lib/worker/WorkRequestHandler.java) -to make any Java-based tool into a worker by passing in the correct callback. - -For an example of a rule that uses a worker, take a look at Bazel's -[worker integration test](https://github.com/bazelbuild/bazel/blob/22b4dbcaf05756d506de346728db3846da56b775/src/test/shell/integration/bazel_worker_test.sh#L106). - -External contributors have implemented workers in a variety of languages; take a -look at -[Polyglot implementations of Bazel persistent workers](https://github.com/Ubehebe/bazel-worker-examples). -You can -[find many more examples on GitHub](https://github.com/search?q=bazel+workrequest&type=Code)! diff --git a/8.4.2/remote/multiplex.mdx b/8.4.2/remote/multiplex.mdx deleted file mode 100644 index b4b0a0d..0000000 --- a/8.4.2/remote/multiplex.mdx +++ /dev/null @@ -1,113 +0,0 @@ ---- -title: 'Multiplex Workers (Experimental Feature)' ---- - - - -This page describes multiplex workers, how to write multiplex-compatible -rules, and workarounds for certain limitations. - -Caution: Experimental features are subject to change at any time. - -_Multiplex workers_ allow Bazel to handle multiple requests with a single worker -process. For multi-threaded workers, Bazel can use fewer resources to -achieve the same, or better performance. For example, instead of having one -worker process per worker, Bazel can have four multiplexed workers talking to -the same worker process, which can then handle requests in parallel. 
For
-languages like Java and Scala, this saves JVM warm-up time and JIT compilation
-time, and in general it allows using one shared cache between all workers of
-the same type.
-
-## Overview
-
-There are two layers between the Bazel server and the worker process. For certain
-mnemonics that can run processes in parallel, Bazel gets a `WorkerProxy` from
-the worker pool. The `WorkerProxy` forwards requests to the worker process
-sequentially along with a `request_id`; the worker process handles each request
-and sends responses to the `WorkerMultiplexer`. When the `WorkerMultiplexer`
-receives a response, it parses the `request_id` and then forwards the responses
-back to the correct `WorkerProxy`. Just as with non-multiplexed workers, all
-communication is done over standard in/out, but the tool cannot just use
-`stderr` for user-visible output ([see below](#output)).
-
-Each worker has a key. Bazel uses the key's hash code (composed of environment
-variables, the execution root, and the mnemonic) to determine which
-`WorkerMultiplexer` to use. `WorkerProxy`s communicate with the same
-`WorkerMultiplexer` if they have the same hash code. Therefore, assuming
-environment variables and the execution root are the same in a single Bazel
-invocation, each unique mnemonic can only have one `WorkerMultiplexer` and one
-worker process. The total number of workers, including regular workers and
-`WorkerProxy`s, is still limited by `--worker_max_instances`.
-
-## Writing multiplex-compatible rules
-
-The rule's worker process should be multi-threaded to take advantage of
-multiplex workers. Protobuf allows a ruleset to parse a single request even
-though there might be multiple requests piling up in the stream. Whenever the
-worker process parses a request from the stream, it should handle the request in
-a new thread. Because different threads could complete and write to the stream at
-the same time, the worker process needs to make sure the responses are written
-atomically (messages don't overlap). Responses must contain the
-`request_id` of the request they're handling.
-
-### Handling multiplex output
-
-Multiplex workers need to be more careful about handling their output than
-singleplex workers. Anything sent to `stderr` will go into a single log file
-shared among all `WorkerProxy`s of the same type,
-randomly interleaved between concurrent requests. While redirecting `stdout`
-into `stderr` is a good idea, do not collect that output into the `output`
-field of `WorkResponse`, as that could show the user mangled pieces of output.
-If your tool only sends user-oriented output to `stdout` or `stderr`, you will
-need to change that behavior before you can enable multiplex workers.
-
-## Enabling multiplex workers
-
-Multiplex workers are not enabled by default. A ruleset can turn on multiplex
-workers by using the `supports-multiplex-workers` tag in the
-`execution_requirements` of an action (just like the `supports-workers` tag
-enables regular workers), as shown in the sketch below. As is the case when
-using regular workers, a worker
-strategy needs to be specified, either at the ruleset level (for example,
-`--strategy=[some_mnemonic]=worker`) or generally at the strategy level (for
-example, `--dynamic_local_strategy=worker,standalone`). No additional flags are
-necessary, and `supports-multiplex-workers` takes precedence over
-`supports-workers`, if both are set. You can turn off multiplex workers
-globally by passing `--noworker_multiplex`.
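-
-For example, an action's execution requirements might contain the following
-fragment (a sketch; as noted above, `supports-multiplex-workers` takes
-precedence if `supports-workers` is also set):
-
-```python
-execution_requirements = {
-    "supports-workers": "1",
-    # Let Bazel multiplex parallel requests onto one worker process.
-    "supports-multiplex-workers": "1",
-}
-```
-
-combined with a strategy flag such as `--strategy=SomeMnemonic=worker`, where
-`SomeMnemonic` is a placeholder for the action's mnemonic.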
- -A ruleset is encouraged to use multiplex workers if possible, to reduce memory -pressure and improve performance. However, multiplex workers are not currently -compatible with [dynamic execution](/remote/dynamic) unless they -implement multiplex sandboxing. Attempting to run non-sandboxed multiplex -workers with dynamic execution will silently use sandboxed -singleplex workers instead. - -## Multiplex sandboxing - -Multiplex workers can be sandboxed by adding explicit support for it in the -worker implementations. While singleplex worker sandboxing can be done by -running each worker process in its own sandbox, multiplex workers share the -process working directory between multiple parallel requests. To allow -sandboxing of multiplex workers, the worker must support reading from and -writing to a subdirectory specified in each request, instead of directly in -its working directory. - -To support multiplex sandboxing, the worker must use the `sandbox_dir` field -from the `WorkRequest` and use that as a prefix for all file reads and writes. -While the `arguments` and `inputs` fields remain unchanged from an unsandboxed -request, the actual inputs are relative to the `sandbox_dir`. The worker must -translate file paths found in `arguments` and `inputs` to read from this -modified path, and must also write all outputs relative to the `sandbox_dir`. -This includes paths such as '.', as well as paths found in files specified -in the arguments (such as ["argfile"](https://docs.oracle.com/javase/7/docs/technotes/tools/windows/javac.html#commandlineargfile) arguments). - -Once a worker supports multiplex sandboxing, the ruleset can declare this -support by adding `supports-multiplex-sandboxing` to the -`execution_requirements` of an action. Bazel will then use multiplex sandboxing -if the `--experimental_worker_multiplex_sandboxing` flag is passed, or if -the worker is used with dynamic execution. - -The worker files of a sandboxed multiplex worker are still relative to the -working directory of the worker process. Thus, if a file is -used both for running the worker and as an input, it must be specified both as -an input in the flagfile argument as well as in `tools`, `executable`, or -`runfiles`. diff --git a/8.4.2/remote/output-directories.mdx b/8.4.2/remote/output-directories.mdx deleted file mode 100644 index 2f9d99e..0000000 --- a/8.4.2/remote/output-directories.mdx +++ /dev/null @@ -1,145 +0,0 @@ ---- -title: 'Output Directory Layout' ---- - - - -This page covers requirements and layout for output directories. - -## Requirements - -Requirements for an output directory layout: - -* Doesn't collide if multiple users are building on the same box. -* Supports building in multiple workspaces at the same time. -* Supports building for multiple target configurations in the same workspace. -* Doesn't collide with any other tools. -* Is easy to access. -* Is easy to clean, even selectively. -* Is unambiguous, even if the user relies on symbolic links when changing into - their client directory. -* All the build state per user should be underneath one directory ("I'd like to - clean all the .o files from all my clients.") - -## Current layout - -The solution that's currently implemented: - -* Bazel must be invoked from a directory containing a repo boundary file, or a - subdirectory thereof. In other words, Bazel must be invoked from inside a - [repository](../external/overview#repository). Otherwise, an error is - reported. 
-* The _outputRoot_ directory defaults to `~/.cache/bazel` on Linux, - `/private/var/tmp` on macOS, and on Windows it defaults to `%HOME%` if - set, else `%USERPROFILE%` if set, else the result of calling - `SHGetKnownFolderPath()` with the `FOLDERID_Profile` flag set. If the - environment variable `$XDG_CACHE_HOME` is set on either Linux or - macOS, the value `${XDG_CACHE_HOME}/bazel` will override the default. - If the environment variable `$TEST_TMPDIR` is set, as in a test of Bazel - itself, then that value overrides any defaults. -* The Bazel user's build state is located beneath `outputRoot/_bazel_$USER`. - This is called the _outputUserRoot_ directory. -* Beneath the `outputUserRoot` directory there is an `install` directory, and in - it is an `installBase` directory whose name is the MD5 hash of the Bazel - installation manifest. -* Beneath the `outputUserRoot` directory, an `outputBase` directory - is also created whose name is the MD5 hash of the path name of the workspace - root. So, for example, if Bazel is running in the workspace root - `/home/user/src/my-project` (or in a directory symlinked to that one), then - an output base directory is created called: - `/home/user/.cache/bazel/_bazel_user/7ffd56a6e4cb724ea575aba15733d113`. You - can also run `echo -n $(pwd) | md5sum` in the workspace root to get the MD5. -* You can use Bazel's `--output_base` startup option to override the default - output base directory. For example, - `bazel --output_base=/tmp/bazel/output build x/y:z`. -* You can also use Bazel's `--output_user_root` startup option to override the - default install base and output base directories. For example: - `bazel --output_user_root=/tmp/bazel build x/y:z`. - -The symlinks for "bazel-<workspace-name>", "bazel-out", "bazel-testlogs", -and "bazel-bin" are put in the workspace directory; these symlinks point to some -directories inside a target-specific directory inside the output directory. -These symlinks are only for the user's convenience, as Bazel itself does not -use them. Also, this is done only if the workspace root is writable. - -## Layout diagram - -The directories are laid out as follows: - -``` -<workspace-name>/ <== The workspace root - bazel-my-project => <..._main> <== Symlink to execRoot - bazel-out => <...bazel-out> <== Convenience symlink to outputPath - bazel-bin => <...bin> <== Convenience symlink to most recent written bin dir $(BINDIR) - bazel-testlogs => <...testlogs> <== Convenience symlink to the test logs directory - -/home/user/.cache/bazel/ <== Root for all Bazel output on a machine: outputRoot - _bazel_$USER/ <== Top level directory for a given user depends on the user name: - outputUserRoot - install/ - fba9a2c87ee9589d72889caf082f1029/ <== Hash of the Bazel install manifest: installBase - _embedded_binaries/ <== Contains binaries and scripts unpacked from the data section of - the bazel executable on first run (such as helper scripts and the - main Java file BazelServer_deploy.jar) - 7ffd56a6e4cb724ea575aba15733d113/ <== Hash of the client's workspace root (such as - /home/user/src/my-project): outputBase - action_cache/ <== Action cache directory hierarchy - This contains the persistent record of the file - metadata (timestamps, and perhaps eventually also MD5 - sums) used by the FilesystemValueChecker. - command.log <== A copy of the stdout/stderr output from the most - recent bazel command. - external/ <== The directory that remote repositories are - downloaded/symlinked into. 
- server/ <== The Bazel server puts all server-related files (such - as socket file, logs, etc) here. - jvm.out <== The debugging output for the server. - execroot/ <== The working directory for all actions. For special - cases such as sandboxing and remote execution, the - actions run in a directory that mimics execroot. - Implementation details, such as where the directories - are created, are intentionally hidden from the action. - Every action can access its inputs and outputs relative - to the execroot directory. - _main/ <== Working tree for the Bazel build & root of symlink forest: execRoot - _bin/ <== Helper tools are linked from or copied to here. - - bazel-out/ <== All actual output of the build is under here: outputPath - _tmp/actions/ <== Action output directory. This contains a file with the - stdout/stderr for every action from the most recent - bazel run that produced output. - local_linux-fastbuild/ <== one subdirectory per unique target BuildConfiguration instance; - this is currently encoded - bin/ <== Bazel outputs binaries for target configuration here: $(BINDIR) - foo/bar/_objs/baz/ <== Object files for a cc_* rule named //foo/bar:baz - foo/bar/baz1.o <== Object files from source //foo/bar:baz1.cc - other_package/other.o <== Object files from source //other_package:other.cc - foo/bar/baz <== foo/bar/baz might be the artifact generated by a cc_binary named - //foo/bar:baz - foo/bar/baz.runfiles/ <== The runfiles symlink farm for the //foo/bar:baz executable. - MANIFEST - _main/ - ... - genfiles/ <== Bazel puts generated source for the target configuration here: - $(GENDIR) - foo/bar.h such as foo/bar.h might be a headerfile generated by //foo:bargen - testlogs/ <== Bazel internal test runner puts test log files here - foo/bartest.log such as foo/bar.log might be an output of the //foo:bartest test with - foo/bartest.status foo/bartest.status containing exit status of the test (such as - PASSED or FAILED (Exit 1), etc) - include/ <== a tree with include symlinks, generated as needed. The - bazel-include symlinks point to here. This is used for - linkstamp stuff, etc. - host/ <== BuildConfiguration for build host (user's workstation), for - building prerequisite tools, that will be used in later stages - of the build (ex: Protocol Compiler) - <packages>/ <== Packages referenced in the build appear as if under a regular workspace -``` - -The layout of the \*.runfiles directories is documented in more detail in the places pointed to by RunfilesSupport. - -## `bazel clean` - -`bazel clean` does an `rm -rf` on the `outputPath` and the `action_cache` -directory. It also removes the workspace symlinks. The `--expunge` option -will clean the entire outputBase. diff --git a/8.4.2/remote/persistent.mdx b/8.4.2/remote/persistent.mdx deleted file mode 100644 index 1a56946..0000000 --- a/8.4.2/remote/persistent.mdx +++ /dev/null @@ -1,265 +0,0 @@ ---- -title: 'Persistent Workers' ---- - - - -This page covers how to use persistent workers, the benefits, requirements, and -how workers affect sandboxing. - -A persistent worker is a long-running process started by the Bazel server, which -functions as a *wrapper* around the actual *tool* (typically a compiler), or is -the *tool* itself. In order to benefit from persistent workers, the tool must -support doing a sequence of compilations, and the wrapper needs to translate -between the tool's API and the request/response format described below. 
The same
-worker might be called with and without the `--persistent_worker` flag in the
-same build, and is responsible for appropriately starting and talking to the
-tool, as well as shutting down workers on exit. Each worker instance is assigned
-(but not chrooted to) a separate working directory under
-`/bazel-workers`.
-
-Using persistent workers is an
-[execution strategy](/docs/user-manual#execution-strategy) that decreases
-start-up overhead, allows more JIT compilation, and enables caching of, for
-example, the abstract syntax trees in the action execution. This strategy
-achieves these improvements by sending multiple requests to a long-running
-process.
-
-Persistent workers are implemented for multiple languages, including Java,
-[Scala](https://github.com/bazelbuild/rules_scala),
-[Kotlin](https://github.com/bazelbuild/rules_kotlin), and more.
-
-Programs using a NodeJS runtime can use the
-[@bazel/worker](https://www.npmjs.com/package/@bazel/worker) helper library to
-implement the worker protocol.
-
-## Using persistent workers
-
-[Bazel 0.27 and higher](https://blog.bazel.build/2019/06/19/list-strategy.html)
-uses persistent workers by default when executing builds, though remote
-execution takes precedence. For actions that do not support persistent workers,
-Bazel falls back to starting a tool instance for each action. You can explicitly
-set your build to use persistent workers by setting the `worker`
-[strategy](/docs/user-manual#execution-strategy) for the applicable tool
-mnemonics. As a best practice, this example includes specifying `local` as a
-fallback to the `worker` strategy:
-
-```posix-terminal
-bazel build //{{ '' }}my:target{{ '' }} --strategy=Javac=worker,local
-```
-
-Using the worker strategy instead of the local strategy can boost compilation
-speed significantly, depending on implementation. For Java, builds can be 2–4
-times faster, sometimes more for incremental compilation. Compiling Bazel is
-about 2.5 times as fast with workers. For more details, see the
-"[Choosing number of workers](#number-of-workers)" section.
-
-If you also have a remote build environment that matches your local build
-environment, you can use the experimental
-[*dynamic* strategy](https://blog.bazel.build/2019/02/01/dynamic-spawn-scheduler.html),
-which races a remote execution and a worker execution. To enable the dynamic
-strategy, pass the
-[--experimental_spawn_scheduler](/reference/command-line-reference#flag--experimental_spawn_scheduler)
-flag. This strategy automatically enables workers, so there is no need to
-specify the `worker` strategy, but you can still use `local` or `sandboxed` as
-fallbacks.
-
-## Choosing number of workers
-
-The default number of worker instances per mnemonic is 4, but can be adjusted
-with the
-[`worker_max_instances`](/reference/command-line-reference#flag--worker_max_instances)
-flag. There is a trade-off between making good use of the available CPUs and the
-amount of JIT compilation and cache hits you get. With more workers, more
-targets will pay start-up costs of running non-JITted code and hitting cold
-caches. If you have a small number of targets to build, a single worker may give
-the best trade-off between compilation speed and resource usage (for example,
-see [issue #8586](https://github.com/bazelbuild/bazel/issues/8586)).
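-
-If profiling suggests that fewer instances suit your build, you can cap workers
-per mnemonic. A sketch, assuming the flag's `[mnemonic=]value` form (here
-capping Javac workers at two instances):
-
-```posix-terminal
-bazel build //{{ '' }}my:target{{ '' }} --worker_max_instances=Javac=2
-```
-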
-The `worker_max_instances` flag sets the maximum number of worker instances per
-mnemonic and flag set (see below), so in a mixed system you could end up using
-quite a lot of memory if you keep the default value. For incremental builds the
-benefit of multiple worker instances is even smaller.
-
-This graph shows the from-scratch compilation times for Bazel (target
-`//src:bazel`) on a 6-core hyper-threaded Intel Xeon 3.5 GHz Linux workstation
-with 64 GB of RAM. For each worker configuration, five clean builds are run and
-the average of the last four is taken.
-
-![Graph of performance improvements of clean builds](/docs/images/workers-clean-chart.png "Performance improvements of clean builds")
-
-**Figure 1.** Graph of performance improvements of clean builds.
-
-For this configuration, two workers give the fastest compile, though at only 14%
-improvement compared to one worker. One worker is a good option if you want to
-use less memory.
-
-Incremental compilation typically benefits even more. Clean builds are
-relatively rare, but changing a single file between compiles is common, in
-particular in test-driven development. The above example also includes some non-Java
-packaging actions that can overshadow the incremental compile time.
-
-Recompiling the Java sources only
-(`//src/main/java/com/google/devtools/build/lib/bazel:BazelServer_deploy.jar`)
-after changing an internal string constant in
-[AbstractContainerizingSandboxedSpawn.java](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/sandbox/AbstractContainerizingSandboxedSpawn.java)
-gives a 3x speed-up (average of 20 incremental builds with one warmup build
-discarded):
-
-![Graph of performance improvements of incremental builds](/docs/images/workers-incremental-chart.png "Performance improvements of incremental builds")
-
-**Figure 2.** Graph of performance improvements of incremental builds.
-
-The speed-up depends on the change being made. A speed-up by a factor of 6 was
-measured in the above situation when a commonly used constant was changed.
-
-## Modifying persistent workers
-
-You can pass the
-[`--worker_extra_flag`](/reference/command-line-reference#flag--worker_extra_flag)
-flag to specify start-up flags to workers, keyed by mnemonic. For instance,
-passing `--worker_extra_flag=javac=--debug` turns on debugging for Javac only.
-Only one worker flag can be set per use of this flag, and only for one mnemonic.
-Workers are not just created separately for each mnemonic, but also for
-variations in their start-up flags. Each combination of mnemonic and start-up
-flags is combined into a `WorkerKey`, and for each `WorkerKey` up to
-`worker_max_instances` workers may be created. See the next section for how the
-action configuration can also specify set-up flags.
-
-Passing the
-[`--worker_sandboxing`](/reference/command-line-reference#flag--worker_sandboxing)
-flag makes each worker request use a separate sandbox directory for all its
-inputs. Setting up the [sandbox](/docs/sandboxing) takes some extra time,
-especially on macOS, but gives a better correctness guarantee.
-
-The
-[`--worker_quit_after_build`](/reference/command-line-reference#flag--worker_quit_after_build)
-flag is mainly useful for debugging and profiling. This flag forces all workers
-to quit once a build is done. You can also pass
-[`--worker_verbose`](/reference/command-line-reference#flag--worker_verbose) to
-get more output about what the workers are doing.
This flag is reflected in the
-`verbosity` field in `WorkRequest`, allowing worker implementations to also be
-more verbose.
-
-Workers store their logs in the `/bazel-workers` directory, for
-example
-`/tmp/_bazel_larsrc/191013354bebe14fdddae77f2679c3ef/bazel-workers/worker-1-Javac.log`.
-The file name includes the worker id and the mnemonic. Since there can be more
-than one `WorkerKey` per mnemonic, you may see more than `worker_max_instances`
-log files for a given mnemonic.
-
-For Android builds, see details at the
-[Android Build Performance page](/docs/android-build-performance).
-
-## Implementing persistent workers
-
-See the [creating persistent workers](/remote/creating) page for more
-information on how to make a worker.
-
-This example shows a Starlark configuration for a worker that uses JSON:
-
-```python
-args_file = ctx.actions.declare_file(ctx.label.name + "_args_file")
-ctx.actions.write(
-    output = args_file,
-    # Write one argument per line; join the input files' paths, not the
-    # File objects themselves.
-    content = "\n".join(["-g", "-source", "1.5"] + [f.path for f in ctx.files.srcs]),
-)
-ctx.actions.run(
-    mnemonic = "SomeCompiler",
-    executable = "bin/some_compiler_wrapper",
-    inputs = inputs,
-    outputs = outputs,
-    arguments = ["-max_mem=4G", "@%s" % args_file.path],
-    execution_requirements = {
-        "supports-workers" : "1", "requires-worker-protocol" : "json" }
-)
-```
-
-With this definition, the first use of this action would start with executing
-the command line `bin/some_compiler_wrapper -max_mem=4G --persistent_worker`. A request
-to compile `Foo.java` would then look like:
-
-NOTE: While the protocol buffer specification uses "snake case" (`request_id`),
-the JSON protocol uses "camel case" (`requestId`). In this document, we will use
-camel case in the JSON examples, but snake case when talking about the field
-regardless of protocol.
-
-```json
-{
-  "arguments": [ "-g", "-source", "1.5", "Foo.java" ],
-  "inputs": [
-    { "path": "symlinkfarm/input1", "digest": "d49a..." },
-    { "path": "symlinkfarm/input2", "digest": "093d..." }
-  ]
-}
-```
-
-The worker receives this on `stdin` in newline-delimited JSON format (because
-`requires-worker-protocol` is set to JSON). The worker then performs the action,
-and sends a JSON-formatted `WorkResponse` to Bazel on its stdout. Bazel then
-parses this response and manually converts it to a `WorkResponse` proto. To
-communicate with the associated worker using binary-encoded protobuf instead of
-JSON, `requires-worker-protocol` would be set to `proto`, like this:
-
-```python
-  execution_requirements = {
-    "supports-workers" : "1",
-    "requires-worker-protocol" : "proto"
-  }
-```
-
-If you do not include `requires-worker-protocol` in the execution requirements,
-Bazel will default the worker communication to use protobuf.
-
-Bazel derives the `WorkerKey` from the mnemonic and the shared flags, so if this
-configuration allowed changing the `max_mem` parameter, a separate worker would
-be spawned for each value used. This can lead to excessive memory consumption if
-too many variations are used.
-
-Each worker can currently only process one request at a time. The experimental
-[multiplex workers](/remote/multiplex) feature allows using multiple
-threads, if the underlying tool is multithreaded and the wrapper is set up to
-understand this.
-
-In
-[this GitHub repo](https://github.com/Ubehebe/bazel-worker-examples),
-you can see example worker wrappers written in Java as well as in Python.
If you
-are working in JavaScript or TypeScript, the
-[@bazel/worker package](https://www.npmjs.com/package/@bazel/worker)
-and
-[nodejs worker example](https://github.com/bazelbuild/rules_nodejs/tree/stable/examples/worker)
-might be helpful.
-
-## How do workers affect sandboxing?
-
-Using the `worker` strategy by default does not run the action in a
-[sandbox](/docs/sandboxing), similar to the `local` strategy. You can set the
-`--worker_sandboxing` flag to run all workers inside sandboxes, making sure each
-execution of the tool only sees the input files it's supposed to have. The tool
-may still leak information between requests internally, for instance through a
-cache. Using the `dynamic` strategy
-[requires workers to be sandboxed](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/exec/SpawnStrategyRegistry.java).
-
-To allow correct use of compiler caches with workers, a digest is passed along
-with each input file. Thus the compiler or the wrapper can check if the input is
-still valid without having to read the file.
-
-Even when using the input digests to guard against unwanted caching, sandboxed
-workers offer less strict sandboxing than a pure sandbox, because the tool may
-keep other internal state that has been affected by previous requests.
-
-Multiplex workers can only be sandboxed if the worker implementation supports it,
-and this sandboxing must be separately enabled with the
-`--experimental_worker_multiplex_sandboxing` flag. See more details in
-[the design doc](https://docs.google.com/document/d/1ncLW0hz6uDhNvci1dpzfEoifwTiNTqiBEm1vi-bIIRM/edit).
-
-## Further reading
-
-For more information on persistent workers, see:
-
-* [Original persistent workers blog post](https://blog.bazel.build/2015/12/10/java-workers.html)
-* [Haskell implementation description](https://www.tweag.io/blog/2019-09-25-bazel-ghc-persistent-worker-internship/)
-* [Blog post by Mike Morearty](https://medium.com/@mmorearty/how-to-create-a-persistent-worker-for-bazel-7738bba2cabb)
-* [Front End Development with Bazel: Angular/TypeScript and Persistent Workers
-  w/ Asana](https://www.youtube.com/watch?v=0pgERydGyqo)
-* [Bazel strategies explained](https://jmmv.dev/2019/12/bazel-strategies.html)
-* [Informative worker strategy discussion on the bazel-discuss mailing list](https://groups.google.com/forum/#!msg/bazel-discuss/oAEnuhYOPm8/ol7hf4KWJgAJ)
diff --git a/8.4.2/remote/rbe.mdx b/8.4.2/remote/rbe.mdx
deleted file mode 100644
index 75d4a15..0000000
--- a/8.4.2/remote/rbe.mdx
+++ /dev/null
@@ -1,33 +0,0 @@
----
-title: 'Remote Execution Overview'
----
-
-
-
-This page covers the benefits, requirements, and options for running Bazel
-with remote execution.
-
-By default, Bazel executes builds and tests on your local machine. Remote
-execution of a Bazel build allows you to distribute build and test actions
-across multiple machines, such as a datacenter.
-
-Remote execution provides the following benefits:
-
-* Faster build and test execution through scaling of nodes available
-  for parallel actions
-* A consistent execution environment for a development team
-* Reuse of build outputs across a development team
-
-Bazel uses an open-source
-[gRPC protocol](https://github.com/bazelbuild/remote-apis)
-to allow for remote execution and remote caching.
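-
-As a rough sketch, pointing a build at a gRPC remote execution service usually
-takes a couple of flags (the endpoint and instance name here are placeholders;
-your service's documentation has the real values):
-
-```posix-terminal
-bazel build //{{ '' }}my:target{{ '' }} --remote_executor=grpcs://remote.example.com --remote_instance_name=my-instance
-```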
- -For a list of commercially supported remote execution services as well as -self-service tools, see -[Remote Execution Services](https://www.bazel.build/remote-execution-services.html) - -## Requirements - -Remote execution of Bazel builds imposes a set of mandatory configuration -constraints on the build. For more information, see -[Adapting Bazel Rules for Remote Execution](/remote/rules). diff --git a/8.4.2/remote/rules.mdx b/8.4.2/remote/rules.mdx deleted file mode 100644 index 340ab02..0000000 --- a/8.4.2/remote/rules.mdx +++ /dev/null @@ -1,180 +0,0 @@ ---- -title: 'Adapting Bazel Rules for Remote Execution' ---- - - - -This page is intended for Bazel users writing custom build and test rules -who want to understand the requirements for Bazel rules in the context of -remote execution. - -Remote execution allows Bazel to execute actions on a separate platform, such as -a datacenter. Bazel uses a -[gRPC protocol](https://github.com/bazelbuild/remote-apis/blob/main/build/bazel/remote/execution/v2/remote_execution.proto) -for its remote execution. You can try remote execution with -[bazel-buildfarm](https://github.com/bazelbuild/bazel-buildfarm), -an open-source project that aims to provide a distributed remote execution -platform. - -This page uses the following terminology when referring to different -environment types or *platforms*: - -* **Host platform** - where Bazel runs. -* **Execution platform** - where Bazel actions run. -* **Target platform** - where the build outputs (and some actions) run. - -## Overview - -When configuring a Bazel build for remote execution, you must follow the -guidelines described in this page to ensure the build executes remotely -error-free. This is due to the nature of remote execution, namely: - -* **Isolated build actions.** Build tools do not retain state and dependencies - cannot leak between them. - -* **Diverse execution environments.** Local build configuration is not always - suitable for remote execution environments. - -This page describes the issues that can arise when implementing custom Bazel -build and test rules for remote execution and how to avoid them. It covers the -following topics: - -* [Invoking build tools through toolchain rules](#toolchain-rules) -* [Managing implicit dependencies](#manage-dependencies) -* [Managing platform-dependent binaries](#manage-binaries) -* [Managing configure-style WORKSPACE rules](#manage-workspace-rules) - -## Invoking build tools through toolchain rules - -A Bazel toolchain rule is a configuration provider that tells a build rule what -build tools, such as compilers and linkers, to use and how to configure them -using parameters defined by the rule's creator. A toolchain rule allows build -and test rules to invoke build tools in a predictable, preconfigured manner -that's compatible with remote execution. For example, use a toolchain rule -instead of invoking build tools via the `PATH`, `JAVA_HOME`, or other local -variables that may not be set to equivalent values (or at all) in the remote -execution environment. 
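-
-As an illustration, a rule implementation can resolve its tool through a
-registered toolchain instead of reading `PATH`. This is only a sketch: the
-toolchain type label and the `compiler` field are hypothetical and would be
-defined by your own toolchain rule:
-
-```python
-def _my_compile_impl(ctx):
-    # Resolved by Bazel for the action's execution platform, not taken
-    # from the host environment.
-    toolchain = ctx.toolchains["//my/tools:toolchain_type"]
-    ctx.actions.run(
-        executable = toolchain.compiler,  # hypothetical field on the ToolchainInfo
-        inputs = ctx.files.srcs,
-        outputs = [ctx.outputs.out],
-        arguments = [f.path for f in ctx.files.srcs],
-    )
-```
-
-The rule declaration would list the same label in its `toolchains` attribute
-so that Bazel selects a concrete toolchain for the execution platform.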
-
-Toolchain rules currently exist for Bazel build and test rules for
-[Scala](https://github.com/bazelbuild/rules_scala/blob/master/scala/scala_toolchain.bzl),
-[Rust](https://github.com/bazelbuild/rules_rust/blob/main/rust/toolchain.bzl),
-and [Go](https://github.com/bazelbuild/rules_go/blob/master/go/toolchains.rst),
-and new toolchain rules are under way for other languages and tools such as
-[bash](https://docs.google.com/document/d/e/2PACX-1vRCSB_n3vctL6bKiPkIa_RN_ybzoAccSe0ic8mxdFNZGNBJ3QGhcKjsL7YKf-ngVyjRZwCmhi_5KhcX/pub).
-If a toolchain rule does not exist for the tool your rule uses, consider
-[creating a toolchain rule](/extending/toolchains#creating-a-toolchain-rule).
-
-## Managing implicit dependencies
-
-If a build tool can access dependencies across build actions, those actions will
-fail when remotely executed because each remote build action is executed
-separately from others. Some build tools retain state across build actions and
-access dependencies that have not been explicitly included in the tool
-invocation, which will cause remotely executed build actions to fail.
-
-For example, when Bazel instructs a stateful compiler to locally build _foo_,
-the compiler retains references to foo's build outputs. When Bazel then
-instructs the compiler to build _bar_, which depends on _foo_, without
-explicitly stating that dependency in the BUILD file for inclusion in the
-compiler invocation, the action executes successfully as long as the same
-compiler instance executes for both actions (as is typical for local execution).
-However, since in a remote execution scenario each build action executes a
-separate compiler instance, compiler state and _bar_'s implicit dependency on
-_foo_ will be lost and the build will fail.
-
-To help detect and eliminate these dependency problems, Bazel 0.14.1 offers the
-local Docker sandbox, which has the same restrictions for dependencies as remote
-execution. Use the sandbox to prepare your build for remote execution by
-identifying and resolving dependency-related build errors. See [Troubleshooting Bazel Remote Execution with Docker Sandbox](/remote/sandbox)
-for more information.
-
-## Managing platform-dependent binaries
-
-Typically, a binary built on the host platform cannot safely execute on an
-arbitrary remote execution platform due to potentially mismatched dependencies.
-For example, the SingleJar binary supplied with Bazel targets the host platform.
-However, for remote execution, SingleJar must be compiled as part of the process
-of building your code so that it targets the remote execution platform. (See the
-[target selection logic](https://github.com/bazelbuild/bazel/blob/130aeadfd660336572c3da397f1f107f0c89aa8d/tools/jdk/BUILD#L115).)
-
-Do not ship binaries of build tools required by your build with your source code
-unless you are sure they will safely run on your execution platform. Instead, do
-one of the following:
-
-* Ship or externally reference the source code for the tool so that it can be
-  built for the remote execution platform.
-
-* Pre-install the tool into the remote execution environment (for example, a
-  toolchain container) if it's stable enough and use toolchain rules to run it
-  in your build.
-
-## Managing configure-style WORKSPACE rules
-
-Bazel's `WORKSPACE` rules can be used for probing the host platform for tools
-and libraries required by the build, which, for local builds, is also Bazel's
-execution platform.
-
-## Managing platform-dependent binaries
-
-Typically, a binary built on the host platform cannot safely execute on an
-arbitrary remote execution platform due to potentially mismatched dependencies.
-For example, the SingleJar binary supplied with Bazel targets the host platform.
-However, for remote execution, SingleJar must be compiled as part of the process
-of building your code so that it targets the remote execution platform. (See the
-[target selection logic](https://github.com/bazelbuild/bazel/blob/130aeadfd660336572c3da397f1f107f0c89aa8d/tools/jdk/BUILD#L115).)
-
-Do not ship binaries of build tools required by your build with your source code
-unless you are sure they will safely run on your execution platform. Instead, do
-one of the following:
-
-* Ship or externally reference the source code for the tool so that it can be
-  built for the remote execution platform.
-
-* Pre-install the tool into the remote execution environment (for example, a
-  toolchain container) if it's stable enough and use toolchain rules to run it
-  in your build.
-
-## Managing configure-style WORKSPACE rules
-
-Bazel's `WORKSPACE` rules can be used for probing the host platform for tools
-and libraries required by the build, which, for local builds, is also Bazel's
-execution platform. If the build explicitly depends on local build tools and
-artifacts, it will fail during remote execution if the remote execution platform
-is not identical to the host platform.
-
-The following actions performed by `WORKSPACE` rules are not compatible with
-remote execution:
-
-* **Building binaries.** Executing compilation actions in `WORKSPACE` rules
-  results in binaries that are incompatible with the remote execution platform
-  if different from the host platform.
-
-* **Installing `pip` packages.** `pip` packages installed via `WORKSPACE`
-  rules require that their dependencies be pre-installed on the host platform.
-  Such packages, built specifically for the host platform, will be
-  incompatible with the remote execution platform if different from the host
-  platform.
-
-* **Symlinking to local tools or artifacts.** Symlinks to tools or libraries
-  installed on the host platform created via `WORKSPACE` rules will cause the
-  build to fail on the remote execution platform as Bazel will not be able to
-  locate them. Instead, create symlinks using standard build actions so that
-  the symlinked tools and libraries are accessible from Bazel's `runfiles`
-  tree. Do not use [`repository_ctx.symlink`](/rules/lib/builtins/repository_ctx#symlink)
-  to symlink target files outside of the external repo directory.
-
-* **Mutating the host platform.** Avoid creating files outside of the Bazel
-  `runfiles` tree, creating environment variables, and similar actions, as
-  they may behave unexpectedly on the remote execution platform.
-
-To help find potential non-hermetic behavior, you can use [Workspace rules log](/remote/workspace).
-
-If an external dependency executes specific operations dependent on the host
-platform, you should split those operations between `WORKSPACE` and build
-rules as follows:
-
-* **Platform inspection and dependency enumeration.** These operations are
-  safe to execute locally via `WORKSPACE` rules, which can check which
-  libraries are installed, download packages that must be built, and prepare
-  required artifacts for compilation. For remote execution, these rules must
-  also support using pre-checked artifacts to provide the information that
-  would normally be obtained during host platform inspection. Pre-checked
-  artifacts allow Bazel to describe dependencies as if they were local. Use
-  conditional statements or the `--override_repository` flag for this (see
-  the sketch after this list).
-
-* **Generating or compiling target-specific artifacts and platform mutation.**
-  These operations must be executed via regular build rules. Actions that
-  produce target-specific artifacts for external dependencies must execute
-  during the build.
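-
-The following sketch shows what this split can look like on the `WORKSPACE`
-side. The rule name, attribute, environment variable, and helper function are
-all hypothetical; the point is the conditional between host inspection and
-pre-checked artifacts:
-
-```starlark
-def _build_file_for(version_output):
-    # Hypothetical helper: render a BUILD file from probed host information.
-    return "# generated for: " + version_output.strip()
-
-def _my_config_impl(repository_ctx):
-    if repository_ctx.os.environ.get("MY_LANG_USE_PRECHECKED") == "1":
-        # Remote-friendly path: use a BUILD file checked into the source repo.
-        content = repository_ctx.read(repository_ctx.attr.prechecked_build_file)
-        repository_ctx.file("BUILD", content)
-    else:
-        # Local path: inspect the host to generate the equivalent BUILD file.
-        result = repository_ctx.execute(["gcc", "--version"])
-        repository_ctx.file("BUILD", _build_file_for(result.stdout))
-
-my_config = repository_rule(
-    implementation = _my_config_impl,
-    attrs = {"prechecked_build_file": attr.label(allow_single_file = True)},
-    environ = ["MY_LANG_USE_PRECHECKED"],
-)
-```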
-
-To more easily generate pre-checked artifacts for remote execution, you can use
-`WORKSPACE` rules to emit generated files. You can run those rules on each new
-execution environment, such as inside each toolchain container, and check the
-outputs of your remote execution build in to your source repo to reference.
-
-For example, for Tensorflow's rules for [`cuda`](https://github.com/tensorflow/tensorflow/blob/master/third_party/gpus/cuda_configure.bzl)
-and [`python`](https://github.com/tensorflow/tensorflow/blob/master/third_party/py/python_configure.bzl),
-the `WORKSPACE` rules produce the following [`BUILD` files](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/third_party/toolchains/cpus/py).
-For local execution, files produced by checking the host environment are used.
-For remote execution, a [conditional statement](https://github.com/tensorflow/tensorflow/blob/master/third_party/py/python_configure.bzl#L304)
-on an environment variable allows the rule to use files that are checked into
-the repo.
-
-The `BUILD` files declare [`genrules`](https://github.com/tensorflow/tensorflow/blob/master/third_party/py/python_configure.bzl#L84)
-that can run both locally and remotely, and perform the necessary processing
-that was previously done via `repository_ctx.symlink` as shown [here](https://github.com/tensorflow/tensorflow/blob/d1ba01f81d8fa1d0171ba9ce871599063d5c7eb9/third_party/gpus/cuda_configure.bzl#L730).
diff --git a/8.4.2/remote/sandbox.mdx b/8.4.2/remote/sandbox.mdx
deleted file mode 100644
index 5e2e823..0000000
--- a/8.4.2/remote/sandbox.mdx
+++ /dev/null
@@ -1,259 +0,0 @@
----
-title: 'Troubleshooting Bazel Remote Execution with Docker Sandbox'
----
-
-
-
-Bazel builds that succeed locally may fail when executed remotely due to
-restrictions and requirements that do not affect local builds. The most common
-causes of such failures are described in [Adapting Bazel Rules for Remote Execution](/remote/rules).
-
-This page describes how to identify and resolve the most common issues that
-arise with remote execution using the Docker sandbox feature, which imposes
-restrictions upon the build equal to those of remote execution. This allows you
-to troubleshoot your build without the need for a remote execution service.
-
-The Docker sandbox feature mimics the restrictions of remote execution as
-follows:
-
-* **Build actions execute in toolchain containers.** You can use the same
-  toolchain containers to run your build locally and remotely via a service
-  supporting containerized remote execution.
-
-* **No extraneous data crosses the container boundary.** Only explicitly
-  declared inputs and outputs enter and leave the container, and only after
-  the associated build action successfully completes.
-
-* **Each action executes in a fresh container.** A new, unique container is
-  created for each spawned build action.
-
-Note: Builds take noticeably more time to complete when the Docker sandbox
-feature is enabled. This is normal.
-
-You can troubleshoot these issues using one of the following methods:
-
-* **[Troubleshooting natively.](#troubleshooting-natively)** With this method,
-  Bazel and its build actions run natively on your local machine. The Docker
-  sandbox feature imposes restrictions upon the build equal to those of remote
-  execution. However, this method will not detect local tools, states, and
-  data leaking into your build, which will cause problems with remote execution.
-
-* **[Troubleshooting in a Docker container.](#troubleshooting-docker-container)**
-  With this method, Bazel and its build actions run inside a Docker container,
-  which allows you to detect tools, states, and data leaking from the local
-  machine into the build in addition to imposing restrictions
-  equal to those of remote execution. This method provides insight into your
-  build even if portions of the build are failing. This method is experimental
-  and not officially supported.
-
-## Prerequisites
-
-Before you begin troubleshooting, do the following if you have not already done so:
-
-* Install Docker and configure the permissions required to run it.
-* Install Bazel 0.14.1 or later. Earlier versions do not support the Docker
-  sandbox feature.
-* Add the [bazel-toolchains](https://releases.bazel.build/bazel-toolchains.html)
-  repo, pinned to the latest release version, to your build's `WORKSPACE` file
-  as described [here](https://releases.bazel.build/bazel-toolchains.html).
-* Add flags to your `.bazelrc` file to enable the feature. Create the file in
-  the root directory of your Bazel project if it does not exist. The flags below
-  are a reference sample. Please see the latest
-  [`.bazelrc`](https://github.com/bazelbuild/bazel-toolchains/tree/master/bazelrc)
-  file in the bazel-toolchains repo and copy the values of the flags defined
-  there for config `docker-sandbox`.
-
-```
-# Docker Sandbox Mode
-build:docker-sandbox --host_javabase=<...>
-build:docker-sandbox --javabase=<...>
-build:docker-sandbox --crosstool_top=<...>
-build:docker-sandbox --experimental_docker_image=<...>
-build:docker-sandbox --spawn_strategy=docker --strategy=Javac=docker --genrule_strategy=docker
-build:docker-sandbox --experimental_docker_verbose
-build:docker-sandbox --experimental_enable_docker_sandbox
-```
-
-Note: The flags referenced in the `.bazelrc` file shown above are configured
-to run within the [`rbe-ubuntu16-04`](https://console.cloud.google.com/launcher/details/google/rbe-ubuntu16-04)
-container.
-
-If your rules require additional tools, do the following:
-
-1. Create a custom Docker container by installing tools using a [Dockerfile](https://docs.docker.com/engine/reference/builder/)
-   and [building](https://docs.docker.com/engine/reference/commandline/build/)
-   the image locally.
-
-2. Replace the value of the `--experimental_docker_image` flag above with the
-   name of your custom container image.
-
-
-## Troubleshooting natively
-
-This method executes Bazel and all of its build actions directly on the local
-machine and is a reliable way to confirm whether your build will succeed when
-executed remotely.
-
-However, with this method, locally installed tools, binaries, and data may leak
-into your build, especially if it uses [configure-style WORKSPACE rules](/remote/rules#manage-workspace-rules).
-Such leaks will cause problems with remote execution; to detect them, [troubleshoot in a Docker container](#troubleshooting-docker-container)
-in addition to troubleshooting natively.
-
-### Step 1: Run the build
-
-1. Add the `--config=docker-sandbox` flag to the Bazel command that executes
-   your build. For example:
-
-   ```posix-terminal
-   bazel --bazelrc=.bazelrc build --config=docker-sandbox <target>
-   ```
-
-2. Run the build and wait for it to complete. The build will run up to four
-   times slower than normal due to the Docker sandbox feature.
-
-You may encounter the following error:
-
-```none
-ERROR: 'docker' is an invalid value for docker spawn strategy.
-```
-
-If you do, run the build again with the `--experimental_docker_verbose` flag.
-This flag enables verbose error messages. This error is typically caused by a
-faulty Docker installation or lack of permissions to execute it under the
-current user account. See the [Docker documentation](https://docs.docker.com/install/linux/linux-postinstall/)
-for more information. If problems persist, skip ahead to [Troubleshooting in a Docker container](#troubleshooting-docker-container).
-
-### Step 2: Resolve detected issues
-
-The following are the most commonly encountered issues and their workarounds.
-
-* **A file, tool, binary, or resource referenced by the Bazel runfiles tree is
-  missing.** Confirm that all dependencies of the affected targets have been
-  [explicitly declared](/concepts/dependencies). See
-  [Managing implicit dependencies](/remote/rules#manage-dependencies)
-  for more information.
-
-* **A file, tool, binary, or resource referenced by an absolute path or the `PATH`
-  variable is missing.** Confirm that all required tools are installed within
-  the toolchain container and use [toolchain rules](/extending/toolchains) to properly
-  declare dependencies pointing to the missing resource. See
-  [Invoking build tools through toolchain rules](/remote/rules#invoking-build-tools-through-toolchain-rules)
-  for more information.
-
-* **A binary execution fails.** One of the build rules is referencing a binary
-  incompatible with the execution environment (the Docker container). See
-  [Managing platform-dependent binaries](/remote/rules#manage-binaries)
-  for more information. If you cannot resolve the issue, contact [bazel-discuss@google.com](mailto:bazel-discuss@google.com)
-  for help.
-
-* **A file from `@local_jdk` is missing or causing errors.** The Java binaries
-  on your local machine are leaking into the build but are incompatible with
-  it. Use [`java_toolchain`](/reference/be/java#java_toolchain)
-  in your rules and targets instead of `@local_jdk`. Contact [bazel-discuss@google.com](mailto:bazel-discuss@google.com) if you need further help.
-
-* **Other errors.** Contact [bazel-discuss@google.com](mailto:bazel-discuss@google.com) for help.
-
-## Troubleshooting in a Docker container
-
-With this method, Bazel runs inside a host Docker container, and Bazel's build
-actions execute inside individual toolchain containers spawned by the Docker
-sandbox feature. The sandbox spawns a brand new toolchain container for each
-build action and only one action executes in each toolchain container.
-
-This method provides more granular control of tools installed in the host
-environment. By separating the execution of the build from the execution of its
-build actions and keeping the installed tooling to a minimum, you can verify
-whether your build has any dependencies on the local execution environment.
-
-### Step 1: Build the container
-
-Note: The commands below are tailored specifically for a `debian:stretch` base.
-For other bases, modify them as necessary.
-
-1. Create a `Dockerfile` that creates the Docker container and installs Bazel
-   with a minimal set of build tools. In the installer URL, substitute the
-   Bazel version you want for `<bazel_version>`:
-
-   ```
-   FROM debian:stretch
-
-   RUN apt-get update && apt-get install -y apt-transport-https curl software-properties-common git gcc gnupg2 g++ openjdk-8-jdk-headless python-dev zip wget vim
-
-   RUN curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add -
-
-   RUN add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/debian $(lsb_release -cs) stable"
-
-   RUN apt-get update && apt-get install -y docker-ce
-
-   RUN wget https://releases.bazel.build/<bazel_version>/release/bazel-<bazel_version>-installer-linux-x86_64.sh -O ./bazel-installer.sh && chmod 755 ./bazel-installer.sh
-
-   RUN ./bazel-installer.sh
-   ```
-
-2. Build the container as `bazel_container`:
-
-   ```posix-terminal
-   docker build -t bazel_container - < Dockerfile
-   ```
-
-### Step 2: Start the container
-
-Start the Docker container using the command shown below. In the command,
-substitute the path to the source code on your host that you want to build.
-
-```posix-terminal
-docker run -it \
-  -v /var/run/docker.sock:/var/run/docker.sock \
-  -v /tmp:/tmp \
-  -v <your source code directory>:/src \
-  -w /src \
-  bazel_container \
-  /bin/bash
-```
-
-This command runs the container as root, maps the Docker socket, and mounts
-the `/tmp` directory. This allows Bazel to spawn other Docker containers and to
-use directories under `/tmp` to share files with those containers. Your source
-code is available at `/src` inside the container.
-
-The command intentionally starts from a `debian:stretch` base container that
-includes binaries incompatible with the `rbe-ubuntu16-04` container used as a
-toolchain container. If binaries from the local environment are leaking into the
-toolchain container, they will cause build errors.
-
-### Step 3: Test the container
-
-Run the following commands from inside the Docker container to test it:
-
-```posix-terminal
-docker ps
-
-bazel version
-```
-
-### Step 4: Run the build
-
-Run the build as shown below. The output user is root so that it corresponds to
-a directory that is accessible with the same absolute path from inside the host
-container in which Bazel runs, from the toolchain containers spawned by the Docker
-sandbox feature in which Bazel's build actions are running, and from the local
-machine on which the host and action containers run.
-
-```posix-terminal
-bazel --output_user_root=/tmp/bazel_docker_root --bazelrc=.bazelrc \
-  build --config=docker-sandbox <target>
-```
-
-### Step 5: Resolve detected issues
-
-You can resolve build failures as follows:
-
-* If the build fails with an "out of disk space" error, you can increase this
-  limit by starting the host container with the flag `--memory=XX` where `XX`
-  is the allocated disk space in gigabytes. This is experimental and may
-  result in unpredictable behavior.
-
-* If the build fails during the analysis or loading phases, one or more of
-  your build rules declared in the WORKSPACE file are not compatible with
-  remote execution. See [Adapting Bazel Rules for Remote Execution](/remote/rules)
-  for possible causes and workarounds.
-
-* If the build fails for any other reason, see the troubleshooting steps in [Step 2: Resolve detected issues](#step-2-resolve-detected-issues).
diff --git a/8.4.2/remote/workspace.mdx b/8.4.2/remote/workspace.mdx
deleted file mode 100644
index ae0aea5..0000000
--- a/8.4.2/remote/workspace.mdx
+++ /dev/null
@@ -1,127 +0,0 @@
----
-title: 'Finding Non-Hermetic Behavior in WORKSPACE Rules'
----
-
-
-
-In the following, a host machine is the machine where Bazel runs.
-
-When using remote execution, the actual build and/or test steps are not
-happening on the host machine, but are instead sent off to the remote execution
-system. However, the steps involved in resolving workspace rules are happening
-on the host machine. If your workspace rules access information about the
-host machine for use during execution, your build is likely to break due to
-incompatibilities between the environments.
-
-As part of [adapting Bazel rules for remote
-execution](/remote/rules), you need to find such workspace rules
-and fix them. This page describes how to find potentially problematic workspace
-rules using the workspace log.
-
-
-## Finding non-hermetic rules
-
-[Workspace rules](/reference/be/workspace) allow the developer to add dependencies to
-external workspaces, but they are rich enough to allow arbitrary processing to
-happen in the process.
-All related commands happen locally and can be a
-potential source of non-hermeticity. Usually non-hermetic behavior is
-introduced through
-[`repository_ctx`](/rules/lib/builtins/repository_ctx) which allows interacting
-with the host machine.
-
-Starting with Bazel 0.18, you can get a log of some potentially non-hermetic
-actions by adding the flag `--experimental_workspace_rules_log_file=[PATH]` to
-your Bazel command. Here `[PATH]` is a filename under which the log will be
-created.
-
-Things to note:
-
-* The log captures the events as they are executed. If some steps are
-  cached, they will not show up in the log, so to get a full result, don't
-  forget to run `bazel clean --expunge` beforehand.
-
-* Sometimes functions might be re-executed, in which case the related
-  events will show up in the log multiple times.
-
-* Workspace rules currently only log Starlark events.
-
-  Note: These particular rules do not cause hermeticity concerns as long
-  as a hash is specified.
-
-To find what was executed during workspace initialization:
-
-1. Run `bazel clean --expunge`. This command will clean your local cache and
-   any cached repositories, ensuring that all initialization will be re-run.
-
-2. Add `--experimental_workspace_rules_log_file=/tmp/workspacelog` to your
-   Bazel command and run the build.
-
-   This produces a binary proto file listing messages of type
-   [WorkspaceEvent](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/bazel/debug/workspace_log.proto?q=WorkspaceEvent)
-
-3. Download the Bazel source code and navigate to the Bazel folder by using
-   the command below. You need the source code to be able to parse the
-   workspace log with the
-   [workspacelog parser](https://source.bazel.build/bazel/+/master:src/tools/workspacelog/).
-
-   ```posix-terminal
-   git clone https://github.com/bazelbuild/bazel.git
-
-   cd bazel
-   ```
-
-4. In the Bazel source code repo, convert the whole workspace log to text.
-
-   ```posix-terminal
-   bazel build src/tools/workspacelog:parser
-
-   bazel-bin/src/tools/workspacelog/parser --log_path=/tmp/workspacelog > /tmp/workspacelog.txt
-   ```
-
-5. The output may be quite verbose and include output from built-in Bazel
-   rules.
-
-   To exclude specific rules from the output, use the `--exclude_rule` option.
-   For example:
-
-   ```posix-terminal
-   bazel build src/tools/workspacelog:parser
-
-   bazel-bin/src/tools/workspacelog/parser --log_path=/tmp/workspacelog \
-       --exclude_rule "//external:local_config_cc" \
-       --exclude_rule "//external:dep" > /tmp/workspacelog.txt
-   ```
-
-6. Open `/tmp/workspacelog.txt` and check for unsafe operations.
-
-The log consists of
-[WorkspaceEvent](https://source.bazel.build/bazel/+/master:src/main/java/com/google/devtools/build/lib/bazel/debug/workspace_log.proto?q=WorkspaceEvent)
-messages outlining certain potentially non-hermetic actions performed on a
-[`repository_ctx`](/rules/lib/builtins/repository_ctx).
-
-The actions that have been highlighted as potentially non-hermetic are as follows:
-
-* `execute`: executes an arbitrary command on the host environment. Check if
-  these may introduce any dependencies on the host environment.
-
-* `download`, `download_and_extract`: to ensure hermetic builds, make sure
-  that `sha256` is specified (see the sketch after this list).
-
-* `file`, `template`: this is not non-hermetic in itself, but may be a mechanism
-  for introducing dependencies on the host environment into the repository.
-  Ensure that you understand where the input comes from, and that it does not
-  depend on the host environment.
-
-* `os`: this is not non-hermetic in itself, but an easy way to get dependencies
-  on the host environment. A hermetic build would generally not call this.
-  In evaluating whether your usage is hermetic, keep in mind that this is
-  running on the host and not on the workers. Getting environment specifics
-  from the host is generally not a good idea for remote builds.
-
-* `symlink`: this is normally safe, but look for red flags. Any symlinks to
-  outside the repository or to an absolute path would cause problems on the
-  remote worker. If the symlink is created based on host machine properties
-  it would probably be problematic as well.
-
-* `which`: checking for programs installed on the host is usually problematic
-  since the workers may have different configurations.
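-
-As a minimal sketch of the hermetic pattern for downloads, pin a hash so the
-result cannot vary with the host environment (the rule, URL, and hash below
-are illustrative placeholders):
-
-```starlark
-def _my_deps_impl(repository_ctx):
-    # Hermetic: the sha256 pin makes the result reproducible on any machine.
-    repository_ctx.download_and_extract(
-        url = "https://example.com/tool-1.2.3.tar.gz",
-        sha256 = "0000000000000000000000000000000000000000000000000000000000000000",
-    )
-    # Non-hermetic counterexample: probing the host with
-    # repository_ctx.which("gcc") or repository_ctx.os.environ would make
-    # the result depend on the machine running Bazel.
-    repository_ctx.file("BUILD", "exports_files(['tool'])")
-
-my_deps = repository_rule(implementation = _my_deps_impl)
-```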
diff --git a/8.4.2/rules/bzl-style.mdx b/8.4.2/rules/bzl-style.mdx
deleted file mode 100644
index 941028a..0000000
--- a/8.4.2/rules/bzl-style.mdx
+++ /dev/null
@@ -1,212 +0,0 @@
----
-title: '.bzl style guide'
----
-
-
-
-This page covers basic style guidelines for Starlark and also includes
-information on macros and rules.
-
-[Starlark](/rules/language) is a
-language that defines how software is built, and as such it is both a
-programming and a configuration language.
-
-You will use Starlark to write `BUILD` files, macros, and build rules. Macros and
-rules are essentially meta-languages - they define how `BUILD` files are written.
-`BUILD` files are intended to be simple and repetitive.
-
-All software is read more often than it is written. This is especially true for
-Starlark, as engineers read `BUILD` files to understand dependencies of their
-targets and details of their builds. This reading will often happen in passing,
-in a hurry, or in parallel to accomplishing some other task. Consequently,
-simplicity and readability are very important so that users can parse and
-comprehend `BUILD` files quickly.
-
-When a user opens a `BUILD` file, they quickly want to know the list of targets in
-the file; or review the list of sources of that C++ library; or remove a
-dependency from that Java binary. Each time you add a layer of abstraction, you
-make it harder for a user to do these tasks.
-
-`BUILD` files are also analyzed and updated by many different tools. Tools may not
-be able to edit your `BUILD` file if it uses abstractions. Keeping your `BUILD`
-files simple will allow you to get better tooling. As a code base grows, it
-becomes more and more common to make changes across many `BUILD` files in order
-to update a library or do a cleanup.
-
-Important: Do not create a variable or macro just to avoid some amount of
-repetition in `BUILD` files. Your `BUILD` file should be easily readable both by
-developers and tools. The
-[DRY](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself) principle doesn't
-really apply here.
-
-## General advice
-
-* Use [Buildifier](https://github.com/bazelbuild/buildtools/tree/master/buildifier#linter)
-  as a formatter and linter.
-* Follow [testing guidelines](/rules/testing).
-
-## Style
-
-### Python style
-
-When in doubt, follow the
-[PEP 8 style guide](https://www.python.org/dev/peps/pep-0008/) where possible.
-In particular, use four rather than two spaces for indentation to follow the
-Python convention.
-
-Since
-[Starlark is not Python](/rules/language#differences-with-python),
-some aspects of Python style do not apply. For example, PEP 8 advises that
-comparisons to singletons be done with `is`, which is not an operator in
-Starlark.
-
-
-### Docstring
-
-Document files and functions using [docstrings](https://github.com/bazelbuild/buildtools/blob/master/WARNINGS.md#function-docstring).
-Use a docstring at the top of each `.bzl` file, and a docstring for each public
-function.
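-
-A minimal sketch of the expected shape (the file and function are invented
-for illustration):
-
-```starlark
-"""Rules for building mockascript binaries."""
-
-def mockascript_binary_name(name):
-    """Derives the output binary name for a mockascript target.
-
-    Args:
-      name: the target name.
-
-    Returns:
-      The file name of the generated binary.
-    """
-    return name + ".mocs.bin"
-```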
-
-### Document rules and aspects
-
-Rules and aspects, along with their attributes, as well as providers and their
-fields, should be documented using the `doc` argument.
-
-### Naming convention
-
-* Variables and function names use lowercase with words separated by
-  underscores (`[a-z][a-z0-9_]*`), such as `cc_library`.
-* Top-level private values start with one underscore. Bazel enforces that
-  private values cannot be used from other files. Local variables should not
-  use the underscore prefix.
-
-### Line length
-
-As in `BUILD` files, there is no strict line length limit as labels can be long.
-When possible, try to use at most 79 characters per line (following Python's
-style guide, [PEP 8](https://www.python.org/dev/peps/pep-0008/)). This guideline
-should not be enforced strictly: editors should display more than 80 columns,
-automated changes will frequently introduce longer lines, and humans shouldn't
-spend time splitting lines that are already readable.
-
-### Keyword arguments
-
-In keyword arguments, spaces around the equal sign are preferred:
-
-```python
-def fct(name, srcs):
-    filtered_srcs = my_filter(source = srcs)
-    native.cc_library(
-        name = name,
-        srcs = filtered_srcs,
-        testonly = True,
-    )
-```
-
-### Boolean values
-
-Prefer values `True` and `False` (rather than `1` and `0`) for boolean values
-(such as when using a boolean attribute in a rule).
-
-### Use print only for debugging
-
-Do not use the `print()` function in production code; it is only intended for
-debugging, and will spam all direct and indirect users of your `.bzl` file. The
-only exception is that you may submit code that uses `print()` if it is disabled
-by default and can only be enabled by editing the source -- for example, if all
-uses of `print()` are guarded by `if DEBUG:` where `DEBUG` is hardcoded to
-`False`. Be mindful of whether these statements are useful enough to justify
-their impact on readability.
-
-## Macros
-
-A macro is a function that instantiates one or more rules during the loading
-phase. In general, use rules whenever possible instead of macros. The build
-graph seen by the user is not the same as the one used by Bazel during the
-build - macros are expanded *before Bazel does any build graph analysis.*
-
-Because of this, when something goes wrong, the user will need to understand
-your macro's implementation to troubleshoot build problems. Additionally, `bazel
-query` results can be hard to interpret because targets shown in the results
-come from macro expansion. Finally, aspects are not aware of macros, so tooling
-depending on aspects (IDEs and others) might fail.
-
-A safe use for macros is for defining additional targets intended to be
-referenced directly at the Bazel CLI or in BUILD files: In that case, only the
-*end users* of those targets need to know about them, and any build problems
-introduced by macros are never far from their usage.
-
-For macros that define generated targets (implementation details of the macro
-which are not supposed to be referred to at the CLI or depended on by targets
-not instantiated by that macro), follow these best practices (a sketch follows
-this list):
-
-* A macro should take a `name` argument and define a target with that name.
-  That target becomes that macro's *main target*.
-* Generated targets, that is all other targets defined by a macro, should:
-    * Have their names prefixed by `<name>` or `_<name>`. For example, using
-      `name = '%s_bar' % (name)`.
-    * Have restricted visibility (`//visibility:private`), and
-    * Have a `manual` tag to avoid expansion in wildcard targets (`:all`,
-      `...`, `:*`, etc).
-* The `name` should only be used to derive names of targets defined by the
-  macro, and not for anything else. For example, don't use the name to derive
-  a dependency or input file that is not generated by the macro itself.
-* All the targets created in the macro should be coupled in some way to the
-  main target.
-* Conventionally, `name` should be the first argument when defining a macro.
-* Keep the parameter names in the macro consistent. If a parameter is passed
-  as an attribute value to the main target, keep its name the same. If a macro
-  parameter serves the same purpose as a common rule attribute, such as
-  `deps`, name it as you would the attribute (see below).
-* When calling a macro, use only keyword arguments. This is consistent with
-  rules, and greatly improves readability.
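-
-A minimal sketch of these conventions, for a hypothetical macro wrapping a
-`genrule` (names are illustrative):
-
-```starlark
-def my_bundle(name, srcs, **kwargs):
-    """Bundles srcs; `name` is the main target, helpers are implementation details."""
-    native.genrule(
-        name = "_%s_gen" % name,
-        srcs = srcs,
-        outs = ["%s.bundle" % name],
-        cmd = "cat $(SRCS) > $@",
-        visibility = ["//visibility:private"],
-        tags = ["manual"],
-    )
-    native.filegroup(
-        name = name,
-        srcs = [":_%s_gen" % name],
-        **kwargs
-    )
-```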
-
-Engineers often write macros when the Starlark API of relevant rules is
-insufficient for their specific use case, regardless of whether the rule is
-defined within Bazel in native code, or in Starlark. If you're facing this
-problem, ask the rule author if they can extend the API to accomplish your
-goals.
-
-As a rule of thumb, the more macros resemble the rules, the better.
-
-See also [macros](/extending/macros#conventions).
-
-## Rules
-
-* Rules, aspects, and their attributes should use lower_case names ("snake
-  case").
-* Rule names are nouns that describe the main kind of artifact produced by the
-  rule, from the point of view of its dependencies (or for leaf rules, the
-  user). This is not necessarily a file suffix. For instance, a rule that
-  produces C++ artifacts meant to be used as Python extensions might be called
-  `py_extension`. For most languages, typical rules include:
-    * `*_library` - a compilation unit or "module".
-    * `*_binary` - a target producing an executable or a deployment unit.
-    * `*_test` - a test target. This can include multiple tests. Expect all
-      tests in a `*_test` target to be variations on the same theme, for
-      example, testing a single library.
-    * `*_import` - a target encapsulating a pre-compiled artifact, such as a
-      `.jar`, or a `.dll` that is used during compilation.
-* Use consistent names and types for attributes. Some generally applicable
-  attributes include:
-    * `srcs`: `label_list`, allowing files: source files, typically
-      human-authored.
-    * `deps`: `label_list`, typically *not* allowing files: compilation
-      dependencies.
-    * `data`: `label_list`, allowing files: data files, such as test data, etc.
-    * `runtime_deps`: `label_list`: runtime dependencies that are not needed
-      for compilation.
-* For any attributes with non-obvious behavior (for example, string templates
-  with special substitutions, or tools that are invoked with specific
-  requirements), provide documentation using the `doc` keyword argument to the
-  attribute's declaration (`attr.label_list()` or similar).
-* Rule implementation functions should almost always be private functions
-  (named with a leading underscore). A common style is to give the
-  implementation function for `myrule` the name `_myrule_impl`.
-* Pass information between your rules using a well-defined
-  [provider](/extending/rules#providers) interface. Declare and document provider
-  fields.
-* Design your rule with extensibility in mind. Consider that other rules might
-  want to interact with your rule, access your providers, and reuse the
-  actions you create.
-* Follow [performance guidelines](/rules/performance) in your rules.
diff --git a/8.4.2/rules/challenges.mdx b/8.4.2/rules/challenges.mdx
deleted file mode 100644
index 10ff737..0000000
--- a/8.4.2/rules/challenges.mdx
+++ /dev/null
@@ -1,223 +0,0 @@
----
-title: 'Challenges of Writing Rules'
----
-
-
-
-This page gives a high-level overview of the specific issues and challenges
-of writing efficient Bazel rules.
-
-## Summary Requirements
-
-* Assumption: Aim for Correctness, Throughput, Ease of Use & Latency
-* Assumption: Large Scale Repositories
-* Assumption: BUILD-like Description Language
-* Historic: Hard Separation between Loading, Analysis, and Execution is
-  Outdated, but still affects the API
-* Intrinsic: Remote Execution and Caching are Hard
-* Intrinsic: Using Change Information for Correct and Fast Incremental Builds
-  requires Unusual Coding Patterns
-* Intrinsic: Avoiding Quadratic Time and Memory Consumption is Hard
-
-## Assumptions
-
-Here are some assumptions made about the build system, such as the need for
-correctness, ease of use, throughput, and large scale repositories. The
-following sections address these assumptions and offer guidelines to ensure
-rules are written in an effective manner.
-
-### Aim for correctness, throughput, ease of use & latency
-
-We assume that the build system needs to be first and foremost correct with
-respect to incremental builds. For a given source tree, the output of the
-same build should always be the same, regardless of what the output tree looks
-like. To a first approximation, this means Bazel needs to know every single
-input that goes into a given build step, such that it can rerun that step if any
-of the inputs change. There are limits to how correct Bazel can get, as it leaks
-some information such as date / time of the build, and ignores certain types of
-changes such as changes to file attributes. [Sandboxing](/docs/sandboxing)
-helps ensure correctness by preventing reads to undeclared input files. Besides
-the intrinsic limits of the system, there are a few known correctness issues,
-most of which are related to Fileset or the C++ rules, which are both hard
-problems. We have long-term efforts to fix these.
-
-The second goal of the build system is to have high throughput; we are
-permanently pushing the boundaries of what can be done within the current
-machine allocation for a remote execution service. If the remote execution
-service gets overloaded, nobody can get work done.
-
-Ease of use comes next. Of multiple correct approaches with the same (or
-similar) footprint of the remote execution service, we choose the one that is
-easier to use.
-
-Latency denotes the time it takes from starting a build to getting the intended
-result, whether that is a test log from a passing or failing test, or an error
-message that a `BUILD` file has a typo.
-
-Note that these goals often overlap; latency is as much a function of throughput
-of the remote execution service as is correctness relevant for ease of use.
-
-### Large scale repositories
-
-The build system needs to operate at the scale of large repositories where large
-scale means that it does not fit on a single hard drive, so it is impossible to
-do a full checkout on virtually all developer machines. A medium-sized build
-will need to read and parse tens of thousands of `BUILD` files, and evaluate
-hundreds of thousands of globs. While it is theoretically possible to read all
-`BUILD` files on a single machine, we have not yet been able to do so within a
-reasonable amount of time and memory. As such, it is critical that `BUILD` files
-can be loaded and parsed independently.
-
-### BUILD-like description language
-
-In this context, we assume a configuration language that is
-roughly similar to `BUILD` files in declaration of library and binary rules
-and their interdependencies. `BUILD` files can be read and parsed independently,
-and we avoid even looking at source files whenever we can (except for
-existence).
-
-## Historic
-
-There are differences between Bazel versions that cause challenges and some
-of these are outlined in the following sections.
-
-### Hard separation between loading, analysis, and execution is outdated but still affects the API
-
-Technically, it is sufficient for a rule to know the input and output files of
-an action just before the action is sent to remote execution. However, the
-original Bazel code base had a strict separation of loading packages, then
-analyzing rules using a configuration (command-line flags, essentially), and
-only then running any actions. This distinction is still part of the rules API
-today, even though the core of Bazel no longer requires it (more details below).
-
-That means that the rules API requires a declarative description of the rule
-interface (what attributes it has, types of attributes). There are some
-exceptions where the API allows custom code to run during the loading phase to
-compute implicit names of output files and implicit values of attributes. For
-example, a `java_library` rule named `foo` implicitly generates an output named
-`libfoo.jar`, which can be referenced from other rules in the build graph.
-
-Furthermore, the analysis of a rule cannot read any source files or inspect the
-output of an action; instead, it needs to generate a partial directed bipartite
-graph of build steps and output file names that is only determined from the rule
-itself and its dependencies.
-
-## Intrinsic
-
-There are some intrinsic properties that make writing rules challenging, and
-some of the most common ones are described in the following sections.
-
-### Remote execution and caching are hard
-
-Remote execution and caching improve build times in large repositories by
-roughly two orders of magnitude compared to running the build on a single
-machine. However, the scale at which it needs to perform is staggering: Google's
-remote execution service is designed to handle a huge number of requests per
-second, and the protocol carefully avoids unnecessary roundtrips as well as
-unnecessary work on the service side.
-
-At this time, the protocol requires that the build system knows all inputs to a
-given action ahead of time; the build system then computes a unique action
-fingerprint, and asks the scheduler for a cache hit.
-If a cache hit is found,
-the scheduler replies with the digests of the output files; the files themselves
-are addressed by digest later on. However, this imposes restrictions on the Bazel
-rules, which need to declare all input files ahead of time.
-
-### Using change information for correct and fast incremental builds requires unusual coding patterns
-
-Above, we argued that in order to be correct, Bazel needs to know all the input
-files that go into a build step in order to detect whether that build step is
-still up-to-date. The same is true for package loading and rule analysis, and we
-have designed [Skyframe](/reference/skyframe) to handle this
-in general. Skyframe is a graph library and evaluation framework that takes a
-goal node (such as 'build //foo with these options'), and breaks it down into
-its constituent parts, which are then evaluated and combined to yield this
-result. As part of this process, Skyframe reads packages, analyzes rules, and
-executes actions.
-
-At each node, Skyframe tracks exactly which nodes any given node used to compute
-its own output, all the way from the goal node down to the input files (which
-are also Skyframe nodes). Having this graph explicitly represented in memory
-allows the build system to identify exactly which nodes are affected by a given
-change to an input file (including creation or deletion of an input file), doing
-the minimal amount of work to restore the output tree to its intended state.
-
-As part of this, each node performs a dependency discovery process. Each
-node can declare dependencies, and then use the contents of those dependencies
-to declare even further dependencies. In principle, this maps well to a
-thread-per-node model. However, medium-sized builds contain hundreds of
-thousands of Skyframe nodes, which isn't easily possible with current Java
-technology (and for historical reasons, we're currently tied to using Java, so
-no lightweight threads and no continuations).
-
-Instead, Bazel uses a fixed-size thread pool. However, that means that if a node
-declares a dependency that isn't available yet, we may have to abort that
-evaluation and restart it (possibly in another thread), when the dependency is
-available. This, in turn, means that nodes should not do this excessively; a
-node that declares N dependencies serially can potentially be restarted N times,
-costing O(N^2) time. Instead, we aim for up-front bulk declaration of
-dependencies, which sometimes requires reorganizing the code, or even splitting
-a node into multiple nodes to limit the number of restarts.
-
-Note that this technology isn't currently available in the rules API; instead,
-the rules API is still defined using the legacy concepts of loading, analysis,
-and execution phases. However, a fundamental restriction is that all accesses to
-other nodes have to go through the framework so that it can track the
-corresponding dependencies. Regardless of the language in which the build system
-is implemented or in which the rules are written (they don't have to be the
-same), rule authors must not use standard libraries or patterns that bypass
-Skyframe. For Java, that means avoiding `java.io.File` as well as any form of
-reflection, and any library that does either. Libraries that support dependency
-injection of these low-level interfaces still need to be set up correctly for
-Skyframe.
The danger of accidental use of such APIs is just too big - -several Bazel bugs in the past were caused by rules using unsafe APIs, even -though the rules were written by the Bazel team or other domain experts. - -### Avoiding quadratic time and memory consumption is hard - -To make matters worse, apart from the requirements imposed by Skyframe, the -historical constraints of using Java, and the outdatedness of the rules API, -accidentally introducing quadratic time or memory consumption is a fundamental -problem in any build system based on library and binary rules. There are two -very common patterns that introduce quadratic memory consumption (and therefore -quadratic time consumption). - -1. Chains of Library Rules - -Consider the case of a chain of library rules A depends on B, depends on C, and -so on. Then, we want to compute some property over the transitive closure of -these rules, such as the Java runtime classpath, or the C++ linker command for -each library. Naively, we might take a standard list implementation; however, -this already introduces quadratic memory consumption: the first library -contains one entry on the classpath, the second two, the third three, and so -on, for a total of 1+2+3+...+N = O(N^2) entries. - -2. Binary Rules Depending on the Same Library Rules - -Consider the case where a set of binaries that depend on the same library -rules — such as if you have a number of test rules that test the same -library code. Let's say out of N rules, half the rules are binary rules, and -the other half library rules. Now consider that each binary makes a copy of -some property computed over the transitive closure of library rules, such as -the Java runtime classpath, or the C++ linker command line. For example, it -could expand the command line string representation of the C++ link action. N/2 -copies of N/2 elements is O(N^2) memory. - -#### Custom collections classes to avoid quadratic complexity - -Bazel is heavily affected by both of these scenarios, so we introduced a set of -custom collection classes that effectively compress the information in memory by -avoiding the copy at each step. Almost all of these data structures have set -semantics, so we called it -[depset](/rules/lib/depset) -(also known as `NestedSet` in the internal implementation). The majority of -changes to reduce Bazel's memory consumption over the past several years were -changes to use depsets instead of whatever was previously used. - -Unfortunately, usage of depsets does not automatically solve all the issues; -in particular, even just iterating over a depset in each rule re-introduces -quadratic time consumption. Internally, NestedSets also has some helper methods -to facilitate interoperability with normal collections classes; unfortunately, -accidentally passing a NestedSet to one of these methods leads to copying -behavior, and reintroduces quadratic memory consumption. diff --git a/8.4.2/rules/deploying.mdx b/8.4.2/rules/deploying.mdx deleted file mode 100644 index 3fe2c86..0000000 --- a/8.4.2/rules/deploying.mdx +++ /dev/null @@ -1,223 +0,0 @@ ---- -title: 'Deploying Rules' ---- - - - -This page is for rule writers who are planning to make their rules available -to others. - -We recommend you start a new ruleset from the template repository: -https://github.com/bazel-contrib/rules-template -That template follows the recommendations below, and includes API documentation generation -and sets up a CI/CD pipeline to make it trivial to distribute your ruleset. 
diff --git a/8.4.2/rules/deploying.mdx b/8.4.2/rules/deploying.mdx
deleted file mode 100644
index 3fe2c86..0000000
--- a/8.4.2/rules/deploying.mdx
+++ /dev/null
@@ -1,223 +0,0 @@
----
-title: 'Deploying Rules'
----
-
-
-
-This page is for rule writers who are planning to make their rules available
-to others.
-
-We recommend you start a new ruleset from the template repository:
-https://github.com/bazel-contrib/rules-template
-That template follows the recommendations below, includes API documentation
-generation, and sets up a CI/CD pipeline to make it trivial to distribute your
-ruleset.
-
-## Hosting and naming rules
-
-New rules should go into their own GitHub repository under your organization.
-Start a thread on [GitHub](https://github.com/bazelbuild/bazel/discussions)
-if you feel like your rules belong in the [bazelbuild](https://github.com/bazelbuild)
-organization.
-
-Repository names for Bazel rules are standardized on the following format:
-`$ORGANIZATION/rules_$NAME`.
-See [examples on GitHub](https://github.com/search?q=rules+bazel&type=Repositories).
-For consistency, you should follow this same format when publishing your Bazel rules.
-
-Make sure to use a descriptive GitHub repository description and `README.md`
-title, for example:
-
-* Repository name: `bazelbuild/rules_go`
-* Repository description: *Go rules for Bazel*
-* Repository tags: `golang`, `bazel`
-* `README.md` header: *Go rules for [Bazel](https://bazel.build)*
-  (note the link to https://bazel.build which will guide users who are unfamiliar
-  with Bazel to the right place)
-
-Rules can be grouped either by language (such as Scala), runtime platform
-(such as Android), or framework (such as Spring).
-
-## Repository content
-
-Every rule repository should have a certain layout so that users can quickly
-understand new rules.
-
-For example, when writing new rules for the (make-believe)
-`mockascript` language, the rule repository would have the following structure:
-
-```
-/
-  LICENSE
-  README
-  MODULE.bazel
-  mockascript/
-    constraints/
-      BUILD
-    runfiles/
-      BUILD
-      runfiles.mocs
-    BUILD
-    defs.bzl
-  tests/
-    BUILD
-    some_test.sh
-    another_test.py
-  examples/
-    BUILD
-    bin.mocs
-    lib.mocs
-    test.mocs
-```
-
-### MODULE.bazel
-
-In the project's `MODULE.bazel`, you should define the name that users will use
-to reference your rules. If your rules belong to the
-[bazelbuild](https://github.com/bazelbuild) organization, you must use
-`rules_<lang>` (such as `rules_mockascript`). Otherwise, you should name your
-repository `<org>_rules_<lang>` (such as `build_stack_rules_proto`). Please
-start a thread on [GitHub](https://github.com/bazelbuild/bazel/discussions)
-if you feel like your rules should follow the convention for rules in the
-[bazelbuild](https://github.com/bazelbuild) organization.
-
-In the following sections, assume the repository belongs to the
-[bazelbuild](https://github.com/bazelbuild) organization.
-
-```
-module(name = "rules_mockascript")
-```
-
-### README
-
-At the top level, there should be a `README` that contains a brief description
-of your ruleset and the API that users should expect.
-
-### Rules
-
-Often there will be multiple rules provided by your repository. Create a
-directory named by the language and provide an entry point - a `defs.bzl` file
-exporting all rules (also include a `BUILD` file so the directory is a package).
-For `rules_mockascript` that means there will be a directory named
-`mockascript`, and a `BUILD` file and a `defs.bzl` file inside:
-
-```
-/
-  mockascript/
-    BUILD
-    defs.bzl
-```
-
-### Constraints
-
-If your rule defines
-[toolchain](/extending/toolchains) rules,
-it's possible that you'll need to define custom `constraint_setting`s and/or
-`constraint_value`s. Put these into a `//<lang>/constraints` package. Your
-directory structure will look like this:
-
-```
-/
-  mockascript/
-    constraints/
-      BUILD
-    BUILD
-    defs.bzl
-```
-
-Please read
-[github.com/bazelbuild/platforms](https://github.com/bazelbuild/platforms)
-for best practices, and to see what constraints are already present, and
-consider contributing your constraints there if they are language independent.
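-
-As a sketch, such a package could declare a custom setting and its values
-like this (the names are invented for the `mockascript` example):
-
-```starlark
-# mockascript/constraints/BUILD
-constraint_setting(name = "mockascript_runtime")
-
-constraint_value(
-    name = "interpreted",
-    constraint_setting = ":mockascript_runtime",
-)
-
-constraint_value(
-    name = "compiled",
-    constraint_setting = ":mockascript_runtime",
-)
-```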
-Be mindful when introducing custom constraints: all users of your rules will
-use them to perform platform specific logic in their `BUILD` files (for example,
-using [selects](/reference/be/functions#select)).
-With custom constraints, you define a language that the whole Bazel ecosystem
-will speak.
-
-### Runfiles library
-
-If your rule provides a standard library for accessing runfiles, it should be
-in the form of a library target located at `//<lang>/runfiles` (an abbreviation
-of `//<lang>/runfiles:runfiles`). User targets that need to access their data
-dependencies will typically add this target to their `deps` attribute.
-
-### Repository rules
-
-#### Dependencies
-
-Your rules might have external dependencies, which you'll need to specify in
-your `MODULE.bazel` file.
-
-#### Registering toolchains
-
-Your rules might also register toolchains, which you can also specify in the
-`MODULE.bazel` file.
-
-Note that in order to resolve toolchains in the analysis phase Bazel needs to
-analyze all `toolchain` targets that are registered. Bazel will not need to
-analyze all targets referenced by the `toolchain.toolchain` attribute. If in
-order to register toolchains you need to perform complex computation in the
-repository, consider splitting the repository with `toolchain` targets from the
-repository with `<lang>_toolchain` targets. The former will always be fetched,
-and the latter will only be fetched when the user actually needs to build
-`<lang>` code.
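-
-For example, a registration in `MODULE.bazel` along these lines (the label is
-illustrative):
-
-```starlark
-register_toolchains("//mockascript/toolchains:all")
-```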
-
-
-#### Release snippet
-
-In your release announcement provide a snippet that your users can copy-paste
-into their `MODULE.bazel` file. This snippet in general will look as follows:
-
-```
-bazel_dep(name = "rules_<lang>", version = "<version>")
-```
-
-
-### Tests
-
-There should be tests that verify that the rules are working as expected. This
-can either be in the standard location for the language the rules are for or a
-`tests/` directory at the top level.
-
-### Examples (optional)
-
-It is useful to have an `examples/` directory that shows users a couple
-of basic ways that the rules can be used.
-
-## CI/CD
-
-Many rulesets use GitHub Actions. See the configuration used in the
-[rules-template](https://github.com/bazel-contrib/rules-template/tree/main/.github/workflows)
-repo, which is simplified using a "reusable workflow" hosted in the bazel-contrib
-org. `ci.yaml` runs tests on each PR and `main` commit, and `release.yaml` runs
-anytime you push a tag to the repository.
-See comments in the rules-template repo for more information.
-
-If your repository is under the [bazelbuild organization](https://github.com/bazelbuild),
-you can [ask to add](https://github.com/bazelbuild/continuous-integration/issues/new?template=adding-your-project-to-bazel-ci.md&title=Request+to+add+new+project+%5BPROJECT_NAME%5D&labels=new-project)
-it to [ci.bazel.build](http://ci.bazel.build).
-
-## Documentation
-
-See the [Stardoc documentation](https://github.com/bazelbuild/stardoc) for
-instructions on how to comment your rules so that documentation can be generated
-automatically.
-
-The [rules-template docs/ folder](https://github.com/bazel-contrib/rules-template/tree/main/docs)
-shows a simple way to ensure the Markdown content in the `docs/` folder is always up-to-date
-as Starlark files are updated.
-
-## FAQs
-
-### Why can't we add our rule to the main Bazel GitHub repository?
-
-We want to decouple rules from Bazel releases as much as possible. It's clearer
-who owns individual rules, reducing the load on Bazel developers. For our users,
-decoupling makes it easier to modify, upgrade, downgrade, and replace rules.
-Contributing to rules can be lighter weight than contributing to Bazel and can
-include, depending on the rules, full submit access to the corresponding
-GitHub repository. Getting submit access to Bazel itself is a much more involved
-process.
-
-The downside is a more complicated one-time installation process for our users:
-they have to add a dependency on your ruleset in their `MODULE.bazel` file.
-
-We used to have all of the rules in the Bazel repository (under
-`//tools/build_rules` or `//tools/build_defs`). We still have a couple rules
-there, but we are working on moving the remaining rules out.
diff --git a/8.4.2/rules/errors/read-only-variable.mdx b/8.4.2/rules/errors/read-only-variable.mdx
deleted file mode 100644
index 2bfde65..0000000
--- a/8.4.2/rules/errors/read-only-variable.mdx
+++ /dev/null
@@ -1,30 +0,0 @@
----
-title: 'Error: Variable x is read only'
----
-
-
-
-A global variable cannot be reassigned. It will always point to the same object.
-However, its content might change, if the value is mutable (for example, the
-content of a list). Local variables don't have this restriction.
-
-```python
-a = [1, 2]
-
-a[1] = 3
-
-b = 3
-
-b = 4 # forbidden
-```
-
-`ERROR: /path/ext.bzl:7:1: Variable b is read only`
-
-You will get a similar error if you try to redefine a function (function
-overloading is not supported), for example:
-
-```python
-def foo(x): return x + 1
-
-def foo(x, y): return x + y # forbidden
-```
diff --git a/8.4.2/rules/faq.mdx b/8.4.2/rules/faq.mdx
deleted file mode 100644
index 5321f0b..0000000
--- a/8.4.2/rules/faq.mdx
+++ /dev/null
@@ -1,80 +0,0 @@
----
-title: 'Frequently Asked Questions'
----
-
-
-
-These are some common issues and questions with writing extensions.
-
-## Why is my file not produced / my action never executed?
-
-Bazel only executes the actions needed to produce the *requested* output files.
-
-* If the file you want has a label, you can request it directly:
-  `bazel build //pkg:myfile.txt`
-
-* If the file is in an output group of the target, you may need to specify that
-  output group on the command line:
-  `bazel build //pkg:mytarget --output_groups=foo`
-
-* If you want the file to be built automatically whenever your target is
-  mentioned on the command line, add it to your rule's default outputs by
-  returning a [`DefaultInfo`](lib/globals#DefaultInfo) provider.
-
-See the [Rules page](/extending/rules#requesting-output-files) for more information.
-
-## Why is my implementation function not executed?
-
-Bazel analyzes only the targets that are requested for the build. You should
-either name the target on the command line, or name something that depends on
-the target.
-
-## A file is missing when my action or binary is executed
-
-Make sure that 1) the file has been registered as an input to the action or
-binary, and 2) the script or tool being executed is accessing the file using the
-correct path.
-
-For actions, you declare inputs by passing them to the `ctx.actions.*` function
-that creates the action. The proper path for the file can be obtained using
-[`File.path`](lib/File#path).
-
-For binaries (the executable outputs run by a `bazel run` or `bazel test`
-command), you declare inputs by including them in the
-[runfiles](/extending/rules#runfiles). Instead of using the `path` field, use
-[`File.short_path`](lib/File#short_path), which is the file's path relative to
-the runfiles directory in which the binary executes.
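-
-As a minimal sketch of both conventions (the rule, attributes, and tool label
-are illustrative):
-
-```starlark
-def _my_process_impl(ctx):
-    out = ctx.actions.declare_file(ctx.label.name + ".txt")
-    ctx.actions.run(
-        executable = ctx.executable._tool,
-        # The data file must be declared as an action input...
-        inputs = [ctx.file.data],
-        outputs = [out],
-        # ...and is referenced by File.path within the action.
-        arguments = [ctx.file.data.path, out.path],
-    )
-    return [DefaultInfo(
-        files = depset([out]),
-        # A binary run via `bazel run` instead locates data in its runfiles,
-        # using File.short_path.
-        runfiles = ctx.runfiles(files = [ctx.file.data]),
-    )]
-
-my_process = rule(
-    implementation = _my_process_impl,
-    attrs = {
-        "data": attr.label(allow_single_file = True),
-        "_tool": attr.label(
-            default = "//tools:processor",
-            executable = True,
-            cfg = "exec",
-        ),
-    },
-)
-```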
-
-## How can I control which files are built by `bazel build //pkg:mytarget`?
-
-Use the [`DefaultInfo`](lib/globals#DefaultInfo) provider to
-[set the default outputs](/extending/rules#requesting-output-files).
-
-## How can I run a program or do file I/O as part of my build?
-
-A tool can be declared as a target, just like any other part of your build, and
-run during the execution phase to help build other targets. To create an action
-that runs a tool, use [`ctx.actions.run`](lib/actions#run) and pass in the
-tool as the `executable` parameter.
-
-During the loading and analysis phases, a tool *cannot* run, nor can you perform
-file I/O. This means that tools and file contents (except the contents of `BUILD`
-and `.bzl` files) cannot affect how the target and action graphs get created.
-
-## What if I need to access the same structured data both before and during the execution phase?
-
-You can format the structured data as a `.bzl` file. You can `load()` the file to
-access it during the loading and analysis phases. You can pass it as an input or
-runfile to actions and executables that need it during the execution phase.
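-
-A minimal sketch of the pattern (the file path and constant are illustrative):
-
-```starlark
-# config/settings.bzl holds the structured data as Starlark constants:
-#     SUPPORTED_MODES = ["fast", "small", "debug"]
-#
-# Any .bzl or BUILD file can then read it during loading and analysis:
-load("//config:settings.bzl", "SUPPORTED_MODES")
-
-def mode_flags(mode):
-    if mode not in SUPPORTED_MODES:
-        fail("unknown mode: " + mode)
-    return ["--mode=" + mode]
-```
-
-For execution-phase access, pass the same `settings.bzl` file to the action or
-binary as an input or runfile, as described above.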
-Contact the respective rule set's maintainers regarding issues and feature -requests. - -To find more Bazel rules, use a search engine, take a look at -[awesomebazel.com](https://awesomebazel.com/), or search on -[GitHub](https://github.com/search?o=desc&q=bazel+rules&s=stars&type=Repositories). - -## Native rules that do not apply to a specific programming language - -Native rules are shipped with the Bazel binary and are always available in -BUILD files without a `load` statement. - -* Extra actions - - [`extra_action`](/reference/be/extra-actions#extra_action) - - [`action_listener`](/reference/be/extra-actions#action_listener) -* General - - [`filegroup`](/reference/be/general#filegroup) - - [`genquery`](/reference/be/general#genquery) - - [`test_suite`](/reference/be/general#test_suite) - - [`alias`](/reference/be/general#alias) - - [`config_setting`](/reference/be/general#config_setting) - - [`genrule`](/reference/be/general#genrule) -* Platform - - [`constraint_setting`](/reference/be/platforms-and-toolchains#constraint_setting) - - [`constraint_value`](/reference/be/platforms-and-toolchains#constraint_value) - - [`platform`](/reference/be/platforms-and-toolchains#platform) - - [`toolchain`](/reference/be/platforms-and-toolchains#toolchain) - - [`toolchain_type`](/reference/be/platforms-and-toolchains#toolchain_type) -* Workspace - - [`bind`](/reference/be/workspace#bind) - - [`local_repository`](/reference/be/workspace#local_repository) - - [`new_local_repository`](/reference/be/workspace#new_local_repository) - - [`xcode_config`](/reference/be/objective-c#xcode_config) - - [`xcode_version`](/reference/be/objective-c#xcode_version) - -## Embedded non-native rules - -Bazel also embeds additional rules written in [Starlark](/rules/language). Those can be loaded from -the `@bazel_tools` built-in external repository. - -* Repository rules - - [`git_repository`](/rules/lib/repo/git#git_repository) - - [`http_archive`](/rules/lib/repo/http#http_archive) - - [`http_file`](/rules/lib/repo/http#http_file) - - [`http_jar`](/rules/lib/repo/http#http_jar) - - [Utility functions on patching](/rules/lib/repo/utils) diff --git a/8.4.2/rules/legacy-macro-tutorial.mdx b/8.4.2/rules/legacy-macro-tutorial.mdx deleted file mode 100644 index 44cdcfb..0000000 --- a/8.4.2/rules/legacy-macro-tutorial.mdx +++ /dev/null @@ -1,98 +0,0 @@ ---- -title: 'Creating a Legacy Macro' ---- - - - -IMPORTANT: This tutorial is for [*legacy macros*](/extending/legacy-macros). If -you only need to support Bazel 8 or newer, we recommend using [symbolic -macros](/extending/macros) instead; take a look at [Creating a Symbolic -Macro](../macro-tutorial). - -Imagine that you need to run a tool as part of your build. For example, you -may want to generate or preprocess a source file, or compress a binary. In this -tutorial, you are going to create a legacy macro that resizes an image. - -Macros are suitable for simple tasks. If you want to do anything more -complicated, for example add support for a new programming language, consider -creating a [rule](/extending/rules). Rules give you more control and flexibility. - -The easiest way to create a macro that resizes an image is to use a `genrule`: - -```starlark -genrule( - name = "logo_miniature", - srcs = ["logo.png"], - outs = ["small_logo.png"], - cmd = "convert $< -resize 100x100 $@", -) - -cc_binary( - name = "my_app", - srcs = ["my_app.cc"], - data = [":logo_miniature"], -) -``` - -If you need to resize more images, you may want to reuse the code.
To do that, -define a function in a separate `.bzl` file, and call the file `miniature.bzl`: - -```starlark -def miniature(name, src, size = "100x100", **kwargs): - """Create a miniature of the src image. - - The generated file is prefixed with 'small_'. - """ - native.genrule( - name = name, - srcs = [src], - # Note that the line below will fail if `src` is not a filename string - outs = ["small_" + src], - cmd = "convert $< -resize " + size + " $@", - **kwargs - ) -``` - -A few remarks: - - * By convention, legacy macros have a `name` argument, just like rules. - - * To document the behavior of a legacy macro, use a - [docstring](https://www.python.org/dev/peps/pep-0257/) like in Python. - - * To call a `genrule`, or any other native rule, prefix with `native.`. - - * Use `**kwargs` to forward the extra arguments to the underlying `genrule` - (it works just like in - [Python](https://docs.python.org/3/tutorial/controlflow.html#keyword-arguments)). - This is useful so that a user can set standard attributes like - `visibility` or `tags`. - -Now, use the macro from the `BUILD` file: - -```starlark -load("//path/to:miniature.bzl", "miniature") - -miniature( - name = "logo_miniature", - src = "image.png", -) - -cc_binary( - name = "my_app", - srcs = ["my_app.cc"], - data = [":logo_miniature"], -) -``` - -And finally, a **warning note**: the macro assumes that `src` is a filename -string (otherwise, `outs = ["small_" + src]` will fail). So `src = "image.png"` -works; but what happens if the `BUILD` file instead used `src = -"//other/package:image.png"`, or even `src = select(...)`? - -You should make sure to declare such assumptions in your macro's documentation. -Unfortunately, legacy macros, especially large ones, tend to be fragile because -it can be hard to notice and document all such assumptions in your code – and, -of course, some users of the macro won't read the documentation. We recommend, -if possible, instead using [symbolic macros](/extending/macros), which have -built-in checks on attribute types. diff --git a/8.4.2/rules/macro-tutorial.mdx b/8.4.2/rules/macro-tutorial.mdx deleted file mode 100644 index 93825aa..0000000 --- a/8.4.2/rules/macro-tutorial.mdx +++ /dev/null @@ -1,116 +0,0 @@ ---- -title: 'Creating a Symbolic Macro' ---- - - - -IMPORTANT: This tutorial is for [*symbolic macros*](/extending/macros) – the new -macro system introduced in Bazel 8. If you need to support older Bazel versions, -you will want to write a [legacy macro](/extending/legacy-macros) instead; take -a look at [Creating a Legacy Macro](../legacy-macro-tutorial). - -Imagine that you need to run a tool as part of your build. For example, you -may want to generate or preprocess a source file, or compress a binary. In this -tutorial, you are going to create a symbolic macro that resizes an image. - -Macros are suitable for simple tasks. If you want to do anything more -complicated, for example add support for a new programming language, consider -creating a [rule](/extending/rules). Rules give you more control and flexibility. - -The easiest way to create a macro that resizes an image is to use a `genrule`: - -```starlark -genrule( - name = "logo_miniature", - srcs = ["logo.png"], - outs = ["small_logo.png"], - cmd = "convert $< -resize 100x100 $@", -) - -cc_binary( - name = "my_app", - srcs = ["my_app.cc"], - data = [":logo_miniature"], -) -``` - -If you need to resize more images, you may want to reuse the code.
To do that, -define an *implementation function* and a *macro declaration* in a separate -`.bzl` file, and call the file `miniature.bzl`: - -```starlark -# Implementation function -def _miniature_impl(name, visibility, src, size, **kwargs): - native.genrule( - name = name, - visibility = visibility, - srcs = [src], - outs = [name + "_small_" + src.name], - cmd = "convert $< -resize " + size + " $@", - **kwargs, - ) - -# Macro declaration -miniature = macro( - doc = """Create a miniature of the src image. - - The generated file name will be prefixed with `name + "_small_"`. - """, - implementation = _miniature_impl, - # Inherit most of genrule's attributes (such as tags and testonly) - inherit_attrs = native.genrule, - attrs = { - "src": attr.label( - doc = "Image file", - allow_single_file = True, - # Non-configurable because our genrule's output filename is - # suffixed with src's name. (We want to suffix the output file with - # src's name because some tools that operate on image files expect - # the files to have the right file extension.) - configurable = False, - ), - "size": attr.string( - doc = "Output size in WxH format", - default = "100x100", - ), - # Do not allow callers of miniature() to set srcs, cmd, or outs - - # _miniature_impl overrides their values when calling native.genrule() - "srcs": None, - "cmd": None, - "outs": None, - }, -) -``` - -A few remarks: - - * Symbolic macro implementation functions must have `name` and `visibility` - parameters. They should be used for the macro's main target. - - * To document the behavior of a symbolic macro, use `doc` parameters for - `macro()` and its attributes. - - * To call a `genrule`, or any other native rule, use `native.`. - - * Use `**kwargs` to forward the extra inherited arguments to the underlying - `genrule` (it works just like in - [Python](https://docs.python.org/3/tutorial/controlflow.html#keyword-arguments)). - This is useful so that a user can set standard attributes like `tags` or - `testonly`. - -Now, use the macro from the `BUILD` file: - -```starlark -load("//path/to:miniature.bzl", "miniature") - -miniature( - name = "logo_miniature", - src = "image.png", -) - -cc_binary( - name = "my_app", - srcs = ["my_app.cc"], - data = [":logo_miniature"], -) -``` diff --git a/8.4.2/rules/performance.mdx b/8.4.2/rules/performance.mdx deleted file mode 100644 index 5870c0d..0000000 --- a/8.4.2/rules/performance.mdx +++ /dev/null @@ -1,302 +0,0 @@ ---- -title: 'Optimizing Performance' ---- - - - -When writing rules, the most common performance pitfall is to traverse or copy -data that is accumulated from dependencies. When aggregated over the whole -build, these operations can easily take O(N^2) time or space. To avoid this, it -is crucial to understand how to use depsets effectively. - -This can be hard to get right, so Bazel also provides a memory profiler that -assists you in finding spots where you might have made a mistake. Be warned: -The cost of writing an inefficient rule may not be evident until it is in -widespread use. - -## Use depsets - -Whenever you are rolling up information from rule dependencies you should use -[depsets](lib/depset). Only use plain lists or dicts to publish information -local to the current rule. - -A depset represents information as a nested graph which enables sharing. - -Consider the following graph: - -``` -C -> B -> A -D ---^ -``` - -Each node publishes a single string.
With depsets the data looks like this: - -``` -a = depset(direct=['a']) -b = depset(direct=['b'], transitive=[a]) -c = depset(direct=['c'], transitive=[b]) -d = depset(direct=['d'], transitive=[b]) -``` - -Note that each item is only mentioned once. With lists you would get this: - -``` -a = ['a'] -b = ['b', 'a'] -c = ['c', 'b', 'a'] -d = ['d', 'b', 'a'] -``` - -Note that in this case `'a'` is mentioned four times! With larger graphs this -problem will only get worse. - -Here is an example of a rule implementation that uses depsets correctly to -publish transitive information. Note that it is OK to publish rule-local -information using lists if you want since this is not O(N^2). - -``` -MyProvider = provider() - -def _impl(ctx): - my_things = ctx.attr.things - all_things = depset( - direct=my_things, - transitive=[dep[MyProvider].all_things for dep in ctx.attr.deps] - ) - ... - return [MyProvider( - my_things=my_things, # OK, a flat list of rule-local things only - all_things=all_things, # OK, a depset containing dependencies - )] -``` - -See the [depset overview](/extending/depsets) page for more information. - -### Avoid calling `depset.to_list()` - -You can coerce a depset to a flat list using -[`to_list()`](lib/depset#to_list), but doing so usually results in O(N^2) -cost. If at all possible, avoid any flattening of depsets except for debugging -purposes. - -A common misconception is that you can freely flatten depsets if you only do it -at top-level targets, such as an `_binary` rule, since then the cost is not -accumulated over each level of the build graph. But this is *still* O(N^2) when -you build a set of targets with overlapping dependencies. This happens when -building your tests `//foo/tests/...`, or when importing an IDE project. - -### Reduce the number of calls to `depset` - -Calling `depset` inside a loop is often a mistake. It can lead to depsets with -very deep nesting, which perform poorly. For example: - -```python -x = depset() -for i in inputs: - # Do not do that. - x = depset(transitive = [x, i.deps]) -``` - -This code can be replaced easily. First, collect the transitive depsets and -merge them all at once: - -```python -transitive = [] - -for i in inputs: - transitive.append(i.deps) - -x = depset(transitive = transitive) -``` - -This can sometimes be reduced using a list comprehension: - -```python -x = depset(transitive = [i.deps for i in inputs]) -``` - -## Use ctx.actions.args() for command lines - -When building command lines you should use [ctx.actions.args()](lib/Args). -This defers expansion of any depsets to the execution phase. - -Apart from being strictly faster, this will reduce the memory consumption of -your rules -- sometimes by 90% or more. - -Here are some tricks: - -* Pass depsets and lists directly as arguments, instead of flattening them -yourself. They will get expanded by `ctx.actions.args()` for you. -If you need any transformations on the depset contents, look at -[ctx.actions.args#add](lib/Args#add) to see if anything fits the bill. - -* Are you passing `File#path` as arguments? No need. Any -[File](lib/File) is automatically turned into its -[path](lib/File#path), deferred to expansion time. - -* Avoid constructing strings by concatenating them together. -The best string argument is a constant as its memory will be shared between -all instances of your rule. 
- -* If the args are too long for the command line, a `ctx.actions.args()` object -can be conditionally or unconditionally written to a param file using -[`ctx.actions.args#use_param_file`](lib/Args#use_param_file). This is -done behind the scenes when the action is executed. If you need to explicitly -control the params file you can write it manually using -[`ctx.actions.write`](lib/actions#write). - -Example: - -``` -def _impl(ctx): - ... - args = ctx.actions.args() - file = ctx.actions.declare_file(...) - files = depset(...) - - # Bad, constructs a full string "--foo=<file path>" for each rule instance - args.add("--foo=" + file.path) - - # Good, shares "--foo" among all rule instances, and defers file.path to later - # It will however pass ["--foo", <file path>] to the action command line, - # instead of ["--foo=<file path>"] - args.add("--foo", file) - - # Use format if you prefer ["--foo=<file path>"] to ["--foo", <file path>] - args.add(file, format = "--foo=%s") - - # Bad, makes a giant string of a whole depset - args.add(" ".join(["-I%s" % file.short_path for file in files])) - - # Good, only stores a reference to the depset - args.add_all(files, format_each="-I%s", map_each=_to_short_path) - -# Function passed to map_each above -def _to_short_path(f): - return f.short_path -``` - -## Transitive action inputs should be depsets - -When building an action using [ctx.actions.run](lib/actions#run), do not -forget that the `inputs` field accepts a depset. Use this whenever inputs are -collected from dependencies transitively. - -``` -inputs = depset(...) -ctx.actions.run( - inputs = inputs, # Do *not* turn inputs into a list - ... -) -``` - -## Hanging - -If Bazel appears to be hung, you can hit Ctrl-\ or send -Bazel a `SIGQUIT` signal (`kill -3 $(bazel info server_pid)`) to get a thread -dump in the file `$(bazel info output_base)/server/jvm.out`. - -Since you may not be able to run `bazel info` if Bazel is hung, the -`output_base` directory is usually the parent of the `bazel-<workspace>` -symlink in your workspace directory. - -## Performance profiling - -The [JSON trace profile](/advanced/performance/json-trace-profile) can be very -useful to quickly understand what Bazel spent time on during the invocation. - -The [`--experimental_command_profile`](https://bazel.build/reference/command-line-reference#flag--experimental_command_profile) -flag may be used to capture Java Flight Recorder profiles of various kinds -(cpu time, wall time, memory allocations and lock contention). - -The [`--starlark_cpu_profile`](https://bazel.build/reference/command-line-reference#flag--starlark_cpu_profile) -flag may be used to write a pprof profile of CPU usage by all Starlark threads. - -## Memory profiling - -Bazel comes with a built-in memory profiler that can help you check your rule’s -memory use. If there is a problem you can dump the heap to find the -exact line of code that is causing the problem. - -### Enabling memory tracking - -You must pass these two startup flags to *every* Bazel invocation: - - ``` - STARTUP_FLAGS=\ - --host_jvm_args=-javaagent:<path-to-allocation-instrumenter-jar> \ - --host_jvm_args=-DRULE_MEMORY_TRACKER=1 - ``` -Note: You can download the allocation instrumenter jar file from [Maven Central -Repository][allocation-instrumenter-link]. - -[allocation-instrumenter-link]: https://repo1.maven.org/maven2/com/google/code/java-allocation-instrumenter/java-allocation-instrumenter/3.3.4 - -These start the server in memory tracking mode. If you forget these for even -one Bazel invocation the server will restart and you will have to start over.
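Since forgetting the flags restarts the server, it can help to wrap them once in a shell function; the function name and jar path below are placeholders, not part of Bazel:

```
# Hypothetical wrapper; point the -javaagent: path at your downloaded jar.
bazel_memtrack() {
  bazel \
    --host_jvm_args=-javaagent:"$HOME/java-allocation-instrumenter-3.3.4.jar" \
    --host_jvm_args=-DRULE_MEMORY_TRACKER=1 \
    "$@"
}
```

With this, `bazel_memtrack build --nobuild //foo:foo` and the `dump` commands below always reach the same memory-tracking server.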
- -### Using the Memory Tracker - -As an example, look at the target `foo` and see what it does. To only -run the analysis and not run the build execution phase, add the -`--nobuild` flag. - -``` -$ bazel $(STARTUP_FLAGS) build --nobuild //foo:foo -``` - -Next, see how much memory the whole Bazel instance consumes: - -``` -$ bazel $(STARTUP_FLAGS) info used-heap-size-after-gc -> 2594MB -``` - -Break it down by rule class by using `bazel dump --rules`: - -``` -$ bazel $(STARTUP_FLAGS) dump --rules -> - -RULE COUNT ACTIONS BYTES EACH -genrule 33,762 33,801 291,538,824 8,635 -config_setting 25,374 0 24,897,336 981 -filegroup 25,369 25,369 97,496,272 3,843 -cc_library 5,372 73,235 182,214,456 33,919 -proto_library 4,140 110,409 186,776,864 45,115 -android_library 2,621 36,921 218,504,848 83,366 -java_library 2,371 12,459 38,841,000 16,381 -_gen_source 719 2,157 9,195,312 12,789 -_check_proto_library_deps 719 668 1,835,288 2,552 -... (more output) -``` - -Look at where the memory is going by producing a `pprof` file -using `bazel dump --skylark_memory`: - -``` -$ bazel $(STARTUP_FLAGS) dump --skylark_memory=$HOME/prof.gz -> Dumping Starlark heap to: /usr/local/google/home/$USER/prof.gz -``` - -Use the `pprof` tool to investigate the heap. A good starting point is -getting a flame graph by using `pprof -flame $HOME/prof.gz`. - -Get `pprof` from [https://github.com/google/pprof](https://github.com/google/pprof). - -Get a text dump of the hottest call sites annotated with lines: - -``` -$ pprof -text -lines $HOME/prof.gz -> - flat flat% sum% cum cum% - 146.11MB 19.64% 19.64% 146.11MB 19.64% android_library :-1 - 113.02MB 15.19% 34.83% 113.02MB 15.19% genrule :-1 - 74.11MB 9.96% 44.80% 74.11MB 9.96% glob :-1 - 55.98MB 7.53% 52.32% 55.98MB 7.53% filegroup :-1 - 53.44MB 7.18% 59.51% 53.44MB 7.18% sh_test :-1 - 26.55MB 3.57% 63.07% 26.55MB 3.57% _generate_foo_files /foo/tc/tc.bzl:491 - 26.01MB 3.50% 66.57% 26.01MB 3.50% _build_foo_impl /foo/build_test.bzl:78 - 22.01MB 2.96% 69.53% 22.01MB 2.96% _build_foo_impl /foo/build_test.bzl:73 - ... (more output) -``` diff --git a/8.4.2/rules/rules-tutorial.mdx b/8.4.2/rules/rules-tutorial.mdx deleted file mode 100644 index 4c6698e..0000000 --- a/8.4.2/rules/rules-tutorial.mdx +++ /dev/null @@ -1,367 +0,0 @@ ---- -title: 'Rules Tutorial' ---- - - - - -[Starlark](https://github.com/bazelbuild/starlark) is a Python-like -configuration language originally developed for use in Bazel and since adopted -by other tools. Bazel's `BUILD` and `.bzl` files are written in a dialect of -Starlark properly known as the "Build Language", though it is often simply -referred to as "Starlark", especially when emphasizing that a feature is -expressed in the Build Language as opposed to being a built-in or "native" part -of Bazel. Bazel augments the core language with numerous build-related functions -such as `glob`, `genrule`, `java_binary`, and so on. - -See the -[Bazel](/start/) and [Starlark](/extending/concepts) documentation for -more details, and the -[Rules SIG template](https://github.com/bazel-contrib/rules-template) as a -starting point for new rulesets. - -## The empty rule - -To create your first rule, create the file `foo.bzl`: - -```python -def _foo_binary_impl(ctx): - pass - -foo_binary = rule( - implementation = _foo_binary_impl, -) -``` - -When you call the [`rule`](lib/globals#rule) function, you -must define a callback function. The logic will go there, but you -can leave the function empty for now. 
The [`ctx`](lib/ctx) argument -provides information about the target. - -You can load the rule and use it from a `BUILD` file. - -Create a `BUILD` file in the same directory: - -```python -load(":foo.bzl", "foo_binary") - -foo_binary(name = "bin") -``` - -Now, the target can be built: - -``` -$ bazel build bin -INFO: Analyzed target //:bin (2 packages loaded, 17 targets configured). -INFO: Found 1 target... -Target //:bin up-to-date (nothing to build) -``` - -Even though the rule does nothing, it already behaves like other rules: it has a -mandatory name and supports common attributes like `visibility`, `testonly`, and -`tags`. - -## Evaluation model - -Before going further, it's important to understand how the code is evaluated. - -Update `foo.bzl` with some print statements: - -```python -def _foo_binary_impl(ctx): - print("analyzing", ctx.label) - -foo_binary = rule( - implementation = _foo_binary_impl, -) - -print("bzl file evaluation") -``` - -and BUILD: - -```python -load(":foo.bzl", "foo_binary") - -print("BUILD file") -foo_binary(name = "bin1") -foo_binary(name = "bin2") -``` - -[`ctx.label`](lib/ctx#label) -corresponds to the label of the target being analyzed. The `ctx` object has -many useful fields and methods; you can find an exhaustive list in the -[API reference](lib/ctx). - -Query the code: - -``` -$ bazel query :all -DEBUG: /usr/home/bazel-codelab/foo.bzl:8:1: bzl file evaluation -DEBUG: /usr/home/bazel-codelab/BUILD:2:1: BUILD file -//:bin2 -//:bin1 -``` - -Make a few observations: - -* "bzl file evaluation" is printed first. Before evaluating the `BUILD` file, - Bazel evaluates all the files it loads. If multiple `BUILD` files are loading - `foo.bzl`, you would see only one occurrence of "bzl file evaluation" because - Bazel caches the result of the evaluation. -* The callback function `_foo_binary_impl` is not called. Bazel query loads - `BUILD` files, but doesn't analyze targets. - -To analyze the targets, use the [`cquery`](/query/cquery) ("configured -query") or the `build` command: - -``` -$ bazel build :all -DEBUG: /usr/home/bazel-codelab/foo.bzl:2:5: analyzing //:bin1 -DEBUG: /usr/home/bazel-codelab/foo.bzl:2:5: analyzing //:bin2 -INFO: Analyzed 2 targets (0 packages loaded, 0 targets configured). -INFO: Found 2 targets... -``` - -As you can see, `_foo_binary_impl` is now called twice - once for each target. - -Notice that neither "bzl file evaluation" nor "BUILD file" is printed again, -because the evaluation of `foo.bzl` is cached after the call to `bazel query`. -Bazel only emits `print` statements when they are actually executed. - -## Creating a file - -To make your rule more useful, update it to generate a file. First, declare the -file and give it a name. In this example, create a file with the same name as -the target: - -```python -ctx.actions.declare_file(ctx.label.name) -``` - -If you run `bazel build :all` now, you will get an error: - -``` -The following files have no generating action: -bin2 -``` - -Whenever you declare a file, you have to tell Bazel how to generate it by -creating an action. Use [`ctx.actions.write`](lib/actions#write) -to create a file with the given content.
- -```python -def _foo_binary_impl(ctx): - out = ctx.actions.declare_file(ctx.label.name) - ctx.actions.write( - output = out, - content = "Hello\n", - ) -``` - -The code is valid, but it won't do anything: - -``` -$ bazel build bin1 -Target //:bin1 up-to-date (nothing to build) -``` - -The `ctx.actions.write` function registered an action, which taught Bazel -how to generate the file. But Bazel won't create the file until it is -actually requested. So the last thing to do is tell Bazel that the file -is an output of the rule, and not a temporary file used within the rule -implementation. - -```python -def _foo_binary_impl(ctx): - out = ctx.actions.declare_file(ctx.label.name) - ctx.actions.write( - output = out, - content = "Hello!\n", - ) - return [DefaultInfo(files = depset([out]))] -``` - -Look at the `DefaultInfo` and `depset` functions later. For now, -assume that the last line is the way to choose the outputs of a rule. - -Now, run Bazel: - -``` -$ bazel build bin1 -INFO: Found 1 target... -Target //:bin1 up-to-date: - bazel-bin/bin1 - -$ cat bazel-bin/bin1 -Hello! -``` - -You have successfully generated a file! - -## Attributes - -To make the rule more useful, add new attributes using -[the `attr` module](lib/attr) and update the rule definition. - -Add a string attribute called `username`: - -```python -foo_binary = rule( - implementation = _foo_binary_impl, - attrs = { - "username": attr.string(), - }, -) -``` - -Next, set it in the `BUILD` file: - -```python -foo_binary( - name = "bin", - username = "Alice", -) -``` - -To access the value in the callback function, use `ctx.attr.username`. For -example: - -```python -def _foo_binary_impl(ctx): - out = ctx.actions.declare_file(ctx.label.name) - ctx.actions.write( - output = out, - content = "Hello {}!\n".format(ctx.attr.username), - ) - return [DefaultInfo(files = depset([out]))] -``` - -Note that you can make the attribute mandatory or set a default value. Look at -the documentation of [`attr.string`](lib/attr#string). -You may also use other types of attributes, such as [boolean](lib/attr#bool) -or [list of integers](lib/attr#int_list). - -## Dependencies - -Dependency attributes, such as [`attr.label`](lib/attr#label) -and [`attr.label_list`](lib/attr#label_list), -declare a dependency from the target that owns the attribute to the target whose -label appears in the attribute's value. This kind of attribute forms the basis -of the target graph. - -In the `BUILD` file, the target label appears as a string object, such as -`//pkg:name`. In the implementation function, the target will be accessible as a -[`Target`](lib/Target) object. For example, view the files returned -by the target using [`Target.files`](lib/Target#modules.Target.files). - -### Multiple files - -By default, only targets created by rules may appear as dependencies (such as a -`foo_library()` target). If you want the attribute to accept targets that are -input files (such as source files in the repository), you can do it with -`allow_files` and specify the list of accepted file extensions (or `True` to -allow any file extension): - -```python -"srcs": attr.label_list(allow_files = [".java"]), -``` - -The list of files can be accessed with `ctx.files.<attribute name>`.
For -example, the list of files in the `srcs` attribute can be accessed through - -```python -ctx.files.srcs -``` - -### Single file - -If you need only one file, use `allow_single_file`: - -```python -"src": attr.label(allow_single_file = [".java"]) -``` - -This file is then accessible under `ctx.file.<attribute name>`: - -```python -ctx.file.src -``` - -## Create a file with a template - -You can create a rule that generates a .cc file based on a template. You could -use `ctx.actions.write` to output a string constructed in the rule -implementation function, but this has two problems. First, as the template gets -bigger, it becomes more memory-efficient to put it in a separate file and avoid -constructing large strings during the analysis phase. Second, using a separate -file is more convenient for the user. Instead, use -[`ctx.actions.expand_template`](lib/actions#expand_template), -which performs substitutions on a template file. - -Create a `template` attribute to declare a dependency on the template -file: - -```python -def _hello_world_impl(ctx): - out = ctx.actions.declare_file(ctx.label.name + ".cc") - ctx.actions.expand_template( - output = out, - template = ctx.file.template, - substitutions = {"{NAME}": ctx.attr.username}, - ) - return [DefaultInfo(files = depset([out]))] - -hello_world = rule( - implementation = _hello_world_impl, - attrs = { - "username": attr.string(default = "unknown person"), - "template": attr.label( - allow_single_file = [".cc.tpl"], - mandatory = True, - ), - }, -) -``` - -Users can use the rule like this: - -```python -hello_world( - name = "hello", - username = "Alice", - template = "file.cc.tpl", -) - -cc_binary( - name = "hello_bin", - srcs = [":hello"], -) -``` - -If you don't want to expose the template to the end-user and always use the -same one, you can set a default value and make the attribute private: - -```python - "_template": attr.label( - allow_single_file = True, - default = "file.cc.tpl", - ), -``` - -Attributes that start with an underscore are private and cannot be set in a -`BUILD` file. The template is now an _implicit dependency_: Every `hello_world` -target has a dependency on this file. Don't forget to make this file visible -to other packages by updating the `BUILD` file and using -[`exports_files`](/reference/be/functions#exports_files): - -```python -exports_files(["file.cc.tpl"]) -``` - -## Going further - -* Take a look at the [reference documentation for rules](/extending/rules#contents). -* Get familiar with [depsets](/extending/depsets). -* Check out the [examples repository](https://github.com/bazelbuild/examples/tree/master/rules) - which includes additional examples of rules. diff --git a/8.4.2/rules/testing.mdx b/8.4.2/rules/testing.mdx deleted file mode 100644 index 2996e08..0000000 --- a/8.4.2/rules/testing.mdx +++ /dev/null @@ -1,474 +0,0 @@ ---- -title: 'Testing' ---- - - - -There are several different approaches to testing Starlark code in Bazel. This -page gathers the current best practices and frameworks by use case. - -## Testing rules - -[Skylib](https://github.com/bazelbuild/bazel-skylib) has a test framework called -[`unittest.bzl`](https://github.com/bazelbuild/bazel-skylib/blob/main/lib/unittest.bzl) -for checking the analysis-time behavior of rules, such as their actions and -providers. Such tests are called "analysis tests" and are currently the best -option for testing the inner workings of rules. - -Some caveats: - -* Test assertions occur within the build, not a separate test runner process.
- Targets that are created by the test must be named such that they do not - collide with targets from other tests or from the build. An error that - occurs during the test is seen by Bazel as a build breakage rather than a - test failure. - -* It requires a fair amount of boilerplate to set up the rules under test and - the rules containing test assertions. This boilerplate may seem daunting at - first. It helps to [keep in mind](/extending/concepts#evaluation-model) that macros - are evaluated and targets generated during the loading phase, while rule - implementation functions don't run until later, during the analysis phase. - -* Analysis tests are intended to be fairly small and lightweight. Certain - features of the analysis testing framework are restricted to verifying - targets with a maximum number of transitive dependencies (currently 500). - This is due to performance implications of using these features with larger - tests. - -The basic principle is to define a testing rule that depends on the -rule-under-test. This gives the testing rule access to the rule-under-test's -providers. - -The testing rule's implementation function carries out assertions. If there are -any failures, these are not raised immediately by calling `fail()` (which would -trigger an analysis-time build error), but rather by storing the errors in a -generated script that fails at test execution time. - -See below for a minimal toy example, followed by an example that checks actions. - -### Minimal example - -`//mypkg/myrules.bzl`: - -```python -MyInfo = provider(fields = { - "val": "string value", - "out": "output File", -}) - -def _myrule_impl(ctx): - """Rule that just generates a file and returns a provider.""" - out = ctx.actions.declare_file(ctx.label.name + ".out") - ctx.actions.write(out, "abc") - return [MyInfo(val="some value", out=out)] - -myrule = rule( - implementation = _myrule_impl, -) -``` - -`//mypkg/myrules_test.bzl`: - - -```python -load("@bazel_skylib//lib:unittest.bzl", "asserts", "analysistest") -load(":myrules.bzl", "myrule", "MyInfo") - -# ==== Check the provider contents ==== - -def _provider_contents_test_impl(ctx): - env = analysistest.begin(ctx) - - target_under_test = analysistest.target_under_test(env) - # If preferred, could pass these values as "expected" and "actual" keyword - # arguments. - asserts.equals(env, "some value", target_under_test[MyInfo].val) - - # If you forget to return end(), you will get an error about an analysis - # test needing to return an instance of AnalysisTestResultInfo. - return analysistest.end(env) - -# Create the testing rule to wrap the test logic. This must be bound to a global -# variable, not called in a macro's body, since macros get evaluated at loading -# time but the rule gets evaluated later, at analysis time. Since this is a test -# rule, its name must end with "_test". -provider_contents_test = analysistest.make(_provider_contents_test_impl) - -# Macro to setup the test. -def _test_provider_contents(): - # Rule under test. Be sure to tag 'manual', as this target should not be - # built using `:all` except as a dependency of the test. - myrule(name = "provider_contents_subject", tags = ["manual"]) - # Testing rule. - provider_contents_test(name = "provider_contents_test", - target_under_test = ":provider_contents_subject") - # Note the target_under_test attribute is how the test rule depends on - # the real rule target. 
- -# Entry point from the BUILD file; macro for running each test case's macro and -declaring a test suite that wraps them together. -def myrules_test_suite(name): - # Call all test functions and wrap their targets in a suite. - _test_provider_contents() - # ... - - native.test_suite( - name = name, - tests = [ - ":provider_contents_test", - # ... - ], - ) -``` - -`//mypkg/BUILD`: - -```python -load(":myrules.bzl", "myrule") -load(":myrules_test.bzl", "myrules_test_suite") - -# Production use of the rule. -myrule( - name = "mytarget", -) - -# Call a macro that defines targets that perform the tests at analysis time, -# and that can be executed with "bazel test" to return the result. -myrules_test_suite(name = "myrules_test") -``` - -The test can be run with `bazel test //mypkg:myrules_test`. - -Aside from the initial `load()` statements, there are two main parts to the -file: - -* The tests themselves, each of which consists of 1) an analysis-time - implementation function for the testing rule, 2) a declaration of the - testing rule via `analysistest.make()`, and 3) a loading-time function - (macro) for declaring the rule-under-test (and its dependencies) and testing - rule. If the assertions do not change between test cases, 1) and 2) may be - shared by multiple test cases. - -* The test suite function, which calls the loading-time functions for each - test, and declares a `test_suite` target bundling all tests together. - -For consistency, follow the recommended naming convention: Let `foo` stand for -the part of the test name that describes what the test is checking -(`provider_contents` in the above example). For example, a JUnit test method -would be named `testFoo`. - -Then: - -* the macro which generates the test and target under test should be - named `_test_foo` (`_test_provider_contents`) - -* its test rule type should be named `foo_test` (`provider_contents_test`) - -* the label of the target of this rule type should be `foo_test` - (`provider_contents_test`) - -* the implementation function for the testing rule should be named - `_foo_test_impl` (`_provider_contents_test_impl`) - -* the labels of the targets of the rules under test and their dependencies - should be prefixed with `foo_` (`provider_contents_`) - -Note that the labels of all targets can conflict with other labels in the same -BUILD package, so it's helpful to use a unique name for the test. - -### Failure testing - -It may be useful to verify that a rule fails given certain inputs or in a certain -state. This can be done using the analysis test framework: - -The test rule created with `analysistest.make` should specify `expect_failure`: - -```python -failure_testing_test = analysistest.make( - _failure_testing_test_impl, - expect_failure = True, -) -``` - -The test rule implementation should make assertions on the nature of the failure -that took place (specifically, the failure message): - -```python -def _failure_testing_test_impl(ctx): - env = analysistest.begin(ctx) - asserts.expect_failure(env, "This rule should never work") - return analysistest.end(env) -``` - -Also make sure that your target under test is specifically tagged 'manual'. -Without this, building all targets in your package using `:all` will result in a -build of the intentionally-failing target and will exhibit a build failure.
With -'manual', your target under test will build only if explicitly specified, or as -a dependency of a non-manual target (such as your test rule): - -```python -def _test_failure(): - myrule(name = "this_should_fail", tags = ["manual"]) - - failure_testing_test(name = "failure_testing_test", - target_under_test = ":this_should_fail") - -# Then call _test_failure() in the macro which generates the test suite and add -# ":failure_testing_test" to the suite's test targets. -``` - -### Verifying registered actions - -You may want to write tests which make assertions about the actions that your -rule registers, for example, using `ctx.actions.run()`. This can be done in your -analysis test rule implementation function. An example: - -```python -def _inspect_actions_test_impl(ctx): - env = analysistest.begin(ctx) - - target_under_test = analysistest.target_under_test(env) - actions = analysistest.target_actions(env) - asserts.equals(env, 1, len(actions)) - action_output = actions[0].outputs.to_list()[0] - asserts.equals( - env, target_under_test.label.name + ".out", action_output.basename) - return analysistest.end(env) -``` - -Note that `analysistest.target_actions(env)` returns a list of -[`Action`](lib/Action) objects which represent actions registered by the -target under test. - -### Verifying rule behavior under different flags - -You may want to verify your real rule behaves a certain way given certain build -flags. For example, your rule may behave differently if a user specifies: - -```shell -bazel build //mypkg:real_target -c opt -``` - -versus - -```shell -bazel build //mypkg:real_target -c dbg -``` - -At first glance, this could be done by testing the target under test using the -desired build flags: - -```shell -bazel test //mypkg:myrules_test -c opt -``` - -But then it becomes impossible for your test suite to simultaneously contain a -test which verifies the rule behavior under `-c opt` and another test which -verifies the rule behavior under `-c dbg`. Both tests would not be able to run -in the same build! - -This can be solved by specifying the desired build flags when defining the test -rule: - -```python -myrule_c_opt_test = analysistest.make( - _myrule_c_opt_test_impl, - config_settings = { - "//command_line_option:compilation_mode": "opt", - }, -) -``` - -Normally, a target under test is analyzed given the current build flags. -Specifying `config_settings` overrides the values of the specified command line -options. (Any unspecified options will retain their values from the actual -command line). - -In the specified `config_settings` dictionary, command line flags must be -prefixed with a special placeholder value `//command_line_option:`, as is shown -above. - - -## Validating artifacts - -The main ways to check that your generated files are correct are: - -* You can write a test script in shell, Python, or another language, and - create a target of the appropriate `*_test` rule type. - -* You can use a specialized rule for the kind of test you want to perform. - -### Using a test target - -The most straightforward way to validate an artifact is to write a script and -add a `*_test` target to your BUILD file. The specific artifacts you want to -check should be data dependencies of this target. If your validation logic is -reusable for multiple tests, it should be a script that takes command line -arguments that are controlled by the test target's `args` attribute. Here's an -example that validates that the output of `myrule` from above is `"abc"`. 
- -`//mypkg/myrule_validator.sh`: - -```shell -if [ "$(cat $1)" = "abc" ]; then - echo "Passed" - exit 0 -else - echo "Failed" - exit 1 -fi -``` - -`//mypkg/BUILD`: - -```python -... - -myrule( - name = "mytarget", -) - -... - -# Needed for each target whose artifacts are to be checked. -sh_test( - name = "validate_mytarget", - srcs = [":myrule_validator.sh"], - args = ["$(location :mytarget.out)"], - data = [":mytarget.out"], -) -``` - -### Using a custom rule - -A more complicated alternative is to write the shell script as a template that -gets instantiated by a new rule. This involves more indirection and Starlark -logic, but leads to cleaner BUILD files. As a side-benefit, any argument -preprocessing can be done in Starlark instead of the script, and the script is -slightly more self-documenting since it uses symbolic placeholders (for -substitutions) instead of numeric ones (for arguments). - -`//mypkg/myrule_validator.sh.template`: - -```shell -if [ "$(cat %TARGET%)" = "abc" ]; then - echo "Passed" - exit 0 -else - echo "Failed" - exit 1 -fi -``` - -`//mypkg/myrule_validation.bzl`: - -```python -def _myrule_validation_test_impl(ctx): - """Rule for instantiating myrule_validator.sh.template for a given target.""" - exe = ctx.outputs.executable - target = ctx.file.target - ctx.actions.expand_template(output = exe, - template = ctx.file._script, - is_executable = True, - substitutions = { - "%TARGET%": target.short_path, - }) - # This is needed to make sure the output file of myrule is visible to the - # resulting instantiated script. - return [DefaultInfo(runfiles=ctx.runfiles(files=[target]))] - -myrule_validation_test = rule( - implementation = _myrule_validation_test_impl, - attrs = {"target": attr.label(allow_single_file=True), - # You need an implicit dependency in order to access the template. - # A target could potentially override this attribute to modify - # the test logic. - "_script": attr.label(allow_single_file=True, - default=Label("//mypkg:myrule_validator"))}, - test = True, -) -``` - -`//mypkg/BUILD`: - -```python -... - -myrule( - name = "mytarget", -) - -... - -# Needed just once, to expose the template. Could have also used exports_files(), -# and made the _script attribute set allow_files=True. -filegroup( - name = "myrule_validator", - srcs = [":myrule_validator.sh.template"], -) - -# Needed for each target whose artifacts are to be checked. Notice that you no -# longer have to specify the output file name in a data attribute, or its -# $(location) expansion in an args attribute, or the label for the script -# (unless you want to override it). -myrule_validation_test( - name = "validate_mytarget", - target = ":mytarget", -) -``` - -Alternatively, instead of using a template expansion action, you could have -inlined the template into the .bzl file as a string and expanded it during the -analysis phase using the `str.format` method or `%`-formatting. - -## Testing Starlark utilities - -[Skylib](https://github.com/bazelbuild/bazel-skylib)'s -[`unittest.bzl`](https://github.com/bazelbuild/bazel-skylib/blob/main/lib/unittest.bzl) -framework can be used to test utility functions (that is, functions that are -neither macros nor rule implementations). Instead of using `unittest.bzl`'s -`analysistest` library, `unittest` may be used. For such test suites, the -convenience function `unittest.suite()` can be used to reduce boilerplate.
- -`//mypkg/myhelpers.bzl`: - -```python -def myhelper(): - return "abc" -``` - -`//mypkg/myhelpers_test.bzl`: - - -```python -load("@bazel_skylib//lib:unittest.bzl", "asserts", "unittest") -load(":myhelpers.bzl", "myhelper") - -def _myhelper_test_impl(ctx): - env = unittest.begin(ctx) - asserts.equals(env, "abc", myhelper()) - return unittest.end(env) - -myhelper_test = unittest.make(_myhelper_test_impl) - -# No need for a test_myhelper() setup function. - -def myhelpers_test_suite(name): - # unittest.suite() takes care of instantiating the testing rules and creating - # a test_suite. - unittest.suite( - name, - myhelper_test, - # ... - ) -``` - -`//mypkg/BUILD`: - -```python -load(":myhelpers_test.bzl", "myhelpers_test_suite") - -myhelpers_test_suite(name = "myhelpers_tests") -``` - -For more examples, see Skylib's own [tests](https://github.com/bazelbuild/bazel-skylib/blob/main/tests/BUILD). diff --git a/8.4.2/rules/verbs-tutorial.mdx b/8.4.2/rules/verbs-tutorial.mdx deleted file mode 100644 index db7757e..0000000 --- a/8.4.2/rules/verbs-tutorial.mdx +++ /dev/null @@ -1,177 +0,0 @@ ---- -title: 'Using Macros to Create Custom Verbs' ---- - - - -Day-to-day interaction with Bazel happens primarily through a few commands: -`build`, `test`, and `run`. At times, though, these can feel limited: you may -want to push packages to a repository, publish documentation for end-users, or -deploy an application with Kubernetes. But Bazel doesn't have a `publish` or -`deploy` command – where do these actions fit in? - -## The bazel run command - -Bazel's focus on hermeticity, reproducibility, and incrementality means the -`build` and `test` commands aren't helpful for the above tasks. These actions -may run in a sandbox, with limited network access, and aren't guaranteed to be -re-run with every `bazel build`. - -Instead, rely on `bazel run`: the workhorse for tasks that you *want* to have -side effects. Bazel users are accustomed to rules that create executables, and -rule authors can follow a common set of patterns to extend this to -"custom verbs". - -### In the wild: rules_k8s -For example, consider [`rules_k8s`](https://github.com/bazelbuild/rules_k8s), -the Kubernetes rules for Bazel. Suppose you have the following target: - -```python -# BUILD file in //application/k8s -k8s_object( - name = "staging", - kind = "deployment", - cluster = "testing", - template = "deployment.yaml", -) -``` - -The [`k8s_object` rule](https://github.com/bazelbuild/rules_k8s#usage) builds a -standard Kubernetes YAML file when `bazel build` is used on the `staging` -target. However, the `k8s_object` macro also creates additional targets with -names like `:staging.apply` and `:staging.delete`. These targets build scripts -that perform those actions, and when executed with `bazel run -:staging.apply`, they behave like our own `bazel k8s-apply` or `bazel -k8s-delete` commands. - -### Another example: ts_api_guardian_test - -This pattern can also be seen in the Angular project. The -[`ts_api_guardian_test` macro](https://github.com/angular/angular/blob/16ac611a8410e6bcef8ffc779f488ca4fa102155/tools/ts-api-guardian/index.bzl#L22) -produces two targets. The first is a standard `nodejs_test` target which compares -some generated output against a "golden" file (that is, a file containing the -expected output). This can be built and run with a normal `bazel -test` invocation.
In `angular-cli`, you can run [one such -target](https://github.com/angular/angular-cli/blob/e1269cb520871ee29b1a4eec6e6c0e4a94f0b5fc/etc/api/BUILD) -with `bazel test //etc/api:angular_devkit_core_api`. - -Over time, this golden file may need to be updated for legitimate reasons. -Updating this manually is tedious and error-prone, so this macro also provides -a `nodejs_binary` target that updates the golden file, instead of comparing -against it. Effectively, the same test script can be written to run in "verify" -or "accept" mode, based on how it's invoked. This follows the same pattern -you've learned already: there is no native `bazel test-accept` command, but the -same effect can be achieved with -`bazel run //etc/api:angular_devkit_core_api.accept`. - -This pattern can be quite powerful, and turns out to be quite common once you -learn to recognize it. - -## Adapting your own rules - -[Macros](/extending/macros) are the heart of this pattern. Macros are used like -rules, but they can create several targets. Typically, they will create a -target with the specified name which performs the primary build action: perhaps -it builds a normal binary, a Docker image, or an archive of source code. In -this pattern, additional targets are created to produce scripts performing side -effects based on the output of the primary target, like publishing the -resulting binary or updating the expected test output. - -To illustrate this, wrap an imaginary rule that generates a website with -[Sphinx](https://www.sphinx-doc.org) with a macro to create an additional -target that allows the user to publish it when ready. Consider the following -existing rule for generating a website with Sphinx: - -```python -_sphinx_site = rule( - implementation = _sphinx_impl, - attrs = {"srcs": attr.label_list(allow_files = [".rst"])}, -) -``` - -Next, consider a rule like the following, which builds a script that, when run, -publishes the generated pages: - -```python -_sphinx_publisher = rule( - implementation = _publish_impl, - attrs = { - "site": attr.label(), - "_publisher": attr.label( - default = "//internal/sphinx:publisher", - executable = True, - ), - }, - executable = True, -) -``` - -Finally, define the following symbolic macro (available in Bazel 8 or newer) to -create targets for both of the above rules together: - -```starlark -def _sphinx_site_impl(name, visibility, srcs, **kwargs): - # This creates the primary target, producing the Sphinx-generated HTML. We - # set `visibility = visibility` to make it visible to callers of the - # macro. - _sphinx_site(name = name, visibility = visibility, srcs = srcs, **kwargs) - # This creates the secondary target, which produces a script for publishing - # the site generated above. We don't want it to be visible to callers of - # our macro, so we omit visibility for it. - _sphinx_publisher(name = "%s.publish" % name, site = name, **kwargs) - -sphinx_site = macro( - implementation = _sphinx_site_impl, - attrs = {"srcs": attr.label_list(allow_files = [".rst"])}, - # Inherit common attributes like tags and testonly - inherit_attrs = "common", -) -``` - -Or, if you need to support Bazel releases older than Bazel 8, you would instead -define a legacy macro: - -```starlark -def sphinx_site(name, srcs = [], **kwargs): - # This creates the primary target, producing the Sphinx-generated HTML. - _sphinx_site(name = name, srcs = srcs, **kwargs) - # This creates the secondary target, which produces a script for publishing - # the site generated above. 
- _sphinx_publisher(name = "%s.publish" % name, site = name, **kwargs) -``` - -In the `BUILD` files, use the macro as though it just creates the primary -target: - -```python -sphinx_site( - name = "docs", - srcs = ["index.md", "providers.md"], -) -``` - -In this example, a "docs" target is created, just as though the macro were a -standard, single Bazel rule. When built, the rule generates some configuration -and runs Sphinx to produce an HTML site, ready for manual inspection. However, -an additional "docs.publish" target is also created, which builds a script for -publishing the site. Once you check the output of the primary target, you can -use `bazel run :docs.publish` to publish it for public consumption, just like -an imaginary `bazel publish` command. - -It's not immediately obvious what the implementation of the `_sphinx_publisher` -rule might look like. Often, actions like this write a _launcher_ shell script. -This method typically involves using -[`ctx.actions.expand_template`](lib/actions#expand_template) -to write a very simple shell script, in this case invoking the publisher binary -with a path to the output of the primary target. This way, the publisher -implementation can remain generic, the `_sphinx_site` rule can just produce -HTML, and this small script is all that's necessary to combine the two -together. - -In `rules_k8s`, this is indeed what `.apply` does: -[`expand_template`](https://github.com/bazelbuild/rules_k8s/blob/f10e7025df7651f47a76abf1db5ade1ffeb0c6ac/k8s/object.bzl#L213-L241) -writes a very simple Bash script, based on -[`apply.sh.tpl`](https://github.com/bazelbuild/rules_k8s/blob/f10e7025df7651f47a76abf1db5ade1ffeb0c6ac/k8s/apply.sh.tpl), -which runs `kubectl` with the output of the primary target. This script can -then be built and run with `bazel run :staging.apply`, effectively providing a -`k8s-apply` command for `k8s_object` targets. diff --git a/8.4.2/run/bazelrc.mdx b/8.4.2/run/bazelrc.mdx deleted file mode 100644 index 15f89c8..0000000 --- a/8.4.2/run/bazelrc.mdx +++ /dev/null @@ -1,260 +0,0 @@ ---- -title: 'Write bazelrc configuration files' ---- - - - -Bazel accepts many options. Some options are varied frequently (for example, -`--subcommands`) while others stay the same across several builds (such as -`--package_path`). To avoid specifying these unchanged options for every build -(and other commands), you can specify options in a configuration file, called -`.bazelrc`. - -### Where are the `.bazelrc` files? - -Bazel looks for optional configuration files in the following locations, -in the order shown below. The options are interpreted in this order, so -options in later files can override a value from an earlier file if a -conflict arises. All options that control which of these files are loaded are -startup options, which means they must occur after `bazel` and -before the command (`build`, `test`, etc.). - -1. **The system RC file**, unless `--nosystem_rc` is present. - - Path: - - - On Linux/macOS/Unixes: `/etc/bazel.bazelrc` - - On Windows: `%ProgramData%\bazel.bazelrc` - - It is not an error if this file does not exist. - - If another system-specified location is required, you must build a custom - Bazel binary, overriding the `BAZEL_SYSTEM_BAZELRC_PATH` value in - [`//src/main/cpp:option_processor`](https://github.com/bazelbuild/bazel/blob/0.28.0/src/main/cpp/BUILD#L141). - The system-specified location may contain environment variable references, - such as `${VAR_NAME}` on Unix or `%VAR_NAME%` on Windows. - -2.
**The workspace RC file**, unless `--noworkspace_rc` is present. - - Path: `.bazelrc` in your workspace directory (next to the main - `MODULE.bazel` file). - - It is not an error if this file does not exist. - -3. **The home RC file**, unless `--nohome_rc` is present. - - Path: - - - On Linux/macOS/Unixes: `$HOME/.bazelrc` - - On Windows: `%USERPROFILE%\.bazelrc` if it exists, or `%HOME%/.bazelrc` - - It is not an error if this file does not exist. - -4. **The user-specified RC file**, if specified with - `--bazelrc=file` - - This flag is optional and can be specified multiple times. - - `/dev/null` indicates that all further `--bazelrc`s will be ignored, which - is useful to disable the search for a user rc file, such as in release - builds. - - For example: - - ``` - --bazelrc=x.rc --bazelrc=y.rc --bazelrc=/dev/null --bazelrc=z.rc - ``` - - - `x.rc` and `y.rc` are read. - - `z.rc` is ignored due to the prior `/dev/null`. - -In addition to this optional configuration file, Bazel looks for a global rc -file. For more details, see the [global bazelrc section](#global-bazelrc). - - -### `.bazelrc` syntax and semantics - -Like all UNIX "rc" files, the `.bazelrc` file is a text file with a line-based -grammar. Empty lines and lines starting with `#` (comments) are ignored. Each -line contains a sequence of words, which are tokenized according to the same -rules as the Bourne shell. - -#### Imports - -Lines that start with `import` or `try-import` are special: use these to load -other "rc" files. To specify a path that is relative to the workspace root, -write `import %workspace%/path/to/bazelrc`. - -The difference between `import` and `try-import` is that Bazel fails if the -`import`'ed file is missing (or can't be read), but not so for a `try-import`'ed -file. - -Import precedence: - -- Options in the imported file take precedence over options specified before - the import statement. -- Options specified after the import statement take precedence over the - options in the imported file. -- Options in files imported later take precedence over files imported earlier. - -#### Option defaults - -Most lines of a bazelrc define default option values. The first word on each -line specifies when these defaults are applied: - -- `startup`: startup options, which go before the command, and are described - in `bazel help startup_options`. -- `common`: options that should be applied to all Bazel commands that support - them. If a command does not support an option specified in this way, the - option is ignored so long as it is valid for *some* other Bazel command. - Note that this only applies to option names: If the current command accepts - an option with the specified name, but doesn't support the specified value, - it will fail. -- `always`: options that apply to all Bazel commands. If a command does not - support an option specified in this way, it will fail. -- _`command`_: Bazel command, such as `build` or `query` to which the options - apply. These options also apply to all commands that inherit from the - specified command. (For example, `test` inherits from `build`.) - -Each of these lines may be used more than once and the arguments that follow the -first word are combined as if they had appeared on a single line. (Users of CVS, -another tool with a "Swiss army knife" command-line interface, will find the -syntax similar to that of `.cvsrc`.)
For example, the lines:
-
-```posix-terminal
-build --test_tmpdir=/tmp/foo --verbose_failures
-
-build --test_tmpdir=/tmp/bar
-```
-
-are combined as:
-
-```posix-terminal
-build --test_tmpdir=/tmp/foo --verbose_failures --test_tmpdir=/tmp/bar
-```
-
-so the effective flags are `--verbose_failures` and `--test_tmpdir=/tmp/bar`.
-
-Option precedence:
-
-- Options on the command line always take precedence over those in rc files.
-  For example, if an rc file says `build -c opt` but the command line flag is
-  `-c dbg`, the command line flag takes precedence.
-- Within the rc file, precedence is governed by specificity: lines for a more
-  specific command take precedence over lines for a less specific command.
-
-  Specificity is defined by inheritance. Some commands inherit options from
-  other commands, making the inheriting command more specific than the base
-  command. For example, `test` inherits from the `build` command, so all `bazel
-  build` flags are valid for `bazel test`, and all `build` lines apply also to
-  `bazel test` unless there's a `test` line for the same option. If the rc
-  file says:
-
-  ```posix-terminal
-  test -c dbg --test_env=PATH
-
-  build -c opt --verbose_failures
-  ```
-
-  then `bazel build //foo` will use `-c opt --verbose_failures`, and `bazel
-  test //foo` will use `--verbose_failures -c dbg --test_env=PATH`.
-
-  The inheritance (specificity) graph is:
-
-  * Every command inherits from `common`
-  * The following commands inherit from (and are more specific than)
-    `build`: `test`, `run`, `clean`, `mobile-install`, `info`,
-    `print_action`, `config`, `cquery`, and `aquery`
-  * `coverage`, `fetch`, and `vendor` inherit from `test`
-
-- Two lines specifying options for the same command at equal specificity are
-  parsed in the order in which they appear within the file.
-
-- Because this precedence rule does not match the file order, it helps
-  readability if you follow the precedence order within rc files: start with
-  `common` options at the top, and end with the most-specific commands at the
-  bottom of the file. This way, the order in which the options are read is the
-  same as the order in which they are applied, which is more intuitive.
-
-The arguments specified on a line of an rc file may include arguments that are
-not options, such as the names of build targets, and so on. These, like the
-options specified in the same files, have lower precedence than their siblings
-on the command line, and are always prepended to the explicit list of
-non-option arguments.
-
-#### `--config`
-
-In addition to setting option defaults, the rc file can be used to group options
-and provide a shorthand for common groupings. This is done by adding a `:name`
-suffix to the command. These options are ignored by default, but will be
-included when the option `--config=name` is present,
-either on the command line or in a `.bazelrc` file, recursively, even inside of
-another config definition. The options specified by `command:name` will only be
-expanded for applicable commands, in the precedence order described above.
-
-Note: Configs can be defined in any `.bazelrc` file, and all lines of
-the form `command:name` (for applicable commands) will be expanded, across the
-different rc files. In order to avoid name conflicts, we suggest that configs
-defined in personal rc files start with an underscore (`_`) to avoid
-unintentional name sharing.
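-
-For illustration, a config group might be defined and used like this (the
-`--config=debug_tests` name and the flag choices are hypothetical, not a
-recommendation):
-
-```
-# Definition of --config=debug_tests
-build:debug_tests -c dbg
-build:debug_tests --verbose_failures
-test:debug_tests --test_output=errors
-```
-
-Running `bazel test --config=debug_tests //...` then behaves as if those
-flags had been set as plain `build` and `test` defaults for that invocation.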
- -`--config=foo` expands to the options defined in -[the rc files](#bazelrc-file-locations) "in-place" so that the options -specified for the config have the same precedence that the `--config=foo` option -had. - -This syntax does not extend to the use of `startup` to set -[startup options](#option-defaults). Setting -`startup:config-name --some_startup_option` in the .bazelrc will be ignored. - -#### `--enable_platform_specific_config` - -Platform specific configs in the `.bazelrc` can be automatically enabled using -`--enable_platform_specific_config`. For example, if the host OS is Linux and -the `build` command is run, the `build:linux` configuration will be -automatically enabled. Supported OS identifiers are `linux`, `macos`, `windows`, -`freebsd`, and `openbsd`. Enabling this flag is equivalent to using -`--config=linux` on Linux, `--config=windows` on Windows, and so on. - -See [--enable_platform_specific_config](/reference/command-line-reference#flag--enable_platform_specific_config). - -#### Example - -Here's an example `~/.bazelrc` file: - -``` -# Bob's Bazel option defaults - -startup --host_jvm_args=-XX:-UseParallelGC -import /home/bobs_project/bazelrc -build --show_timestamps --keep_going --jobs 600 -build --color=yes -query --keep_going - -# Definition of --config=memcheck -build:memcheck --strip=never --test_timeout=3600 -``` - -### Other files governing Bazel's behavior - -#### `.bazelignore` - -You can specify directories within the workspace -that you want Bazel to ignore, such as related projects -that use other build systems. Place a file called -`.bazelignore` at the root of the workspace -and add the directories you want Bazel to ignore, one per -line. Entries are relative to the workspace root. - -### The global bazelrc file - -Bazel reads optional bazelrc files in this order: - -1. System rc-file located at `etc/bazel.bazelrc`. -2. Workspace rc-file located at `$workspace/tools/bazel.rc`. -3. Home rc-file located at `$HOME/.bazelrc` - -Each bazelrc file listed here has a corresponding flag which can be used to -disable them (e.g. `--nosystem_rc`, `--noworkspace_rc`, `--nohome_rc`). You can -also make Bazel ignore all bazelrcs by passing the `--ignore_all_rc_files` -startup option. diff --git a/8.4.2/run/client-server.mdx b/8.4.2/run/client-server.mdx deleted file mode 100644 index 1868635..0000000 --- a/8.4.2/run/client-server.mdx +++ /dev/null @@ -1,58 +0,0 @@ ---- -title: 'Client/server implementation' ---- - - - -The Bazel system is implemented as a long-lived server process. This allows it -to perform many optimizations not possible with a batch-oriented implementation, -such as caching of BUILD files, dependency graphs, and other metadata from one -build to the next. This improves the speed of incremental builds, and allows -different commands, such as `build` and `query` to share the same cache of -loaded packages, making queries very fast. Each server can handle at most one -invocation at a time; further concurrent invocations will either block or -fail-fast (see `--block_for_lock`). - -When you run `bazel`, you're running the client. The client finds the server -based on the [output base](/run/scripts#output-base-option), which by default is -determined by the path of the base workspace directory and your userid, so if -you build in multiple workspaces, you'll have multiple output bases and thus -multiple Bazel server processes. 
Multiple users on the same workstation can -build concurrently in the same workspace because their output bases will differ -(different userids). - -If the client cannot find a running server instance, it starts a new one. It -does this by checking if the output base already exists, implying the blaze -archive has already been unpacked. Otherwise if the output base doesn't exist, -the client unzips the archive's files and sets their `mtime`s to a date 9 years -in the future. Once installed, the client confirms that the `mtime`s of the -unzipped files are equal to the far off date to ensure no installation tampering -has occurred. - -The server process will stop after a period of inactivity (3 hours, by default, -which can be modified using the startup option `--max_idle_secs`). For the most -part, the fact that there is a server running is invisible to the user, but -sometimes it helps to bear this in mind. For example, if you're running scripts -that perform a lot of automated builds in different directories, it's important -to ensure that you don't accumulate a lot of idle servers; you can do this by -explicitly shutting them down when you're finished with them, or by specifying -a short timeout period. - -The name of a Bazel server process appears in the output of `ps x` or `ps -e f` -as bazel(dirname), where _dirname_ is the basename of the -directory enclosing the root of your workspace directory. For example: - -```posix-terminal -ps -e f -16143 ? Sl 3:00 bazel(src-johndoe2) -server -Djava.library.path=... -``` - -This makes it easier to find out which server process belongs to a given -workspace. (Beware that with certain other options to `ps`, Bazel server -processes may be named just `java`.) Bazel servers can be stopped using the -[shutdown](/docs/user-manual#shutdown) command. - -When running `bazel`, the client first checks that the server is the appropriate -version; if not, the server is stopped and a new one started. This ensures that -the use of a long-running server process doesn't interfere with proper -versioning. diff --git a/8.4.2/run/scripts.mdx b/8.4.2/run/scripts.mdx deleted file mode 100644 index f267c90..0000000 --- a/8.4.2/run/scripts.mdx +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: 'Calling Bazel from scripts' ---- - - - -You can call Bazel from scripts to perform a build, run tests, or query -the dependency graph. Bazel has been designed to enable effective scripting, but -this section lists some details to bear in mind to make your scripts more -robust. - -### Choosing the output base - -The `--output_base` option controls where the Bazel process should write the -outputs of a build to, as well as various working files used internally by -Bazel, one of which is a lock that guards against concurrent mutation of the -output base by multiple Bazel processes. - -Choosing the correct output base directory for your script depends on several -factors. If you need to put the build outputs in a specific location, this will -dictate the output base you need to use. If you are making a "read only" call to -Bazel (such as `bazel query`), the locking factors will be more important. In -particular, if you need to run multiple instances of your script concurrently, -you should be mindful that each Blaze server process can handle at most one -invocation [at a time](/run/client-server#clientserver-implementation). 
-Depending on your situation it may make sense for each instance of your script -to wait its turn, or it may make sense to use `--output_base` to run multiple -Blaze servers and use those. - -If you use the default output base value, you will be contending for the same -lock used by the user's interactive Bazel commands. If the user issues -long-running commands such as builds, your script will have to wait for those -commands to complete before it can continue. - -### Notes about server mode - -By default, Bazel uses a long-running [server process](/run/client-server) as an -optimization. When running Bazel in a script, don't forget to call `shutdown` -when you're finished with the server, or, specify `--max_idle_secs=5` so that -idle servers shut themselves down promptly. - -### What exit code will I get? - -Bazel attempts to differentiate failures due to the source code under -consideration from external errors that prevent Bazel from executing properly. -Bazel execution can result in following exit codes: - -**Exit Codes common to all commands:** - -- `0` - Success -- `2` - Command Line Problem, Bad or Illegal flags or command combination, or - Bad Environment Variables. Your command line must be modified. -- `8` - Build Interrupted but we terminated with an orderly shutdown. -- `9` - The server lock is held and `--noblock_for_lock` was passed. -- `32` - External Environment Failure not on this machine. - -- `33` - Bazel ran out of memory and crashed. You need to modify your command line. -- `34` - Reserved for Google-internal use. -- `35` - Reserved for Google-internal use. -- `36` - Local Environmental Issue, suspected permanent. -- `37` - Unhandled Exception / Internal Bazel Error. -- `38` - Transient error publishing results to the Build Event Service. -- `39` - Blobs required by Bazel are evicted from Remote Cache. -- `41-44` - Reserved for Google-internal use. -- `45` - Persistent error publishing results to the Build Event Service. -- `47` - Reserved for Google-internal use. -- `49` - Reserved for Google-internal use. - -**Return codes for commands `bazel build`, `bazel test`:** - -- `1` - Build failed. -- `3` - Build OK, but some tests failed or timed out. -- `4` - Build successful but no tests were found even though testing was - requested. - - -**For `bazel run`:** - -- `1` - Build failed. -- If the build succeeds but the executed subprocess returns a non-zero exit - code it will be the exit code of the command as well. - -**For `bazel query`:** - -- `3` - Partial success, but the query encountered 1 or more errors in the - input BUILD file set and therefore the results of the operation are not 100% - reliable. This is likely due to a `--keep_going` option on the command line. -- `7` - Command failure. - -Future Bazel versions may add additional exit codes, replacing generic failure -exit code `1` with a different non-zero value with a particular meaning. -However, all non-zero exit values will always constitute an error. - - -### Reading the .bazelrc file - -By default, Bazel reads the [`.bazelrc` file](/run/bazelrc) from the base -workspace directory or the user's home directory. Whether or not this is -desirable is a choice for your script; if your script needs to be perfectly -hermetic (such as when doing release builds), you should disable reading the -.bazelrc file by using the option `--bazelrc=/dev/null`. If you want to perform -a build using the user's preferred settings, the default behavior is better. 
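-
-Putting these pieces together: a hermetic scripted build might disable rc
-files, keep the server timeout short, and branch on the exit codes listed
-above. This is a minimal sketch; `//foo:release` is a placeholder label:
-
-```posix-terminal
-bazel --bazelrc=/dev/null --max_idle_secs=5 build //foo:release
-status=$?
-case "$status" in
-  0) echo "Build succeeded." ;;
-  1) echo "Build failed." ;;
-  2) echo "Bad flags or environment; fix the command line." ;;
-  *) echo "Other error (exit code $status)." ;;
-esac
-```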
- -### Command log - -The Bazel output is also available in a command log file which you can find with -the following command: - -```posix-terminal -bazel info command_log -``` - -The command log file contains the interleaved stdout and stderr streams of the -most recent Bazel command. Note that running `bazel info` will overwrite the -contents of this file, since it then becomes the most recent Bazel command. -However, the location of the command log file will not change unless you change -the setting of the `--output_base` or `--output_user_root` options. - -### Parsing output - -The Bazel output is quite easy to parse for many purposes. Two options that may -be helpful for your script are `--noshow_progress` which suppresses progress -messages, and --show_result n, which controls whether or -not "build up-to-date" messages are printed; these messages may be parsed to -discover which targets were successfully built, and the location of the output -files they created. Be sure to specify a very large value of _n_ if you rely on -these messages. - -## Troubleshooting performance by profiling - -See the [Performance Profiling](/rules/performance#performance-profiling) section. diff --git a/8.4.2/start/android-app.mdx b/8.4.2/start/android-app.mdx deleted file mode 100644 index b0e6f1b..0000000 --- a/8.4.2/start/android-app.mdx +++ /dev/null @@ -1,391 +0,0 @@ ---- -title: 'Bazel Tutorial: Build an Android App' ---- - - -**Note:** There are known limitations on using Bazel for building Android apps. -Visit the Github [team-Android hotlist](https://github.com/bazelbuild/bazel/issues?q=is%3Aissue+is%3Aopen+label%3Ateam-Android) to see the list of known issues. While the Bazel team and Open Source Software (OSS) contributors work actively to address known issues, users should be aware that Android Studio does not officially support Bazel projects. - -This tutorial covers how to build a simple Android app using Bazel. - -Bazel supports building Android apps using the -[Android rules](/reference/be/android). - -This tutorial is intended for Windows, macOS and Linux users and does not -require experience with Bazel or Android app development. You do not need to -write any Android code in this tutorial. - -## What you'll learn - -In this tutorial you learn how to: - -* Set up your environment by installing Bazel and Android Studio, and - downloading the sample project. -* Set up a Bazel workspace that contains the source code - for the app and a `MODULE.bazel` file that identifies the top level of the - workspace directory. -* Update the `MODULE.bazel` file to contain references to the required - external dependencies, like the Android SDK. -* Create a `BUILD` file. -* Build the app with Bazel. -* Deploy and run the app on an Android emulator or physical device. - -## Before you begin - -### Install Bazel - -Before you begin the tutorial, install the following software: - -* **Bazel.** To install, follow the [installation instructions](/install). -* **Android Studio.** To install, follow the steps to [download Android - Studio](https://developer.android.com/sdk/index.html). - Execute the setup wizard to download the SDK and configure your environment. -* (Optional) **Git.** Use `git` to download the Android app project. - -### Get the sample project - -For the sample project, use a basic Android app project in -[Bazel's examples repository](https://github.com/bazelbuild/examples). 
- -This app has a single button that prints a greeting when clicked: - -![Button greeting](/docs/images/android_tutorial_app.png "Tutorial app button greeting") - -**Figure 1.** Android app button greeting. - -Clone the repository with `git` (or [download the ZIP file -directly](https://github.com/bazelbuild/examples/archive/master.zip)): - -```posix-terminal -git clone https://github.com/bazelbuild/examples -``` - -The sample project for this tutorial is in `examples/android/tutorial`. For -the rest of the tutorial, you will be executing commands in this directory. - -### Review the source files - -Take a look at the source files for the app. - -``` -. -├── README.md -└── src - └── main - ├── AndroidManifest.xml - └── java - └── com - └── example - └── bazel - ├── AndroidManifest.xml - ├── Greeter.java - ├── MainActivity.java - └── res - ├── layout - │ └── activity_main.xml - └── values - ├── colors.xml - └── strings.xml -``` - -The key files and directories are: - -| Name | Location | -| ----------------------- | ---------------------------------------------------------------------------------------- | -| Android manifest files | `src/main/AndroidManifest.xml` and `src/main/java/com/example/bazel/AndroidManifest.xml` | -| Android source files | `src/main/java/com/example/bazel/MainActivity.java` and `Greeter.java` | -| Resource file directory | `src/main/java/com/example/bazel/res/` | - - -## Build with Bazel - -### Set up the workspace - -A [workspace](/concepts/build-ref#workspace) is a directory that contains the -source files for one or more software projects, and has a `MODULE.bazel` file at -its root. - -The `MODULE.bazel` file may be empty or may contain references to [external -dependencies](/external/overview) required to build your project. - -First, run the following command to create an empty `MODULE.bazel` file: - -| OS | Command | -| ------------------------ | ----------------------------------- | -| Linux, macOS | `touch MODULE.bazel` | -| Windows (Command Prompt) | `type nul > MODULE.bazel` | -| Windows (PowerShell) | `New-Item MODULE.bazel -ItemType file` | - -### Running Bazel - -You can now check if Bazel is running correctly with the command: - -```posix-terminal -bazel info workspace -``` - -If Bazel prints the path of the current directory, you're good to go! If the -`MODULE.bazel` file does not exist, you may see an error message like: - -``` -ERROR: The 'info' command is only supported from within a workspace. -``` - -### Integrate with the Android SDK - -Bazel needs to run the Android SDK -[build tools](https://developer.android.com/tools/revisions/build-tools.html) -to build the app. This means that you need to add some information to your -`MODULE.bazel` file so that Bazel knows where to find them. - -Add the following line to your `MODULE.bazel` file: - -```python -bazel_dep(name = "rules_android", version = "0.5.1") -``` - -This will use the Android SDK at the path referenced by the `ANDROID_HOME` -environment variable, and automatically detect the highest API level and the -latest version of build tools installed within that location. - -You can set the `ANDROID_HOME` variable to the location of the Android SDK. Find -the path to the installed SDK using Android Studio's [SDK -Manager](https://developer.android.com/studio/intro/update#sdk-manager). 
-Assuming the SDK is installed to default locations, you can use the following -commands to set the `ANDROID_HOME` variable: - -| OS | Command | -| ------------------------ | --------------------------------------------------- | -| Linux | `export ANDROID_HOME=$HOME/Android/Sdk/` | -| macOS | `export ANDROID_HOME=$HOME/Library/Android/sdk` | -| Windows (Command Prompt) | `set ANDROID_HOME=%LOCALAPPDATA%\Android\Sdk` | -| Windows (PowerShell) | `$env:ANDROID_HOME="$env:LOCALAPPDATA\Android\Sdk"` | - -The above commands set the variable only for the current shell session. To make -them permanent, run the following commands: - -| OS | Command | -| ------------------------ | --------------------------------------------------- | -| Linux | `echo "export ANDROID_HOME=$HOME/Android/Sdk/" >> ~/.bashrc` | -| macOS | `echo "export ANDROID_HOME=$HOME/Library/Android/Sdk/" >> ~/.bashrc` | -| Windows (Command Prompt) | `setx ANDROID_HOME "%LOCALAPPDATA%\Android\Sdk"` | -| Windows (PowerShell) | `[System.Environment]::SetEnvironmentVariable('ANDROID_HOME', "$env:LOCALAPPDATA\Android\Sdk", [System.EnvironmentVariableTarget]::User)` | - - -**Optional:** If you want to compile native code into your Android app, you -also need to download the [Android -NDK](https://developer.android.com/ndk/downloads/index.html) -and use `rules_android_ndk` by adding the following line to your `MODULE.bazel` file: - -```python -bazel_dep(name = "rules_android_ndk", version = "0.1.2") -``` - - -For more information, read [Using the Android Native Development Kit with -Bazel](/docs/android-ndk). - -It's not necessary to set the API levels to the same value for the SDK and NDK. -[This page](https://developer.android.com/ndk/guides/stable_apis.html) -contains a map from Android releases to NDK-supported API levels. - -### Create a BUILD file - -A [`BUILD` file](/concepts/build-files) describes the relationship -between a set of build outputs, like compiled Android resources from `aapt` or -class files from `javac`, and their dependencies. These dependencies may be -source files (Java, C++) in your workspace or other build outputs. `BUILD` files -are written in a language called **Starlark**. - -`BUILD` files are part of a concept in Bazel known as the *package hierarchy*. -The package hierarchy is a logical structure that overlays the directory -structure in your workspace. Each [package](/concepts/build-ref#packages) is a -directory (and its subdirectories) that contains a related set of source files -and a `BUILD` file. The package also includes any subdirectories, excluding -those that contain their own `BUILD` file. The *package name* is the path to the -`BUILD` file relative to the `MODULE.bazel` file. - -Note that Bazel's package hierarchy is conceptually different from the Java -package hierarchy of your Android App directory where the `BUILD` file is -located, although the directories may be organized identically. - -For the simple Android app in this tutorial, the source files in `src/main/` -comprise a single Bazel package. A more complex project may have many nested -packages. - -#### Add an android_library rule - -A `BUILD` file contains several different types of declarations for Bazel. The -most important type is the -[build rule](/concepts/build-files#types-of-build-rules), which tells -Bazel how to build an intermediate or final software output from a set of source -files or other dependencies. 
Bazel provides two build rules,
-[`android_library`](/reference/be/android#android_library) and
-[`android_binary`](/reference/be/android#android_binary), that you can use to
-build an Android app.
-
-For this tutorial, you'll first use the
-`android_library` rule to tell Bazel to build an [Android library
-module](http://developer.android.com/tools/projects/index.html#LibraryProjects)
-from the app source code and resource files. You'll then use the
-`android_binary` rule to tell Bazel how to build the Android application package.
-
-Create a new `BUILD` file in the `src/main/java/com/example/bazel` directory,
-and declare a new `android_library` target:
-
-`src/main/java/com/example/bazel/BUILD`:
-
-```python
-package(
-    default_visibility = ["//src:__subpackages__"],
-)
-
-android_library(
-    name = "greeter_activity",
-    srcs = [
-        "Greeter.java",
-        "MainActivity.java",
-    ],
-    manifest = "AndroidManifest.xml",
-    resource_files = glob(["res/**"]),
-)
-```
-
-The `android_library` build rule contains a set of attributes that specify the
-information that Bazel needs to build a library module from the source files.
-Note also that the name of the rule is `greeter_activity`. You'll reference the
-rule using this name as a dependency in the `android_binary` rule.
-
-#### Add an android_binary rule
-
-The [`android_binary`](/reference/be/android#android_binary) rule builds
-the Android application package (`.apk` file) for your app.
-
-Create a new `BUILD` file in the `src/main/` directory,
-and declare a new `android_binary` target:
-
-`src/main/BUILD`:
-
-```python
-android_binary(
-    name = "app",
-    manifest = "AndroidManifest.xml",
-    deps = ["//src/main/java/com/example/bazel:greeter_activity"],
-)
-```
-
-Here, the `deps` attribute references the output of the `greeter_activity` rule
-you added to the `BUILD` file above. This means that when Bazel builds the
-output of this rule it checks first to see if the output of the
-`greeter_activity` library rule has been built and is up-to-date. If not, Bazel
-builds it and then uses that output to build the application package file.
-
-Now, save and close the file.
-
-### Build the app
-
-Try building the app! Run the following command to build the
-`android_binary` target:
-
-```posix-terminal
-bazel build //src/main:app
-```
-
-The [`build`](/docs/user-manual#build) subcommand instructs Bazel to build the
-target that follows. The target is specified as the name of a build rule inside
-a `BUILD` file, along with the package path relative to your workspace
-directory. For this example, the target is `app` and the package path is
-`//src/main/`.
-
-Note that you can sometimes omit the package path or target name, depending on
-your current working directory at the command line and the name of the target.
-For more details about target labels and paths, see [Labels](/concepts/labels).
-
-Bazel will start to build the sample app. During the build process, its output
-will appear similar to the following:
-
-```bash
-INFO: Analysed target //src/main:app (0 packages loaded, 0 targets configured).
-INFO: Found 1 target...
-Target //src/main:app up-to-date:
-  bazel-bin/src/main/app_deploy.jar
-  bazel-bin/src/main/app_unsigned.apk
-  bazel-bin/src/main/app.apk
-```
-
-#### Locate the build outputs
-
-Bazel puts the outputs of both intermediate and final build operations in a set
-of per-user, per-workspace output directories. 
These directories are symlinked -from the following locations at the top-level of the project directory, where -the `MODULE.bazel` file is: - -* `bazel-bin` stores binary executables and other runnable build outputs -* `bazel-genfiles` stores intermediary source files that are generated by - Bazel rules -* `bazel-out` stores other types of build outputs - -Bazel stores the Android `.apk` file generated using the `android_binary` rule -in the `bazel-bin/src/main` directory, where the subdirectory name `src/main` is -derived from the name of the Bazel package. - -At a command prompt, list the contents of this directory and find the `app.apk` -file: - -| OS | Command | -| ------------------------ | ------------------------ | -| Linux, macOS | `ls bazel-bin/src/main` | -| Windows (Command Prompt) | `dir bazel-bin\src\main` | -| Windows (PowerShell) | `ls bazel-bin\src\main` | - - -### Run the app - -You can now deploy the app to a connected Android device or emulator from the -command line using the [`bazel -mobile-install`](/docs/user-manual#mobile-install) command. This command uses -the Android Debug Bridge (`adb`) to communicate with the device. You must set up -your device to use `adb` following the instructions in [Android Debug -Bridge](http://developer.android.com/tools/help/adb.html) before deployment. You -can also choose to install the app on the Android emulator included in Android -Studio. Make sure the emulator is running before executing the command below. - -Enter the following: - -```posix-terminal -bazel mobile-install //src/main:app -``` - -Next, find and launch the "Bazel Tutorial App": - -![Bazel tutorial app](/docs/images/android_tutorial_before.png "Bazel tutorial app") - -**Figure 2.** Bazel tutorial app. - -**Congratulations! You have just installed your first Bazel-built Android app.** - -Note that the `mobile-install` subcommand also supports the -[`--incremental`](/docs/user-manual#mobile-install) flag that can be used to -deploy only those parts of the app that have changed since the last deployment. - -It also supports the `--start_app` flag to start the app immediately upon -installing it. - -## Further reading - -For more details, see these pages: - -* Open issues on [GitHub](https://github.com/bazelbuild/bazel/issues) -* More information on [mobile-install](/docs/mobile-install) -* Integrate external dependencies like AppCompat, Guava and JUnit from Maven - repositories using [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external) -* Run Robolectric tests with the [robolectric-bazel](https://github.com/robolectric/robolectric-bazel) - integration. -* Testing your app with [Android instrumentation tests](/docs/android-instrumentation-test) -* Integrating C and C++ code into your Android app with the [NDK](/docs/android-ndk) -* See more Bazel example projects of: - * [a Kotlin app](https://github.com/bazelbuild/rules_jvm_external/tree/master/examples/android_kotlin_app) - * [Robolectric testing](https://github.com/bazelbuild/rules_jvm_external/tree/master/examples/android_local_test) - * [Espresso testing](https://github.com/bazelbuild/rules_jvm_external/tree/master/examples/android_instrumentation_test) - -Happy building! diff --git a/8.4.2/start/cpp.mdx b/8.4.2/start/cpp.mdx deleted file mode 100644 index adb7c71..0000000 --- a/8.4.2/start/cpp.mdx +++ /dev/null @@ -1,411 +0,0 @@ ---- -title: 'Bazel Tutorial: Build a C++ Project' ---- - - - -## Introduction - -New to Bazel? You're in the right place. 
Follow this First Build tutorial for a
-simplified introduction to using Bazel. This tutorial defines key terms as they
-are used in Bazel's context and walks you through the basics of the Bazel
-workflow. Starting with the tools you need, you will build and run three
-projects with increasing complexity and learn how and why they get more complex.
-
-While Bazel is a [build system](https://bazel.build/basics/build-systems) that
-supports multi-language builds, this tutorial uses a C++ project as an example
-and provides the general guidelines and flow that apply to most languages.
-
-Estimated completion time: 30 minutes.
-
-### Prerequisites
-
-Start by [installing Bazel](https://bazel.build/install), if you haven't
-already. This tutorial uses Git for source control, so for best results [install
-Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) as well.
-
-Next, retrieve the sample project from Bazel's GitHub repository by running the
-following in your command-line tool of choice:
-
-```posix-terminal
-git clone https://github.com/bazelbuild/examples
-```
-
-The sample project for this tutorial is in the `examples/cpp-tutorial`
-directory.
-
-Take a look at how it's structured:
-
-```none
-examples
-└── cpp-tutorial
-    ├──stage1
-    │  ├── main
-    │  │   ├── BUILD
-    │  │   └── hello-world.cc
-    │  └── MODULE.bazel
-    ├──stage2
-    │  ├── main
-    │  │   ├── BUILD
-    │  │   ├── hello-world.cc
-    │  │   ├── hello-greet.cc
-    │  │   └── hello-greet.h
-    │  └── MODULE.bazel
-    └──stage3
-        ├── main
-        │   ├── BUILD
-        │   ├── hello-world.cc
-        │   ├── hello-greet.cc
-        │   └── hello-greet.h
-        ├── lib
-        │   ├── BUILD
-        │   ├── hello-time.cc
-        │   └── hello-time.h
-        └── MODULE.bazel
-```
-
-There are three sets of files, each set representing a stage in this tutorial.
-In the first stage, you will build a single
-[target](https://bazel.build/reference/glossary#target) residing in a single
-[package](https://bazel.build/reference/glossary#package). In the second stage,
-you will build both a binary and a library from a single package. In the third
-and final stage, you will build a project with multiple packages and build it
-with multiple targets.
-
-### Summary: Introduction
-
-By installing Bazel (and Git) and cloning the repository for this tutorial, you
-have laid the foundation for your first build with Bazel. Continue to the next
-section to define some terms and set up your
-[workspace](https://bazel.build/reference/glossary#workspace).
-
-## Getting started
-
-Before you can build a project, you need to set up its workspace. A workspace
-is a directory that holds your project's source files and Bazel's build outputs.
-It also contains these significant files:
-
-* The `MODULE.bazel` file, which identifies the directory and its contents as
-  a Bazel workspace and lives at the root of the project's directory
-  structure. It's also where you specify your external dependencies.
-* One or more [`BUILD`
-  files](https://bazel.build/reference/glossary#build-file), which tell Bazel
-  how to build different parts of the project. A directory within the
-  workspace that contains a `BUILD` file is a
-  [package](https://bazel.build/reference/glossary#package). (More on packages
-  later in this tutorial.)
-
-In future projects, to designate a directory as a Bazel workspace, create an
-empty file named `MODULE.bazel` in that directory. For the purposes of this
-tutorial, a `MODULE.bazel` file is already present in each stage.
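-
-For example, on Linux or macOS an empty marker file can be created from the
-project's root directory with:
-
-```posix-terminal
-touch MODULE.bazel
-```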
-
-### Understand the BUILD file
-
-A `BUILD` file contains several different types of instructions for Bazel. Each
-`BUILD` file requires at least one
-[rule](https://bazel.build/reference/glossary#rule) as a set of instructions,
-which tells Bazel how to build the outputs you want, such as executable binaries
-or libraries. Each instance of a build rule in the `BUILD` file is called a
-[target](https://bazel.build/reference/glossary#target) and points to a specific
-set of source files and
-[dependencies](https://bazel.build/reference/glossary#dependency). A target can
-also point to other targets.
-
-Take a look at the `BUILD` file in the `cpp-tutorial/stage1/main` directory:
-
-```bazel
-cc_binary(
-    name = "hello-world",
-    srcs = ["hello-world.cc"],
-)
-```
-
-In our example, the `hello-world` target instantiates Bazel's built-in
-[`cc_binary` rule](https://bazel.build/reference/be/c-cpp#cc_binary). The rule
-tells Bazel to build a self-contained executable binary from the
-`hello-world.cc` source file with no dependencies.
-
-### Summary: getting started
-
-Now you are familiar with some key terms, and what they mean in the context of
-this project and Bazel in general. In the next section, you will build and test
-Stage 1 of the project.
-
-## Stage 1: single target, single package
-
-It's time to build the first part of the project. For a visual reference, the
-structure of the Stage 1 section of the project is:
-
-```none
-examples
-└── cpp-tutorial
-    └──stage1
-        ├── main
-        │   ├── BUILD
-        │   └── hello-world.cc
-        └── MODULE.bazel
-```
-
-Run the following to move to the `cpp-tutorial/stage1` directory:
-
-```posix-terminal
-cd cpp-tutorial/stage1
-```
-
-Next, run:
-
-```posix-terminal
-bazel build //main:hello-world
-```
-
-In the target label, the `//main:` part is the location of the `BUILD` file
-relative to the root of the workspace, and `hello-world` is the target name in
-the `BUILD` file.
-
-Bazel produces something that looks like this:
-
-```none
-INFO: Found 1 target...
-Target //main:hello-world up-to-date:
-  bazel-bin/main/hello-world
-INFO: Elapsed time: 2.267s, Critical Path: 0.25s
-```
-
-You just built your first Bazel target. Bazel places build outputs in the
-`bazel-bin` directory at the root of the workspace.
-
-Now test your freshly built binary, which is:
-
-```posix-terminal
-bazel-bin/main/hello-world
-```
-
-This results in a printed "`Hello world`" message.
-
-Here's the dependency graph of Stage 1:
-
-![Dependency graph for hello-world displays a single target with a single source
-file.](/docs/images/cpp-tutorial-stage1.png "Dependency graph for hello-world
-displays a single target with a single source file.")
-
-### Summary: stage 1
-
-Now that you have completed your first build, you have a basic idea of how a
-build is structured. In the next stage, you will add complexity by adding
-another target.
-
-## Stage 2: multiple build targets
-
-While a single target is sufficient for small projects, you may want to split
-larger projects into multiple targets and packages. This allows for fast
-incremental builds – that is, Bazel only rebuilds what's changed – and speeds up
-your builds by building multiple parts of a project at once. This stage of the
-tutorial adds a target, and the next adds a package.
- -This is the directory you are working with for Stage 2: - -```none - ├──stage2 - │ ├── main - │ │ ├── BUILD - │ │ ├── hello-world.cc - │ │ ├── hello-greet.cc - │ │ └── hello-greet.h - │ └── MODULE.bazel -``` - -Take a look at the `BUILD` file in the `cpp-tutorial/stage2/main` directory: - -```bazel -cc_library( - name = "hello-greet", - srcs = ["hello-greet.cc"], - hdrs = ["hello-greet.h"], -) - -cc_binary( - name = "hello-world", - srcs = ["hello-world.cc"], - deps = [ - ":hello-greet", - ], -) -``` - -With this `BUILD` file, Bazel first builds the `hello-greet` library (using -Bazel's built-in [`cc_library` -rule](https://bazel.build/reference/be/c-cpp#cc_library)), then the -`hello-world` binary. The `deps` attribute in the `hello-world` target tells -Bazel that the `hello-greet` library is required to build the `hello-world` -binary. - -Before you can build this new version of the project, you need to change -directories, switching to the `cpp-tutorial/stage2` directory by running: - -```posix-terminal -cd ../stage2 -``` - -Now you can build the new binary using the following familiar command: - -```posix-terminal -bazel build //main:hello-world -``` - -Once again, Bazel produces something that looks like this: - -```none -INFO: Found 1 target... -Target //main:hello-world up-to-date: - bazel-bin/main/hello-world -INFO: Elapsed time: 2.399s, Critical Path: 0.30s -``` - -Now you can test your freshly built binary, which returns another "`Hello -world`": - -```posix-terminal -bazel-bin/main/hello-world -``` - -If you now modify `hello-greet.cc` and rebuild the project, Bazel only -recompiles that file. - -Looking at the dependency graph, you can see that `hello-world` depends on an -extra input named `hello-greet`: - -![Dependency graph for `hello-world` displays dependency changes after -modification to the file.](/docs/images/cpp-tutorial-stage2.png "Dependency -graph for `hello-world` displays dependency changes after modification to the -file.") - -### Summary: stage 2 - -You've now built the project with two targets. The `hello-world` target builds -one source file and depends on one other target (`//main:hello-greet`), which -builds two additional source files. In the next section, take it a step further -and add another package. - -## Stage 3: multiple packages - -This next stage adds another layer of complication and builds a project with -multiple packages. Take a look at the structure and contents of the -`cpp-tutorial/stage3` directory: - -```none -└──stage3 - ├── main - │ ├── BUILD - │ ├── hello-world.cc - │ ├── hello-greet.cc - │ └── hello-greet.h - ├── lib - │ ├── BUILD - │ ├── hello-time.cc - │ └── hello-time.h - └── MODULE.bazel -``` - -You can see that now there are two sub-directories, and each contains a `BUILD` -file. Therefore, to Bazel, the workspace now contains two packages: `lib` and -`main`. - -Take a look at the `lib/BUILD` file: - -```bazel -cc_library( - name = "hello-time", - srcs = ["hello-time.cc"], - hdrs = ["hello-time.h"], - visibility = ["//main:__pkg__"], -) -``` - -And at the `main/BUILD` file: - -```bazel -cc_library( - name = "hello-greet", - srcs = ["hello-greet.cc"], - hdrs = ["hello-greet.h"], -) - -cc_binary( - name = "hello-world", - srcs = ["hello-world.cc"], - deps = [ - ":hello-greet", - "//lib:hello-time", - ], -) -``` - -The `hello-world` target in the main package depends on the` hello-time` target -in the `lib` package (hence the target label `//lib:hello-time`) - Bazel knows -this through the `deps` attribute. 
You can see this reflected in the dependency -graph: - -![Dependency graph for `hello-world` displays how the target in the main package -depends on the target in the `lib` -package.](/docs/images/cpp-tutorial-stage3.png "Dependency graph for -`hello-world` displays how the target in the main package depends on the target -in the `lib` package.") - -For the build to succeed, you make the `//lib:hello-time` target in `lib/BUILD` -explicitly visible to targets in `main/BUILD` using the visibility attribute. -This is because by default targets are only visible to other targets in the same -`BUILD` file. Bazel uses target visibility to prevent issues such as libraries -containing implementation details leaking into public APIs. - -Now build this final version of the project. Switch to the `cpp-tutorial/stage3` -directory by running: - -```posix-terminal -cd ../stage3 -``` - -Once again, run the following command: - -```posix-terminal -bazel build //main:hello-world -``` - -Bazel produces something that looks like this: - -```none -INFO: Found 1 target... -Target //main:hello-world up-to-date: - bazel-bin/main/hello-world -INFO: Elapsed time: 0.167s, Critical Path: 0.00s -``` - -Now test the last binary of this tutorial for a final `Hello world` message: - -```posix-terminal -bazel-bin/main/hello-world -``` - -### Summary: stage 3 - -You've now built the project as two packages with three targets and understand -the dependencies between them, which equips you to go forth and build future -projects with Bazel. In the next section, take a look at how to continue your -Bazel journey. - -## Next steps - -You've now completed your first basic build with Bazel, but this is just the -start. Here are some more resources to continue learning with Bazel: - -* To keep focusing on C++, read about common [C++ build use - cases](https://bazel.build/tutorials/cpp-use-cases). -* To get started with building other applications with Bazel, see the - tutorials for [Java](https://bazel.build/start/java), [Android - application](https://bazel.build/start/android-app), or [iOS - application](https://bazel.build/start/ios-app). -* To learn more about working with local and remote repositories, read about - [external dependencies](https://bazel.build/docs/external). -* To learn more about Bazel's other rules, see this [reference - guide](https://bazel.build/rules). - -Happy building! diff --git a/8.4.2/start/ios-app.mdx b/8.4.2/start/ios-app.mdx deleted file mode 100644 index 0b860ab..0000000 --- a/8.4.2/start/ios-app.mdx +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: 'Bazel Tutorial: Build an iOS App' ---- - - -This tutorial has been moved into the [bazelbuild/rules_apple](https://github.com/bazelbuild/rules_apple/blob/master/doc/tutorials/ios-app.md) repository. diff --git a/8.4.2/start/java.mdx b/8.4.2/start/java.mdx deleted file mode 100644 index b892917..0000000 --- a/8.4.2/start/java.mdx +++ /dev/null @@ -1,436 +0,0 @@ ---- -title: 'Bazel Tutorial: Build a Java Project' ---- - - - -This tutorial covers the basics of building Java applications with -Bazel. You will set up your workspace and build a simple Java project that -illustrates key Bazel concepts, such as targets and `BUILD` files. - -Estimated completion time: 30 minutes. 
- -## What you'll learn - -In this tutorial you learn how to: - -* Build a target -* Visualize the project's dependencies -* Split the project into multiple targets and packages -* Control target visibility across packages -* Reference targets through labels -* Deploy a target - -## Before you begin - -### Install Bazel - -To prepare for the tutorial, first [Install Bazel](/install) if -you don't have it installed already. - -### Install the JDK - -1. Install Java JDK (preferred version is 11, however versions between 8 and 15 are supported). - -2. Set the JAVA\_HOME environment variable to point to the JDK. - * On Linux/macOS: - - export JAVA_HOME="$(dirname $(dirname $(realpath $(which javac))))" - * On Windows: - 1. Open Control Panel. - 2. Go to "System and Security" > "System" > "Advanced System Settings" > "Advanced" tab > "Environment Variables..." . - 3. Under the "User variables" list (the one on the top), click "New...". - 4. In the "Variable name" field, enter `JAVA_HOME`. - 5. Click "Browse Directory...". - 6. Navigate to the JDK directory (for example `C:\Program Files\Java\jdk1.8.0_152`). - 7. Click "OK" on all dialog windows. - -### Get the sample project - -Retrieve the sample project from Bazel's GitHub repository: - -```posix-terminal -git clone https://github.com/bazelbuild/examples -``` - -The sample project for this tutorial is in the `examples/java-tutorial` -directory and is structured as follows: - -``` -java-tutorial -├── BUILD -├── src -│ └── main -│ └── java -│ └── com -│ └── example -│ ├── cmdline -│ │ ├── BUILD -│ │ └── Runner.java -│ ├── Greeting.java -│ └── ProjectRunner.java -└── MODULE.bazel -``` - -## Build with Bazel - -### Set up the workspace - -Before you can build a project, you need to set up its workspace. A workspace is -a directory that holds your project's source files and Bazel's build outputs. It -also contains files that Bazel recognizes as special: - -* The `MODULE.bazel` file, which identifies the directory and its contents as a - Bazel workspace and lives at the root of the project's directory structure, - -* One or more `BUILD` files, which tell Bazel how to build different parts of - the project. (A directory within the workspace that contains a `BUILD` file - is a *package*. You will learn about packages later in this tutorial.) - -To designate a directory as a Bazel workspace, create an empty file named -`MODULE.bazel` in that directory. - -When Bazel builds the project, all inputs and dependencies must be in the same -workspace. Files residing in different workspaces are independent of one -another unless linked, which is beyond the scope of this tutorial. - -### Understand the BUILD file - -A `BUILD` file contains several different types of instructions for Bazel. -The most important type is the *build rule*, which tells Bazel how to build the -desired outputs, such as executable binaries or libraries. Each instance -of a build rule in the `BUILD` file is called a *target* and points to a -specific set of source files and dependencies. A target can also point to other -targets. - -Take a look at the `java-tutorial/BUILD` file: - -```python -java_binary( - name = "ProjectRunner", - srcs = glob(["src/main/java/com/example/*.java"]), -) -``` - -In our example, the `ProjectRunner` target instantiates Bazel's built-in -[`java_binary` rule](/reference/be/java#java_binary). The rule tells Bazel to -build a `.jar` file and a wrapper shell script (both named after the target). 
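-
-For reference, the same target with its `main_class` attribute (discussed
-next) spelled out explicitly might look like this - a sketch, not necessarily
-the checked-in file:
-
-```python
-java_binary(
-    name = "ProjectRunner",
-    srcs = glob(["src/main/java/com/example/*.java"]),
-    main_class = "com.example.ProjectRunner",
-)
-```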
-
-The attributes in the target explicitly state its dependencies and options.
-While the `name` attribute is mandatory, many are optional. For example, in the
-`ProjectRunner` rule target, `name` is the name of the target, `srcs` specifies
-the source files that Bazel uses to build the target, and `main_class` specifies
-the class that contains the main method. (You may have noticed that our example
-uses [glob](/reference/be/functions#glob) to pass a set of source files to Bazel
-instead of listing them one by one.)
-
-### Build the project
-
-To build your sample project, navigate to the `java-tutorial` directory
-and run:
-
-```posix-terminal
-bazel build //:ProjectRunner
-```
-
-In the target label, the `//` part is the location of the `BUILD` file
-relative to the root of the workspace (in this case, the root itself),
-and `ProjectRunner` is the target name in the `BUILD` file. (You will
-learn about target labels in more detail at the end of this tutorial.)
-
-Bazel produces output similar to the following:
-
-```bash
- INFO: Found 1 target...
- Target //:ProjectRunner up-to-date:
-   bazel-bin/ProjectRunner.jar
-   bazel-bin/ProjectRunner
- INFO: Elapsed time: 1.021s, Critical Path: 0.83s
-```
-
-Congratulations, you just built your first Bazel target! Bazel places build
-outputs in the `bazel-bin` directory at the root of the workspace. Browse
-through its contents to get an idea for Bazel's output structure.
-
-Now test your freshly built binary:
-
-```posix-terminal
-bazel-bin/ProjectRunner
-```
-
-### Review the dependency graph
-
-Bazel requires build dependencies to be explicitly declared in BUILD files.
-Bazel uses those statements to create the project's dependency graph, which
-enables accurate incremental builds.
-
-To visualize the sample project's dependencies, you can generate a text
-representation of the dependency graph by running this command at the
-workspace root:
-
-```posix-terminal
-bazel query --notool_deps --noimplicit_deps "deps(//:ProjectRunner)" --output graph
-```
-
-The above command tells Bazel to look for all dependencies for the target
-`//:ProjectRunner` (excluding host and implicit dependencies) and format the
-output as a graph.
-
-Then, paste the text into [GraphViz](http://www.webgraphviz.com/).
-
-As you can see, the project has a single target that builds two source files with
-no additional dependencies:
-
-![Dependency graph of the target 'ProjectRunner'](/docs/images/tutorial_java_01.svg)
-
-After you set up your workspace, build your project, and examine its
-dependencies, you can add some complexity.
-
-## Refine your Bazel build
-
-While a single target is sufficient for small projects, you may want to split
-larger projects into multiple targets and packages to allow for fast incremental
-builds (that is, only rebuild what's changed) and to speed up your builds by
-building multiple parts of a project at once.
-
-### Specify multiple build targets
-
-You can split the sample project build into two targets. Replace the contents of
-the `java-tutorial/BUILD` file with the following:
-
-```python
-java_binary(
-    name = "ProjectRunner",
-    srcs = ["src/main/java/com/example/ProjectRunner.java"],
-    main_class = "com.example.ProjectRunner",
-    deps = [":greeter"],
-)
-
-java_library(
-    name = "greeter",
-    srcs = ["src/main/java/com/example/Greeting.java"],
-)
-```
-
-With this configuration, Bazel first builds the `greeter` library, then the
-`ProjectRunner` binary. 
The `deps` attribute in `java_binary` tells Bazel that
-the `greeter` library is required to build the `ProjectRunner` binary.
-
-To build this new version of the project, run the following command:
-
-```posix-terminal
-bazel build //:ProjectRunner
-```
-
-Bazel produces output similar to the following:
-
-```
-INFO: Found 1 target...
-Target //:ProjectRunner up-to-date:
-  bazel-bin/ProjectRunner.jar
-  bazel-bin/ProjectRunner
-INFO: Elapsed time: 2.454s, Critical Path: 1.58s
-```
-
-Now test your freshly built binary:
-
-```posix-terminal
-bazel-bin/ProjectRunner
-```
-
-If you now modify `ProjectRunner.java` and rebuild the project, Bazel only
-recompiles that file.
-
-Looking at the dependency graph, you can see that `ProjectRunner` depends on the
-same inputs as it did before, but the structure of the build is different:
-
-![Dependency graph of the target 'ProjectRunner' after adding a dependency](
-/docs/images/tutorial_java_02.svg)
-
-You've now built the project with two targets. The `ProjectRunner` target builds
-one source file and depends on one other target (`:greeter`), which builds
-one additional source file.
-
-### Use multiple packages
-
-Let’s now split the project into multiple packages. If you take a look at the
-`src/main/java/com/example/cmdline` directory, you can see that it also contains
-a `BUILD` file, plus some source files. Therefore, to Bazel, the workspace now
-contains two packages, `//src/main/java/com/example/cmdline` and `//` (since
-there is a `BUILD` file at the root of the workspace).
-
-Take a look at the `src/main/java/com/example/cmdline/BUILD` file:
-
-```python
-java_binary(
-    name = "runner",
-    srcs = ["Runner.java"],
-    main_class = "com.example.cmdline.Runner",
-    deps = ["//:greeter"],
-)
-```
-
-The `runner` target depends on the `greeter` target in the `//` package (hence
-the target label `//:greeter`) - Bazel knows this through the `deps` attribute.
-Take a look at the dependency graph:
-
-![Dependency graph of the target 'runner'](/docs/images/tutorial_java_03.svg)
-
-However, for the build to succeed, you must explicitly give the `runner` target
-in `//src/main/java/com/example/cmdline/BUILD` visibility to targets in
-`//BUILD` using the `visibility` attribute. This is because by default targets
-are only visible to other targets in the same `BUILD` file. (Bazel uses target
-visibility to prevent issues such as libraries containing implementation details
-leaking into public APIs.)
-
-To do this, add the `visibility` attribute to the `greeter` target in
-`java-tutorial/BUILD` as shown below:
-
-```python
-java_library(
-    name = "greeter",
-    srcs = ["src/main/java/com/example/Greeting.java"],
-    visibility = ["//src/main/java/com/example/cmdline:__pkg__"],
-)
-```
-
-Now you can build the new package by running the following command at the root
-of the workspace:
-
-```posix-terminal
-bazel build //src/main/java/com/example/cmdline:runner
-```
-
-Bazel produces output similar to the following:
-
-```
-INFO: Found 1 target...
-Target //src/main/java/com/example/cmdline:runner up-to-date:
-  bazel-bin/src/main/java/com/example/cmdline/runner.jar
-  bazel-bin/src/main/java/com/example/cmdline/runner
-INFO: Elapsed time: 1.576s, Critical Path: 0.81s
-```
-
-Now test your freshly built binary:
-
-```posix-terminal
-./bazel-bin/src/main/java/com/example/cmdline/runner
-```
-
-You've now modified the project to build as two packages, each containing one
-target, and understand the dependencies between them.
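-
-If you want to see the new cross-package dependency for yourself, you can rerun
-the query from earlier against the `runner` target:
-
-```posix-terminal
-bazel query --notool_deps --noimplicit_deps "deps(//src/main/java/com/example/cmdline:runner)" --output graph
-```
-
-As before, you can paste the output into GraphViz.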
- - -## Use labels to reference targets - -In `BUILD` files and at the command line, Bazel uses target labels to reference -targets - for example, `//:ProjectRunner` or -`//src/main/java/com/example/cmdline:runner`. Their syntax is as follows: - -``` -//path/to/package:target-name -``` - -If the target is a rule target, then `path/to/package` is the path to the -directory containing the `BUILD` file, and `target-name` is what you named the -target in the `BUILD` file (the `name` attribute). If the target is a file -target, then `path/to/package` is the path to the root of the package, and -`target-name` is the name of the target file, including its full path. - -When referencing targets at the repository root, the package path is empty, -just use `//:target-name`. When referencing targets within the same `BUILD` -file, you can even skip the `//` workspace root identifier and just use -`:target-name`. - -For example, for targets in the `java-tutorial/BUILD` file, you did not have to -specify a package path, since the workspace root is itself a package (`//`), and -your two target labels were simply `//:ProjectRunner` and `//:greeter`. - -However, for targets in the `//src/main/java/com/example/cmdline/BUILD` file you -had to specify the full package path of `//src/main/java/com/example/cmdline` -and your target label was `//src/main/java/com/example/cmdline:runner`. - -## Package a Java target for deployment - -Let’s now package a Java target for deployment by building the binary with all -of its runtime dependencies. This lets you run the binary outside of your -development environment. - -As you remember, the [java_binary](/reference/be/java#java_binary) build rule -produces a `.jar` and a wrapper shell script. Take a look at the contents of -`runner.jar` using this command: - -```posix-terminal -jar tf bazel-bin/src/main/java/com/example/cmdline/runner.jar -``` - -The contents are: - -``` -META-INF/ -META-INF/MANIFEST.MF -com/ -com/example/ -com/example/cmdline/ -com/example/cmdline/Runner.class -``` -As you can see, `runner.jar` contains `Runner.class`, but not its dependency, -`Greeting.class`. The `runner` script that Bazel generates adds `greeter.jar` -to the classpath, so if you leave it like this, it will run locally, but it -won't run standalone on another machine. Fortunately, the `java_binary` rule -allows you to build a self-contained, deployable binary. To build it, append -`_deploy.jar` to the target name: - -```posix-terminal -bazel build //src/main/java/com/example/cmdline:runner_deploy.jar -``` - -Bazel produces output similar to the following: - -``` -INFO: Found 1 target... -Target //src/main/java/com/example/cmdline:runner_deploy.jar up-to-date: - bazel-bin/src/main/java/com/example/cmdline/runner_deploy.jar -INFO: Elapsed time: 1.700s, Critical Path: 0.23s -``` -You have just built `runner_deploy.jar`, which you can run standalone away from -your development environment since it contains the required runtime -dependencies. 
Take a look at the contents of this standalone JAR using the -same command as before: - -```posix-terminal -jar tf bazel-bin/src/main/java/com/example/cmdline/runner_deploy.jar -``` - -The contents include all of the necessary classes to run: - -``` -META-INF/ -META-INF/MANIFEST.MF -build-data.properties -com/ -com/example/ -com/example/cmdline/ -com/example/cmdline/Runner.class -com/example/Greeting.class -``` - -## Further reading - -For more details, see: - -* [rules_jvm_external](https://github.com/bazelbuild/rules_jvm_external) for - rules to manage transitive Maven dependencies. - -* [External Dependencies](/docs/external) to learn more about working with - local and remote repositories. - -* The [other rules](/rules) to learn more about Bazel. - -* The [C++ build tutorial](/start/cpp) to get started with building - C++ projects with Bazel. - -* The [Android application tutorial](/start/android-app) and - [iOS application tutorial](/start/ios-app) to get started with - building mobile applications for Android and iOS with Bazel. - -Happy building! diff --git a/8.4.2/tutorials/cpp-dependency.mdx b/8.4.2/tutorials/cpp-dependency.mdx deleted file mode 100644 index 194cc73..0000000 --- a/8.4.2/tutorials/cpp-dependency.mdx +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: 'Review the dependency graph' ---- - - - -A successful build has all of its dependencies explicitly stated in the `BUILD` -file. Bazel uses those statements to create the project's dependency graph, -which enables accurate incremental builds. - -To visualize the sample project's dependencies, you can generate a text -representation of the dependency graph by running this command at the -workspace root: - -``` -bazel query --notool_deps --noimplicit_deps "deps(//main:hello-world)" \ - --output graph -``` - -The above command tells Bazel to look for all dependencies for the target -`//main:hello-world` (excluding host and implicit dependencies) and format the -output as a graph. - -Then, paste the text into [GraphViz](http://www.webgraphviz.com/). - -On Ubuntu, you can view the graph locally by installing GraphViz and the xdot -Dot Viewer: - -``` -sudo apt update && sudo apt install graphviz xdot -``` - -Then you can generate and view the graph by piping the text output above -straight to xdot: - -``` -xdot <(bazel query --notool_deps --noimplicit_deps "deps(//main:hello-world)" \ - --output graph) -``` - -As you can see, the first stage of the sample project has a single target -that builds a single source file with no additional dependencies: - -![Dependency graph for 'hello-world'](/docs/images/cpp-tutorial-stage1.png "Dependency graph") - -**Figure 1.** Dependency graph for `hello-world` displays a single target with a single -source file. - -After you set up your workspace, build your project, and examine its -dependencies, you can add some complexity. diff --git a/8.4.2/tutorials/cpp-labels.mdx b/8.4.2/tutorials/cpp-labels.mdx deleted file mode 100644 index 78d0dbc..0000000 --- a/8.4.2/tutorials/cpp-labels.mdx +++ /dev/null @@ -1,27 +0,0 @@ ---- -title: 'Use labels to reference targets' ---- - - - -In `BUILD` files and at the command line, Bazel uses *labels* to reference -targets - for example, `//main:hello-world` or `//lib:hello-time`.
Their syntax -is: - -``` -//path/to/package:target-name -``` - -If the target is a rule target, then `path/to/package` is the path from the -workspace root (the directory containing the `MODULE.bazel` file) to the directory -containing the `BUILD` file, and `target-name` is what you named the target -in the `BUILD` file (the `name` attribute). If the target is a file target, -then `path/to/package` is the path to the root of the package, and -`target-name` is the name of the target file, including its full -path relative to the root of the package (the directory containing the -package's `BUILD` file). - -When referencing targets at the repository root, the package path is empty; -just use `//:target-name`. When referencing targets within the same `BUILD` -file, you can even skip the `//` workspace root identifier and just use -`:target-name`. diff --git a/8.4.2/tutorials/cpp-use-cases.mdx b/8.4.2/tutorials/cpp-use-cases.mdx deleted file mode 100644 index 6695cce..0000000 --- a/8.4.2/tutorials/cpp-use-cases.mdx +++ /dev/null @@ -1,180 +0,0 @@ ---- -title: 'Common C++ Build Use Cases' ---- - - - -Here you will find some of the most common use cases for building C++ projects -with Bazel. If you have not done so already, get started with building C++ -projects with Bazel by completing the tutorial -[Introduction to Bazel: Build a C++ Project](/start/cpp). - -For information on `cc_library` and `hdrs` header files, see the -`cc_library` entry in the Build Encyclopedia. - -## Including multiple files in a target - -You can include multiple files in a single target with -`glob`. -For example: - -```python -cc_library( - name = "build-all-the-files", - srcs = glob(["*.cc"]), - hdrs = glob(["*.h"]), -) -``` - -With this target, Bazel will build all the `.cc` and `.h` files it finds in the -same directory as the `BUILD` file that contains this target (excluding -subdirectories). - -## Using transitive includes - -If a file includes a header, then any rule with that file as a source (that is, -having that file in the `srcs`, `hdrs`, or `textual_hdrs` attribute) should -depend on the included header's library rule. Conversely, only direct -dependencies need to be specified as dependencies. For example, suppose -`sandwich.h` includes `bread.h` and `bread.h` includes `flour.h`. `sandwich.h` -doesn't include `flour.h` (who wants flour in their sandwich?), so the `BUILD` -file would look like this: - -```python -cc_library( - name = "sandwich", - srcs = ["sandwich.cc"], - hdrs = ["sandwich.h"], - deps = [":bread"], -) - -cc_library( - name = "bread", - srcs = ["bread.cc"], - hdrs = ["bread.h"], - deps = [":flour"], -) - -cc_library( - name = "flour", - srcs = ["flour.cc"], - hdrs = ["flour.h"], -) -``` - -Here, the `sandwich` library depends on the `bread` library, which depends -on the `flour` library. - -## Adding include paths - -Sometimes you cannot (or do not want to) root include paths at the workspace -root. An existing library might already have an include directory that doesn't -match its path in your workspace. For example, suppose you have the following -directory structure: - -``` -└── my-project - ├── legacy - │   └── some_lib - │   ├── BUILD - │   ├── include - │   │   └── some_lib.h - │   └── some_lib.cc - └── MODULE.bazel -``` - -Bazel will expect `some_lib.h` to be included as -`legacy/some_lib/include/some_lib.h`, but suppose `some_lib.cc` includes -`"some_lib.h"`.
To make that include path valid, -`legacy/some_lib/BUILD` will need to specify that the `some_lib/include` -directory is an include directory: - -```python -cc_library( - name = "some_lib", - srcs = ["some_lib.cc"], - hdrs = ["include/some_lib.h"], - copts = ["-Ilegacy/some_lib/include"], -) -``` - -This is especially useful for external dependencies, as their header files -must otherwise be included with a `/` prefix. - -## Include external libraries - -Suppose you are using [Google Test](https://github.com/google/googletest). -You can add a dependency on it in the `MODULE.bazel` file to -download Google Test and make it available in your repository: - -```python -bazel_dep(name = "googletest", version = "1.15.2") -``` - -## Writing and running C++ tests - -For example, you could create a test `./test/hello-test.cc`, such as: - -```cpp -#include "gtest/gtest.h" -#include "main/hello-greet.h" - -TEST(HelloTest, GetGreet) { - EXPECT_EQ(get_greet("Bazel"), "Hello Bazel"); -} -``` - -Then create a `./test/BUILD` file for your tests: - -```python -cc_test( - name = "hello-test", - srcs = ["hello-test.cc"], - copts = [ - "-Iexternal/gtest/googletest/include", - "-Iexternal/gtest/googletest", - ], - deps = [ - "@googletest//:main", - "//main:hello-greet", - ], -) -``` - -To make `hello-greet` visible to `hello-test`, you must add -`"//test:__pkg__",` to the `visibility` attribute in `./main/BUILD`. - -Now you can use `bazel test` to run the test. - -``` -bazel test test:hello-test -``` - -This produces the following output: - -``` -INFO: Found 1 test target... -Target //test:hello-test up-to-date: - bazel-bin/test/hello-test -INFO: Elapsed time: 4.497s, Critical Path: 2.53s -//test:hello-test PASSED in 0.3s - -Executed 1 out of 1 tests: 1 test passes. -``` - - -## Adding dependencies on precompiled libraries - -If you want to use a library of which you only have a compiled version (for -example, headers and a `.so` file), wrap it in a `cc_library` rule: - -```python -cc_library( - name = "mylib", - srcs = ["mylib.so"], - hdrs = ["mylib.h"], -) -``` - -This way, other C++ targets in your workspace can depend on this rule. diff --git a/8.4.2/versions/index.mdx b/8.4.2/versions/index.mdx deleted file mode 100644 index 4290e57..0000000 --- a/8.4.2/versions/index.mdx +++ /dev/null @@ -1,15 +0,0 @@ ---- -title: 'Documentation Versions' ---- - - - -The default documentation on this website represents the latest version at HEAD. -Each major and minor supported release will have a snapshot of the narrative and -reference documentation that follows the lifecycle of Bazel's version support. - -To see documentation for stable Bazel versions, use the "Versioned docs" -drop-down. - -To see documentation for older Bazel versions prior to Feb 2022, go to -[docs.bazel.build](https://docs.bazel.build/). diff --git a/cleanup-mdx.sh b/cleanup-mdx.sh index 850d855..88e3d21 100755 --- a/cleanup-mdx.sh +++ b/cleanup-mdx.sh @@ -12,10 +12,9 @@ LOCAL_FILES=" echo "Cleaning up .mdx files..." -# Find all .mdx files in the repo, excluding versioned directories -# This excludes any directory starting with a digit (e.g., 6.5.0, 7.6.1, 8.0.1) +# Find all .mdx files in the repo, excluding the versions/ directory find . 
-name "*.mdx" -type f \ - -not -path "./[0-9]*/*" \ + -not -path "./versions/*" \ -not -path "./.github/*" | while read -r mdx_file; do # Check if this exact file path is in LOCAL_FILES diff --git a/convert-community-to-mdx.sh b/convert-community-to-mdx.sh deleted file mode 100755 index 59a51d3..0000000 --- a/convert-community-to-mdx.sh +++ /dev/null @@ -1,64 +0,0 @@ -#!/bin/bash - -# Script to convert community YAML files to MDX using yq -# Usage: ./convert-community-to-mdx.sh [experts|partners] - -set -e - -FILE=${1} -INPUT_FILE="upstream/site/en/${FILE}/_index.yaml" -TITLE=$(yq eval '.landing_page.rows[0].heading' "$INPUT_FILE") -DESCRIPTION=$(yq eval '.landing_page.rows[0].description' "$INPUT_FILE") - -OUTPUT_FILE="${FILE}.mdx" -# Create the MDX file -cat > "$OUTPUT_FILE" << EOF ---- -title: '$TITLE' ---- - -$DESCRIPTION - ---- - -EOF - -# Process each expert item and group into pairs -yq eval '.landing_page.rows[0].items[]' "$INPUT_FILE" -o json | jq -r ' -"" + "\n" + -.description + "\n" + -"" -' | awk ' -BEGIN { - count = 0 - card_buffer = "" -} -/^" - } - card_buffer = $0 - next -} -{ - card_buffer = card_buffer "\n" $0 -} -/^<\/Card>/ { - print card_buffer - count++ - if (count % 2 == 0) { - print "" - print "" - } - card_buffer = "" -} -END { - if (count % 2 == 1) { - print "" - print "" - } -}' >> "$OUTPUT_FILE" - -echo "Generated $OUTPUT_FILE" diff --git a/copy-upstream-docs.sh b/copy-upstream-docs.sh index ea38846..6e4b876 100755 --- a/copy-upstream-docs.sh +++ b/copy-upstream-docs.sh @@ -45,6 +45,8 @@ reference/flag-cheatsheet.mdx reference/test-encyclopedia.mdx remote/dynamic.mdx rules/lib/globals/bzl.mdx +rules/lib/providers/DebugPackageInfo.mdx +rules/lib/toplevel/java_common.mdx rules/lib/repo/cache.mdx rules/lib/repo/git.mdx rules/lib/repo/http.mdx @@ -99,12 +101,92 @@ transform_docs() { transform_docs "$UPSTREAM_SITE" transform_docs "$REFERENCE_DOCS" -echo "Converting community YAML files to MDX..." -./convert-community-to-mdx.sh "$DEST_DIR/community/experts" -./convert-community-to-mdx.sh "$DEST_DIR/community/partners" +# --- Community Page Conversion Logic --- + +function convert_community_page() { + local topic="$1" # e.g., "experts" or "partners" + local source_yaml="upstream/site/en/community/${topic}/_index.yaml" + local output_mdx="${DEST_DIR}/community/${topic}.mdx" + + if [ ! -f "$source_yaml" ]; then + echo "Skipping ${topic} conversion (source YAML not found)." + return + fi + + echo "Converting ${topic} YAML to MDX..." 
+ + local title=$(yq eval '.landing_page.rows[0].heading' "$source_yaml") + local description=$(yq eval '.landing_page.rows[0].description' "$source_yaml") + + # Ensure destination directory exists + mkdir -p "$(dirname "$output_mdx")" + + # Create the MDX file + cat > "$output_mdx" << EOF +--- +title: '$title' +--- + +$description + +--- + +EOF + + # Process each item and group into pairs, appending to the new file + yq eval '.landing_page.rows[0].items[]' "$source_yaml" -o json | jq -r ' + "" + "\n" + + .description + "\n" + + "" + ' | awk ' + BEGIN { + count = 0 + card_buffer = "" + } + /^" + } + card_buffer = $0 + next + } + { + card_buffer = card_buffer "\n" $0 + } + /^<\/Card>/ { + print card_buffer + count++ + if (count % 2 == 0) { + print "" + print "" + } + card_buffer = "" + } + END { + if (count % 2 == 1) { + print "" + print "" + } + }' >> "$output_mdx" + + echo "Generated $output_mdx" +} + +# Run conversion for community pages and copy images +if [ -d "upstream/site/en/community" ]; then + convert_community_page "experts" + convert_community_page "partners" + + if [ -d "upstream/site/en/community/images" ]; then + echo "Copying community images..." + mkdir -p "$DEST_DIR/community/images" + cp upstream/site/en/community/images/* "$DEST_DIR/community/images/" + fi +else + echo "Skipping community conversion (directory not found)." +fi -echo "Copying community images..." -mkdir -p "$DEST_DIR/community/images" -cp upstream/site/en/community/images/* "$DEST_DIR/community/images/" echo "Done copying docs." diff --git a/docs-versions.json b/docs-versions.json index 8d07dda..4d5ce84 100644 --- a/docs-versions.json +++ b/docs-versions.json @@ -5,7 +5,7 @@ "8.2.1", "8.1.1", "8.0.1", - "7.6.2", + "7.7.0", "6.5.0", "5.4.1" ] diff --git a/docs-versions.vendor_folders.sh b/docs-versions.vendor_folders.sh deleted file mode 100755 index 229504f..0000000 --- a/docs-versions.vendor_folders.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env bash -# Create or update versioned subfolders for docs of all Bazel releases. -# -# This script does the following: -# Reads docs-versions.json to get all available versions -# Checks which version folders are missing -# For each missing version, resets the upstream/ submodule to that tag -# Runs the ./copy-upstream-docs.sh script to copy the docs to the version directory - -set -euo pipefail - -echo "Checking for missing version folders..." - -# Get all versions from docs-versions.json, excluding HEAD -VERSIONS=$(jq -r '.[] | select(. != "HEAD")' docs-versions.json) - -# Check which folders are missing and create them -for VERSION in $VERSIONS; do - if [ ! -d "$VERSION" ]; then - echo "Creating missing folder for version: $VERSION" - - # Change to upstream directory and reset to the specific tag - cd upstream - echo "Resetting submodule to tag: $VERSION" - git fetch origin "refs/tags/$VERSION:refs/tags/$VERSION" - git reset --hard "$VERSION" - - # Go back to the root directory - cd .. - - # Run the copy-upstream-docs.sh script with the version directory - echo "Copying docs to directory: $VERSION" - ./copy-upstream-docs.sh "$VERSION" - - echo "Successfully created docs for version $VERSION" - else - echo "Folder $VERSION already exists, skipping" - fi -done - -echo "All version folders are now up to date!" 
\ No newline at end of file diff --git a/docs.json b/docs.json index b54c70a..e386e3a 100644 --- a/docs.json +++ b/docs.json @@ -293,23 +293,23 @@ { "group": "Why Bazel?", "pages": [ - "8.4.2/about/intro", - "8.4.2/about/why", - "8.4.2/about/vision", - "8.4.2/about/roadmap", - "8.4.2/about/faq" + "versions/8.4.2/about/intro", + "versions/8.4.2/about/why", + "versions/8.4.2/about/vision", + "versions/8.4.2/about/roadmap", + "versions/8.4.2/about/faq" ] }, { "group": "Build system basics", "pages": [ - "8.4.2/basics", - "8.4.2/basics/build-systems", - "8.4.2/basics/task-based-builds", - "8.4.2/basics/artifact-based-builds", - "8.4.2/basics/distributed-builds", - "8.4.2/basics/dependencies", - "8.4.2/basics/hermeticity" + "versions/8.4.2/basics", + "versions/8.4.2/basics/build-systems", + "versions/8.4.2/basics/task-based-builds", + "versions/8.4.2/basics/artifact-based-builds", + "versions/8.4.2/basics/distributed-builds", + "versions/8.4.2/basics/dependencies", + "versions/8.4.2/basics/hermeticity" ] } ] @@ -320,37 +320,37 @@ { "group": "Install", "pages": [ - "8.4.2/install", - "8.4.2/install/bazelisk", - "8.4.2/install/os-x", - "8.4.2/install/windows", - "8.4.2/install/ubuntu", - "8.4.2/install/suse", - "8.4.2/install/docker-container", - "8.4.2/install/compile-source", - "8.4.2/install/completion", - "8.4.2/install/ide" + "versions/8.4.2/install", + "versions/8.4.2/install/bazelisk", + "versions/8.4.2/install/os-x", + "versions/8.4.2/install/windows", + "versions/8.4.2/install/ubuntu", + "versions/8.4.2/install/suse", + "versions/8.4.2/install/docker-container", + "versions/8.4.2/install/compile-source", + "versions/8.4.2/install/completion", + "versions/8.4.2/install/ide" ] }, { "group": "First build tutorials", "pages": [ - "8.4.2/start/cpp", - "8.4.2/start/java", - "8.4.2/start/android-app", - "8.4.2/start/ios-app" + "versions/8.4.2/start/cpp", + "versions/8.4.2/start/java", + "versions/8.4.2/start/android-app", + "versions/8.4.2/start/ios-app" ] }, { "group": "Concepts", "pages": [ - "8.4.2/concepts/build-ref", - "8.4.2/concepts/labels", - "8.4.2/concepts/build-files", - "8.4.2/concepts/dependencies", - "8.4.2/concepts/visibility", - "8.4.2/concepts/platforms", - "8.4.2/concepts/hermeticity" + "versions/8.4.2/concepts/build-ref", + "versions/8.4.2/concepts/labels", + "versions/8.4.2/concepts/build-files", + "versions/8.4.2/concepts/dependencies", + "versions/8.4.2/concepts/visibility", + "versions/8.4.2/concepts/platforms", + "versions/8.4.2/concepts/hermeticity" ] } ] @@ -361,72 +361,72 @@ { "group": "Releases", "pages": [ - "8.4.2/release", - "8.4.2/release/rolling", - "8.4.2/release/backward-compatibility", - "8.4.2/release/rule-compatibility" + "versions/8.4.2/release", + "versions/8.4.2/release/rolling", + "versions/8.4.2/release/backward-compatibility", + "versions/8.4.2/release/rule-compatibility" ] }, { "group": "Basics", "pages": [ - "8.4.2/build/style-guide", - "8.4.2/build/share-variables", - "8.4.2/community/recommended-rules", - "8.4.2/run/build" + "versions/8.4.2/build/style-guide", + "versions/8.4.2/build/share-variables", + "versions/8.4.2/community/recommended-rules", + "versions/8.4.2/run/build" ] }, { "group": "Advanced", "pages": [ - "8.4.2/configure/attributes", - "8.4.2/configure/integrate-cpp", - "8.4.2/configure/coverage", - "8.4.2/configure/best-practices", - "8.4.2/configure/windows", - "8.4.2/advanced/performance/build-performance-metrics", - "8.4.2/advanced/performance/build-performance-breakdown", - "8.4.2/advanced/performance/json-trace-profile", - 
"8.4.2/advanced/performance/memory", - "8.4.2/advanced/performance/iteration-speed" + "versions/8.4.2/configure/attributes", + "versions/8.4.2/configure/integrate-cpp", + "versions/8.4.2/configure/coverage", + "versions/8.4.2/configure/best-practices", + "versions/8.4.2/configure/windows", + "versions/8.4.2/advanced/performance/build-performance-metrics", + "versions/8.4.2/advanced/performance/build-performance-breakdown", + "versions/8.4.2/advanced/performance/json-trace-profile", + "versions/8.4.2/advanced/performance/memory", + "versions/8.4.2/advanced/performance/iteration-speed" ] }, { "group": "Remote Execution", "pages": [ - "8.4.2/remote/rbe", - "8.4.2/remote/rules", - "8.4.2/remote/ci", - "8.4.2/remote/dynamic", - "8.4.2/remote/caching", - "8.4.2/remote/sandbox", - "8.4.2/remote/workspace", - "8.4.2/remote/cache-remote", - "8.4.2/remote/cache-local", - "8.4.2/remote/output-directories", - "8.4.2/remote/persistent", - "8.4.2/remote/multiplex", - "8.4.2/remote/creating", - "8.4.2/remote/bep", - "8.4.2/remote/bep-examples", - "8.4.2/remote/bep-glossary" + "versions/8.4.2/remote/rbe", + "versions/8.4.2/remote/rules", + "versions/8.4.2/remote/ci", + "versions/8.4.2/remote/dynamic", + "versions/8.4.2/remote/caching", + "versions/8.4.2/remote/sandbox", + "versions/8.4.2/remote/workspace", + "versions/8.4.2/remote/cache-remote", + "versions/8.4.2/remote/cache-local", + "versions/8.4.2/remote/output-directories", + "versions/8.4.2/remote/persistent", + "versions/8.4.2/remote/multiplex", + "versions/8.4.2/remote/creating", + "versions/8.4.2/remote/bep", + "versions/8.4.2/remote/bep-examples", + "versions/8.4.2/remote/bep-glossary" ] }, { "group": "Tutorials", "pages": [ - "8.4.2/tutorials/cpp-use-cases", - "8.4.2/tutorials/ccp-toolchain-config", - "8.4.2/tutorials/cpp-dependency", - "8.4.2/tutorials/cpp-labels" + "versions/8.4.2/tutorials/cpp-use-cases", + "versions/8.4.2/tutorials/ccp-toolchain-config", + "versions/8.4.2/tutorials/cpp-dependency", + "versions/8.4.2/tutorials/cpp-labels" ] }, { "group": "Migrate", "pages": [ - "8.4.2/migrate", - "8.4.2/migrate/maven", - "8.4.2/migrate/xcode" + "versions/8.4.2/migrate", + "versions/8.4.2/migrate/maven", + "versions/8.4.2/migrate/xcode" ] } ] @@ -437,34 +437,34 @@ { "group": "Build encyclopedia", "pages": [ - "8.4.2/reference/be/overview", - "8.4.2/reference/be/common-definitions", - "8.4.2/reference/be/make-variables", - "8.4.2/reference/be/functions" + "versions/8.4.2/reference/be/overview", + "versions/8.4.2/reference/be/common-definitions", + "versions/8.4.2/reference/be/make-variables", + "versions/8.4.2/reference/be/functions" ] }, { "group": "Command line reference", "pages": [ - "8.4.2/reference/command-line-reference" + "versions/8.4.2/reference/command-line-reference" ] }, { "group": "Query Language", "pages": [ - "8.4.2/query/language" + "versions/8.4.2/query/language" ] }, { "group": "Glossary", "pages": [ - "8.4.2/reference/glossary" + "versions/8.4.2/reference/glossary" ] }, { "group": "Flag cheatsheet", "pages": [ - "8.4.2/reference/flag-cheatsheet" + "versions/8.4.2/reference/flag-cheatsheet" ] } ] @@ -475,35 +475,35 @@ { "group": "Concepts", "pages": [ - "8.4.2/extending/concepts" + "versions/8.4.2/extending/concepts" ] }, { "group": "Writing rules", "pages": [ - "8.4.2/rules/rules-tutorial", - "8.4.2/rules/macro-tutorial", - "8.4.2/rules/legacy-macro-tutorial", - "8.4.2/rules/verbs-tutorial", - "8.4.2/rules/language", - "8.4.2/rules/bzl-style", - "8.4.2/rules/challenges", - "8.4.2/rules/windows" + 
"versions/8.4.2/rules/rules-tutorial", + "versions/8.4.2/rules/macro-tutorial", + "versions/8.4.2/rules/legacy-macro-tutorial", + "versions/8.4.2/rules/verbs-tutorial", + "versions/8.4.2/rules/language", + "versions/8.4.2/rules/bzl-style", + "versions/8.4.2/rules/challenges", + "versions/8.4.2/rules/windows" ] }, { "group": "Distributing rules", "pages": [ - "8.4.2/rules/testing", - "8.4.2/rules/performance", - "8.4.2/rules/deploying" + "versions/8.4.2/rules/testing", + "versions/8.4.2/rules/performance", + "versions/8.4.2/rules/deploying" ] }, { "group": "APIs", "pages": [ - "8.4.2/rules/lib/overview", - "8.4.2/rules/lib/globals" + "versions/8.4.2/rules/lib/overview", + "versions/8.4.2/rules/lib/globals" ] } ] @@ -514,36 +514,36 @@ { "group": "Contributing", "pages": [ - "8.4.2/contribute", - "8.4.2/contribute/policy", - "8.4.2/contribute/patch-acceptance", - "8.4.2/contribute/maintainers-guide", - "8.4.2/contribute/codebase", - "8.4.2/contribute/search", - "8.4.2/contribute/statemachine-guide", - "8.4.2/contribute/docs", - "8.4.2/contribute/docs-style-guide", - "8.4.2/contribute/design-documents", - "8.4.2/contribute/release-notes" + "versions/8.4.2/contribute", + "versions/8.4.2/contribute/policy", + "versions/8.4.2/contribute/patch-acceptance", + "versions/8.4.2/contribute/maintainers-guide", + "versions/8.4.2/contribute/codebase", + "versions/8.4.2/contribute/search", + "versions/8.4.2/contribute/statemachine-guide", + "versions/8.4.2/contribute/docs", + "versions/8.4.2/contribute/docs-style-guide", + "versions/8.4.2/contribute/design-documents", + "versions/8.4.2/contribute/release-notes" ] }, { "group": "Programs", "pages": [ - "8.4.2/community/update", - "8.4.2/community/sig", - "8.4.2/community/experts", - "8.4.2/community/partners", - "8.4.2/community/users", - "8.4.2/community/recommended-rules", - "8.4.2/community/remote-execution-services" + "versions/8.4.2/community/update", + "versions/8.4.2/community/sig", + "versions/8.4.2/community/experts", + "versions/8.4.2/community/partners", + "versions/8.4.2/community/users", + "versions/8.4.2/community/recommended-rules", + "versions/8.4.2/community/remote-execution-services" ] }, { "group": "Getting help", "pages": [ - "8.4.2/help", - "8.4.2/contribute/policy" + "versions/8.4.2/help", + "versions/8.4.2/contribute/policy" ] } ] @@ -564,23 +564,23 @@ { "group": "Why Bazel?", "pages": [ - "8.3.1/about/intro", - "8.3.1/about/why", - "8.3.1/about/vision", - "8.3.1/about/roadmap", - "8.3.1/about/faq" + "versions/8.3.1/about/intro", + "versions/8.3.1/about/why", + "versions/8.3.1/about/vision", + "versions/8.3.1/about/roadmap", + "versions/8.3.1/about/faq" ] }, { "group": "Build system basics", "pages": [ - "8.3.1/basics", - "8.3.1/basics/build-systems", - "8.3.1/basics/task-based-builds", - "8.3.1/basics/artifact-based-builds", - "8.3.1/basics/distributed-builds", - "8.3.1/basics/dependencies", - "8.3.1/basics/hermeticity" + "versions/8.3.1/basics", + "versions/8.3.1/basics/build-systems", + "versions/8.3.1/basics/task-based-builds", + "versions/8.3.1/basics/artifact-based-builds", + "versions/8.3.1/basics/distributed-builds", + "versions/8.3.1/basics/dependencies", + "versions/8.3.1/basics/hermeticity" ] } ] @@ -591,37 +591,37 @@ { "group": "Install", "pages": [ - "8.3.1/install", - "8.3.1/install/bazelisk", - "8.3.1/install/os-x", - "8.3.1/install/windows", - "8.3.1/install/ubuntu", - "8.3.1/install/suse", - "8.3.1/install/docker-container", - "8.3.1/install/compile-source", - "8.3.1/install/completion", - "8.3.1/install/ide" + 
"versions/8.3.1/install", + "versions/8.3.1/install/bazelisk", + "versions/8.3.1/install/os-x", + "versions/8.3.1/install/windows", + "versions/8.3.1/install/ubuntu", + "versions/8.3.1/install/suse", + "versions/8.3.1/install/docker-container", + "versions/8.3.1/install/compile-source", + "versions/8.3.1/install/completion", + "versions/8.3.1/install/ide" ] }, { "group": "First build tutorials", "pages": [ - "8.3.1/start/cpp", - "8.3.1/start/java", - "8.3.1/start/android-app", - "8.3.1/start/ios-app" + "versions/8.3.1/start/cpp", + "versions/8.3.1/start/java", + "versions/8.3.1/start/android-app", + "versions/8.3.1/start/ios-app" ] }, { "group": "Concepts", "pages": [ - "8.3.1/concepts/build-ref", - "8.3.1/concepts/labels", - "8.3.1/concepts/build-files", - "8.3.1/concepts/dependencies", - "8.3.1/concepts/visibility", - "8.3.1/concepts/platforms", - "8.3.1/concepts/hermeticity" + "versions/8.3.1/concepts/build-ref", + "versions/8.3.1/concepts/labels", + "versions/8.3.1/concepts/build-files", + "versions/8.3.1/concepts/dependencies", + "versions/8.3.1/concepts/visibility", + "versions/8.3.1/concepts/platforms", + "versions/8.3.1/concepts/hermeticity" ] } ] @@ -632,72 +632,72 @@ { "group": "Releases", "pages": [ - "8.3.1/release", - "8.3.1/release/rolling", - "8.3.1/release/backward-compatibility", - "8.3.1/release/rule-compatibility" + "versions/8.3.1/release", + "versions/8.3.1/release/rolling", + "versions/8.3.1/release/backward-compatibility", + "versions/8.3.1/release/rule-compatibility" ] }, { "group": "Basics", "pages": [ - "8.3.1/build/style-guide", - "8.3.1/build/share-variables", - "8.3.1/community/recommended-rules", - "8.3.1/run/build" + "versions/8.3.1/build/style-guide", + "versions/8.3.1/build/share-variables", + "versions/8.3.1/community/recommended-rules", + "versions/8.3.1/run/build" ] }, { "group": "Advanced", "pages": [ - "8.3.1/configure/attributes", - "8.3.1/configure/integrate-cpp", - "8.3.1/configure/coverage", - "8.3.1/configure/best-practices", - "8.3.1/configure/windows", - "8.3.1/advanced/performance/build-performance-metrics", - "8.3.1/advanced/performance/build-performance-breakdown", - "8.3.1/advanced/performance/json-trace-profile", - "8.3.1/advanced/performance/memory", - "8.3.1/advanced/performance/iteration-speed" + "versions/8.3.1/configure/attributes", + "versions/8.3.1/configure/integrate-cpp", + "versions/8.3.1/configure/coverage", + "versions/8.3.1/configure/best-practices", + "versions/8.3.1/configure/windows", + "versions/8.3.1/advanced/performance/build-performance-metrics", + "versions/8.3.1/advanced/performance/build-performance-breakdown", + "versions/8.3.1/advanced/performance/json-trace-profile", + "versions/8.3.1/advanced/performance/memory", + "versions/8.3.1/advanced/performance/iteration-speed" ] }, { "group": "Remote Execution", "pages": [ - "8.3.1/remote/rbe", - "8.3.1/remote/rules", - "8.3.1/remote/ci", - "8.3.1/remote/dynamic", - "8.3.1/remote/caching", - "8.3.1/remote/sandbox", - "8.3.1/remote/workspace", - "8.3.1/remote/cache-remote", - "8.3.1/remote/cache-local", - "8.3.1/remote/output-directories", - "8.3.1/remote/persistent", - "8.3.1/remote/multiplex", - "8.3.1/remote/creating", - "8.3.1/remote/bep", - "8.3.1/remote/bep-examples", - "8.3.1/remote/bep-glossary" + "versions/8.3.1/remote/rbe", + "versions/8.3.1/remote/rules", + "versions/8.3.1/remote/ci", + "versions/8.3.1/remote/dynamic", + "versions/8.3.1/remote/caching", + "versions/8.3.1/remote/sandbox", + "versions/8.3.1/remote/workspace", + "versions/8.3.1/remote/cache-remote", 
+ "versions/8.3.1/remote/cache-local", + "versions/8.3.1/remote/output-directories", + "versions/8.3.1/remote/persistent", + "versions/8.3.1/remote/multiplex", + "versions/8.3.1/remote/creating", + "versions/8.3.1/remote/bep", + "versions/8.3.1/remote/bep-examples", + "versions/8.3.1/remote/bep-glossary" ] }, { "group": "Tutorials", "pages": [ - "8.3.1/tutorials/cpp-use-cases", - "8.3.1/tutorials/ccp-toolchain-config", - "8.3.1/tutorials/cpp-dependency", - "8.3.1/tutorials/cpp-labels" + "versions/8.3.1/tutorials/cpp-use-cases", + "versions/8.3.1/tutorials/ccp-toolchain-config", + "versions/8.3.1/tutorials/cpp-dependency", + "versions/8.3.1/tutorials/cpp-labels" ] }, { "group": "Migrate", "pages": [ - "8.3.1/migrate", - "8.3.1/migrate/maven", - "8.3.1/migrate/xcode" + "versions/8.3.1/migrate", + "versions/8.3.1/migrate/maven", + "versions/8.3.1/migrate/xcode" ] } ] @@ -708,34 +708,34 @@ { "group": "Build encyclopedia", "pages": [ - "8.3.1/reference/be/overview", - "8.3.1/reference/be/common-definitions", - "8.3.1/reference/be/make-variables", - "8.3.1/reference/be/functions" + "versions/8.3.1/reference/be/overview", + "versions/8.3.1/reference/be/common-definitions", + "versions/8.3.1/reference/be/make-variables", + "versions/8.3.1/reference/be/functions" ] }, { "group": "Command line reference", "pages": [ - "8.3.1/reference/command-line-reference" + "versions/8.3.1/reference/command-line-reference" ] }, { "group": "Query Language", "pages": [ - "8.3.1/query/language" + "versions/8.3.1/query/language" ] }, { "group": "Glossary", "pages": [ - "8.3.1/reference/glossary" + "versions/8.3.1/reference/glossary" ] }, { "group": "Flag cheatsheet", "pages": [ - "8.3.1/reference/flag-cheatsheet" + "versions/8.3.1/reference/flag-cheatsheet" ] } ] @@ -746,35 +746,35 @@ { "group": "Concepts", "pages": [ - "8.3.1/extending/concepts" + "versions/8.3.1/extending/concepts" ] }, { "group": "Writing rules", "pages": [ - "8.3.1/rules/rules-tutorial", - "8.3.1/rules/macro-tutorial", - "8.3.1/rules/legacy-macro-tutorial", - "8.3.1/rules/verbs-tutorial", - "8.3.1/rules/language", - "8.3.1/rules/bzl-style", - "8.3.1/rules/challenges", - "8.3.1/rules/windows" + "versions/8.3.1/rules/rules-tutorial", + "versions/8.3.1/rules/macro-tutorial", + "versions/8.3.1/rules/legacy-macro-tutorial", + "versions/8.3.1/rules/verbs-tutorial", + "versions/8.3.1/rules/language", + "versions/8.3.1/rules/bzl-style", + "versions/8.3.1/rules/challenges", + "versions/8.3.1/rules/windows" ] }, { "group": "Distributing rules", "pages": [ - "8.3.1/rules/testing", - "8.3.1/rules/performance", - "8.3.1/rules/deploying" + "versions/8.3.1/rules/testing", + "versions/8.3.1/rules/performance", + "versions/8.3.1/rules/deploying" ] }, { "group": "APIs", "pages": [ - "8.3.1/rules/lib/overview", - "8.3.1/rules/lib/globals" + "versions/8.3.1/rules/lib/overview", + "versions/8.3.1/rules/lib/globals" ] } ] @@ -785,36 +785,36 @@ { "group": "Contributing", "pages": [ - "8.3.1/contribute", - "8.3.1/contribute/policy", - "8.3.1/contribute/patch-acceptance", - "8.3.1/contribute/maintainers-guide", - "8.3.1/contribute/codebase", - "8.3.1/contribute/search", - "8.3.1/contribute/statemachine-guide", - "8.3.1/contribute/docs", - "8.3.1/contribute/docs-style-guide", - "8.3.1/contribute/design-documents", - "8.3.1/contribute/release-notes" + "versions/8.3.1/contribute", + "versions/8.3.1/contribute/policy", + "versions/8.3.1/contribute/patch-acceptance", + "versions/8.3.1/contribute/maintainers-guide", + "versions/8.3.1/contribute/codebase", + 
"versions/8.3.1/contribute/search", + "versions/8.3.1/contribute/statemachine-guide", + "versions/8.3.1/contribute/docs", + "versions/8.3.1/contribute/docs-style-guide", + "versions/8.3.1/contribute/design-documents", + "versions/8.3.1/contribute/release-notes" ] }, { "group": "Programs", "pages": [ - "8.3.1/community/update", - "8.3.1/community/sig", - "8.3.1/community/experts", - "8.3.1/community/partners", - "8.3.1/community/users", - "8.3.1/community/recommended-rules", - "8.3.1/community/remote-execution-services" + "versions/8.3.1/community/update", + "versions/8.3.1/community/sig", + "versions/8.3.1/community/experts", + "versions/8.3.1/community/partners", + "versions/8.3.1/community/users", + "versions/8.3.1/community/recommended-rules", + "versions/8.3.1/community/remote-execution-services" ] }, { "group": "Getting help", "pages": [ - "8.3.1/help", - "8.3.1/contribute/policy" + "versions/8.3.1/help", + "versions/8.3.1/contribute/policy" ] } ] @@ -835,23 +835,23 @@ { "group": "Why Bazel?", "pages": [ - "8.2.1/about/intro", - "8.2.1/about/why", - "8.2.1/about/vision", - "8.2.1/about/roadmap", - "8.2.1/about/faq" + "versions/8.2.1/about/intro", + "versions/8.2.1/about/why", + "versions/8.2.1/about/vision", + "versions/8.2.1/about/roadmap", + "versions/8.2.1/about/faq" ] }, { "group": "Build system basics", "pages": [ - "8.2.1/basics", - "8.2.1/basics/build-systems", - "8.2.1/basics/task-based-builds", - "8.2.1/basics/artifact-based-builds", - "8.2.1/basics/distributed-builds", - "8.2.1/basics/dependencies", - "8.2.1/basics/hermeticity" + "versions/8.2.1/basics", + "versions/8.2.1/basics/build-systems", + "versions/8.2.1/basics/task-based-builds", + "versions/8.2.1/basics/artifact-based-builds", + "versions/8.2.1/basics/distributed-builds", + "versions/8.2.1/basics/dependencies", + "versions/8.2.1/basics/hermeticity" ] } ] @@ -862,37 +862,37 @@ { "group": "Install", "pages": [ - "8.2.1/install", - "8.2.1/install/bazelisk", - "8.2.1/install/os-x", - "8.2.1/install/windows", - "8.2.1/install/ubuntu", - "8.2.1/install/suse", - "8.2.1/install/docker-container", - "8.2.1/install/compile-source", - "8.2.1/install/completion", - "8.2.1/install/ide" + "versions/8.2.1/install", + "versions/8.2.1/install/bazelisk", + "versions/8.2.1/install/os-x", + "versions/8.2.1/install/windows", + "versions/8.2.1/install/ubuntu", + "versions/8.2.1/install/suse", + "versions/8.2.1/install/docker-container", + "versions/8.2.1/install/compile-source", + "versions/8.2.1/install/completion", + "versions/8.2.1/install/ide" ] }, { "group": "First build tutorials", "pages": [ - "8.2.1/start/cpp", - "8.2.1/start/java", - "8.2.1/start/android-app", - "8.2.1/start/ios-app" + "versions/8.2.1/start/cpp", + "versions/8.2.1/start/java", + "versions/8.2.1/start/android-app", + "versions/8.2.1/start/ios-app" ] }, { "group": "Concepts", "pages": [ - "8.2.1/concepts/build-ref", - "8.2.1/concepts/labels", - "8.2.1/concepts/build-files", - "8.2.1/concepts/dependencies", - "8.2.1/concepts/visibility", - "8.2.1/concepts/platforms", - "8.2.1/concepts/hermeticity" + "versions/8.2.1/concepts/build-ref", + "versions/8.2.1/concepts/labels", + "versions/8.2.1/concepts/build-files", + "versions/8.2.1/concepts/dependencies", + "versions/8.2.1/concepts/visibility", + "versions/8.2.1/concepts/platforms", + "versions/8.2.1/concepts/hermeticity" ] } ] @@ -903,72 +903,72 @@ { "group": "Releases", "pages": [ - "8.2.1/release", - "8.2.1/release/rolling", - "8.2.1/release/backward-compatibility", - "8.2.1/release/rule-compatibility" + 
"versions/8.2.1/release", + "versions/8.2.1/release/rolling", + "versions/8.2.1/release/backward-compatibility", + "versions/8.2.1/release/rule-compatibility" ] }, { "group": "Basics", "pages": [ - "8.2.1/build/style-guide", - "8.2.1/build/share-variables", - "8.2.1/community/recommended-rules", - "8.2.1/run/build" + "versions/8.2.1/build/style-guide", + "versions/8.2.1/build/share-variables", + "versions/8.2.1/community/recommended-rules", + "versions/8.2.1/run/build" ] }, { "group": "Advanced", "pages": [ - "8.2.1/configure/attributes", - "8.2.1/configure/integrate-cpp", - "8.2.1/configure/coverage", - "8.2.1/configure/best-practices", - "8.2.1/configure/windows", - "8.2.1/advanced/performance/build-performance-metrics", - "8.2.1/advanced/performance/build-performance-breakdown", - "8.2.1/advanced/performance/json-trace-profile", - "8.2.1/advanced/performance/memory", - "8.2.1/advanced/performance/iteration-speed" + "versions/8.2.1/configure/attributes", + "versions/8.2.1/configure/integrate-cpp", + "versions/8.2.1/configure/coverage", + "versions/8.2.1/configure/best-practices", + "versions/8.2.1/configure/windows", + "versions/8.2.1/advanced/performance/build-performance-metrics", + "versions/8.2.1/advanced/performance/build-performance-breakdown", + "versions/8.2.1/advanced/performance/json-trace-profile", + "versions/8.2.1/advanced/performance/memory", + "versions/8.2.1/advanced/performance/iteration-speed" ] }, { "group": "Remote Execution", "pages": [ - "8.2.1/remote/rbe", - "8.2.1/remote/rules", - "8.2.1/remote/ci", - "8.2.1/remote/dynamic", - "8.2.1/remote/caching", - "8.2.1/remote/sandbox", - "8.2.1/remote/workspace", - "8.2.1/remote/cache-remote", - "8.2.1/remote/cache-local", - "8.2.1/remote/output-directories", - "8.2.1/remote/persistent", - "8.2.1/remote/multiplex", - "8.2.1/remote/creating", - "8.2.1/remote/bep", - "8.2.1/remote/bep-examples", - "8.2.1/remote/bep-glossary" + "versions/8.2.1/remote/rbe", + "versions/8.2.1/remote/rules", + "versions/8.2.1/remote/ci", + "versions/8.2.1/remote/dynamic", + "versions/8.2.1/remote/caching", + "versions/8.2.1/remote/sandbox", + "versions/8.2.1/remote/workspace", + "versions/8.2.1/remote/cache-remote", + "versions/8.2.1/remote/cache-local", + "versions/8.2.1/remote/output-directories", + "versions/8.2.1/remote/persistent", + "versions/8.2.1/remote/multiplex", + "versions/8.2.1/remote/creating", + "versions/8.2.1/remote/bep", + "versions/8.2.1/remote/bep-examples", + "versions/8.2.1/remote/bep-glossary" ] }, { "group": "Tutorials", "pages": [ - "8.2.1/tutorials/cpp-use-cases", - "8.2.1/tutorials/ccp-toolchain-config", - "8.2.1/tutorials/cpp-dependency", - "8.2.1/tutorials/cpp-labels" + "versions/8.2.1/tutorials/cpp-use-cases", + "versions/8.2.1/tutorials/ccp-toolchain-config", + "versions/8.2.1/tutorials/cpp-dependency", + "versions/8.2.1/tutorials/cpp-labels" ] }, { "group": "Migrate", "pages": [ - "8.2.1/migrate", - "8.2.1/migrate/maven", - "8.2.1/migrate/xcode" + "versions/8.2.1/migrate", + "versions/8.2.1/migrate/maven", + "versions/8.2.1/migrate/xcode" ] } ] @@ -979,34 +979,34 @@ { "group": "Build encyclopedia", "pages": [ - "8.2.1/reference/be/overview", - "8.2.1/reference/be/common-definitions", - "8.2.1/reference/be/make-variables", - "8.2.1/reference/be/functions" + "versions/8.2.1/reference/be/overview", + "versions/8.2.1/reference/be/common-definitions", + "versions/8.2.1/reference/be/make-variables", + "versions/8.2.1/reference/be/functions" ] }, { "group": "Command line reference", "pages": [ - 
"8.2.1/reference/command-line-reference" + "versions/8.2.1/reference/command-line-reference" ] }, { "group": "Query Language", "pages": [ - "8.2.1/query/language" + "versions/8.2.1/query/language" ] }, { "group": "Glossary", "pages": [ - "8.2.1/reference/glossary" + "versions/8.2.1/reference/glossary" ] }, { "group": "Flag cheatsheet", "pages": [ - "8.2.1/reference/flag-cheatsheet" + "versions/8.2.1/reference/flag-cheatsheet" ] } ] @@ -1017,35 +1017,35 @@ { "group": "Concepts", "pages": [ - "8.2.1/extending/concepts" + "versions/8.2.1/extending/concepts" ] }, { "group": "Writing rules", "pages": [ - "8.2.1/rules/rules-tutorial", - "8.2.1/rules/macro-tutorial", - "8.2.1/rules/legacy-macro-tutorial", - "8.2.1/rules/verbs-tutorial", - "8.2.1/rules/language", - "8.2.1/rules/bzl-style", - "8.2.1/rules/challenges", - "8.2.1/rules/windows" + "versions/8.2.1/rules/rules-tutorial", + "versions/8.2.1/rules/macro-tutorial", + "versions/8.2.1/rules/legacy-macro-tutorial", + "versions/8.2.1/rules/verbs-tutorial", + "versions/8.2.1/rules/language", + "versions/8.2.1/rules/bzl-style", + "versions/8.2.1/rules/challenges", + "versions/8.2.1/rules/windows" ] }, { "group": "Distributing rules", "pages": [ - "8.2.1/rules/testing", - "8.2.1/rules/performance", - "8.2.1/rules/deploying" + "versions/8.2.1/rules/testing", + "versions/8.2.1/rules/performance", + "versions/8.2.1/rules/deploying" ] }, { "group": "APIs", "pages": [ - "8.2.1/rules/lib/overview", - "8.2.1/rules/lib/globals" + "versions/8.2.1/rules/lib/overview", + "versions/8.2.1/rules/lib/globals" ] } ] @@ -1056,36 +1056,36 @@ { "group": "Contributing", "pages": [ - "8.2.1/contribute", - "8.2.1/contribute/policy", - "8.2.1/contribute/patch-acceptance", - "8.2.1/contribute/maintainers-guide", - "8.2.1/contribute/codebase", - "8.2.1/contribute/search", - "8.2.1/contribute/statemachine-guide", - "8.2.1/contribute/docs", - "8.2.1/contribute/docs-style-guide", - "8.2.1/contribute/design-documents", - "8.2.1/contribute/release-notes" + "versions/8.2.1/contribute", + "versions/8.2.1/contribute/policy", + "versions/8.2.1/contribute/patch-acceptance", + "versions/8.2.1/contribute/maintainers-guide", + "versions/8.2.1/contribute/codebase", + "versions/8.2.1/contribute/search", + "versions/8.2.1/contribute/statemachine-guide", + "versions/8.2.1/contribute/docs", + "versions/8.2.1/contribute/docs-style-guide", + "versions/8.2.1/contribute/design-documents", + "versions/8.2.1/contribute/release-notes" ] }, { "group": "Programs", "pages": [ - "8.2.1/community/update", - "8.2.1/community/sig", - "8.2.1/community/experts", - "8.2.1/community/partners", - "8.2.1/community/users", - "8.2.1/community/recommended-rules", - "8.2.1/community/remote-execution-services" + "versions/8.2.1/community/update", + "versions/8.2.1/community/sig", + "versions/8.2.1/community/experts", + "versions/8.2.1/community/partners", + "versions/8.2.1/community/users", + "versions/8.2.1/community/recommended-rules", + "versions/8.2.1/community/remote-execution-services" ] }, { "group": "Getting help", "pages": [ - "8.2.1/help", - "8.2.1/contribute/policy" + "versions/8.2.1/help", + "versions/8.2.1/contribute/policy" ] } ] @@ -1106,23 +1106,23 @@ { "group": "Why Bazel?", "pages": [ - "8.1.1/about/intro", - "8.1.1/about/why", - "8.1.1/about/vision", - "8.1.1/about/roadmap", - "8.1.1/about/faq" + "versions/8.1.1/about/intro", + "versions/8.1.1/about/why", + "versions/8.1.1/about/vision", + "versions/8.1.1/about/roadmap", + "versions/8.1.1/about/faq" ] }, { "group": "Build system basics", "pages": 
[ - "8.1.1/basics", - "8.1.1/basics/build-systems", - "8.1.1/basics/task-based-builds", - "8.1.1/basics/artifact-based-builds", - "8.1.1/basics/distributed-builds", - "8.1.1/basics/dependencies", - "8.1.1/basics/hermeticity" + "versions/8.1.1/basics", + "versions/8.1.1/basics/build-systems", + "versions/8.1.1/basics/task-based-builds", + "versions/8.1.1/basics/artifact-based-builds", + "versions/8.1.1/basics/distributed-builds", + "versions/8.1.1/basics/dependencies", + "versions/8.1.1/basics/hermeticity" ] } ] @@ -1133,37 +1133,37 @@ { "group": "Install", "pages": [ - "8.1.1/install", - "8.1.1/install/bazelisk", - "8.1.1/install/os-x", - "8.1.1/install/windows", - "8.1.1/install/ubuntu", - "8.1.1/install/suse", - "8.1.1/install/docker-container", - "8.1.1/install/compile-source", - "8.1.1/install/completion", - "8.1.1/install/ide" + "versions/8.1.1/install", + "versions/8.1.1/install/bazelisk", + "versions/8.1.1/install/os-x", + "versions/8.1.1/install/windows", + "versions/8.1.1/install/ubuntu", + "versions/8.1.1/install/suse", + "versions/8.1.1/install/docker-container", + "versions/8.1.1/install/compile-source", + "versions/8.1.1/install/completion", + "versions/8.1.1/install/ide" ] }, { "group": "First build tutorials", "pages": [ - "8.1.1/start/cpp", - "8.1.1/start/java", - "8.1.1/start/android-app", - "8.1.1/start/ios-app" + "versions/8.1.1/start/cpp", + "versions/8.1.1/start/java", + "versions/8.1.1/start/android-app", + "versions/8.1.1/start/ios-app" ] }, { "group": "Concepts", "pages": [ - "8.1.1/concepts/build-ref", - "8.1.1/concepts/labels", - "8.1.1/concepts/build-files", - "8.1.1/concepts/dependencies", - "8.1.1/concepts/visibility", - "8.1.1/concepts/platforms", - "8.1.1/concepts/hermeticity" + "versions/8.1.1/concepts/build-ref", + "versions/8.1.1/concepts/labels", + "versions/8.1.1/concepts/build-files", + "versions/8.1.1/concepts/dependencies", + "versions/8.1.1/concepts/visibility", + "versions/8.1.1/concepts/platforms", + "versions/8.1.1/concepts/hermeticity" ] } ] @@ -1174,72 +1174,72 @@ { "group": "Releases", "pages": [ - "8.1.1/release", - "8.1.1/release/rolling", - "8.1.1/release/backward-compatibility", - "8.1.1/release/rule-compatibility" + "versions/8.1.1/release", + "versions/8.1.1/release/rolling", + "versions/8.1.1/release/backward-compatibility", + "versions/8.1.1/release/rule-compatibility" ] }, { "group": "Basics", "pages": [ - "8.1.1/build/style-guide", - "8.1.1/build/share-variables", - "8.1.1/community/recommended-rules", - "8.1.1/run/build" + "versions/8.1.1/build/style-guide", + "versions/8.1.1/build/share-variables", + "versions/8.1.1/community/recommended-rules", + "versions/8.1.1/run/build" ] }, { "group": "Advanced", "pages": [ - "8.1.1/configure/attributes", - "8.1.1/configure/integrate-cpp", - "8.1.1/configure/coverage", - "8.1.1/configure/best-practices", - "8.1.1/configure/windows", - "8.1.1/advanced/performance/build-performance-metrics", - "8.1.1/advanced/performance/build-performance-breakdown", - "8.1.1/advanced/performance/json-trace-profile", - "8.1.1/advanced/performance/memory", - "8.1.1/advanced/performance/iteration-speed" + "versions/8.1.1/configure/attributes", + "versions/8.1.1/configure/integrate-cpp", + "versions/8.1.1/configure/coverage", + "versions/8.1.1/configure/best-practices", + "versions/8.1.1/configure/windows", + "versions/8.1.1/advanced/performance/build-performance-metrics", + "versions/8.1.1/advanced/performance/build-performance-breakdown", + "versions/8.1.1/advanced/performance/json-trace-profile", + 
"versions/8.1.1/advanced/performance/memory", + "versions/8.1.1/advanced/performance/iteration-speed" ] }, { "group": "Remote Execution", "pages": [ - "8.1.1/remote/rbe", - "8.1.1/remote/rules", - "8.1.1/remote/ci", - "8.1.1/remote/dynamic", - "8.1.1/remote/caching", - "8.1.1/remote/sandbox", - "8.1.1/remote/workspace", - "8.1.1/remote/cache-remote", - "8.1.1/remote/cache-local", - "8.1.1/remote/output-directories", - "8.1.1/remote/persistent", - "8.1.1/remote/multiplex", - "8.1.1/remote/creating", - "8.1.1/remote/bep", - "8.1.1/remote/bep-examples", - "8.1.1/remote/bep-glossary" + "versions/8.1.1/remote/rbe", + "versions/8.1.1/remote/rules", + "versions/8.1.1/remote/ci", + "versions/8.1.1/remote/dynamic", + "versions/8.1.1/remote/caching", + "versions/8.1.1/remote/sandbox", + "versions/8.1.1/remote/workspace", + "versions/8.1.1/remote/cache-remote", + "versions/8.1.1/remote/cache-local", + "versions/8.1.1/remote/output-directories", + "versions/8.1.1/remote/persistent", + "versions/8.1.1/remote/multiplex", + "versions/8.1.1/remote/creating", + "versions/8.1.1/remote/bep", + "versions/8.1.1/remote/bep-examples", + "versions/8.1.1/remote/bep-glossary" ] }, { "group": "Tutorials", "pages": [ - "8.1.1/tutorials/cpp-use-cases", - "8.1.1/tutorials/ccp-toolchain-config", - "8.1.1/tutorials/cpp-dependency", - "8.1.1/tutorials/cpp-labels" + "versions/8.1.1/tutorials/cpp-use-cases", + "versions/8.1.1/tutorials/ccp-toolchain-config", + "versions/8.1.1/tutorials/cpp-dependency", + "versions/8.1.1/tutorials/cpp-labels" ] }, { "group": "Migrate", "pages": [ - "8.1.1/migrate", - "8.1.1/migrate/maven", - "8.1.1/migrate/xcode" + "versions/8.1.1/migrate", + "versions/8.1.1/migrate/maven", + "versions/8.1.1/migrate/xcode" ] } ] @@ -1250,34 +1250,34 @@ { "group": "Build encyclopedia", "pages": [ - "8.1.1/reference/be/overview", - "8.1.1/reference/be/common-definitions", - "8.1.1/reference/be/make-variables", - "8.1.1/reference/be/functions" + "versions/8.1.1/reference/be/overview", + "versions/8.1.1/reference/be/common-definitions", + "versions/8.1.1/reference/be/make-variables", + "versions/8.1.1/reference/be/functions" ] }, { "group": "Command line reference", "pages": [ - "8.1.1/reference/command-line-reference" + "versions/8.1.1/reference/command-line-reference" ] }, { "group": "Query Language", "pages": [ - "8.1.1/query/language" + "versions/8.1.1/query/language" ] }, { "group": "Glossary", "pages": [ - "8.1.1/reference/glossary" + "versions/8.1.1/reference/glossary" ] }, { "group": "Flag cheatsheet", "pages": [ - "8.1.1/reference/flag-cheatsheet" + "versions/8.1.1/reference/flag-cheatsheet" ] } ] @@ -1288,35 +1288,35 @@ { "group": "Concepts", "pages": [ - "8.1.1/extending/concepts" + "versions/8.1.1/extending/concepts" ] }, { "group": "Writing rules", "pages": [ - "8.1.1/rules/rules-tutorial", - "8.1.1/rules/macro-tutorial", - "8.1.1/rules/legacy-macro-tutorial", - "8.1.1/rules/verbs-tutorial", - "8.1.1/rules/language", - "8.1.1/rules/bzl-style", - "8.1.1/rules/challenges", - "8.1.1/rules/windows" + "versions/8.1.1/rules/rules-tutorial", + "versions/8.1.1/rules/macro-tutorial", + "versions/8.1.1/rules/legacy-macro-tutorial", + "versions/8.1.1/rules/verbs-tutorial", + "versions/8.1.1/rules/language", + "versions/8.1.1/rules/bzl-style", + "versions/8.1.1/rules/challenges", + "versions/8.1.1/rules/windows" ] }, { "group": "Distributing rules", "pages": [ - "8.1.1/rules/testing", - "8.1.1/rules/performance", - "8.1.1/rules/deploying" + "versions/8.1.1/rules/testing", + "versions/8.1.1/rules/performance", + 
"versions/8.1.1/rules/deploying" ] }, { "group": "APIs", "pages": [ - "8.1.1/rules/lib/overview", - "8.1.1/rules/lib/globals" + "versions/8.1.1/rules/lib/overview", + "versions/8.1.1/rules/lib/globals" ] } ] @@ -1327,36 +1327,36 @@ { "group": "Contributing", "pages": [ - "8.1.1/contribute", - "8.1.1/contribute/policy", - "8.1.1/contribute/patch-acceptance", - "8.1.1/contribute/maintainers-guide", - "8.1.1/contribute/codebase", - "8.1.1/contribute/search", - "8.1.1/contribute/statemachine-guide", - "8.1.1/contribute/docs", - "8.1.1/contribute/docs-style-guide", - "8.1.1/contribute/design-documents", - "8.1.1/contribute/release-notes" + "versions/8.1.1/contribute", + "versions/8.1.1/contribute/policy", + "versions/8.1.1/contribute/patch-acceptance", + "versions/8.1.1/contribute/maintainers-guide", + "versions/8.1.1/contribute/codebase", + "versions/8.1.1/contribute/search", + "versions/8.1.1/contribute/statemachine-guide", + "versions/8.1.1/contribute/docs", + "versions/8.1.1/contribute/docs-style-guide", + "versions/8.1.1/contribute/design-documents", + "versions/8.1.1/contribute/release-notes" ] }, { "group": "Programs", "pages": [ - "8.1.1/community/update", - "8.1.1/community/sig", - "8.1.1/community/experts", - "8.1.1/community/partners", - "8.1.1/community/users", - "8.1.1/community/recommended-rules", - "8.1.1/community/remote-execution-services" + "versions/8.1.1/community/update", + "versions/8.1.1/community/sig", + "versions/8.1.1/community/experts", + "versions/8.1.1/community/partners", + "versions/8.1.1/community/users", + "versions/8.1.1/community/recommended-rules", + "versions/8.1.1/community/remote-execution-services" ] }, { "group": "Getting help", "pages": [ - "8.1.1/help", - "8.1.1/contribute/policy" + "versions/8.1.1/help", + "versions/8.1.1/contribute/policy" ] } ] @@ -1377,23 +1377,23 @@ { "group": "Why Bazel?", "pages": [ - "8.0.1/about/intro", - "8.0.1/about/why", - "8.0.1/about/vision", - "8.0.1/about/roadmap", - "8.0.1/about/faq" + "versions/8.0.1/about/intro", + "versions/8.0.1/about/why", + "versions/8.0.1/about/vision", + "versions/8.0.1/about/roadmap", + "versions/8.0.1/about/faq" ] }, { "group": "Build system basics", "pages": [ - "8.0.1/basics", - "8.0.1/basics/build-systems", - "8.0.1/basics/task-based-builds", - "8.0.1/basics/artifact-based-builds", - "8.0.1/basics/distributed-builds", - "8.0.1/basics/dependencies", - "8.0.1/basics/hermeticity" + "versions/8.0.1/basics", + "versions/8.0.1/basics/build-systems", + "versions/8.0.1/basics/task-based-builds", + "versions/8.0.1/basics/artifact-based-builds", + "versions/8.0.1/basics/distributed-builds", + "versions/8.0.1/basics/dependencies", + "versions/8.0.1/basics/hermeticity" ] } ] @@ -1404,37 +1404,37 @@ { "group": "Install", "pages": [ - "8.0.1/install", - "8.0.1/install/bazelisk", - "8.0.1/install/os-x", - "8.0.1/install/windows", - "8.0.1/install/ubuntu", - "8.0.1/install/suse", - "8.0.1/install/docker-container", - "8.0.1/install/compile-source", - "8.0.1/install/completion", - "8.0.1/install/ide" + "versions/8.0.1/install", + "versions/8.0.1/install/bazelisk", + "versions/8.0.1/install/os-x", + "versions/8.0.1/install/windows", + "versions/8.0.1/install/ubuntu", + "versions/8.0.1/install/suse", + "versions/8.0.1/install/docker-container", + "versions/8.0.1/install/compile-source", + "versions/8.0.1/install/completion", + "versions/8.0.1/install/ide" ] }, { "group": "First build tutorials", "pages": [ - "8.0.1/start/cpp", - "8.0.1/start/java", - "8.0.1/start/android-app", - "8.0.1/start/ios-app" + 
"versions/8.0.1/start/cpp", + "versions/8.0.1/start/java", + "versions/8.0.1/start/android-app", + "versions/8.0.1/start/ios-app" ] }, { "group": "Concepts", "pages": [ - "8.0.1/concepts/build-ref", - "8.0.1/concepts/labels", - "8.0.1/concepts/build-files", - "8.0.1/concepts/dependencies", - "8.0.1/concepts/visibility", - "8.0.1/concepts/platforms", - "8.0.1/concepts/hermeticity" + "versions/8.0.1/concepts/build-ref", + "versions/8.0.1/concepts/labels", + "versions/8.0.1/concepts/build-files", + "versions/8.0.1/concepts/dependencies", + "versions/8.0.1/concepts/visibility", + "versions/8.0.1/concepts/platforms", + "versions/8.0.1/concepts/hermeticity" ] } ] @@ -1445,72 +1445,72 @@ { "group": "Releases", "pages": [ - "8.0.1/release", - "8.0.1/release/rolling", - "8.0.1/release/backward-compatibility", - "8.0.1/release/rule-compatibility" + "versions/8.0.1/release", + "versions/8.0.1/release/rolling", + "versions/8.0.1/release/backward-compatibility", + "versions/8.0.1/release/rule-compatibility" ] }, { "group": "Basics", "pages": [ - "8.0.1/build/style-guide", - "8.0.1/build/share-variables", - "8.0.1/community/recommended-rules", - "8.0.1/run/build" + "versions/8.0.1/build/style-guide", + "versions/8.0.1/build/share-variables", + "versions/8.0.1/community/recommended-rules", + "versions/8.0.1/run/build" ] }, { "group": "Advanced", "pages": [ - "8.0.1/configure/attributes", - "8.0.1/configure/integrate-cpp", - "8.0.1/configure/coverage", - "8.0.1/configure/best-practices", - "8.0.1/configure/windows", - "8.0.1/advanced/performance/build-performance-metrics", - "8.0.1/advanced/performance/build-performance-breakdown", - "8.0.1/advanced/performance/json-trace-profile", - "8.0.1/advanced/performance/memory", - "8.0.1/advanced/performance/iteration-speed" + "versions/8.0.1/configure/attributes", + "versions/8.0.1/configure/integrate-cpp", + "versions/8.0.1/configure/coverage", + "versions/8.0.1/configure/best-practices", + "versions/8.0.1/configure/windows", + "versions/8.0.1/advanced/performance/build-performance-metrics", + "versions/8.0.1/advanced/performance/build-performance-breakdown", + "versions/8.0.1/advanced/performance/json-trace-profile", + "versions/8.0.1/advanced/performance/memory", + "versions/8.0.1/advanced/performance/iteration-speed" ] }, { "group": "Remote Execution", "pages": [ - "8.0.1/remote/rbe", - "8.0.1/remote/rules", - "8.0.1/remote/ci", - "8.0.1/remote/dynamic", - "8.0.1/remote/caching", - "8.0.1/remote/sandbox", - "8.0.1/remote/workspace", - "8.0.1/remote/cache-remote", - "8.0.1/remote/cache-local", - "8.0.1/remote/output-directories", - "8.0.1/remote/persistent", - "8.0.1/remote/multiplex", - "8.0.1/remote/creating", - "8.0.1/remote/bep", - "8.0.1/remote/bep-examples", - "8.0.1/remote/bep-glossary" + "versions/8.0.1/remote/rbe", + "versions/8.0.1/remote/rules", + "versions/8.0.1/remote/ci", + "versions/8.0.1/remote/dynamic", + "versions/8.0.1/remote/caching", + "versions/8.0.1/remote/sandbox", + "versions/8.0.1/remote/workspace", + "versions/8.0.1/remote/cache-remote", + "versions/8.0.1/remote/cache-local", + "versions/8.0.1/remote/output-directories", + "versions/8.0.1/remote/persistent", + "versions/8.0.1/remote/multiplex", + "versions/8.0.1/remote/creating", + "versions/8.0.1/remote/bep", + "versions/8.0.1/remote/bep-examples", + "versions/8.0.1/remote/bep-glossary" ] }, { "group": "Tutorials", "pages": [ - "8.0.1/tutorials/cpp-use-cases", - "8.0.1/tutorials/ccp-toolchain-config", - "8.0.1/tutorials/cpp-dependency", - "8.0.1/tutorials/cpp-labels" + 
"versions/8.0.1/tutorials/cpp-use-cases", + "versions/8.0.1/tutorials/ccp-toolchain-config", + "versions/8.0.1/tutorials/cpp-dependency", + "versions/8.0.1/tutorials/cpp-labels" ] }, { "group": "Migrate", "pages": [ - "8.0.1/migrate", - "8.0.1/migrate/maven", - "8.0.1/migrate/xcode" + "versions/8.0.1/migrate", + "versions/8.0.1/migrate/maven", + "versions/8.0.1/migrate/xcode" ] } ] @@ -1521,34 +1521,34 @@ { "group": "Build encyclopedia", "pages": [ - "8.0.1/reference/be/overview", - "8.0.1/reference/be/common-definitions", - "8.0.1/reference/be/make-variables", - "8.0.1/reference/be/functions" + "versions/8.0.1/reference/be/overview", + "versions/8.0.1/reference/be/common-definitions", + "versions/8.0.1/reference/be/make-variables", + "versions/8.0.1/reference/be/functions" ] }, { "group": "Command line reference", "pages": [ - "8.0.1/reference/command-line-reference" + "versions/8.0.1/reference/command-line-reference" ] }, { "group": "Query Language", "pages": [ - "8.0.1/query/language" + "versions/8.0.1/query/language" ] }, { "group": "Glossary", "pages": [ - "8.0.1/reference/glossary" + "versions/8.0.1/reference/glossary" ] }, { "group": "Flag cheatsheet", "pages": [ - "8.0.1/reference/flag-cheatsheet" + "versions/8.0.1/reference/flag-cheatsheet" ] } ] @@ -1559,35 +1559,35 @@ { "group": "Concepts", "pages": [ - "8.0.1/extending/concepts" + "versions/8.0.1/extending/concepts" ] }, { "group": "Writing rules", "pages": [ - "8.0.1/rules/rules-tutorial", - "8.0.1/rules/macro-tutorial", - "8.0.1/rules/legacy-macro-tutorial", - "8.0.1/rules/verbs-tutorial", - "8.0.1/rules/language", - "8.0.1/rules/bzl-style", - "8.0.1/rules/challenges", - "8.0.1/rules/windows" + "versions/8.0.1/rules/rules-tutorial", + "versions/8.0.1/rules/macro-tutorial", + "versions/8.0.1/rules/legacy-macro-tutorial", + "versions/8.0.1/rules/verbs-tutorial", + "versions/8.0.1/rules/language", + "versions/8.0.1/rules/bzl-style", + "versions/8.0.1/rules/challenges", + "versions/8.0.1/rules/windows" ] }, { "group": "Distributing rules", "pages": [ - "8.0.1/rules/testing", - "8.0.1/rules/performance", - "8.0.1/rules/deploying" + "versions/8.0.1/rules/testing", + "versions/8.0.1/rules/performance", + "versions/8.0.1/rules/deploying" ] }, { "group": "APIs", "pages": [ - "8.0.1/rules/lib/overview", - "8.0.1/rules/lib/globals" + "versions/8.0.1/rules/lib/overview", + "versions/8.0.1/rules/lib/globals" ] } ] @@ -1598,36 +1598,36 @@ { "group": "Contributing", "pages": [ - "8.0.1/contribute", - "8.0.1/contribute/policy", - "8.0.1/contribute/patch-acceptance", - "8.0.1/contribute/maintainers-guide", - "8.0.1/contribute/codebase", - "8.0.1/contribute/search", - "8.0.1/contribute/statemachine-guide", - "8.0.1/contribute/docs", - "8.0.1/contribute/docs-style-guide", - "8.0.1/contribute/design-documents", - "8.0.1/contribute/release-notes" + "versions/8.0.1/contribute", + "versions/8.0.1/contribute/policy", + "versions/8.0.1/contribute/patch-acceptance", + "versions/8.0.1/contribute/maintainers-guide", + "versions/8.0.1/contribute/codebase", + "versions/8.0.1/contribute/search", + "versions/8.0.1/contribute/statemachine-guide", + "versions/8.0.1/contribute/docs", + "versions/8.0.1/contribute/docs-style-guide", + "versions/8.0.1/contribute/design-documents", + "versions/8.0.1/contribute/release-notes" ] }, { "group": "Programs", "pages": [ - "8.0.1/community/update", - "8.0.1/community/sig", - "8.0.1/community/experts", - "8.0.1/community/partners", - "8.0.1/community/users", - "8.0.1/community/recommended-rules", - 
"8.0.1/community/remote-execution-services" + "versions/8.0.1/community/update", + "versions/8.0.1/community/sig", + "versions/8.0.1/community/experts", + "versions/8.0.1/community/partners", + "versions/8.0.1/community/users", + "versions/8.0.1/community/recommended-rules", + "versions/8.0.1/community/remote-execution-services" ] }, { "group": "Getting help", "pages": [ - "8.0.1/help", - "8.0.1/contribute/policy" + "versions/8.0.1/help", + "versions/8.0.1/contribute/policy" ] } ] @@ -1637,7 +1637,7 @@ ] }, { - "version": "7.6", + "version": "7.7", "languages": [ { "language": "en", @@ -1648,23 +1648,23 @@ { "group": "Why Bazel?", "pages": [ - "7.6.2/about/intro", - "7.6.2/about/why", - "7.6.2/about/vision", - "7.6.2/about/roadmap", - "7.6.2/about/faq" + "versions/7.7.0/about/intro", + "versions/7.7.0/about/why", + "versions/7.7.0/about/vision", + "versions/7.7.0/about/roadmap", + "versions/7.7.0/about/faq" ] }, { "group": "Build system basics", "pages": [ - "7.6.2/basics", - "7.6.2/basics/build-systems", - "7.6.2/basics/task-based-builds", - "7.6.2/basics/artifact-based-builds", - "7.6.2/basics/distributed-builds", - "7.6.2/basics/dependencies", - "7.6.2/basics/hermeticity" + "versions/7.7.0/basics", + "versions/7.7.0/basics/build-systems", + "versions/7.7.0/basics/task-based-builds", + "versions/7.7.0/basics/artifact-based-builds", + "versions/7.7.0/basics/distributed-builds", + "versions/7.7.0/basics/dependencies", + "versions/7.7.0/basics/hermeticity" ] } ] @@ -1675,37 +1675,37 @@ { "group": "Install", "pages": [ - "7.6.2/install", - "7.6.2/install/bazelisk", - "7.6.2/install/os-x", - "7.6.2/install/windows", - "7.6.2/install/ubuntu", - "7.6.2/install/suse", - "7.6.2/install/docker-container", - "7.6.2/install/compile-source", - "7.6.2/install/completion", - "7.6.2/install/ide" + "versions/7.7.0/install", + "versions/7.7.0/install/bazelisk", + "versions/7.7.0/install/os-x", + "versions/7.7.0/install/windows", + "versions/7.7.0/install/ubuntu", + "versions/7.7.0/install/suse", + "versions/7.7.0/install/docker-container", + "versions/7.7.0/install/compile-source", + "versions/7.7.0/install/completion", + "versions/7.7.0/install/ide" ] }, { "group": "First build tutorials", "pages": [ - "7.6.2/start/cpp", - "7.6.2/start/java", - "7.6.2/start/android-app", - "7.6.2/start/ios-app" + "versions/7.7.0/start/cpp", + "versions/7.7.0/start/java", + "versions/7.7.0/start/android-app", + "versions/7.7.0/start/ios-app" ] }, { "group": "Concepts", "pages": [ - "7.6.2/concepts/build-ref", - "7.6.2/concepts/labels", - "7.6.2/concepts/build-files", - "7.6.2/concepts/dependencies", - "7.6.2/concepts/visibility", - "7.6.2/concepts/platforms", - "7.6.2/concepts/hermeticity" + "versions/7.7.0/concepts/build-ref", + "versions/7.7.0/concepts/labels", + "versions/7.7.0/concepts/build-files", + "versions/7.7.0/concepts/dependencies", + "versions/7.7.0/concepts/visibility", + "versions/7.7.0/concepts/platforms", + "versions/7.7.0/concepts/hermeticity" ] } ] @@ -1716,72 +1716,72 @@ { "group": "Releases", "pages": [ - "7.6.2/release", - "7.6.2/release/rolling", - "7.6.2/release/backward-compatibility", - "7.6.2/release/rule-compatibility" + "versions/7.7.0/release", + "versions/7.7.0/release/rolling", + "versions/7.7.0/release/backward-compatibility", + "versions/7.7.0/release/rule-compatibility" ] }, { "group": "Basics", "pages": [ - "7.6.2/build/style-guide", - "7.6.2/build/share-variables", - "7.6.2/community/recommended-rules", - "7.6.2/run/build" + "versions/7.7.0/build/style-guide", + 
"versions/7.7.0/build/share-variables", + "versions/7.7.0/community/recommended-rules", + "versions/7.7.0/run/build" ] }, { "group": "Advanced", "pages": [ - "7.6.2/configure/attributes", - "7.6.2/configure/integrate-cpp", - "7.6.2/configure/coverage", - "7.6.2/configure/best-practices", - "7.6.2/configure/windows", - "7.6.2/advanced/performance/build-performance-metrics", - "7.6.2/advanced/performance/build-performance-breakdown", - "7.6.2/advanced/performance/json-trace-profile", - "7.6.2/advanced/performance/memory", - "7.6.2/advanced/performance/iteration-speed" + "versions/7.7.0/configure/attributes", + "versions/7.7.0/configure/integrate-cpp", + "versions/7.7.0/configure/coverage", + "versions/7.7.0/configure/best-practices", + "versions/7.7.0/configure/windows", + "versions/7.7.0/advanced/performance/build-performance-metrics", + "versions/7.7.0/advanced/performance/build-performance-breakdown", + "versions/7.7.0/advanced/performance/json-trace-profile", + "versions/7.7.0/advanced/performance/memory", + "versions/7.7.0/advanced/performance/iteration-speed" ] }, { "group": "Remote Execution", "pages": [ - "7.6.2/remote/rbe", - "7.6.2/remote/rules", - "7.6.2/remote/ci", - "7.6.2/remote/dynamic", - "7.6.2/remote/caching", - "7.6.2/remote/sandbox", - "7.6.2/remote/workspace", - "7.6.2/remote/cache-remote", - "7.6.2/remote/cache-local", - "7.6.2/remote/output-directories", - "7.6.2/remote/persistent", - "7.6.2/remote/multiplex", - "7.6.2/remote/creating", - "7.6.2/remote/bep", - "7.6.2/remote/bep-examples", - "7.6.2/remote/bep-glossary" + "versions/7.7.0/remote/rbe", + "versions/7.7.0/remote/rules", + "versions/7.7.0/remote/ci", + "versions/7.7.0/remote/dynamic", + "versions/7.7.0/remote/caching", + "versions/7.7.0/remote/sandbox", + "versions/7.7.0/remote/workspace", + "versions/7.7.0/remote/cache-remote", + "versions/7.7.0/remote/cache-local", + "versions/7.7.0/remote/output-directories", + "versions/7.7.0/remote/persistent", + "versions/7.7.0/remote/multiplex", + "versions/7.7.0/remote/creating", + "versions/7.7.0/remote/bep", + "versions/7.7.0/remote/bep-examples", + "versions/7.7.0/remote/bep-glossary" ] }, { "group": "Tutorials", "pages": [ - "7.6.2/tutorials/cpp-use-cases", - "7.6.2/tutorials/ccp-toolchain-config", - "7.6.2/tutorials/cpp-dependency", - "7.6.2/tutorials/cpp-labels" + "versions/7.7.0/tutorials/cpp-use-cases", + "versions/7.7.0/tutorials/ccp-toolchain-config", + "versions/7.7.0/tutorials/cpp-dependency", + "versions/7.7.0/tutorials/cpp-labels" ] }, { "group": "Migrate", "pages": [ - "7.6.2/migrate", - "7.6.2/migrate/maven", - "7.6.2/migrate/xcode" + "versions/7.7.0/migrate", + "versions/7.7.0/migrate/maven", + "versions/7.7.0/migrate/xcode" ] } ] @@ -1792,34 +1792,34 @@ { "group": "Build encyclopedia", "pages": [ - "7.6.2/reference/be/overview", - "7.6.2/reference/be/common-definitions", - "7.6.2/reference/be/make-variables", - "7.6.2/reference/be/functions" + "versions/7.7.0/reference/be/overview", + "versions/7.7.0/reference/be/common-definitions", + "versions/7.7.0/reference/be/make-variables", + "versions/7.7.0/reference/be/functions" ] }, { "group": "Command line reference", "pages": [ - "7.6.2/reference/command-line-reference" + "versions/7.7.0/reference/command-line-reference" ] }, { "group": "Query Language", "pages": [ - "7.6.2/query/language" + "versions/7.7.0/query/language" ] }, { "group": "Glossary", "pages": [ - "7.6.2/reference/glossary" + "versions/7.7.0/reference/glossary" ] }, { "group": "Flag cheatsheet", "pages": [ - 
"7.6.2/reference/flag-cheatsheet" + "versions/7.7.0/reference/flag-cheatsheet" ] } ] @@ -1830,35 +1830,35 @@ { "group": "Concepts", "pages": [ - "7.6.2/extending/concepts" + "versions/7.7.0/extending/concepts" ] }, { "group": "Writing rules", "pages": [ - "7.6.2/rules/rules-tutorial", - "7.6.2/rules/macro-tutorial", - "7.6.2/rules/legacy-macro-tutorial", - "7.6.2/rules/verbs-tutorial", - "7.6.2/rules/language", - "7.6.2/rules/bzl-style", - "7.6.2/rules/challenges", - "7.6.2/rules/windows" + "versions/7.7.0/rules/rules-tutorial", + "versions/7.7.0/rules/macro-tutorial", + "versions/7.7.0/rules/legacy-macro-tutorial", + "versions/7.7.0/rules/verbs-tutorial", + "versions/7.7.0/rules/language", + "versions/7.7.0/rules/bzl-style", + "versions/7.7.0/rules/challenges", + "versions/7.7.0/rules/windows" ] }, { "group": "Distributing rules", "pages": [ - "7.6.2/rules/testing", - "7.6.2/rules/performance", - "7.6.2/rules/deploying" + "versions/7.7.0/rules/testing", + "versions/7.7.0/rules/performance", + "versions/7.7.0/rules/deploying" ] }, { "group": "APIs", "pages": [ - "7.6.2/rules/lib/overview", - "7.6.2/rules/lib/globals" + "versions/7.7.0/rules/lib/overview", + "versions/7.7.0/rules/lib/globals" ] } ] @@ -1869,36 +1869,36 @@ { "group": "Contributing", "pages": [ - "7.6.2/contribute", - "7.6.2/contribute/policy", - "7.6.2/contribute/patch-acceptance", - "7.6.2/contribute/maintainers-guide", - "7.6.2/contribute/codebase", - "7.6.2/contribute/search", - "7.6.2/contribute/statemachine-guide", - "7.6.2/contribute/docs", - "7.6.2/contribute/docs-style-guide", - "7.6.2/contribute/design-documents", - "7.6.2/contribute/release-notes" + "versions/7.7.0/contribute", + "versions/7.7.0/contribute/policy", + "versions/7.7.0/contribute/patch-acceptance", + "versions/7.7.0/contribute/maintainers-guide", + "versions/7.7.0/contribute/codebase", + "versions/7.7.0/contribute/search", + "versions/7.7.0/contribute/statemachine-guide", + "versions/7.7.0/contribute/docs", + "versions/7.7.0/contribute/docs-style-guide", + "versions/7.7.0/contribute/design-documents", + "versions/7.7.0/contribute/release-notes" ] }, { "group": "Programs", "pages": [ - "7.6.2/community/update", - "7.6.2/community/sig", - "7.6.2/community/experts", - "7.6.2/community/partners", - "7.6.2/community/users", - "7.6.2/community/recommended-rules", - "7.6.2/community/remote-execution-services" + "versions/7.7.0/community/update", + "versions/7.7.0/community/sig", + "versions/7.7.0/community/experts", + "versions/7.7.0/community/partners", + "versions/7.7.0/community/users", + "versions/7.7.0/community/recommended-rules", + "versions/7.7.0/community/remote-execution-services" ] }, { "group": "Getting help", "pages": [ - "7.6.2/help", - "7.6.2/contribute/policy" + "versions/7.7.0/help", + "versions/7.7.0/contribute/policy" ] } ] @@ -1919,23 +1919,23 @@ { "group": "Why Bazel?", "pages": [ - "6.5.0/about/intro", - "6.5.0/about/why", - "6.5.0/about/vision", - "6.5.0/about/roadmap", - "6.5.0/about/faq" + "versions/6.5.0/about/intro", + "versions/6.5.0/about/why", + "versions/6.5.0/about/vision", + "versions/6.5.0/about/roadmap", + "versions/6.5.0/about/faq" ] }, { "group": "Build system basics", "pages": [ - "6.5.0/basics", - "6.5.0/basics/build-systems", - "6.5.0/basics/task-based-builds", - "6.5.0/basics/artifact-based-builds", - "6.5.0/basics/distributed-builds", - "6.5.0/basics/dependencies", - "6.5.0/basics/hermeticity" + "versions/6.5.0/basics", + "versions/6.5.0/basics/build-systems", + "versions/6.5.0/basics/task-based-builds", + 
"versions/6.5.0/basics/artifact-based-builds", + "versions/6.5.0/basics/distributed-builds", + "versions/6.5.0/basics/dependencies", + "versions/6.5.0/basics/hermeticity" ] } ] @@ -1946,37 +1946,37 @@ { "group": "Install", "pages": [ - "6.5.0/install", - "6.5.0/install/bazelisk", - "6.5.0/install/os-x", - "6.5.0/install/windows", - "6.5.0/install/ubuntu", - "6.5.0/install/suse", - "6.5.0/install/docker-container", - "6.5.0/install/compile-source", - "6.5.0/install/completion", - "6.5.0/install/ide" + "versions/6.5.0/install", + "versions/6.5.0/install/bazelisk", + "versions/6.5.0/install/os-x", + "versions/6.5.0/install/windows", + "versions/6.5.0/install/ubuntu", + "versions/6.5.0/install/suse", + "versions/6.5.0/install/docker-container", + "versions/6.5.0/install/compile-source", + "versions/6.5.0/install/completion", + "versions/6.5.0/install/ide" ] }, { "group": "First build tutorials", "pages": [ - "6.5.0/start/cpp", - "6.5.0/start/java", - "6.5.0/start/android-app", - "6.5.0/start/ios-app" + "versions/6.5.0/start/cpp", + "versions/6.5.0/start/java", + "versions/6.5.0/start/android-app", + "versions/6.5.0/start/ios-app" ] }, { "group": "Concepts", "pages": [ - "6.5.0/concepts/build-ref", - "6.5.0/concepts/labels", - "6.5.0/concepts/build-files", - "6.5.0/concepts/dependencies", - "6.5.0/concepts/visibility", - "6.5.0/concepts/platforms", - "6.5.0/concepts/hermeticity" + "versions/6.5.0/concepts/build-ref", + "versions/6.5.0/concepts/labels", + "versions/6.5.0/concepts/build-files", + "versions/6.5.0/concepts/dependencies", + "versions/6.5.0/concepts/visibility", + "versions/6.5.0/concepts/platforms", + "versions/6.5.0/concepts/hermeticity" ] } ] @@ -1987,72 +1987,72 @@ { "group": "Releases", "pages": [ - "6.5.0/release", - "6.5.0/release/rolling", - "6.5.0/release/backward-compatibility", - "6.5.0/release/rule-compatibility" + "versions/6.5.0/release", + "versions/6.5.0/release/rolling", + "versions/6.5.0/release/backward-compatibility", + "versions/6.5.0/release/rule-compatibility" ] }, { "group": "Basics", "pages": [ - "6.5.0/build/style-guide", - "6.5.0/build/share-variables", - "6.5.0/community/recommended-rules", - "6.5.0/run/build" + "versions/6.5.0/build/style-guide", + "versions/6.5.0/build/share-variables", + "versions/6.5.0/community/recommended-rules", + "versions/6.5.0/run/build" ] }, { "group": "Advanced", "pages": [ - "6.5.0/configure/attributes", - "6.5.0/configure/integrate-cpp", - "6.5.0/configure/coverage", - "6.5.0/configure/best-practices", - "6.5.0/configure/windows", - "6.5.0/advanced/performance/build-performance-metrics", - "6.5.0/advanced/performance/build-performance-breakdown", - "6.5.0/advanced/performance/json-trace-profile", - "6.5.0/advanced/performance/memory", - "6.5.0/advanced/performance/iteration-speed" + "versions/6.5.0/configure/attributes", + "versions/6.5.0/configure/integrate-cpp", + "versions/6.5.0/configure/coverage", + "versions/6.5.0/configure/best-practices", + "versions/6.5.0/configure/windows", + "versions/6.5.0/advanced/performance/build-performance-metrics", + "versions/6.5.0/advanced/performance/build-performance-breakdown", + "versions/6.5.0/advanced/performance/json-trace-profile", + "versions/6.5.0/advanced/performance/memory", + "versions/6.5.0/advanced/performance/iteration-speed" ] }, { "group": "Remote Execution", "pages": [ - "6.5.0/remote/rbe", - "6.5.0/remote/rules", - "6.5.0/remote/ci", - "6.5.0/remote/dynamic", - "6.5.0/remote/caching", - "6.5.0/remote/sandbox", - "6.5.0/remote/workspace", - "6.5.0/remote/cache-remote", - 
"6.5.0/remote/cache-local", - "6.5.0/remote/output-directories", - "6.5.0/remote/persistent", - "6.5.0/remote/multiplex", - "6.5.0/remote/creating", - "6.5.0/remote/bep", - "6.5.0/remote/bep-examples", - "6.5.0/remote/bep-glossary" + "versions/6.5.0/remote/rbe", + "versions/6.5.0/remote/rules", + "versions/6.5.0/remote/ci", + "versions/6.5.0/remote/dynamic", + "versions/6.5.0/remote/caching", + "versions/6.5.0/remote/sandbox", + "versions/6.5.0/remote/workspace", + "versions/6.5.0/remote/cache-remote", + "versions/6.5.0/remote/cache-local", + "versions/6.5.0/remote/output-directories", + "versions/6.5.0/remote/persistent", + "versions/6.5.0/remote/multiplex", + "versions/6.5.0/remote/creating", + "versions/6.5.0/remote/bep", + "versions/6.5.0/remote/bep-examples", + "versions/6.5.0/remote/bep-glossary" ] }, { "group": "Tutorials", "pages": [ - "6.5.0/tutorials/cpp-use-cases", - "6.5.0/tutorials/ccp-toolchain-config", - "6.5.0/tutorials/cpp-dependency", - "6.5.0/tutorials/cpp-labels" + "versions/6.5.0/tutorials/cpp-use-cases", + "versions/6.5.0/tutorials/ccp-toolchain-config", + "versions/6.5.0/tutorials/cpp-dependency", + "versions/6.5.0/tutorials/cpp-labels" ] }, { "group": "Migrate", "pages": [ - "6.5.0/migrate", - "6.5.0/migrate/maven", - "6.5.0/migrate/xcode" + "versions/6.5.0/migrate", + "versions/6.5.0/migrate/maven", + "versions/6.5.0/migrate/xcode" ] } ] @@ -2063,34 +2063,34 @@ { "group": "Build encyclopedia", "pages": [ - "6.5.0/reference/be/overview", - "6.5.0/reference/be/common-definitions", - "6.5.0/reference/be/make-variables", - "6.5.0/reference/be/functions" + "versions/6.5.0/reference/be/overview", + "versions/6.5.0/reference/be/common-definitions", + "versions/6.5.0/reference/be/make-variables", + "versions/6.5.0/reference/be/functions" ] }, { "group": "Command line reference", "pages": [ - "6.5.0/reference/command-line-reference" + "versions/6.5.0/reference/command-line-reference" ] }, { "group": "Query Language", "pages": [ - "6.5.0/query/language" + "versions/6.5.0/query/language" ] }, { "group": "Glossary", "pages": [ - "6.5.0/reference/glossary" + "versions/6.5.0/reference/glossary" ] }, { "group": "Flag cheatsheet", "pages": [ - "6.5.0/reference/flag-cheatsheet" + "versions/6.5.0/reference/flag-cheatsheet" ] } ] @@ -2101,35 +2101,35 @@ { "group": "Concepts", "pages": [ - "6.5.0/extending/concepts" + "versions/6.5.0/extending/concepts" ] }, { "group": "Writing rules", "pages": [ - "6.5.0/rules/rules-tutorial", - "6.5.0/rules/macro-tutorial", - "6.5.0/rules/legacy-macro-tutorial", - "6.5.0/rules/verbs-tutorial", - "6.5.0/rules/language", - "6.5.0/rules/bzl-style", - "6.5.0/rules/challenges", - "6.5.0/rules/windows" + "versions/6.5.0/rules/rules-tutorial", + "versions/6.5.0/rules/macro-tutorial", + "versions/6.5.0/rules/legacy-macro-tutorial", + "versions/6.5.0/rules/verbs-tutorial", + "versions/6.5.0/rules/language", + "versions/6.5.0/rules/bzl-style", + "versions/6.5.0/rules/challenges", + "versions/6.5.0/rules/windows" ] }, { "group": "Distributing rules", "pages": [ - "6.5.0/rules/testing", - "6.5.0/rules/performance", - "6.5.0/rules/deploying" + "versions/6.5.0/rules/testing", + "versions/6.5.0/rules/performance", + "versions/6.5.0/rules/deploying" ] }, { "group": "APIs", "pages": [ - "6.5.0/rules/lib/overview", - "6.5.0/rules/lib/globals" + "versions/6.5.0/rules/lib/overview", + "versions/6.5.0/rules/lib/globals" ] } ] @@ -2140,36 +2140,36 @@ { "group": "Contributing", "pages": [ - "6.5.0/contribute", - "6.5.0/contribute/policy", - 
"6.5.0/contribute/patch-acceptance", - "6.5.0/contribute/maintainers-guide", - "6.5.0/contribute/codebase", - "6.5.0/contribute/search", - "6.5.0/contribute/statemachine-guide", - "6.5.0/contribute/docs", - "6.5.0/contribute/docs-style-guide", - "6.5.0/contribute/design-documents", - "6.5.0/contribute/release-notes" + "versions/6.5.0/contribute", + "versions/6.5.0/contribute/policy", + "versions/6.5.0/contribute/patch-acceptance", + "versions/6.5.0/contribute/maintainers-guide", + "versions/6.5.0/contribute/codebase", + "versions/6.5.0/contribute/search", + "versions/6.5.0/contribute/statemachine-guide", + "versions/6.5.0/contribute/docs", + "versions/6.5.0/contribute/docs-style-guide", + "versions/6.5.0/contribute/design-documents", + "versions/6.5.0/contribute/release-notes" ] }, { "group": "Programs", "pages": [ - "6.5.0/community/update", - "6.5.0/community/sig", - "6.5.0/community/experts", - "6.5.0/community/partners", - "6.5.0/community/users", - "6.5.0/community/recommended-rules", - "6.5.0/community/remote-execution-services" + "versions/6.5.0/community/update", + "versions/6.5.0/community/sig", + "versions/6.5.0/community/experts", + "versions/6.5.0/community/partners", + "versions/6.5.0/community/users", + "versions/6.5.0/community/recommended-rules", + "versions/6.5.0/community/remote-execution-services" ] }, { "group": "Getting help", "pages": [ - "6.5.0/help", - "6.5.0/contribute/policy" + "versions/6.5.0/help", + "versions/6.5.0/contribute/policy" ] } ] @@ -2190,23 +2190,23 @@ { "group": "Why Bazel?", "pages": [ - "5.4.1/about/intro", - "5.4.1/about/why", - "5.4.1/about/vision", - "5.4.1/about/roadmap", - "5.4.1/about/faq" + "versions/5.4.1/about/intro", + "versions/5.4.1/about/why", + "versions/5.4.1/about/vision", + "versions/5.4.1/about/roadmap", + "versions/5.4.1/about/faq" ] }, { "group": "Build system basics", "pages": [ - "5.4.1/basics", - "5.4.1/basics/build-systems", - "5.4.1/basics/task-based-builds", - "5.4.1/basics/artifact-based-builds", - "5.4.1/basics/distributed-builds", - "5.4.1/basics/dependencies", - "5.4.1/basics/hermeticity" + "versions/5.4.1/basics", + "versions/5.4.1/basics/build-systems", + "versions/5.4.1/basics/task-based-builds", + "versions/5.4.1/basics/artifact-based-builds", + "versions/5.4.1/basics/distributed-builds", + "versions/5.4.1/basics/dependencies", + "versions/5.4.1/basics/hermeticity" ] } ] @@ -2217,37 +2217,37 @@ { "group": "Install", "pages": [ - "5.4.1/install", - "5.4.1/install/bazelisk", - "5.4.1/install/os-x", - "5.4.1/install/windows", - "5.4.1/install/ubuntu", - "5.4.1/install/suse", - "5.4.1/install/docker-container", - "5.4.1/install/compile-source", - "5.4.1/install/completion", - "5.4.1/install/ide" + "versions/5.4.1/install", + "versions/5.4.1/install/bazelisk", + "versions/5.4.1/install/os-x", + "versions/5.4.1/install/windows", + "versions/5.4.1/install/ubuntu", + "versions/5.4.1/install/suse", + "versions/5.4.1/install/docker-container", + "versions/5.4.1/install/compile-source", + "versions/5.4.1/install/completion", + "versions/5.4.1/install/ide" ] }, { "group": "First build tutorials", "pages": [ - "5.4.1/start/cpp", - "5.4.1/start/java", - "5.4.1/start/android-app", - "5.4.1/start/ios-app" + "versions/5.4.1/start/cpp", + "versions/5.4.1/start/java", + "versions/5.4.1/start/android-app", + "versions/5.4.1/start/ios-app" ] }, { "group": "Concepts", "pages": [ - "5.4.1/concepts/build-ref", - "5.4.1/concepts/labels", - "5.4.1/concepts/build-files", - "5.4.1/concepts/dependencies", - "5.4.1/concepts/visibility", - 
"5.4.1/concepts/platforms", - "5.4.1/concepts/hermeticity" + "versions/5.4.1/concepts/build-ref", + "versions/5.4.1/concepts/labels", + "versions/5.4.1/concepts/build-files", + "versions/5.4.1/concepts/dependencies", + "versions/5.4.1/concepts/visibility", + "versions/5.4.1/concepts/platforms", + "versions/5.4.1/concepts/hermeticity" ] } ] @@ -2258,72 +2258,72 @@ { "group": "Releases", "pages": [ - "5.4.1/release", - "5.4.1/release/rolling", - "5.4.1/release/backward-compatibility", - "5.4.1/release/rule-compatibility" + "versions/5.4.1/release", + "versions/5.4.1/release/rolling", + "versions/5.4.1/release/backward-compatibility", + "versions/5.4.1/release/rule-compatibility" ] }, { "group": "Basics", "pages": [ - "5.4.1/build/style-guide", - "5.4.1/build/share-variables", - "5.4.1/community/recommended-rules", - "5.4.1/run/build" + "versions/5.4.1/build/style-guide", + "versions/5.4.1/build/share-variables", + "versions/5.4.1/community/recommended-rules", + "versions/5.4.1/run/build" ] }, { "group": "Advanced", "pages": [ - "5.4.1/configure/attributes", - "5.4.1/configure/integrate-cpp", - "5.4.1/configure/coverage", - "5.4.1/configure/best-practices", - "5.4.1/configure/windows", - "5.4.1/advanced/performance/build-performance-metrics", - "5.4.1/advanced/performance/build-performance-breakdown", - "5.4.1/advanced/performance/json-trace-profile", - "5.4.1/advanced/performance/memory", - "5.4.1/advanced/performance/iteration-speed" + "versions/5.4.1/configure/attributes", + "versions/5.4.1/configure/integrate-cpp", + "versions/5.4.1/configure/coverage", + "versions/5.4.1/configure/best-practices", + "versions/5.4.1/configure/windows", + "versions/5.4.1/advanced/performance/build-performance-metrics", + "versions/5.4.1/advanced/performance/build-performance-breakdown", + "versions/5.4.1/advanced/performance/json-trace-profile", + "versions/5.4.1/advanced/performance/memory", + "versions/5.4.1/advanced/performance/iteration-speed" ] }, { "group": "Remote Execution", "pages": [ - "5.4.1/remote/rbe", - "5.4.1/remote/rules", - "5.4.1/remote/ci", - "5.4.1/remote/dynamic", - "5.4.1/remote/caching", - "5.4.1/remote/sandbox", - "5.4.1/remote/workspace", - "5.4.1/remote/cache-remote", - "5.4.1/remote/cache-local", - "5.4.1/remote/output-directories", - "5.4.1/remote/persistent", - "5.4.1/remote/multiplex", - "5.4.1/remote/creating", - "5.4.1/remote/bep", - "5.4.1/remote/bep-examples", - "5.4.1/remote/bep-glossary" + "versions/5.4.1/remote/rbe", + "versions/5.4.1/remote/rules", + "versions/5.4.1/remote/ci", + "versions/5.4.1/remote/dynamic", + "versions/5.4.1/remote/caching", + "versions/5.4.1/remote/sandbox", + "versions/5.4.1/remote/workspace", + "versions/5.4.1/remote/cache-remote", + "versions/5.4.1/remote/cache-local", + "versions/5.4.1/remote/output-directories", + "versions/5.4.1/remote/persistent", + "versions/5.4.1/remote/multiplex", + "versions/5.4.1/remote/creating", + "versions/5.4.1/remote/bep", + "versions/5.4.1/remote/bep-examples", + "versions/5.4.1/remote/bep-glossary" ] }, { "group": "Tutorials", "pages": [ - "5.4.1/tutorials/cpp-use-cases", - "5.4.1/tutorials/ccp-toolchain-config", - "5.4.1/tutorials/cpp-dependency", - "5.4.1/tutorials/cpp-labels" + "versions/5.4.1/tutorials/cpp-use-cases", + "versions/5.4.1/tutorials/ccp-toolchain-config", + "versions/5.4.1/tutorials/cpp-dependency", + "versions/5.4.1/tutorials/cpp-labels" ] }, { "group": "Migrate", "pages": [ - "5.4.1/migrate", - "5.4.1/migrate/maven", - "5.4.1/migrate/xcode" + "versions/5.4.1/migrate", + 
"versions/5.4.1/migrate/maven", + "versions/5.4.1/migrate/xcode" ] } ] @@ -2334,34 +2334,34 @@ { "group": "Build encyclopedia", "pages": [ - "5.4.1/reference/be/overview", - "5.4.1/reference/be/common-definitions", - "5.4.1/reference/be/make-variables", - "5.4.1/reference/be/functions" + "versions/5.4.1/reference/be/overview", + "versions/5.4.1/reference/be/common-definitions", + "versions/5.4.1/reference/be/make-variables", + "versions/5.4.1/reference/be/functions" ] }, { "group": "Command line reference", "pages": [ - "5.4.1/reference/command-line-reference" + "versions/5.4.1/reference/command-line-reference" ] }, { "group": "Query Language", "pages": [ - "5.4.1/query/language" + "versions/5.4.1/query/language" ] }, { "group": "Glossary", "pages": [ - "5.4.1/reference/glossary" + "versions/5.4.1/reference/glossary" ] }, { "group": "Flag cheatsheet", "pages": [ - "5.4.1/reference/flag-cheatsheet" + "versions/5.4.1/reference/flag-cheatsheet" ] } ] @@ -2372,35 +2372,35 @@ { "group": "Concepts", "pages": [ - "5.4.1/extending/concepts" + "versions/5.4.1/extending/concepts" ] }, { "group": "Writing rules", "pages": [ - "5.4.1/rules/rules-tutorial", - "5.4.1/rules/macro-tutorial", - "5.4.1/rules/legacy-macro-tutorial", - "5.4.1/rules/verbs-tutorial", - "5.4.1/rules/language", - "5.4.1/rules/bzl-style", - "5.4.1/rules/challenges", - "5.4.1/rules/windows" + "versions/5.4.1/rules/rules-tutorial", + "versions/5.4.1/rules/macro-tutorial", + "versions/5.4.1/rules/legacy-macro-tutorial", + "versions/5.4.1/rules/verbs-tutorial", + "versions/5.4.1/rules/language", + "versions/5.4.1/rules/bzl-style", + "versions/5.4.1/rules/challenges", + "versions/5.4.1/rules/windows" ] }, { "group": "Distributing rules", "pages": [ - "5.4.1/rules/testing", - "5.4.1/rules/performance", - "5.4.1/rules/deploying" + "versions/5.4.1/rules/testing", + "versions/5.4.1/rules/performance", + "versions/5.4.1/rules/deploying" ] }, { "group": "APIs", "pages": [ - "5.4.1/rules/lib/overview", - "5.4.1/rules/lib/globals" + "versions/5.4.1/rules/lib/overview", + "versions/5.4.1/rules/lib/globals" ] } ] @@ -2411,36 +2411,36 @@ { "group": "Contributing", "pages": [ - "5.4.1/contribute", - "5.4.1/contribute/policy", - "5.4.1/contribute/patch-acceptance", - "5.4.1/contribute/maintainers-guide", - "5.4.1/contribute/codebase", - "5.4.1/contribute/search", - "5.4.1/contribute/statemachine-guide", - "5.4.1/contribute/docs", - "5.4.1/contribute/docs-style-guide", - "5.4.1/contribute/design-documents", - "5.4.1/contribute/release-notes" + "versions/5.4.1/contribute", + "versions/5.4.1/contribute/policy", + "versions/5.4.1/contribute/patch-acceptance", + "versions/5.4.1/contribute/maintainers-guide", + "versions/5.4.1/contribute/codebase", + "versions/5.4.1/contribute/search", + "versions/5.4.1/contribute/statemachine-guide", + "versions/5.4.1/contribute/docs", + "versions/5.4.1/contribute/docs-style-guide", + "versions/5.4.1/contribute/design-documents", + "versions/5.4.1/contribute/release-notes" ] }, { "group": "Programs", "pages": [ - "5.4.1/community/update", - "5.4.1/community/sig", - "5.4.1/community/experts", - "5.4.1/community/partners", - "5.4.1/community/users", - "5.4.1/community/recommended-rules", - "5.4.1/community/remote-execution-services" + "versions/5.4.1/community/update", + "versions/5.4.1/community/sig", + "versions/5.4.1/community/experts", + "versions/5.4.1/community/partners", + "versions/5.4.1/community/users", + "versions/5.4.1/community/recommended-rules", + "versions/5.4.1/community/remote-execution-services" ] }, { 
"group": "Getting help", "pages": [ - "5.4.1/help", - "5.4.1/contribute/policy" + "versions/5.4.1/help", + "versions/5.4.1/contribute/policy" ] } ] diff --git a/docs.json.update.sh b/docs.json.update.sh index 04caf0d..54c7bc3 100755 --- a/docs.json.update.sh +++ b/docs.json.update.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash # Create docs.json with versioned navigation + set -euo pipefail # Read the versions and tabs @@ -35,10 +36,10 @@ for version in $VERSIONS; do VERSIONS_JSON="$VERSIONS_JSON," fi - # For other versions, add version prefix to paths and strip patch version + # For other versions, add "version/" prefix to paths TABS_JSON=$(jq -c --arg version "$version" ' - map(.groups = (.groups | map(.pages = (.pages | map($version + "/" + .))))) - ' "$TABS_FILE") + map(.groups = (.groups | map(.pages = (.pages | map("versions/" + $version + "/" + .))))) +' "$TABS_FILE") DISPLAY_VERSION=$(echo "$version" | sed 's/\.[0-9]*$//') VERSIONS_JSON="$VERSIONS_JSON{\"version\":\"$DISPLAY_VERSION\",\"languages\":[{\"language\":\"en\",\"tabs\":$TABS_JSON}]}" diff --git a/rules/lib/providers/DebugPackageInfo.mdx b/rules/lib/providers/DebugPackageInfo.mdx deleted file mode 100644 index 7955cb9..0000000 --- a/rules/lib/providers/DebugPackageInfo.mdx +++ /dev/null @@ -1,84 +0,0 @@ ---- -title: 'DebugPackageInfo' ---- - - - -A provider for the binary file and its associated .dwp files, if fission is enabled.If Fission ({@url https://gcc.gnu.org/wiki/DebugFission}) is not enabled, the dwp file will be null. - -## Members - -- [DebugPackageInfo](#DebugPackageInfo) -- [dwp\_file](#dwp_file) -- [stripped\_file](#stripped_file) -- [target\_label](#target_label) -- [unstripped\_file](#unstripped_file) - -## DebugPackageInfo - -``` -DebugPackageInfo DebugPackageInfo(*, target_label, stripped_file=None, unstripped_file, dwp_file=None) -``` - - The `DebugPackageInfo` constructor. - - -### Parameters - -ParameterDescription`target_label`[Label](../builtins/Label.html); - required - - The label for the \*\_binary target - `stripped_file`[File](../builtins/File.html); or `None`; - default is `None` - - The stripped file (the explicit ".stripped" target) - `unstripped_file`[File](../builtins/File.html); - required - - The unstripped file (the default executable target). - `dwp_file`[File](../builtins/File.html); or `None`; - default is `None` - - The .dwp file (for fission builds) or null if --fission=no. - - -## dwp\_file - -``` -File DebugPackageInfo.dwp_file -``` - - Returns the .dwp file (for fission builds) or null if --fission=no. - May return `None`. - - - -## stripped\_file - -``` -File DebugPackageInfo.stripped_file -``` - - Returns the stripped file (the explicit ".stripped" target). - May return `None`. - - - -## target\_label - -``` -Label DebugPackageInfo.target_label -``` - - Returns the label for the \*\_binary target - - - -## unstripped\_file - -``` -File DebugPackageInfo.unstripped_file -``` - - Returns the unstripped file (the default executable target) diff --git a/rules/lib/toplevel/java_common.mdx b/rules/lib/toplevel/java_common.mdx deleted file mode 100644 index f3dae10..0000000 --- a/rules/lib/toplevel/java_common.mdx +++ /dev/null @@ -1,274 +0,0 @@ ---- -title: 'java\_common' ---- - - - -Utilities for Java compilation support in Starlark. 
- -## Members - -- [BootClassPathInfo](#BootClassPathInfo) -- [compile](#compile) -- [JavaRuntimeInfo](#JavaRuntimeInfo) -- [JavaToolchainInfo](#JavaToolchainInfo) -- [merge](#merge) -- [pack\_sources](#pack_sources) -- [run\_ijar](#run_ijar) -- [stamp\_jar](#stamp_jar) - -## BootClassPathInfo - -``` -Provider java_common.BootClassPathInfo -``` - - The provider used to supply bootclasspath information - - - -## compile - -``` -struct java_common.compile(ctx, *, source_jars=[], source_files=[], output, output_source_jar=None, javac_opts=[], deps=[], runtime_deps=[], exports=[], plugins=[], exported_plugins=[], native_libraries=[], annotation_processor_additional_inputs=[], annotation_processor_additional_outputs=[], strict_deps='ERROR', java_toolchain, bootclasspath=None, sourcepath=[], resources=[], resource_jars=[], classpath_resources=[], neverlink=False, enable_annotation_processing=True, enable_compile_jar_action=True, add_exports=[], add_opens=[]) -``` - - Compiles Java source files/jars from the implementation of a Starlark rule and returns a provider that represents the results of the compilation and can be added to the set of providers emitted by this rule. - - -### Parameters - -ParameterDescription`ctx`[ctx](../builtins/ctx.html); - required - - The rule context. - `source_jars`[sequence](../core/list.html) of [File](../builtins/File.html) s; - default is `[]` - - A list of the jars to be compiled. At least one of source\_jars or source\_files should be specified. - `source_files`[sequence](../core/list.html) of [File](../builtins/File.html) s; - default is `[]` - - A list of the Java source files to be compiled. At least one of source\_jars or source\_files should be specified. - `output`[File](../builtins/File.html); - required - -`output_source_jar`[File](../builtins/File.html); or `None`; - default is `None` - - The output source jar. Defaults to \`{output\_jar}-src.jar\` if unset. - `javac_opts`[sequence](../core/list.html) of [string](../core/string.html) s; - default is `[]` - - A list of the desired javac options. - `deps`[sequence](../core/list.html) of [struct](../builtins/struct.html) s; - default is `[]` - - A list of dependencies. - `runtime_deps`[sequence](../core/list.html) of [struct](../builtins/struct.html) s; - default is `[]` - - A list of runtime dependencies. - `exports`[sequence](../core/list.html) of [struct](../builtins/struct.html) s; - default is `[]` - - A list of exports. - `plugins`[sequence](../core/list.html) of [struct](../builtins/struct.html) s; or [sequence](../core/list.html) of [struct](../builtins/struct.html) s; - default is `[]` - - A list of plugins. - `exported_plugins`[sequence](../core/list.html) of [struct](../builtins/struct.html) s; or [sequence](../core/list.html) of [struct](../builtins/struct.html) s; - default is `[]` - - A list of exported plugins. - `native_libraries`[sequence](../core/list.html) of [CcInfo](../providers/CcInfo.html) s; - default is `[]` - - CC native library dependencies that are needed for this library. - `annotation_processor_additional_inputs`[sequence](../core/list.html) of [File](../builtins/File.html) s; - default is `[]` - - A list of inputs that the Java compilation action will take in addition to the Java sources for annotation processing. - `annotation_processor_additional_outputs`[sequence](../core/list.html) of [File](../builtins/File.html) s; - default is `[]` - - A list of outputs that the Java compilation action will output in addition to the class jar from annotation processing. 
- `strict_deps`[string](../core/string.html); - default is `'ERROR'` - - A string that specifies how to handle strict deps. Possible values: 'OFF', 'ERROR', 'WARN' and 'DEFAULT'. For more details see [`--strict_java_deps flag`](/docs/user-manual#flag--strict_java_deps) `. By default 'ERROR'. - ``java_toolchain` - Info; - required - - A JavaToolchainInfo to be used for this compilation. Mandatory. - `bootclasspath` - default is `None` - - A BootClassPathInfo to be used for this compilation. If present, overrides the bootclasspath associated with the provided java\_toolchain. - `sourcepath`[sequence](../core/list.html) of [File](../builtins/File.html) s; - default is `[]` - -`resources`[sequence](../core/list.html) of [File](../builtins/File.html) s; - default is `[]` - -`resource_jars`[sequence](../core/list.html) of [File](../builtins/File.html) s; - default is `[]` - -`classpath_resources`[sequence](../core/list.html) of [File](../builtins/File.html) s; - default is `[]` - -`neverlink`[bool](../core/bool.html); - default is `False` - -`enable_annotation_processing`[bool](../core/bool.html); - default is `True` - - Disables annotation processing in this compilation, causing any annotation processors provided in plugins or in exported\_plugins of deps to be ignored. - `enable_compile_jar_action`[bool](../core/bool.html); - default is `True` - - Enables header compilation or ijar creation. If set to False, it forces use of the full class jar in the compilation classpaths of any dependants. Doing so is intended for use by non-library targets such as binaries that do not have dependants. - `add_exports`[sequence](../core/list.html) of [string](../core/string.html) s; - default is `[]` - - Allow this library to access the given /. - `add_opens`[sequence](../core/list.html) of [string](../core/string.html) s; - default is `[]` - - Allow this library to reflectively access the given /. - - -## JavaRuntimeInfo - -``` -Provider java_common.JavaRuntimeInfo -``` - - The key used to retrieve the provider that contains information about the Java runtime being used. - - - -## JavaToolchainInfo - -``` -Provider java_common.JavaToolchainInfo -``` - - The key used to retrieve the provider that contains information about the Java toolchain being used. - - - -## merge - -``` -struct java_common.merge(providers) -``` - - Merges the given providers into a single JavaInfo. - - -### Parameters - -ParameterDescription`providers`[sequence](../core/list.html) of [struct](../builtins/struct.html) s; - required - - The list of providers to merge. - - -## pack\_sources - -``` -File java_common.pack_sources(actions, *, output_source_jar=None, sources=[], source_jars=[], java_toolchain) -``` - - Packs sources and source jars into a single source jar file. The return value is typically passed to - -`JavaInfo#source_jar` - -.At least one of parameters output\_jar or output\_source\_jar is required. - - -### Parameters - -ParameterDescription`actions`[actions](../builtins/actions.html); - required - - ctx.actions - `output_source_jar`[File](../builtins/File.html); or `None`; - default is `None` - - The output source jar. - `sources`[sequence](../core/list.html) of [File](../builtins/File.html) s; - default is `[]` - - A list of Java source files to be packed into the source jar. - `source_jars`[sequence](../core/list.html) of [File](../builtins/File.html) s; - default is `[]` - - A list of source jars to be packed into the source jar. 
- `java_toolchain` - Info; - required - - A JavaToolchainInfo to used to find the ijar tool. - - -## run\_ijar - -``` -File java_common.run_ijar(actions, *, jar, target_label=None, java_toolchain) -``` - - Runs ijar on a jar, stripping it of its method bodies. This helps reduce rebuilding of dependent jars during any recompiles consisting only of simple changes to method implementations. The return value is typically passed to `JavaInfo#compile_jar`. - - -### Parameters - -ParameterDescription`actions`[actions](../builtins/actions.html); - required - - ctx.actions - `jar`[File](../builtins/File.html); - required - - The jar to run ijar on. - `target_label`[Label](../builtins/Label.html); or `None`; - default is `None` - - A target label to stamp the jar with. Used for `add_dep` support. Typically, you would pass `ctx.label` to stamp the jar with the current rule's label. - `java_toolchain` - Info; - required - - A JavaToolchainInfo to used to find the ijar tool. - - -## stamp\_jar - -``` -File java_common.stamp_jar(actions, *, jar, target_label, java_toolchain) -``` - - Stamps a jar with a target label for `add_dep` support. The return value is typically passed to `JavaInfo#compile_jar`. Prefer to use `run_ijar` when possible. - - -### Parameters - -ParameterDescription`actions`[actions](../builtins/actions.html); - required - - ctx.actions - `jar`[File](../builtins/File.html); - required - - The jar to run stamp\_jar on. - `target_label`[Label](../builtins/Label.html); - required - - A target label to stamp the jar with. Used for `add_dep` support. Typically, you would pass `ctx.label` to stamp the jar with the current rule's label. - `java_toolchain` - Info; - required - - A JavaToolchainInfo to used to find the stamp\_jar tool. diff --git a/update_upstream_locally.sh b/update_upstream_locally.sh index e758780..f421819 100755 --- a/update_upstream_locally.sh +++ b/update_upstream_locally.sh @@ -59,11 +59,21 @@ fi run ./cleanup-mdx.sh +# Ensure the submodule is on the latest master for the HEAD build +( + cd upstream + echo "+ Updating submodule to latest master..." + git fetch origin + git checkout master + git reset --hard origin/master +) + +# Now that the submodule is on master, generate the reference docs for HEAD run ./run-in-go-docker.sh "$REFERENCE_ZIP" - +# Generate the HEAD docs in the root directory run ./copy-upstream-docs.sh -run ./docs.json.update.sh rm -rf "reference-docs-temp" + echo "Workflow reproduction completed successfully." 
\ No newline at end of file
diff --git a/upstream b/upstream
index 60b1e19..03cd9f3 160000
--- a/upstream
+++ b/upstream
@@ -1 +1 @@
-Subproject commit 60b1e19baa4df5148bdc0a5ec8edb4cb6671fcc1
+Subproject commit 03cd9f39fef45bc5d172bd101a1c9aab305eacfd
diff --git a/versions/5.4.1/reference/be/be-nav.mdx b/versions/5.4.1/reference/be/be-nav.mdx
new file mode 100644
index 0000000..fbf211d
--- /dev/null
+++ b/versions/5.4.1/reference/be/be-nav.mdx
@@ -0,0 +1,29 @@
+**Build Encyclopedia**
+- [Overview](/reference/be/overview.html)
+- [Concepts](#be-menu)
+  - [Common Definitions](/reference/be/common-definitions.html)
+  - ["Make" variables](/reference/be/make-variables.html)
+- [Rules](#be-rules)
+  - [Functions](/reference/be/functions.html)
+  - [C / C++](/reference/be/c-cpp.html)
+  - [Java](/reference/be/java.html)
+  - [Objective-C](/reference/be/objective-c.html)
+  - [Protocol Buffer](/reference/be/protocol-buffer.html)
+  - [Python](/reference/be/python.html)
+  - [Shell](/reference/be/shell.html)
+  - [Extra Actions](/reference/be/extra-actions.html)
+  - [General](/reference/be/general.html)
+  - [Platforms and Toolchains](/reference/be/platforms-and-toolchains.html)
+  - [AppEngine](https://github.com/bazelbuild/rules_appengine)
+  - [Apple (Swift, iOS, macOS, tvOS, visionOS, watchOS)](https://github.com/bazelbuild/rules_apple)
+  - [C#](https://github.com/bazelbuild/rules_dotnet)
+  - [D](https://github.com/bazelbuild/rules_d)
+  - [Docker](https://github.com/bazelbuild/rules_docker)
+  - [Groovy](https://github.com/bazelbuild/rules_groovy)
+  - [Go](https://github.com/bazelbuild/rules_go)
+  - [JavaScript (Closure)](https://github.com/bazelbuild/rules_closure)
+  - [Jsonnet](https://github.com/bazelbuild/rules_jsonnet)
+  - [Packaging](/reference/be/pkg.html)
+  - [Rust](https://github.com/bazelbuild/rules_rust)
+  - [Sass](https://github.com/bazelbuild/rules_sass)
+  - [Scala](https://github.com/bazelbuild/rules_scala)
diff --git a/versions/5.4.1/reference/be/c-cpp.mdx b/versions/5.4.1/reference/be/c-cpp.mdx
new file mode 100644
index 0000000..01cacc9
--- /dev/null
+++ b/versions/5.4.1/reference/be/c-cpp.mdx
@@ -0,0 +1,2460 @@
+---
+title: 'C / C++ Rules'
+---
+
+
+
+## Rules
+
+- [cc\_binary](#cc_binary)
+- [cc\_import](#cc_import)
+- [cc\_library](#cc_library)
+- [cc\_shared\_library](#cc_shared_library)
+- [cc\_static\_library](#cc_static_library)
+- [cc\_test](#cc_test)
+- [cc\_toolchain](#cc_toolchain)
+- [fdo\_prefetch\_hints](#fdo_prefetch_hints)
+- [fdo\_profile](#fdo_profile)
+- [memprof\_profile](#memprof_profile)
+- [propeller\_optimize](#propeller_optimize)
+
+## cc\_binary
+
+[View rule source](https://github.com/bazelbuild/bazel/blob/master/src/main/starlark/builtins_bzl/common/cc/cc_binary.bzl)
+
+```
+cc_binary(name, deps, srcs, data, additional_linker_inputs, args, aspect_hints, compatible_with, conlyopts, copts, cxxopts, defines, deprecation, distribs, dynamic_deps, env, exec_compatible_with, exec_group_compatible_with, exec_properties, features, hdrs_check, includes, licenses, link_extra_lib, linkopts, linkshared, linkstatic, local_defines, malloc, module_interfaces, nocopts, output_licenses, package_metadata, reexport_deps, restricted_to, stamp, tags, target_compatible_with, testonly, toolchains, visibility, win_def_file)
+```
+
+It produces an executable binary.
+
+The `name` of the target should be the same as the name of the
+source file that is the main entry point of the application (minus the extension).
+For example, if your entry point is in `main.cc`, then your name should +be `main`. + +#### Implicit output targets + +- `name.stripped` (only built if explicitly requested): A stripped + version of the binary. `strip -g` is run on the binary to remove debug + symbols. Additional strip options can be provided on the command line using + `--stripopt=-foo`. +- `name.dwp` (only built if explicitly requested): If + [Fission](https://gcc.gnu.org/wiki/DebugFission) is enabled: a debug + information package file suitable for debugging remotely deployed binaries. Else: an + empty file. + +### Arguments + +Attributes`name` + +[Name](/concepts/labels#target-names); required + +A unique name for this target. + +`deps` + +List of [labels](/concepts/labels); default is `[]` + + The list of other libraries to be linked in to the binary target. + +These can be `cc_library` or `objc_library` +targets. + +It is also allowed to +put linker scripts (.lds) into deps, and reference them in +[linkopts](#cc_binary.linkopts). + `srcs` + +List of [labels](/concepts/labels); default is `[]` + + The list of C and C++ files that are processed to create the library target. +These are C/C++ source and header files, either non-generated (normal source +code) or generated. + +All `.cc`, `.c`, and `.cpp` files will +be compiled. These might be generated files: if a named file is in +the `outs` of some other rule, this `cc_library` +will automatically depend on that other rule. + +Pure assembler files (.s, .asm) are not preprocessed and are typically built using +the assembler. Preprocessed assembly files (.S) are preprocessed and are typically built +using the C/C++ compiler. + +A `.h` file will not be compiled, but will be available for +inclusion by sources in this rule. Both `.cc` and +`.h` files can directly include headers listed in +these `srcs` or in the `hdrs` of this rule or any +rule listed in the `deps` argument. + +All `#include` d files must be mentioned in the +`hdrs` attribute of this or referenced `cc_library` +rules, or they should be listed in `srcs` if they are private +to this library. See ["Header inclusion checking"](#hdrs) for +a more detailed description. + +`.so`, `.lo`, and `.a` files are +pre-compiled files. Your library might have these as +`srcs` if it uses third-party code for which we don't +have source code. + +If the `srcs` attribute includes the label of another rule, +`cc_library` will use the output files of that rule as source files to +compile. This is useful for one-off generation of source code (for more than occasional +use, it's better to implement a Starlark rule class and use the `cc_common` +API) + +Permitted `srcs` file types: + +- C and C++ source files: `.c`, `.cc`, `.cpp`, + `.cxx`, `.c++`, `.C` +- C and C++ header files: `.h`, `.hh`, `.hpp`, + `.hxx`, `.inc`, `.inl`, `.H` +- Assembler with C preprocessor: `.S` +- Archive: `.a`, `.pic.a` +- "Always link" library: `.lo`, `.pic.lo` +- Shared library, versioned or unversioned: `.so`, + `.so.version` +- Object file: `.o`, `.pic.o` + +... and any rules that produce those files (e.g. `cc_embed_data`). +Different extensions denote different programming languages in +accordance with gcc convention. + +`data` + +List of [labels](/concepts/labels); default is `[]` + + The list of files needed by this library at runtime. + +See general comments about `data` +at [Typical attributes defined by\ +most build rules](/reference/be/common-definitions#typical-attributes). 
+ +If a `data` is the name of a generated file, then this +`cc_library` rule automatically depends on the generating +rule. + +If a `data` is a rule name, then this +`cc_library` rule automatically depends on that rule, +and that rule's `outs` are automatically added to +this `cc_library`'s data files. + +Your C++ code can access these data files like so: + +```lang-starlark + + const std::string path = devtools_build::GetDataDependencyFilepath( + "my/test/data/file"); + +``` + +`additional_linker_inputs` + +List of [labels](/concepts/labels); default is `[]` + + Pass these files to the C++ linker command. + +For example, compiled Windows .res files can be provided here to be embedded in +the binary target. + +`conlyopts` + +List of strings; default is `[]` + + Add these options to the C compilation command. +Subject to ["Make variable"](/reference/be/make-variables) substitution and +[Bourne shell tokenization](/reference/be/common-definitions#sh-tokenization). + `copts` + +List of strings; default is `[]` + + Add these options to the C/C++ compilation command. +Subject to ["Make variable"](/reference/be/make-variables) substitution and +[Bourne shell tokenization](/reference/be/common-definitions#sh-tokenization). + +Each string in this attribute is added in the given order to `COPTS` before +compiling the binary target. The flags take effect only for compiling this target, not +its dependencies, so be careful about header files included elsewhere. +All paths should be relative to the workspace, not to the current package. +This attribute should not be needed outside of `third_party`. + +If the package declares the [feature](/reference/be/functions.html#package.features) `no_copts_tokenization`, Bourne shell tokenization applies only to strings +that consist of a single "Make" variable. + +`cxxopts` + +List of strings; default is `[]` + + Add these options to the C++ compilation command. +Subject to ["Make variable"](/reference/be/make-variables) substitution and +[Bourne shell tokenization](/reference/be/common-definitions#sh-tokenization). + `defines` + +List of strings; default is `[]` + + List of defines to add to the compile line. +Subject to ["Make" variable](/reference/be/make-variables) substitution and +[Bourne shell tokenization](/reference/be/common-definitions#sh-tokenization). +Each string, which must consist of a single Bourne shell token, +is prepended with `-D` and added to the compile command line to this target, +as well as to every rule that depends on it. Be very careful, since this may have +far-reaching effects. When in doubt, add define values to +[`local_defines`](#cc_binary.local_defines) instead. + `distribs` + +List of strings; default is `[]` + +`dynamic_deps` + +List of [labels](/concepts/labels); default is `[]` + + These are other `cc_shared_library` dependencies the current target depends on. + +The `cc_shared_library` implementation will use the list of +`dynamic_deps` (transitively, i.e. also the `dynamic_deps` of the +current target's `dynamic_deps`) to decide which `cc_libraries` in +the transitive `deps` should not be linked in because they are already provided +by a different `cc_shared_library`. + + +`hdrs_check` + +String; default is `""` + + Deprecated, no-op. + `includes` + +List of strings; default is `[]` + + List of include dirs to be added to the compile line. +Subject to ["Make variable"](/reference/be/make-variables) substitution. 
+Each string is prepended with the package path and passed to the C++ toolchain for +expansion via the "include\_paths" CROSSTOOL feature. A toolchain running on a POSIX system +with typical feature definitions will produce +`-isystem path_to_package/include_entry`. +This should only be used for third-party libraries that +do not conform to the Google style of writing #include statements. +Unlike [COPTS](#cc_binary.copts), these flags are added for this rule +and every rule that depends on it. (Note: not the rules it depends upon!) Be +very careful, since this may have far-reaching effects. When in doubt, add +"-I" flags to [COPTS](#cc_binary.copts) instead. + +The added `include` paths will include generated files as well as +files in the source tree. + +`link_extra_lib` + +[Label](/concepts/labels); default is `"@bazel_tools//tools/cpp:link_extra_lib"` + + Control linking of extra libraries. + +By default, C++ binaries are linked against `//tools/cpp:link_extra_lib`, +which by default depends on the label flag `//tools/cpp:link_extra_libs`. +Without setting the flag, this library is empty by default. Setting the label flag +allows linking optional dependencies, such as overrides for weak symbols, interceptors +for shared library functions, or special runtime libraries (for malloc replacements, +prefer `malloc` or `--custom_malloc`). Setting this attribute to +`None` disables this behaviour. + +`linkopts` + +List of strings; default is `[]` + + Add these flags to the C++ linker command. +Subject to ["Make" variable](make-variables.html) substitution, +[Bourne shell tokenization](common-definitions.html#sh-tokenization) and +[label expansion](common-definitions.html#label-expansion). +Each string in this attribute is added to `LINKOPTS` before +linking the binary target. + +Each element of this list that does not start with `$` or `-` is +assumed to be the label of a target in `deps`. The +list of files generated by that target is appended to the linker +options. An error is reported if the label is invalid, or is +not declared in `deps`. + +`linkshared` + +Boolean; default is `False` + + Create a shared library. +To enable this attribute, include `linkshared=True` in your rule. By default +this option is off. + +The presence of this flag means that linking occurs with the `-shared` flag +to `gcc`, and the resulting shared library is suitable for loading into for +example a Java program. However, for build purposes it will never be linked into the +dependent binary, as it is assumed that shared libraries built with a +[cc\_binary](#cc_binary) rule are only loaded manually by other programs, so +it should not be considered a substitute for the [cc\_library](#cc_library) +rule. For sake of scalability we recommend avoiding this approach altogether and +simply letting `java_library` depend on `cc_library` rules +instead. + +If you specify both `linkopts=['-static']` and `linkshared=True`, +you get a single completely self-contained unit. If you specify both +`linkstatic=True` and `linkshared=True`, you get a single, mostly +self-contained unit. + +`linkstatic` + +Boolean; default is `True` + + For [`cc_binary`](/reference/be/c-cpp.html#cc_binary) and +[`cc_test`](/reference/be/c-cpp.html#cc_test): link the binary in static +mode. For `cc_library.link_static`: see below. + +By default this option is on for `cc_binary` and off for the rest. + +If enabled and this is a binary or test, this option tells the build tool to link in +`.a`'s instead of `.so`'s for user libraries whenever possible. 
+System libraries such as libc (but _not_ the C/C++ runtime libraries,
+see below) are still linked dynamically, as are libraries for which
+there is no static library. So the resulting executable will still be dynamically
+linked, hence only _mostly_ static.
+
+There are really three different ways to link an executable:
+
+- STATIC with the fully\_static\_link feature, in which everything is linked statically;
+  e.g. " `gcc -static foo.o libbar.a libbaz.a -lm`".
+
+
+  This mode is enabled by specifying `fully_static_link` in the
+  [`features`](/reference/be/common-definitions#features) attribute.
+- STATIC, in which all user libraries are linked statically (if a static
+  version is available), but where system libraries (excluding C/C++ runtime libraries)
+  are linked dynamically, e.g. " `gcc foo.o libfoo.a libbaz.a -lm`".
+
+
+  This mode is enabled by specifying `linkstatic=True`.
+- DYNAMIC, in which all libraries are linked dynamically (if a dynamic version is
+  available), e.g. " `gcc foo.o libfoo.so libbaz.so -lm`".
+
+
+  This mode is enabled by specifying `linkstatic=False`.
+
+If the `linkstatic` attribute or `fully_static_link` in
+`features` is used outside of `//third_party`,
+please include a comment near the rule to explain why.
+
+The `linkstatic` attribute has a different meaning if used on a
+[`cc_library()`](/reference/be/c-cpp.html#cc_library) rule.
+For a C++ library, `linkstatic=True` indicates that only
+static linking is allowed, so no `.so` will be produced. `linkstatic=False` does
+not prevent static libraries from being created. The attribute is meant to control the
+creation of dynamic libraries.
+
+There should be very little code built with `linkstatic=False` in production.
+If `linkstatic=False`, then the build tool will create symlinks to
+depended-upon shared libraries in the `*.runfiles` area.
+
+`local_defines`
+
+List of strings; default is `[]`
+
+ List of defines to add to the compile line.
+Subject to ["Make" variable](/reference/be/make-variables) substitution and
+[Bourne shell tokenization](/reference/be/common-definitions#sh-tokenization).
+Each string, which must consist of a single Bourne shell token,
+is prepended with `-D` and added to the compile command line for this target,
+but not to its dependents.
+ `malloc`
+
+[Label](/concepts/labels); default is `"@bazel_tools//tools/cpp:malloc"`
+
+ Override the default dependency on malloc.
+
+By default, C++ binaries are linked against `//tools/cpp:malloc`,
+which is an empty library so the binary ends up using libc malloc.
+This label must refer to a `cc_library`. If compilation is for a non-C++
+rule, this option has no effect. The value of this attribute is ignored if
+`linkshared=True` is specified.
+
+`module_interfaces`
+
+List of [labels](/concepts/labels); default is `[]`
+
+ The list of files that are regarded as C++20 module interfaces.
+
+The C++ standard places no restriction on the module interface file extension:
+
+- Clang uses `.cppm`
+- GCC can use any source file extension
+- MSVC uses `.ixx`
+
+Use of this attribute is guarded by the flag
+`--experimental_cpp_modules`.
+
+`nocopts`
+
+String; default is `""`
+
+ Remove matching options from the C++ compilation command.
+Subject to ["Make" variable](/reference/be/make-variables) substitution.
+The value of this attribute is interpreted as a regular expression.
+Any preexisting `COPTS` that match this regular expression
+(including values explicitly specified in the rule's [copts](#cc_binary.copts) attribute)
+will be removed from `COPTS` for purposes of compiling this rule.
+This attribute should not be needed or used
+outside of `third_party`. The values are not preprocessed
+in any way other than the "Make" variable substitution.
+ `reexport_deps`
+
+List of [labels](/concepts/labels); default is `[]`
+
+`stamp`
+
+Integer; default is `-1`
+
+ Whether to encode build information into the binary. Possible values:
+
+- `stamp = 1`: Always stamp the build information into the binary, even in
+  [`--nostamp`](/docs/user-manual#flag--stamp) builds. **This**
+  **setting should be avoided**, since it potentially kills remote caching for the
+  binary and any downstream actions that depend on it.
+
+- `stamp = 0`: Always replace build information by constant values. This
+  gives good build result caching.
+
+- `stamp = -1`: Embedding of build information is controlled by the
+  [`--[no]stamp`](/docs/user-manual#flag--stamp) flag.
+
+
+Stamped binaries are _not_ rebuilt unless their dependencies change.
+
+`win_def_file`
+
+[Label](/concepts/labels); default is `None`
+
+ The Windows DEF file to be passed to the linker.
+
+This attribute should only be used when Windows is the target platform.
+It can be used to [export symbols](https://msdn.microsoft.com/en-us/library/d91k01sh.aspx) when linking a shared library.
+
+## cc\_import
+
+[View rule source](https://github.com/bazelbuild/bazel/blob/master/src/main/starlark/builtins_bzl/common/cc/cc_import.bzl)
+
+```
+cc_import(name, deps, data, hdrs, alwayslink, aspect_hints, compatible_with, deprecation, exec_compatible_with, exec_group_compatible_with, exec_properties, features, includes, interface_library, linkopts, objects, package_metadata, pic_objects, pic_static_library, restricted_to, shared_library, static_library, strip_include_prefix, system_provided, tags, target_compatible_with, testonly, toolchains, visibility)
+```
+
+The `cc_import` rule allows users to import precompiled C/C++ libraries.
+
+The following are the typical use cases:
+
+1\. Linking a static library
+
+```lang-starlark
+
+cc_import(
+    name = "mylib",
+    hdrs = ["mylib.h"],
+    static_library = "libmylib.a",
+    # If alwayslink is turned on,
+    # libmylib.a will be forcibly linked into any binary that depends on it.
+    # alwayslink = 1,
+)
+
+```
+
+2\. Linking a shared library (Unix)
+
+```lang-starlark
+
+cc_import(
+    name = "mylib",
+    hdrs = ["mylib.h"],
+    shared_library = "libmylib.so",
+)
+
+```
+
+3\. Linking a shared library with interface library
+
+On Unix:
+
+```lang-starlark
+
+cc_import(
+    name = "mylib",
+    hdrs = ["mylib.h"],
+    # libmylib.ifso is an interface library for libmylib.so which will be passed to the linker
+    interface_library = "libmylib.ifso",
+    # libmylib.so will be available at runtime
+    shared_library = "libmylib.so",
+)
+
+```
+
+On Windows:
+
+```lang-starlark
+
+cc_import(
+    name = "mylib",
+    hdrs = ["mylib.h"],
+    # mylib.lib is an import library for mylib.dll which will be passed to the linker
+    interface_library = "mylib.lib",
+    # mylib.dll will be available at runtime
+    shared_library = "mylib.dll",
+)
+
+```
+
+4\.
Linking a shared library with `system_provided=True`
+
+On Unix:
+
+```lang-starlark
+
+cc_import(
+    name = "mylib",
+    hdrs = ["mylib.h"],
+    interface_library = "libmylib.ifso", # Or we can also use libmylib.so as its own interface library
+    # libmylib.so is provided by the system environment, for example it can be found in LD_LIBRARY_PATH.
+    # This indicates that Bazel is not responsible for making libmylib.so available.
+    system_provided = 1,
+)
+
+```
+
+On Windows:
+
+```lang-starlark
+
+cc_import(
+    name = "mylib",
+    hdrs = ["mylib.h"],
+    # mylib.lib is an import library for mylib.dll which will be passed to the linker
+    interface_library = "mylib.lib",
+    # mylib.dll is provided by the system environment, for example it can be found in PATH.
+    # This indicates that Bazel is not responsible for making mylib.dll available.
+    system_provided = 1,
+)
+
+```
+
+5\. Linking to static or shared library
+
+On Unix:
+
+```lang-starlark
+
+cc_import(
+    name = "mylib",
+    hdrs = ["mylib.h"],
+    static_library = "libmylib.a",
+    shared_library = "libmylib.so",
+)
+
+```
+
+On Windows:
+
+```lang-starlark
+
+cc_import(
+    name = "mylib",
+    hdrs = ["mylib.h"],
+    static_library = "libmylib.lib", # A normal static library
+    interface_library = "mylib.lib", # An import library for mylib.dll
+    shared_library = "mylib.dll",
+)
+
+```
+
+The rest is the same on Unix and Windows:
+
+```lang-starlark
+
+# first will link to libmylib.a (or libmylib.lib)
+cc_binary(
+    name = "first",
+    srcs = ["first.cc"],
+    deps = [":mylib"],
+    linkstatic = 1, # default value
+)
+
+# second will link to libmylib.so (or mylib.lib)
+cc_binary(
+    name = "second",
+    srcs = ["second.cc"],
+    deps = [":mylib"],
+    linkstatic = 0,
+)
+
+```
+
+`cc_import` supports an `includes` attribute. For example:
+
+```lang-starlark
+
+cc_import(
+    name = "curl_lib",
+    hdrs = glob(["vendor/curl/include/curl/*.h"]),
+    includes = ["vendor/curl/include"],
+    shared_library = "vendor/curl/lib/.libs/libcurl.dylib",
+)
+
+```
+
+### Arguments
+
+Attributes
+
+`name`
+
+[Name](/concepts/labels#target-names); required
+
+A unique name for this target.
+
+`deps`
+
+List of [labels](/concepts/labels); default is `[]`
+
+ The list of other libraries that the target depends upon.
+See general comments about `deps`
+at [Typical attributes defined by\
+most build rules](/reference/be/common-definitions#typical-attributes).
+ `hdrs`
+
+List of [labels](/concepts/labels); default is `[]`
+
+ The list of header files published by
+this precompiled library to be directly included by sources in dependent rules.
+ `alwayslink`
+
+Boolean; default is `False`
+
+ If 1, any binary that depends (directly or indirectly) on this C++
+precompiled library will link in all the object files archived in the static library,
+even if some contain no symbols referenced by the binary.
+This is useful if your code isn't explicitly called by code in
+the binary, e.g., if your code registers to receive some callback
+provided by some service.
+
+If alwayslink doesn't work with VS 2017 on Windows, that is due to a
+[known issue](https://github.com/bazelbuild/bazel/issues/3949);
+please upgrade your VS 2017 to the latest version.
+
+`includes`
+
+List of strings; default is `[]`
+
+ List of include dirs to be added to the compile line.
+Subject to ["Make variable"](/reference/be/make-variables) substitution.
+Each string is prepended with the package path and passed to the C++ toolchain for
+expansion via the "include\_paths" CROSSTOOL feature.
A toolchain running on a POSIX system +with typical feature definitions will produce +`-isystem path_to_package/include_entry`. +This should only be used for third-party libraries that +do not conform to the Google style of writing #include statements. +Unlike [COPTS](#cc_binary.copts), these flags are added for this rule +and every rule that depends on it. (Note: not the rules it depends upon!) Be +very careful, since this may have far-reaching effects. When in doubt, add +"-I" flags to [COPTS](#cc_binary.copts) instead. + +The default `include` path doesn't include generated +files. If you need to `#include` a generated header +file, list it in the `srcs`. + +`interface_library` + +[Label](/concepts/labels); default is `None` + + A single interface library for linking the shared library. + +Permitted file types: +`.ifso`, +`.tbd`, +`.lib`, +`.so` +or `.dylib` + +`linkopts` + +List of strings; default is `[]` + + Add these flags to the C++ linker command. +Subject to ["Make" variable](make-variables.html) substitution, +[Bourne shell tokenization](common-definitions.html#sh-tokenization) and +[label expansion](common-definitions.html#label-expansion). +Each string in this attribute is added to `LINKOPTS` before +linking the binary target. + +Each element of this list that does not start with `$` or `-` is +assumed to be the label of a target in `deps`. The +list of files generated by that target is appended to the linker +options. An error is reported if the label is invalid, or is +not declared in `deps`. + +`objects` + +List of [labels](/concepts/labels); default is `[]` + +`pic_objects` + +List of [labels](/concepts/labels); default is `[]` + +`pic_static_library` + +[Label](/concepts/labels); default is `None` + +`shared_library` + +[Label](/concepts/labels); default is `None` + + A single precompiled shared library. Bazel ensures it is available to the +binary that depends on it during runtime. + +Permitted file types: +`.so`, +`.dll` +or `.dylib` + +`static_library` + +[Label](/concepts/labels); default is `None` + + A single precompiled static library. + +Permitted file types: +`.a`, +`.pic.a` +or `.lib` + +`strip_include_prefix` + +String; default is `""` + + The prefix to strip from the paths of the headers of this rule. + +When set, the headers in the `hdrs` attribute of this rule are accessible +at their path with this prefix cut off. + +If it's a relative path, it's taken as a package-relative one. If it's an absolute one, +it's understood as a repository-relative path. + +The prefix in the `include_prefix` attribute is added after this prefix is +stripped. + +This attribute is only legal under `third_party`. + + +`system_provided` + +Boolean; default is `False` + + If 1, it indicates the shared library required at runtime is provided by the system. In +this case, `interface_library` should be specified and +`shared_library` should be empty. 
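+
+To tie these attributes together, here is a minimal sketch (all paths and
+names are hypothetical) of importing a prebuilt library whose headers live
+under a `vendor/include` prefix, using `strip_include_prefix` so that
+consumers can write `#include "mylib/api.h"` instead of the full vendored path:
+
+```lang-starlark
+
+cc_import(
+    name = "mylib",
+    hdrs = ["vendor/include/mylib/api.h"],
+    shared_library = "vendor/lib/libmylib.so",
+    # Headers become accessible at their path with the prefix cut off,
+    # i.e. "mylib/api.h" rather than "vendor/include/mylib/api.h".
+    strip_include_prefix = "vendor/include",
+)
+
+```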
+
+
+## cc\_library
+
+[View rule source](https://github.com/bazelbuild/bazel/blob/master/src/main/starlark/builtins_bzl/common/cc/cc_library.bzl)
+
+```
+cc_library(name, deps, srcs, data, hdrs, additional_compiler_inputs, additional_linker_inputs, alwayslink, aspect_hints, compatible_with, conlyopts, copts, cxxopts, defines, deprecation, exec_compatible_with, exec_group_compatible_with, exec_properties, features, hdrs_check, implementation_deps, include_prefix, includes, licenses, linkopts, linkstamp, linkstatic, local_defines, module_interfaces, package_metadata, restricted_to, strip_include_prefix, tags, target_compatible_with, testonly, textual_hdrs, toolchains, visibility, win_def_file)
+```
+
+Use `cc_library()` for C++-compiled libraries.
+The result is either a `.so`, `.lo`,
+or `.a`, depending on what is needed.
+
+If you build something with static linking that depends on
+a `cc_library`, the output of a depended-on library rule
+is the `.a` file. If you specify
+`alwayslink=True`, you get the `.lo` file.
+
+The actual output file name is `libfoo.so` for
+the shared library, where _foo_ is the name of the rule. The
+other kinds of libraries end with `.lo` and `.a`,
+respectively. If you need a specific shared library name, for
+example, to define a Python module, use a genrule to copy the library
+to the desired name.
+
+#### Header inclusion checking
+
+All header files that are used in the build must be declared in
+the `hdrs` or `srcs` of `cc_*` rules.
+This is enforced.
+
+For `cc_library` rules, headers in `hdrs` comprise the
+public interface of the library and can be directly included both
+from the files in `hdrs` and `srcs` of the library
+itself as well as from files in `hdrs` and `srcs`
+of `cc_*` rules that list the library in their `deps`.
+Headers in `srcs` must only be directly included from the files
+in `hdrs` and `srcs` of the library itself. When
+deciding whether to put a header into `hdrs` or `srcs`,
+you should ask whether you want consumers of this library to be able to
+directly include it. This is roughly the same decision as
+between `public` and `private` visibility in programming languages.
+
+`cc_binary` and `cc_test` rules do not have an exported
+interface, so they also do not have a `hdrs` attribute. All headers
+that belong to the binary or test directly should be listed in
+the `srcs`.
+
+To illustrate these rules, look at the following example.
+
+```lang-starlark
+
+cc_binary(
+    name = "foo",
+    srcs = [
+        "foo.cc",
+        "foo.h",
+    ],
+    deps = [":bar"],
+)
+
+cc_library(
+    name = "bar",
+    srcs = [
+        "bar.cc",
+        "bar-impl.h",
+    ],
+    hdrs = ["bar.h"],
+    deps = [":baz"],
+)
+
+cc_library(
+    name = "baz",
+    srcs = [
+        "baz.cc",
+        "baz-impl.h",
+    ],
+    hdrs = ["baz.h"],
+)
+
+```
+
+The allowed direct inclusions in this example are listed in the table below.
+For example `foo.cc` is allowed to directly
+include `foo.h` and `bar.h`, but not `baz.h`.
+
+| Including file | Allowed inclusions |
+| --- | --- |
+| `foo.h` | `bar.h` |
+| `foo.cc` | `foo.h`, `bar.h` |
+| `bar.h` | `bar-impl.h`, `baz.h` |
+| `bar-impl.h` | `bar.h`, `baz.h` |
+| `bar.cc` | `bar.h`, `bar-impl.h`, `baz.h` |
+| `baz.h` | `baz-impl.h` |
+| `baz-impl.h` | `baz.h` |
+| `baz.cc` | `baz.h`, `baz-impl.h` |
+
+The inclusion checking rules only apply to _direct_
+inclusions. In the example above `foo.cc` is allowed to
+include `bar.h`, which may include `baz.h`, which in
+turn is allowed to include `baz-impl.h`. Technically, the
+compilation of a `.cc` file may transitively include any header
+file in the `hdrs` or `srcs` in
+any `cc_library` in the transitive `deps` closure.
In
+this case the compiler may read `baz.h` and `baz-impl.h`
+when compiling `foo.cc`, but `foo.cc` must not
+contain `#include "baz.h"`. For that to be
+allowed, `baz` must be added to the `deps`
+of `foo`.
+
+Bazel depends on toolchain support to enforce the inclusion checking rules.
+The `layering_check` feature has to be supported by the toolchain
+and requested explicitly, for example via the
+`--features=layering_check` command-line flag or the
+`features` parameter of the
+[`package`](/reference/be/functions.html#package) function. The toolchains
+provided by Bazel only support this feature with clang on Unix and macOS.
+
+#### Examples
+
+We use the `alwayslink` flag to force the linker to link in
+this code although the main binary code doesn't reference it.
+
+```lang-starlark
+
+cc_library(
+    name = "ast_inspector_lib",
+    srcs = ["ast_inspector_lib.cc"],
+    hdrs = ["ast_inspector_lib.h"],
+    visibility = ["//visibility:public"],
+    deps = ["//third_party/llvm/llvm/tools/clang:frontend"],
+    # alwayslink as we want to be able to call things in this library at
+    # debug time, even if they aren't used anywhere in the code.
+    alwayslink = 1,
+)
+
+```
+
+The following example comes from
+`third_party/python2_4_3/BUILD`.
+Some of the code uses the `dl` library (to load
+another, dynamic library), so this
+rule specifies the `-ldl` link option to link the
+`dl` library.
+
+```lang-starlark
+
+cc_library(
+    name = "python2_4_3",
+    linkopts = [
+        "-ldl",
+        "-lutil",
+    ],
+    deps = ["//third_party/expat"],
+)
+
+```
+
+The following example comes from `third_party/kde/BUILD`.
+We keep pre-built `.so` files in the depot.
+The header files live in a subdirectory named `include`.
+
+```lang-starlark
+
+cc_library(
+    name = "kde",
+    srcs = [
+        "lib/libDCOP.so",
+        "lib/libkdesu.so",
+        "lib/libkhtml.so",
+        "lib/libkparts.so",
+        ...more .so files...,
+    ],
+    includes = ["include"],
+    deps = ["//third_party/X11"],
+)
+
+```
+
+The following example comes from `third_party/gles/BUILD`.
+Third-party code often needs some `defines` and
+`linkopts`.
+
+```lang-starlark
+
+cc_library(
+    name = "gles",
+    srcs = [
+        "GLES/egl.h",
+        "GLES/gl.h",
+        "ddx.c",
+        "egl.c",
+    ],
+    defines = [
+        "USE_FLOAT",
+        "__GL_FLOAT",
+        "__GL_COMMON",
+    ],
+    linkopts = ["-ldl"],  # uses dlopen(), dl library
+    deps = [
+        "es",
+        "//third_party/X11",
+    ],
+)
+
+```
+
+### Arguments
+
+Attributes
+
+`name`
+
+[Name](/concepts/labels#target-names); required
+
+A unique name for this target.
+
+`deps`
+
+List of [labels](/concepts/labels); default is `[]`
+
+ The list of other libraries that the library target depends upon.
+
+These can be `cc_library` or `objc_library` targets.
+
+See general comments about `deps`
+at [Typical attributes defined by\
+most build rules](/reference/be/common-definitions#typical-attributes).
+
+These should be names of C++ library rules.
+When you build a binary that links this rule's library,
+you will also link the libraries in `deps`.
+
+Despite the "deps" name, not all of this library's clients
+belong here. Run-time data dependencies belong in `data`.
+Source files generated by other rules belong in `srcs`.
+
+To link in a pre-compiled third-party library, add its name to
+the `srcs` instead.
+
+To depend on something without linking it to this library, add its
+name to the `data` instead.
+
+`srcs`
+
+List of [labels](/concepts/labels); default is `[]`
+
+ The list of C and C++ files that are processed to create the library target.
+These are C/C++ source and header files, either non-generated (normal source
+code) or generated.
+
+All `.cc`, `.c`, and `.cpp` files will
+be compiled. These might be generated files: if a named file is in
+the `outs` of some other rule, this `cc_library`
+will automatically depend on that other rule.
+
+Pure assembler files (.s, .asm) are not preprocessed and are typically built using
+the assembler. Preprocessed assembly files (.S) are preprocessed and are typically built
+using the C/C++ compiler.
+
+A `.h` file will not be compiled, but will be available for
+inclusion by sources in this rule. Both `.cc` and
+`.h` files can directly include headers listed in
+these `srcs` or in the `hdrs` of this rule or any
+rule listed in the `deps` argument.
+
+All `#include`d files must be mentioned in the
+`hdrs` attribute of this or referenced `cc_library`
+rules, or they should be listed in `srcs` if they are private
+to this library. See ["Header inclusion checking"](#hdrs) for
+a more detailed description.
+
+`.so`, `.lo`, and `.a` files are
+pre-compiled files. Your library might have these as
+`srcs` if it uses third-party code for which we don't
+have source code.
+
+If the `srcs` attribute includes the label of another rule,
+`cc_library` will use the output files of that rule as source files to
+compile. This is useful for one-off generation of source code (for more than occasional
+use, it's better to implement a Starlark rule class and use the `cc_common`
+API)
+
+Permitted `srcs` file types:
+
+- C and C++ source files: `.c`, `.cc`, `.cpp`,
+  `.cxx`, `.c++`, `.C`
+- C and C++ header files: `.h`, `.hh`, `.hpp`,
+  `.hxx`, `.inc`, `.inl`, `.H`
+- Assembler with C preprocessor: `.S`
+- Archive: `.a`, `.pic.a`
+- "Always link" library: `.lo`, `.pic.lo`
+- Shared library, versioned or unversioned: `.so`,
+  `.so.version`
+- Object file: `.o`, `.pic.o`
+
+... and any rules that produce those files (e.g. `cc_embed_data`).
+Different extensions denote different programming languages in
+accordance with gcc convention.
+
+`data`
+
+List of [labels](/concepts/labels); default is `[]`
+
+ The list of files needed by this library at runtime.
+
+See general comments about `data`
+at [Typical attributes defined by\
+most build rules](/reference/be/common-definitions#typical-attributes).
+
+If a `data` is the name of a generated file, then this
+`cc_library` rule automatically depends on the generating
+rule.
+
+If a `data` is a rule name, then this
+`cc_library` rule automatically depends on that rule,
+and that rule's `outs` are automatically added to
+this `cc_library`'s data files.
+
+Your C++ code can access these data files like so:
+
+```lang-starlark
+
+  const std::string path = devtools_build::GetDataDependencyFilepath(
+      "my/test/data/file");
+
+```
+
+`hdrs`
+
+List of [labels](/concepts/labels); default is `[]`
+
+ The list of header files published by
+this library to be directly included by sources in dependent rules.
+
+This is the strongly preferred location for declaring header files that
+describe the interface for the library. These headers will be made
+available for inclusion by sources in this rule or in dependent rules.
+Headers not meant to be included by a client of this library should be
+listed in the `srcs` attribute instead, even if they are
+included by a published header. See ["Header inclusion\
+checking"](#hdrs) for a more detailed description.
+
+Permitted `headers` file types:
+`.h`,
+`.hh`,
+`.hpp`,
+`.hxx`.
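+
+As a minimal sketch (file names are hypothetical), the split described above
+puts the public header in `hdrs` and the implementation-only header in
+`srcs`:
+
+```lang-starlark
+
+cc_library(
+    name = "foo",
+    srcs = [
+        "foo.cc",
+        "foo_internal.h",  # private: only includable from this library's own files
+    ],
+    hdrs = ["foo.h"],  # public: includable from dependent rules as well
+)
+
+```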
+
+`additional_compiler_inputs`
+
+List of [labels](/concepts/labels); default is `[]`
+
+ Any additional files you might want to pass to the compiler command line, such as sanitizer
+ignorelists. Files specified here can then be used in copts with the
+$(location) function.
+ `additional_linker_inputs`
+
+List of [labels](/concepts/labels); default is `[]`
+
+ Pass these files to the C++ linker command.
+
+For example, compiled Windows .res files can be provided here to be embedded in
+the binary target.
+
+`alwayslink`
+
+Boolean; default is `False`
+
+ If 1, any binary that depends (directly or indirectly) on this C++
+library will link in all the object files for the files listed in
+`srcs`, even if some contain no symbols referenced by the binary.
+This is useful if your code isn't explicitly called by code in
+the binary, e.g., if your code registers to receive some callback
+provided by some service.
+
+If alwayslink doesn't work with VS 2017 on Windows, that is due to a
+[known issue](https://github.com/bazelbuild/bazel/issues/3949);
+please upgrade your VS 2017 to the latest version.
+
+`conlyopts`
+
+List of strings; default is `[]`
+
+ Add these options to the C compilation command.
+Subject to ["Make variable"](/reference/be/make-variables) substitution and
+[Bourne shell tokenization](/reference/be/common-definitions#sh-tokenization).
+ `copts`
+
+List of strings; default is `[]`
+
+ Add these options to the C/C++ compilation command.
+Subject to ["Make variable"](/reference/be/make-variables) substitution and
+[Bourne shell tokenization](/reference/be/common-definitions#sh-tokenization).
+
+Each string in this attribute is added in the given order to `COPTS` before
+compiling the binary target. The flags take effect only for compiling this target, not
+its dependencies, so be careful about header files included elsewhere.
+All paths should be relative to the workspace, not to the current package.
+This attribute should not be needed outside of `third_party`.
+
+If the package declares the [feature](/reference/be/functions.html#package.features) `no_copts_tokenization`, Bourne shell tokenization applies only to strings
+that consist of a single "Make" variable.
+
+`cxxopts`
+
+List of strings; default is `[]`
+
+ Add these options to the C++ compilation command.
+Subject to ["Make variable"](/reference/be/make-variables) substitution and
+[Bourne shell tokenization](/reference/be/common-definitions#sh-tokenization).
+ `defines`
+
+List of strings; default is `[]`
+
+ List of defines to add to the compile line.
+Subject to ["Make" variable](/reference/be/make-variables) substitution and
+[Bourne shell tokenization](/reference/be/common-definitions#sh-tokenization).
+Each string, which must consist of a single Bourne shell token,
+is prepended with `-D` and added to the compile command line to this target,
+as well as to every rule that depends on it. Be very careful, since this may have
+far-reaching effects. When in doubt, add define values to
+[`local_defines`](#cc_binary.local_defines) instead.
+ `hdrs_check`
+
+String; default is `""`
+
+ Deprecated, no-op.
+ `implementation_deps`
+
+List of [labels](/concepts/labels); default is `[]`
+
+ The list of other libraries that the library target depends on. Unlike with
+`deps`, the headers and include paths of these libraries (and all their
+transitive deps) are only used for compilation of this library, and not libraries that
+depend on it.
Libraries specified with `implementation_deps` are still linked in
+binary targets that depend on this library.
+ `include_prefix`
+
+String; default is `""`
+
+ The prefix to add to the paths of the headers of this rule.
+
+When set, the headers in the `hdrs` attribute of this rule are accessible
+at the value of this attribute prepended to their repository-relative path.
+
+The prefix in the `strip_include_prefix` attribute is removed before this
+prefix is added.
+
+This attribute is only legal under `third_party`.
+
+
+`includes`
+
+List of strings; default is `[]`
+
+ List of include dirs to be added to the compile line.
+Subject to ["Make variable"](/reference/be/make-variables) substitution.
+Each string is prepended with the package path and passed to the C++ toolchain for
+expansion via the "include\_paths" CROSSTOOL feature. A toolchain running on a POSIX system
+with typical feature definitions will produce
+`-isystem path_to_package/include_entry`.
+This should only be used for third-party libraries that
+do not conform to the Google style of writing #include statements.
+Unlike [COPTS](#cc_binary.copts), these flags are added for this rule
+and every rule that depends on it. (Note: not the rules it depends upon!) Be
+very careful, since this may have far-reaching effects. When in doubt, add
+"-I" flags to [COPTS](#cc_binary.copts) instead.
+
+The added `include` paths will include generated files as well as
+files in the source tree.
+
+`linkopts`
+
+List of strings; default is `[]`
+
+ See [`cc_binary.linkopts`](/reference/be/c-cpp.html#cc_binary.linkopts).
+The `linkopts` attribute is also applied to any target that
+depends, directly or indirectly, on this library via `deps`
+attributes (or via other attributes that are treated similarly:
+the [`malloc`](/reference/be/c-cpp.html#cc_binary.malloc)
+attribute of [`cc_binary`](/reference/be/c-cpp.html#cc_binary)). Dependency
+linkopts take precedence over dependent linkopts (i.e. dependency linkopts
+appear later in the command line). Linkopts specified in
+[`--linkopt`](../user-manual.html#flag--linkopt)
+take precedence over rule linkopts.
+
+Note that the `linkopts` attribute only applies
+when creating `.so` files or executables, not
+when creating `.a` or `.lo` files.
+So if the `linkstatic=True` attribute is set, the
+`linkopts` attribute has no effect on the creation of
+this library, only on other targets which depend on this library.
+
+Also, it is important to note that "-Wl,-soname" or "-Xlinker -soname"
+options are not supported and should never be specified in this attribute.
+
+The `.so` files produced by `cc_library`
+rules are not linked against the libraries that they depend
+on. If you're trying to create a shared library for use
+outside of the main repository, e.g. for manual use
+with `dlopen()` or `LD_PRELOAD`,
+it may be better to use a `cc_binary` rule
+with the `linkshared=True` attribute.
+See [`cc_binary.linkshared`](/reference/be/c-cpp.html#cc_binary.linkshared).
+
+`linkstamp`
+
+[Label](/concepts/labels); default is `None`
+
+ Simultaneously compiles and links the specified C++ source file into the final
+binary. This trickery is required to introduce timestamp
+information into binaries; if we compiled the source file to an
+object file in the usual way, the timestamp would be incorrect.
+A linkstamp compilation may not include any particular set of
+compiler flags and so should not depend on any particular
+header, compiler option, or other build variable.
+_This option should only be needed in the_
+_`base` package._
+
+`linkstatic`
+
+Boolean; default is `False`
+
+ For [`cc_binary`](/reference/be/c-cpp.html#cc_binary) and
+[`cc_test`](/reference/be/c-cpp.html#cc_test): link the binary in static
+mode. For `cc_library.linkstatic`: see below.
+
+By default this option is on for `cc_binary` and off for the rest.
+
+If enabled and this is a binary or test, this option tells the build tool to link in
+`.a`'s instead of `.so`'s for user libraries whenever possible.
+System libraries such as libc (but _not_ the C/C++ runtime libraries,
+see below) are still linked dynamically, as are libraries for which
+there is no static library. So the resulting executable will still be dynamically
+linked, hence only _mostly_ static.
+
+There are really three different ways to link an executable:
+
+- STATIC with the fully\_static\_link feature, in which everything is linked statically;
+  e.g. " `gcc -static foo.o libbar.a libbaz.a -lm`".
+
+
+  This mode is enabled by specifying `fully_static_link` in the
+  [`features`](/reference/be/common-definitions#features) attribute.
+- STATIC, in which all user libraries are linked statically (if a static
+  version is available), but where system libraries (excluding C/C++ runtime libraries)
+  are linked dynamically, e.g. " `gcc foo.o libfoo.a libbaz.a -lm`".
+
+
+  This mode is enabled by specifying `linkstatic=True`.
+- DYNAMIC, in which all libraries are linked dynamically (if a dynamic version is
+  available), e.g. " `gcc foo.o libfoo.so libbaz.so -lm`".
+
+
+  This mode is enabled by specifying `linkstatic=False`.
+
+If the `linkstatic` attribute or `fully_static_link` in
+`features` is used outside of `//third_party`,
+please include a comment near the rule to explain why.
+
+The `linkstatic` attribute has a different meaning if used on a
+[`cc_library()`](/reference/be/c-cpp.html#cc_library) rule.
+For a C++ library, `linkstatic=True` indicates that only
+static linking is allowed, so no `.so` will be produced. `linkstatic=False` does
+not prevent static libraries from being created. The attribute is meant to control the
+creation of dynamic libraries.
+
+There should be very little code built with `linkstatic=False` in production.
+If `linkstatic=False`, then the build tool will create symlinks to
+depended-upon shared libraries in the `*.runfiles` area.
+
+`local_defines`
+
+List of strings; default is `[]`
+
+ List of defines to add to the compile line.
+Subject to ["Make" variable](/reference/be/make-variables) substitution and
+[Bourne shell tokenization](/reference/be/common-definitions#sh-tokenization).
+Each string, which must consist of a single Bourne shell token,
+is prepended with `-D` and added to the compile command line for this target,
+but not to its dependents.
+ `module_interfaces`
+
+List of [labels](/concepts/labels); default is `[]`
+
+ The list of files that are regarded as C++20 module interfaces.
+
+The C++ standard places no restriction on the module interface file extension:
+
+- Clang uses `.cppm`
+- GCC can use any source file extension
+- MSVC uses `.ixx`
+
+Use of this attribute is guarded by the flag
+`--experimental_cpp_modules`.
+
+`strip_include_prefix`
+
+String; default is `""`
+
+ The prefix to strip from the paths of the headers of this rule.
+
+When set, the headers in the `hdrs` attribute of this rule are accessible
+at their path with this prefix cut off.
+
+If it's a relative path, it's taken as a package-relative one. If it's an absolute one,
+it's understood as a repository-relative path.
+
+The prefix in the `include_prefix` attribute is added after this prefix is
+stripped.
+
+This attribute is only legal under `third_party`.
+
+
+`textual_hdrs`
+
+List of [labels](/concepts/labels); default is `[]`
+
+ The list of header files published by
+this library to be textually included by sources in dependent rules.
+
+This is the location for declaring header files that cannot be compiled on their own;
+that is, they always need to be textually included by other source files to build valid
+code.
+
+`win_def_file`
+
+[Label](/concepts/labels); default is `None`
+
+ The Windows DEF file to be passed to the linker.
+
+This attribute should only be used when Windows is the target platform.
+It can be used to [export symbols](https://msdn.microsoft.com/en-us/library/d91k01sh.aspx) when linking a shared library.
+
+## cc\_shared\_library
+
+[View rule source](https://github.com/bazelbuild/bazel/blob/master/src/main/starlark/builtins_bzl/common/cc/cc_shared_library.bzl)
+
+```
+cc_shared_library(name, deps, additional_linker_inputs, aspect_hints, compatible_with, deprecation, dynamic_deps, exec_compatible_with, exec_group_compatible_with, exec_properties, experimental_disable_topo_sort_do_not_use_remove_before_7_0, exports_filter, features, package_metadata, restricted_to, roots, shared_lib_name, static_deps, tags, target_compatible_with, testonly, toolchains, user_link_flags, visibility, win_def_file)
+```
+
+Produces a shared library.
+
+#### Example
+
+```
+cc_shared_library(
+    name = "foo_shared",
+    deps = [
+        ":foo",
+    ],
+    dynamic_deps = [
+        ":bar_shared",
+    ],
+    additional_linker_inputs = [
+        ":foo.lds",
+    ],
+    user_link_flags = [
+        "-Wl,--version-script=$(location :foo.lds)",
+    ],
+)
+cc_library(
+    name = "foo",
+    srcs = ["foo.cc"],
+    hdrs = ["foo.h"],
+    deps = [
+        ":bar",
+        ":baz",
+    ],
+)
+cc_shared_library(
+    name = "bar_shared",
+    shared_lib_name = "bar.so",
+    deps = [":bar"],
+)
+cc_library(
+    name = "bar",
+    srcs = ["bar.cc"],
+    hdrs = ["bar.h"],
+)
+cc_library(
+    name = "baz",
+    srcs = ["baz.cc"],
+    hdrs = ["baz.h"],
+)
+
+```
+
+In the example `foo_shared` statically links `foo`
+and `baz`, the latter being a transitive dependency. It doesn't
+link `bar` because it is already provided dynamically by the
+`dynamic_dep` `bar_shared`.
+
+`foo_shared` uses a linker script \*.lds file to control which
+symbols should be exported. The `cc_shared_library` rule logic does
+not control which symbols get exported; it only uses what is assumed to be
+exported to give errors during the analysis phase if two shared libraries export the
+same targets.
+
+Every direct dependency of `cc_shared_library` is assumed to be
+exported. Therefore, Bazel assumes during analysis that `foo` is being
+exported by `foo_shared`. `baz` is not assumed to be exported
+by `foo_shared`. Every target matched by the `exports_filter`
+is also assumed to be exported.
+
+Every single `cc_library` in the example should appear in at most one
+`cc_shared_library`. If we wanted to link `baz` also into
+`bar_shared` we would need to add
+`tags = ["LINKABLE_MORE_THAN_ONCE"]` to `baz`.
+
+Due to the `shared_lib_name` attribute, the file produced by
+`bar_shared` will have the name `bar.so` as opposed
+to the name `libbar.so` that it would have by default on Linux.
+
+#### Errors
+
+##### `Two shared libraries in dependencies export the same symbols.`
+
+This will happen whenever you are creating a target with two different
+`cc_shared_library` dependencies that export the same target.
To fix this
+you need to stop the libraries from being exported in one of the
+`cc_shared_library` dependencies.
+
+##### `Two shared libraries in dependencies link the same library statically`
+
+This will happen whenever you are creating a new `cc_shared_library` with two
+different `cc_shared_library` dependencies that link the same target statically.
+This is similar to the error with exports.
+
+One way to fix this is to stop linking the library into one of the
+`cc_shared_library` dependencies. At the same time, the one that still links it
+needs to export the library so that the one not linking it keeps visibility to
+the symbols. Another way is to pull out a third library that exports the target.
+A third way is to tag the culprit `cc_library` with `LINKABLE_MORE_THAN_ONCE`,
+but this fix should be rare and you should absolutely make sure that the
+`cc_library` is indeed safe to link more than once.
+
+##### `'//foo:foo' is already linked statically in '//bar:bar' but not exported`
+
+This means that a library in the transitive closure of your `deps` is reachable
+without going through one of the `cc_shared_library` dependencies but is already
+linked into a different `cc_shared_library` in `dynamic_deps` and is not
+exported.
+
+The solution is to export it from the `cc_shared_library` dependency or pull out
+a third `cc_shared_library` that exports it.
+
+##### `Do not place libraries which only contain a precompiled dynamic library in deps.`
+
+A precompiled dynamic library doesn't need to be, and cannot be, linked
+statically into the `cc_shared_library` target that you are
+currently creating. Therefore, it doesn't belong in `deps` of the
+`cc_shared_library`. If this precompiled dynamic library is a dependency of one
+of your `cc_libraries`, then that `cc_library` needs to depend on it
+directly.
+
+##### `Trying to export a library already exported by a different shared library`
+
+You will see this error if the current rule claims to export a
+target that is already being exported by one of its dynamic dependencies.
+
+To fix this, remove the target from `deps` and just rely on it from the dynamic
+dependency, or make sure that the `exports_filter` doesn't catch this target.
+
+### Arguments
+
+Attributes
+
+`name`
+
+[Name](/concepts/labels#target-names); required
+
+A unique name for this target.
+
+`deps`
+
+List of [labels](/concepts/labels); default is `[]`
+
+ Top level libraries that will unconditionally be statically linked into the shared library
+after being whole-archived.
+
+Any transitive library dependency of these direct deps will be linked into this shared
+library as long as they have not already been linked by a `cc_shared_library`
+in `dynamic_deps`.
+
+During analysis, the rule implementation will consider any target listed in
+`deps` as being exported by the shared library in order to give errors when
+multiple `cc_shared_libraries` export the same targets. The rule implementation
+does not take care of informing the linker about which symbols should be exported by the
+shared object. The user should take care of this via linker scripts or visibility
+declarations in the source code.
+
+The implementation will also trigger errors whenever the same library is linked statically
+into more than one `cc_shared_library`.
This can be avoided by adding
+`"LINKABLE_MORE_THAN_ONCE"` to the `cc_library.tags` or by listing
+the `cc_library` as an export of one of the shared libraries so that one can be made a
+`dynamic_dep` of the other.
+
+`additional_linker_inputs`
+
+List of [labels](/concepts/labels); default is `[]`
+
+ Any additional files that you may want to pass to the linker, for example, linker scripts.
+You have to separately pass any linker flags that the linker needs in order to be aware
+of this file. You can do so via the `user_link_flags` attribute.
+ `dynamic_deps`
+
+List of [labels](/concepts/labels); default is `[]`
+
+ These are other `cc_shared_library` dependencies the current target depends on.
+
+The `cc_shared_library` implementation will use the list of
+`dynamic_deps` (transitively, i.e. also the `dynamic_deps` of the
+current target's `dynamic_deps`) to decide which `cc_libraries` in
+the transitive `deps` should not be linked in because they are already provided
+by a different `cc_shared_library`.
+
+`experimental_disable_topo_sort_do_not_use_remove_before_7_0`
+
+Boolean; default is `False`
+
+`exports_filter`
+
+List of strings; default is `[]`
+
+ This attribute contains a list of targets that are claimed to be exported by the current
+shared library.
+
+Any target in `deps` is already understood to be exported by the shared library.
+This attribute should be used to list any targets that are exported by the shared library
+but are transitive dependencies of `deps`.
+
+Note that this attribute is not actually adding a dependency edge to those targets; the
+dependency edge should instead be created by `deps`. The entries in this
+attribute are just strings. Keep in mind that when placing a target in this attribute,
+this is considered a claim that the shared library exports the symbols from that target.
+The `cc_shared_library` logic doesn't actually handle telling the linker which
+symbols should be exported.
+
+The following syntax is allowed:
+
+`//foo:__pkg__` to account for any target in foo/BUILD
+
+`//foo:__subpackages__` to account for any target in foo/BUILD or any other
+package below foo/ like foo/bar/BUILD
+
+`roots`
+
+List of [labels](/concepts/labels); default is `[]`
+
+`shared_lib_name`
+
+String; default is `""`
+
+ By default, cc\_shared\_library will use a name for the shared library output file based on
+the target's name and the platform. This includes an extension and sometimes a prefix.
+Sometimes you may not want the default name; for example, when loading C++ shared libraries
+for Python, the default lib\* prefix is often not desired, in which case you can use this
+attribute to choose a custom name.
+ `static_deps`
+
+List of strings; default is `[]`
+
+`user_link_flags`
+
+List of strings; default is `[]`
+
+ Any additional flags that you may want to pass to the linker. For example, to make the
+linker aware of a linker script passed via additional\_linker\_inputs you can use the
+following:
+
+```lang-starlark
+
+  cc_shared_library(
+      name = "foo_shared",
+      additional_linker_inputs = select({
+          "//src/conditions:linux": [
+              ":foo.lds",
+              ":additional_script.txt",
+          ],
+          "//conditions:default": []}),
+      user_link_flags = select({
+          "//src/conditions:linux": [
+              "-Wl,-rpath,kittens",
+              "-Wl,--version-script=$(location :foo.lds)",
+              "-Wl,--script=$(location :additional_script.txt)",
+          ],
+          "//conditions:default": []}),
+      ...
+  )
+
+```
+
+`win_def_file`
+
+[Label](/concepts/labels); default is `None`
+
+ The Windows DEF file to be passed to the linker.
+
+This attribute should only be used when Windows is the target platform.
+It can be used to [export symbols](https://msdn.microsoft.com/en-us/library/d91k01sh.aspx) when linking a shared library.
+
+## cc\_static\_library
+
+[View rule source](https://github.com/bazelbuild/bazel/blob/master/src/main/starlark/builtins_bzl/common/cc/cc_static_library.bzl)
+
+```
+cc_static_library(name, deps, aspect_hints, compatible_with, deprecation, exec_compatible_with, exec_group_compatible_with, exec_properties, features, package_metadata, restricted_to, tags, target_compatible_with, testonly, toolchains, visibility)
+```
+
+ Produces a static library from a list of targets and their transitive dependencies.
+
+The resulting static library contains the object files of the targets listed in
+`deps` as well as their transitive dependencies, with preference given to
+`PIC` objects.
+
+#### Output groups
+
+##### `linkdeps`
+
+A text file containing the labels of those transitive dependencies of targets listed in
+`deps` that did not contribute any object files to the static library, but do
+provide at least one static, dynamic or interface library. The resulting static library
+may require these libraries to be available at link time.
+
+##### `linkopts`
+
+A text file containing the user-provided `linkopts` of all transitive
+dependencies of targets listed in `deps`.
+
+#### Duplicate symbols
+
+By default, the `cc_static_library` rule checks that the resulting static
+library does not contain any duplicate symbols. If it does, the build fails with an error
+message that lists the duplicate symbols and the object files containing them.
+
+This check can be disabled per target or per package by setting
+`features = ["-symbol_check"]` or globally via
+`--features=-symbol_check`.
+
+##### Toolchain support for `symbol_check`
+
+The auto-configured C++ toolchains shipped with Bazel support the
+`symbol_check` feature on all platforms. Custom toolchains can add support for
+it in one of two ways:
+
+- Implementing the `ACTION_NAMES.validate_static_library` action and
+  enabling it with the `symbol_check` feature. The tool set in the action is
+  invoked with two arguments, the static library to check for duplicate symbols and the
+  path of a file that must be created if the check passes.
+- Having the `symbol_check` feature add archiver flags that cause the
+  action creating the static library to fail on duplicate symbols.
+
+### Arguments
+
+Attributes
+
+`name`
+
+[Name](/concepts/labels#target-names); required
+
+A unique name for this target.
+
+`deps`
+
+List of [labels](/concepts/labels); default is `[]`
+
+ The list of targets to combine into a static library, including all their transitive
+dependencies.
+
+Dependencies that do not provide any object files are not included in the static
+library, but their labels are collected in the file provided by the
+`linkdeps` output group.
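+
+As a minimal sketch (target names are hypothetical), archiving a library
+together with its transitive dependencies looks like this:
+
+```lang-starlark
+
+cc_library(
+    name = "bar",
+    srcs = ["bar.cc"],
+    hdrs = ["bar.h"],
+)
+
+cc_library(
+    name = "foo",
+    srcs = ["foo.cc"],
+    hdrs = ["foo.h"],
+    deps = [":bar"],
+)
+
+cc_static_library(
+    name = "foo_static",
+    # The archive contains the object files of :foo and, transitively, :bar.
+    deps = [":foo"],
+)
+
+```
+
+The `linkdeps` and `linkopts` output groups described above are not built by
+default; they can be requested explicitly, for example with
+`bazel build --output_groups=linkdeps //mypkg:foo_static` (package path
+hypothetical).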
+
+## cc\_test
+
+[View rule source](https://github.com/bazelbuild/bazel/blob/master/src/main/starlark/builtins_bzl/common/cc/cc_test.bzl)
+
+```
+cc_test(name, deps, srcs, data, additional_linker_inputs, args, aspect_hints, compatible_with, conlyopts, copts, cxxopts, defines, deprecation, distribs, dynamic_deps, env, env_inherit, exec_compatible_with, exec_group_compatible_with, exec_properties, features, flaky, hdrs_check, includes, licenses, link_extra_lib, linkopts, linkshared, linkstatic, local, local_defines, malloc, module_interfaces, nocopts, package_metadata, reexport_deps, restricted_to, shard_count, size, stamp, tags, target_compatible_with, testonly, timeout, toolchains, visibility, win_def_file)
+```
+
+A `cc_test()` rule compiles a test. Here, a test
+is a binary wrapper around some testing code.
+
+_By default, C++ tests are dynamically linked._
+
+To statically link a unit test, specify
+[`linkstatic=True`](/reference/be/c-cpp.html#cc_binary.linkstatic).
+It is a good idea to add a comment explaining why your test needs
+`linkstatic`, since the reason is probably not obvious.
+
+#### Implicit output targets
+
+- `name.stripped` (only built if explicitly requested): A stripped
+  version of the binary. `strip -g` is run on the binary to remove debug
+  symbols. Additional strip options can be provided on the command line using
+  `--stripopt=-foo`.
+- `name.dwp` (only built if explicitly requested): If
+  [Fission](https://gcc.gnu.org/wiki/DebugFission) is enabled: a debug
+  information package file suitable for debugging remotely deployed binaries. Else: an
+  empty file.
+
+See the [cc\_binary()](/reference/be/c-cpp.html#cc_binary_args) arguments, except that
+the `stamp` argument is set to 0 by default for tests and
+that `cc_test` has extra [attributes common to all test rules (\*\_test)](/reference/be/common-definitions#common-attributes-tests).
+
+### Arguments
+
+Attributes
+
+`name`
+
+[Name](/concepts/labels#target-names); required
+
+A unique name for this target.
+
+`deps`
+
+List of [labels](/concepts/labels); default is `[]`
+
+ The list of other libraries to be linked into the binary target.
+
+These can be `cc_library` or `objc_library`
+targets.
+
+It is also allowed to
+put linker scripts (.lds) into deps, and reference them in
+[linkopts](#cc_binary.linkopts).
+ `srcs`
+
+List of [labels](/concepts/labels); default is `[]`
+
+ The list of C and C++ files that are processed to create the library target.
+These are C/C++ source and header files, either non-generated (normal source
+code) or generated.
+
+All `.cc`, `.c`, and `.cpp` files will
+be compiled. These might be generated files: if a named file is in
+the `outs` of some other rule, this `cc_library`
+will automatically depend on that other rule.
+
+Pure assembler files (.s, .asm) are not preprocessed and are typically built using
+the assembler. Preprocessed assembly files (.S) are preprocessed and are typically built
+using the C/C++ compiler.
+
+A `.h` file will not be compiled, but will be available for
+inclusion by sources in this rule. Both `.cc` and
+`.h` files can directly include headers listed in
+these `srcs` or in the `hdrs` of this rule or any
+rule listed in the `deps` argument.
+
+All `#include`d files must be mentioned in the
+`hdrs` attribute of this or referenced `cc_library`
+rules, or they should be listed in `srcs` if they are private
+to this library. See ["Header inclusion checking"](#hdrs) for
+a more detailed description.
+
+`.so`, `.lo`, and `.a` files are
+pre-compiled files.
Your library might have these as +`srcs` if it uses third-party code for which we don't +have source code. + +If the `srcs` attribute includes the label of another rule, +`cc_library` will use the output files of that rule as source files to +compile. This is useful for one-off generation of source code (for more than occasional +use, it's better to implement a Starlark rule class and use the `cc_common` +API) + +Permitted `srcs` file types: + +- C and C++ source files: `.c`, `.cc`, `.cpp`, + `.cxx`, `.c++`, `.C` +- C and C++ header files: `.h`, `.hh`, `.hpp`, + `.hxx`, `.inc`, `.inl`, `.H` +- Assembler with C preprocessor: `.S` +- Archive: `.a`, `.pic.a` +- "Always link" library: `.lo`, `.pic.lo` +- Shared library, versioned or unversioned: `.so`, + `.so.version` +- Object file: `.o`, `.pic.o` + +... and any rules that produce those files (e.g. `cc_embed_data`). +Different extensions denote different programming languages in +accordance with gcc convention. + +`data` + +List of [labels](/concepts/labels); default is `[]` + + The list of files needed by this library at runtime. + +See general comments about `data` +at [Typical attributes defined by\ +most build rules](/reference/be/common-definitions#typical-attributes). + +If a `data` is the name of a generated file, then this +`cc_library` rule automatically depends on the generating +rule. + +If a `data` is a rule name, then this +`cc_library` rule automatically depends on that rule, +and that rule's `outs` are automatically added to +this `cc_library`'s data files. + +Your C++ code can access these data files like so: + +```lang-starlark + + const std::string path = devtools_build::GetDataDependencyFilepath( + "my/test/data/file"); + +``` + +`additional_linker_inputs` + +List of [labels](/concepts/labels); default is `[]` + + Pass these files to the C++ linker command. + +For example, compiled Windows .res files can be provided here to be embedded in +the binary target. + +`conlyopts` + +List of strings; default is `[]` + + Add these options to the C compilation command. +Subject to ["Make variable"](/reference/be/make-variables) substitution and +[Bourne shell tokenization](/reference/be/common-definitions#sh-tokenization). + `copts` + +List of strings; default is `[]` + + Add these options to the C/C++ compilation command. +Subject to ["Make variable"](/reference/be/make-variables) substitution and +[Bourne shell tokenization](/reference/be/common-definitions#sh-tokenization). + +Each string in this attribute is added in the given order to `COPTS` before +compiling the binary target. The flags take effect only for compiling this target, not +its dependencies, so be careful about header files included elsewhere. +All paths should be relative to the workspace, not to the current package. +This attribute should not be needed outside of `third_party`. + +If the package declares the [feature](/reference/be/functions.html#package.features) `no_copts_tokenization`, Bourne shell tokenization applies only to strings +that consist of a single "Make" variable. + +`cxxopts` + +List of strings; default is `[]` + + Add these options to the C++ compilation command. +Subject to ["Make variable"](/reference/be/make-variables) substitution and +[Bourne shell tokenization](/reference/be/common-definitions#sh-tokenization). + `defines` + +List of strings; default is `[]` + + List of defines to add to the compile line. 
+Subject to ["Make" variable](/reference/be/make-variables) substitution and +[Bourne shell tokenization](/reference/be/common-definitions#sh-tokenization). +Each string, which must consist of a single Bourne shell token, +is prepended with `-D` and added to the compile command line to this target, +as well as to every rule that depends on it. Be very careful, since this may have +far-reaching effects. When in doubt, add define values to +[`local_defines`](#cc_binary.local_defines) instead. + `distribs` + +List of strings; default is `[]` + +`dynamic_deps` + +List of [labels](/concepts/labels); default is `[]` + + These are other `cc_shared_library` dependencies the current target depends on. + +The `cc_shared_library` implementation will use the list of +`dynamic_deps` (transitively, i.e. also the `dynamic_deps` of the +current target's `dynamic_deps`) to decide which `cc_libraries` in +the transitive `deps` should not be linked in because they are already provided +by a different `cc_shared_library`. + + +`hdrs_check` + +String; default is `""` + + Deprecated, no-op. + `includes` + +List of strings; default is `[]` + + List of include dirs to be added to the compile line. +Subject to ["Make variable"](/reference/be/make-variables) substitution. +Each string is prepended with the package path and passed to the C++ toolchain for +expansion via the "include\_paths" CROSSTOOL feature. A toolchain running on a POSIX system +with typical feature definitions will produce +`-isystem path_to_package/include_entry`. +This should only be used for third-party libraries that +do not conform to the Google style of writing #include statements. +Unlike [COPTS](#cc_binary.copts), these flags are added for this rule +and every rule that depends on it. (Note: not the rules it depends upon!) Be +very careful, since this may have far-reaching effects. When in doubt, add +"-I" flags to [COPTS](#cc_binary.copts) instead. + +The added `include` paths will include generated files as well as +files in the source tree. + +`link_extra_lib` + +[Label](/concepts/labels); default is `"@bazel_tools//tools/cpp:link_extra_lib"` + + Control linking of extra libraries. + +By default, C++ binaries are linked against `//tools/cpp:link_extra_lib`, +which by default depends on the label flag `//tools/cpp:link_extra_libs`. +Without setting the flag, this library is empty by default. Setting the label flag +allows linking optional dependencies, such as overrides for weak symbols, interceptors +for shared library functions, or special runtime libraries (for malloc replacements, +prefer `malloc` or `--custom_malloc`). Setting this attribute to +`None` disables this behaviour. + +`linkopts` + +List of strings; default is `[]` + + Add these flags to the C++ linker command. +Subject to ["Make" variable](make-variables.html) substitution, +[Bourne shell tokenization](common-definitions.html#sh-tokenization) and +[label expansion](common-definitions.html#label-expansion). +Each string in this attribute is added to `LINKOPTS` before +linking the binary target. + +Each element of this list that does not start with `$` or `-` is +assumed to be the label of a target in `deps`. The +list of files generated by that target is appended to the linker +options. An error is reported if the label is invalid, or is +not declared in `deps`. + +`linkshared` + +Boolean; default is `False` + + Create a shared library. +To enable this attribute, include `linkshared=True` in your rule. By default +this option is off. 
+
+The presence of this flag means that linking occurs with the `-shared` flag
+to `gcc`, and the resulting shared library is suitable for loading into, for
+example, a Java program. However, for build purposes it will never be linked into the
+dependent binary, as it is assumed that shared libraries built with a
+[cc\_binary](#cc_binary) rule are only loaded manually by other programs, so
+it should not be considered a substitute for the [cc\_library](#cc_library)
+rule. For the sake of scalability we recommend avoiding this approach altogether and
+simply letting `java_library` depend on `cc_library` rules
+instead.
+
+If you specify both `linkopts=['-static']` and `linkshared=True`,
+you get a single completely self-contained unit. If you specify both
+`linkstatic=True` and `linkshared=True`, you get a single, mostly
+self-contained unit.
+
+`linkstatic`
+
+Boolean; default is `False`
+
+ For [`cc_binary`](/reference/be/c-cpp.html#cc_binary) and
+[`cc_test`](/reference/be/c-cpp.html#cc_test): link the binary in static
+mode. For `cc_library.linkstatic`: see below.
+
+By default this option is on for `cc_binary` and off for the rest.
+
+If enabled and this is a binary or test, this option tells the build tool to link in
+`.a`'s instead of `.so`'s for user libraries whenever possible.
+System libraries such as libc (but _not_ the C/C++ runtime libraries,
+see below) are still linked dynamically, as are libraries for which
+there is no static library. So the resulting executable will still be dynamically
+linked, hence only _mostly_ static.
+
+There are really three different ways to link an executable:
+
+- STATIC with fully\_static\_link feature, in which everything is linked statically;
+  e.g. `gcc -static foo.o libbar.a libbaz.a -lm`.
+
+  This mode is enabled by specifying `fully_static_link` in the
+  [`features`](/reference/be/common-definitions#features) attribute.
+- STATIC, in which all user libraries are linked statically (if a static
+  version is available), but where system libraries (excluding C/C++ runtime libraries)
+  are linked dynamically, e.g. `gcc foo.o libfoo.a libbaz.a -lm`.
+
+  This mode is enabled by specifying `linkstatic=True`.
+- DYNAMIC, in which all libraries are linked dynamically (if a dynamic version is
+  available), e.g. `gcc foo.o libfoo.so libbaz.so -lm`.
+
+  This mode is enabled by specifying `linkstatic=False`.
+
+If the `linkstatic` attribute or `fully_static_link` in
+`features` is used outside of `//third_party`,
+please include a comment near the rule to explain why.
+
+The `linkstatic` attribute has a different meaning if used on a
+[`cc_library()`](/reference/be/c-cpp.html#cc_library) rule.
+For a C++ library, `linkstatic=True` indicates that only
+static linking is allowed, so no `.so` will be produced. `linkstatic=False` does
+not prevent static libraries from being created. The attribute is meant to control the
+creation of dynamic libraries.
+
+There should be very little code built with `linkstatic=False` in production.
+If `linkstatic=False`, then the build tool will create symlinks to
+depended-upon shared libraries in the `*.runfiles` area.
+
+`local_defines`
+
+List of strings; default is `[]`
+
+ List of defines to add to the compile line.
+Subject to ["Make" variable](/reference/be/make-variables) substitution and
+[Bourne shell tokenization](/reference/be/common-definitions#sh-tokenization).
+Each string, which must consist of a single Bourne shell token,
+is prepended with `-D` and added to the compile command line for this target,
+but not to its dependents.
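+
+To illustrate the difference in propagation between `defines` and `local_defines`,
+here is a hypothetical sketch (target, file, and macro names invented):
+
+```lang-starlark
+
+cc_library(
+    name = "parser",
+    srcs = ["parser.cc"],
+    # Visible only when compiling this target.
+    local_defines = ["PARSER_INTERNAL"],
+    # Also added to the compile line of every target that depends on :parser.
+    defines = ["USE_PARSER_V2"],
+)
+
+```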
+
+`malloc`
+
+[Label](/concepts/labels); default is `"@bazel_tools//tools/cpp:malloc"`
+
+ Override the default dependency on malloc.
+
+By default, C++ binaries are linked against `//tools/cpp:malloc`,
+which is an empty library so the binary ends up using libc malloc.
+This label must refer to a `cc_library`. If compilation is for a non-C++
+rule, this option has no effect. The value of this attribute is ignored if
+`linkshared=True` is specified.
+
+`module_interfaces`
+
+List of [labels](/concepts/labels); default is `[]`
+
+ The list of files regarded as C++20 module interfaces.
+
+The C++ standard has no restriction on the module interface file extension:
+
+- Clang uses `.cppm`
+- GCC can use any source file extension
+- MSVC uses `.ixx`
+
+The use is guarded by the flag
+`--experimental_cpp_modules`.
+
+`nocopts`
+
+String; default is `""`
+
+ Remove matching options from the C++ compilation command.
+Subject to ["Make" variable](/reference/be/make-variables) substitution.
+The value of this attribute is interpreted as a regular expression.
+Any preexisting `COPTS` that match this regular expression
+(including values explicitly specified in the rule's [copts](#cc_binary.copts) attribute)
+will be removed from `COPTS` for purposes of compiling this rule.
+This attribute should not be needed or used
+outside of `third_party`. The values are not preprocessed
+in any way other than the "Make" variable substitution.
+
+`reexport_deps`
+
+List of [labels](/concepts/labels); default is `[]`
+
+`stamp`
+
+Integer; default is `0`
+
+ Whether to encode build information into the binary. Possible values:
+
+- `stamp = 1`: Always stamp the build information into the binary, even in
+  [`--nostamp`](/docs/user-manual#flag--stamp) builds. **This setting
+  should be avoided**, since it potentially kills remote caching for the
+  binary and any downstream actions that depend on it.
+
+- `stamp = 0`: Always replace build information by constant values. This
+  gives good build result caching.
+
+- `stamp = -1`: Embedding of build information is controlled by the
+  [`--[no]stamp`](/docs/user-manual#flag--stamp) flag.
+
+Stamped binaries are _not_ rebuilt unless their dependencies change.
+
+`win_def_file`
+
+[Label](/concepts/labels); default is `None`
+
+ The Windows DEF file to be passed to the linker.
+
+This attribute should only be used when Windows is the target platform.
+It can be used to [export symbols](https://msdn.microsoft.com/en-us/library/d91k01sh.aspx) during linking a shared library.
+
+## cc\_toolchain
+
+[View rule source](https://github.com/bazelbuild/bazel/blob/master/src/main/starlark/builtins_bzl/common/cc/cc_toolchain.bzl)
+
+```
+cc_toolchain(name, all_files, ar_files, as_files, aspect_hints, compatible_with, compiler_files, compiler_files_without_includes, coverage_files, deprecation, dwp_files, dynamic_runtime_lib, exec_compatible_with, exec_group_compatible_with, exec_properties, exec_transition_for_inputs, features, libc_top, licenses, linker_files, module_map, objcopy_files, output_licenses, package_metadata, restricted_to, static_runtime_lib, strip_files, supports_header_parsing, supports_param_files, tags, target_compatible_with, testonly, toolchain_config, toolchain_identifier, toolchains, visibility)
+```
+
+Represents a C++ toolchain.
+
+This rule is responsible for:
+
+- Collecting all artifacts needed for C++ actions to run. This is done by
+  attributes such as `all_files`, `compiler_files`,
+  `linker_files`, or other attributes ending with `_files`. These are
+  most commonly filegroups globbing all required files.
+
+- Generating correct command lines for C++ actions. This is done using the
+  `CcToolchainConfigInfo` provider (details below).
+
+Use the `toolchain_config` attribute to configure the C++ toolchain.
+See also this
+[page](https://bazel.build/docs/cc-toolchain-config-reference) for detailed C++ toolchain configuration and toolchain selection documentation.
+
+Use `tags = ["manual"]` in order to prevent toolchains from being built and configured
+unnecessarily when invoking `bazel build //...`.
+
+### Arguments
+
+Attributes`name`
+
+[Name](/concepts/labels#target-names); required
+
+A unique name for this target.
+
+`all_files`
+
+[Label](/concepts/labels); required
+
+ Collection of all cc\_toolchain artifacts. These artifacts will be added as inputs to all
+rules\_cc related actions (with the exception of actions that are using more precise sets of
+artifacts from attributes below). Bazel assumes that `all_files` is a superset
+of all other artifact-providing attributes (e.g. linkstamp compilation needs both compile
+and link files, so it takes `all_files`).
+
+This is what `cc_toolchain.files` contains, and this is used by all Starlark
+rules using the C++ toolchain.
+
+`ar_files`
+
+[Label](/concepts/labels); default is `None`
+
+ Collection of all cc\_toolchain artifacts required for archiving actions.
+
+`as_files`
+
+[Label](/concepts/labels); default is `None`
+
+ Collection of all cc\_toolchain artifacts required for assembly actions.
+
+`compiler_files`
+
+[Label](/concepts/labels); required
+
+ Collection of all cc\_toolchain artifacts required for compile actions.
+
+`compiler_files_without_includes`
+
+[Label](/concepts/labels); default is `None`
+
+ Collection of all cc\_toolchain artifacts required for compile actions in the case when
+input discovery is supported (currently Google-only).
+
+`coverage_files`
+
+[Label](/concepts/labels); default is `None`
+
+ Collection of all cc\_toolchain artifacts required for coverage actions. If not specified,
+all\_files are used.
+
+`dwp_files`
+
+[Label](/concepts/labels); required
+
+ Collection of all cc\_toolchain artifacts required for dwp actions.
+
+`dynamic_runtime_lib`
+
+[Label](/concepts/labels); default is `None`
+
+ Dynamic library artifact for the C++ runtime library (e.g. libstdc++.so).
+
+This will be used when the 'static\_link\_cpp\_runtimes' feature is enabled, and we're linking
+dependencies dynamically.
+
+`exec_transition_for_inputs`
+
+Boolean; default is `False`
+
+ Deprecated. No-op.
+
+`libc_top`
+
+[Label](/concepts/labels); default is `None`
+
+ A collection of artifacts for libc passed as inputs to compile/linking actions.
+
+`linker_files`
+
+[Label](/concepts/labels); required
+
+ Collection of all cc\_toolchain artifacts required for linking actions.
+
+`module_map`
+
+[Label](/concepts/labels); default is `None`
+
+ Module map artifact to be used for modular builds.
+
+`objcopy_files`
+
+[Label](/concepts/labels); required
+
+ Collection of all cc\_toolchain artifacts required for objcopy actions.
+
+`output_licenses`
+
+List of strings; default is `[]`
+
+`static_runtime_lib`
+
+[Label](/concepts/labels); default is `None`
+
+ Static library artifact for the C++ runtime library (e.g. libstdc++.a).
+
+This will be used when the 'static\_link\_cpp\_runtimes' feature is enabled, and we're linking
+dependencies statically.
+
+`strip_files`
+
+[Label](/concepts/labels); required
+
+ Collection of all cc\_toolchain artifacts required for strip actions.
+
+`supports_header_parsing`
+
+Boolean; default is `False`
+
+ Set to True when cc\_toolchain supports header parsing actions.
+
+`supports_param_files`
+
+Boolean; default is `True`
+
+ Set to True when cc\_toolchain supports using param files for linking actions.
+
+`toolchain_config`
+
+[Label](/concepts/labels); required
+
+ The label of the rule providing `cc_toolchain_config_info`.
+
+`toolchain_identifier`
+
+String; default is `""`
+
+ The identifier used to match this cc\_toolchain with the corresponding
+crosstool\_config.toolchain.
+
+Until issue [#5380](https://github.com/bazelbuild/bazel/issues/5380) is fixed
+this is the recommended way of associating `cc_toolchain` with
+`CROSSTOOL.toolchain`. It will be replaced by the `toolchain_config`
+attribute ([#5380](https://github.com/bazelbuild/bazel/issues/5380)).
+
+## fdo\_prefetch\_hints
+
+[View rule source](https://github.com/bazelbuild/bazel/blob/master/src/main/starlark/builtins_bzl/common/cc/fdo/fdo_prefetch_hints.bzl)
+
+```
+fdo_prefetch_hints(name, aspect_hints, compatible_with, deprecation, exec_compatible_with, exec_group_compatible_with, exec_properties, features, package_metadata, profile, restricted_to, tags, target_compatible_with, testonly, toolchains, visibility)
+```
+
+Represents an FDO prefetch hints profile that is in the workspace.
+Example:
+
+```lang-starlark
+
+fdo_prefetch_hints(
+    name = "hints",
+    profile = "//path/to/hints:profile.afdo",
+)
+
+```
+
+### Arguments
+
+Attributes`name`
+
+[Name](/concepts/labels#target-names); required
+
+A unique name for this target.
+
+`profile`
+
+[Label](/concepts/labels); required
+
+ Label of the hints profile. The hints file has the .afdo extension.
+The label can also point to an fdo\_absolute\_path\_profile rule.
+
+## fdo\_profile
+
+[View rule source](https://github.com/bazelbuild/bazel/blob/master/src/main/starlark/builtins_bzl/common/cc/fdo/fdo_profile.bzl)
+
+```
+fdo_profile(name, aspect_hints, compatible_with, deprecation, exec_compatible_with, exec_group_compatible_with, exec_properties, features, memprof_profile, package_metadata, profile, proto_profile, restricted_to, tags, target_compatible_with, testonly, toolchains, visibility)
+```
+
+Represents an FDO profile that is in the workspace.
+Example:
+
+```lang-starlark
+
+fdo_profile(
+    name = "fdo",
+    profile = "//path/to/fdo:profile.zip",
+)
+
+```
+
+### Arguments
+
+Attributes`name`
+
+[Name](/concepts/labels#target-names); required
+
+A unique name for this target.
+
+`memprof_profile`
+
+[Label](/concepts/labels); default is `None`
+
+ Label of the MemProf profile. The profile is expected to have
+either a .profdata extension (for an indexed/symbolized memprof
+profile), or a .zip extension for a zipfile containing a memprof.profdata
+file.
+
+`profile`
+
+[Label](/concepts/labels); required
+
+ Label of the FDO profile or a rule which generates it. The FDO file can have one of the
+following extensions: .profraw for unindexed LLVM profile, .profdata for indexed LLVM
+profile, .zip that holds an LLVM profraw profile, .afdo for AutoFDO profile, .xfdo for
+XBinary profile. The label can also point to an fdo\_absolute\_path\_profile rule.
+
+`proto_profile`
+
+[Label](/concepts/labels); default is `None`
+
+ Label of the protobuf profile.
+
+## memprof\_profile
+
+[View rule source](https://github.com/bazelbuild/bazel/blob/master/src/main/starlark/builtins_bzl/common/cc/fdo/memprof_profile.bzl)
+
+```
+memprof_profile(name, aspect_hints, compatible_with, deprecation, exec_compatible_with, exec_group_compatible_with, exec_properties, features, package_metadata, profile, restricted_to, tags, target_compatible_with, testonly, toolchains, visibility)
+```
+
+Represents a MEMPROF profile that is in the workspace.
+Example:
+
+```lang-starlark
+
+memprof_profile(
+    name = "memprof",
+    profile = "//path/to/memprof:profile.afdo",
+)
+
+```
+
+### Arguments
+
+Attributes`name`
+
+[Name](/concepts/labels#target-names); required
+
+A unique name for this target.
+
+`profile`
+
+[Label](/concepts/labels); required
+
+ Label of the MEMPROF profile. The profile is expected to have
+either a .profdata extension (for an indexed/symbolized memprof
+profile), or a .zip extension for a zipfile containing a memprof.profdata
+file.
+The label can also point to an fdo\_absolute\_path\_profile rule.
+
+## propeller\_optimize
+
+[View rule source](https://github.com/bazelbuild/bazel/blob/master/src/main/starlark/builtins_bzl/common/cc/fdo/propeller_optimize.bzl)
+
+```
+propeller_optimize(name, aspect_hints, cc_profile, compatible_with, deprecation, exec_compatible_with, exec_group_compatible_with, exec_properties, features, ld_profile, package_metadata, restricted_to, tags, target_compatible_with, testonly, toolchains, visibility)
+```
+
+Represents a Propeller optimization profile in the workspace.
+Example:
+
+```lang-starlark
+
+propeller_optimize(
+    name = "layout",
+    cc_profile = "//path:cc_profile.txt",
+    ld_profile = "//path:ld_profile.txt"
+)
+
+```
+
+### Arguments
+
+Attributes`name`
+
+[Name](/concepts/labels#target-names); required
+
+A unique name for this target.
+
+`cc_profile`
+
+[Label](/concepts/labels); required
+
+ Label of the profile passed to the various compile actions. This file has
+the .txt extension.
+
+`ld_profile`
+
+[Label](/concepts/labels); required
+
+ Label of the profile passed to the link action. This file has
+the .txt extension.
diff --git a/versions/5.4.1/reference/be/common-definitions.mdx b/versions/5.4.1/reference/be/common-definitions.mdx
new file mode 100644
index 0000000..4c9db5e
--- /dev/null
+++ b/versions/5.4.1/reference/be/common-definitions.mdx
@@ -0,0 +1,794 @@
+---
+title: 'Common definitions'
+---
+
+
+
+This section defines various terms and concepts that are common to
+many functions or build rules.
+
+## Contents
+
+- [Bourne shell tokenization](#sh-tokenization)
+- [Label Expansion](#label-expansion)
+- [Typical attributes defined by most build rules](#typical-attributes)
+- [Attributes common to all build rules](#common-attributes)
+- [Attributes common to all test rules (\*\_test)](#common-attributes-tests)
+- [Attributes common to all binary rules (\*\_binary)](#common-attributes-binaries)
+- [Configurable attributes](#configurable-attributes)
+- [Implicit output targets](#implicit-outputs)
+
+## Bourne shell tokenization
+
+Certain string attributes of some rules are split into multiple
+words according to the tokenization rules of the Bourne shell:
+unquoted spaces delimit separate words, and single- and
+double-quote characters and backslashes are used to prevent
+tokenization.
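+
+For example, a single attribute string is split into multiple options the way a shell
+would split it (an illustrative sketch; the target and file names are invented):
+
+```lang-starlark
+
+cc_library(
+    name = "example",
+    srcs = ["example.cc"],
+    # This single string is tokenized into the two options -O2 and -g.
+    copts = ["-O2 -g"],
+)
+
+```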
+ +Those attributes that are subject to this tokenization are +explicitly indicated as such in their definitions in this document. + +Attributes subject to "Make" variable expansion and Bourne shell +tokenization are typically used for passing arbitrary options to +compilers and other tools. Examples of such attributes are +`cc_library.copts` and `java_library.javacopts`. +Together these substitutions allow a +single string variable to expand into a configuration-specific list +of option words. + +## Label expansion + +Some string attributes of a very few rules are subject to label +expansion: if those strings contain a valid label as a +substring, such as `//mypkg:target`, and that label is a +declared prerequisite of the current rule, it is expanded into the +pathname of the file represented by the +[target](https://bazel.build/reference/glossary#target) `//mypkg:target`. + +Example attributes include `genrule.cmd` and +`cc_binary.linkopts`. The details may vary significantly in +each case, over such issues as: whether relative labels are +expanded; how labels that expand to multiple files are +treated, etc. Consult the rule attribute documentation for +specifics. + +## Typical attributes defined by most build rules + +This section describes attributes that are defined by many build rules, +but not all. + +AttributeDescription`data` + +List of [labels](/concepts/labels); default is `[]` + +Files needed by this rule at runtime. May list file or rule targets. Generally +allows any target. + +The default outputs and runfiles of targets in the `data` attribute +should appear in the `*.runfiles` area of any executable which is +output by or has a runtime dependency on this target. This may include data +files or binaries used when this target's +[`srcs`](#typical.srcs) are executed. See the +[data dependencies](/concepts/dependencies#data-dependencies) +section for more information about how to depend on and use data files. + +New rules should define a `data` attribute if they process +inputs which might use other inputs at runtime. Rules' implementation functions +must also [populate the target's\ +runfiles](https://bazel.build/rules/rules#runfiles) from the outputs and runfiles of any `data` attribute, +as well as runfiles from any dependency attribute which provides either +source code or runtime dependencies. + +`deps` + +List of [labels](/concepts/labels); default is `[]` + +Dependencies for this target. Generally should only list rule targets. (Though +some rules permit files to be listed directly in `deps`, this +should be avoided when possible.) + +Language-specific rules generally limit the listed targets to those with +specific [providers](https://bazel.build/extending/rules#providers). + +The precise semantics of what it means for a target to depend on another using +`deps` are specific to the kind of rule, and the rule-specific +documentation goes into more detail. For rules which process source code, +`deps` generally specifies code dependencies used by the code in +[`srcs`](#typical.srcs). + +Most often, a `deps` dependency is used to allow one module to use +symbols defined in another module written in the same programming language and +separately compiled. Cross-language dependencies are also permitted in many +cases: For example, a `java_library` target may depend on C++ code +in a `cc_library` target, by listing the latter in the +`deps` attribute. See the definition of +[dependencies](/concepts/build-ref#deps) +for more information. 
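+
+For example, these attributes are often combined in a single rule (a hypothetical
+sketch; all names are invented):
+
+```lang-starlark
+
+cc_library(
+    name = "frontend",
+    srcs = ["frontend.cc"],  # files compiled by this rule
+    deps = [":backend"],  # a code dependency on another rule target
+    data = ["testdata/config.txt"],  # available in *.runfiles at runtime
+)
+
+```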
+ +`licenses` + +List of strings; [nonconfigurable](#configurable-attributes); +default is `["none"]` + +A list of license-type strings to be used for this particular target. + +This is part of a deprecated licensing API that Bazel no longer uses. Don't +use this. + +`srcs` + +List of [labels](/concepts/labels); default is `[]` + +Files processed or included by this rule. Generally lists files directly, but +may list rule targets (like `filegroup` or `genrule`) to +include their default outputs. + +Language-specific rules often require that the listed files have particular +file extensions. + +## Attributes common to all build rules + +This section describes attributes that are implicitly added to all build +rules. + +AttributeDescription`aspect_hints` + +List of [labels](/concepts/labels); default is `[]` + +A list of arbitrary labels which is exposed to [aspects](/extending/aspects) (in +particular - aspects invoked by this rule's reverse dependencies), but isn't exposed to this rule's +own implementation. Consult documentation for language-specific rule sets for details about what +effect a particular aspect hint would have. + +You could think of an aspect hint as a richer alternative to a [tag](#common.tags): +while a tag conveys only a boolean state (the tag is either present or absent in the +`tags` list), an aspect hint can convey arbitrary structured information in its +[providers](/extending/rules#providers). + +In practice, aspect hints are used for interoperability between different language-specific +rule sets. For example, imagine you have a `mylang_binary` target which needs to depend +on an `otherlang_library` target. The MyLang-specific logic needs some additional +information about the OtherLang target in order to use it, but `otherlang_library` +doesn't provide this information because it knows nothing about MyLang. One solution might be for +the MyLang rule set to define a `mylang_hint` rule which can be used to encode that +additional information; the user can add the hint to their `otherlang_library`'s +`aspect_hints`, and `mylang_binary` can use an aspect to collect the +additional information from a MyLang-specific provider in the `mylang_hint`. + +For a concrete example, see +[`swift_interop_hint`](https://github.com/bazelbuild/rules_swift/blob/master/doc/rules.md#swift_interop_hint) +and [`swift_overlay`](https://github.com/bazelbuild/rules_swift/blob/master/doc/rules.md#swift_overlay) +in `rules_swift`. + +Best practices: + +- Targets listed in `aspect_hints` should be lightweight and minimal. +- Language-specific logic should consider only aspect hints having providers relevant to that + language, and should ignore any other aspect hints. + +`compatible_with` + +List of [labels](/concepts/labels); +[nonconfigurable](#configurable-attributes); default is `[]` + +The list of environments this target can be built for, in addition to +default-supported environments. + +This is part of Bazel's constraint system, which lets users declare which +targets can and cannot depend on each other. For example, externally deployable +binaries shouldn't depend on libraries with company-secret code. See +[ConstraintSemantics](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/analysis/constraints/ConstraintSemantics.java#L46) for details. + +`deprecation` + +String; [nonconfigurable](#configurable-attributes); default is `None` + +An explanatory warning message associated with this target. 
+Typically this is used to notify users that a target has become obsolete,
+has been superseded by another rule, is private to a package, or is
+perhaps considered harmful for some reason. It is a good idea to include
+some reference (like a webpage, a bug number or example migration CLs) so
+that one can easily find out what changes are required to avoid the message.
+If there is a new target that can be used as a drop-in replacement, it is a
+good idea to just migrate all users of the old target.
+
+This attribute has no effect on the way things are built, but it
+may affect a build tool's diagnostic output. The build tool issues a
+warning when a rule with a `deprecation` attribute is
+depended upon by a target in another package.
+
+Intra-package dependencies are exempt from this warning, so that,
+for example, building the tests of a deprecated rule does not
+encounter a warning.
+
+If a deprecated target depends on another deprecated target, no warning
+message is issued.
+
+Once people have stopped using it, the target can be removed.
+
+`exec_compatible_with`
+
+List of [labels](/concepts/labels);
+[nonconfigurable](#configurable-attributes); default is `[]`
+
+A list of
+`constraint_values`
+that must be present in the execution platform of this target's default exec
+group. This is in addition to any constraints already set by the rule type.
+Constraints are used to restrict the list of available execution platforms.
+
+For more details, see the description of
+[toolchain resolution](/docs/toolchains#toolchain-resolution) and
+[exec groups](/extending/exec-groups).
+
+`exec_group_compatible_with`
+
+Dictionary of strings to lists of [labels](/concepts/labels);
+[nonconfigurable](#configurable-attributes); default is `{}`
+
+A dictionary of exec group names to lists of
+`constraint_values`
+that must be present in the execution platform for the given exec group. This
+is in addition to any constraints already set on the exec group's definition.
+Constraints are used to restrict the list of available execution platforms.
+
+For more details, see the description of
+[toolchain resolution](/docs/toolchains#toolchain-resolution) and
+[exec groups](/extending/exec-groups).
+
+`exec_properties`
+
+Dictionary of strings; default is `{}`
+
+A dictionary of strings that will be added to the `exec_properties` of a platform selected for this target. See `exec_properties` of the [platform](platforms-and-toolchains.html#platform) rule.
+
+If a key is present in both the platform and target-level properties, the value will be taken from the target.
+
+Keys can be prefixed with the name of an execution group followed by a `.` to apply them only to that particular exec group.
+
+`features`
+
+List of _feature_ strings; default is `[]`
+
+A feature is a string tag that can be enabled or disabled on a target. The
+meaning of a feature depends on the rule itself.
+
+This `features` attribute is combined with the [package](/reference/be/functions.html#package)-level `features` attribute. For example, if
+the features \["a", "b"\] are enabled on the package level, and a target's
+`features` attribute contains \["-a", "c"\], the features enabled for the
+rule will be "b" and "c".
+[See example](https://github.com/bazelbuild/examples/blob/main/rules/features/BUILD).
+
+`package_metadata`
+
+List of [labels](/concepts/labels);
+[nonconfigurable](#configurable-attributes); default is the package's
+`default_package_metadata`
+
+A list of labels carrying metadata associated with this target.
+Typically, the labels are simple rules that return a provider of
+constant values. Rules and aspects may use these labels to perform some
+additional analysis on the build graph.
+
+The canonical use case is that of
+[rules\_license](https://github.com/bazelbuild/rules_license).
+For that use case, `package_metadata` and
+`default_package_metadata` are used to attach information
+about a package's license or version to targets. An aspect applied
+to a top-level binary can be used to gather those and produce
+compliance reports.
+
+`restricted_to`
+
+List of [labels](/concepts/labels);
+[nonconfigurable](#configurable-attributes); default is `[]`
+
+The list of environments this target can be built for, _instead_ of
+default-supported environments.
+
+This is part of Bazel's constraint system. See
+`compatible_with`
+for details.
+
+`tags`
+
+List of strings; [nonconfigurable](#configurable-attributes);
+default is `[]`
+
+_Tags_ can be used on any rule. _Tags_ on test and
+`test_suite` rules are useful for categorizing the tests.
+_Tags_ on non-test targets are used to control sandboxed execution of
+`genrule`s and
+[Starlark](/rules/concepts)
+actions, and for parsing by humans and/or external tools.
+
+Bazel modifies the behavior of its sandboxing code if it finds the following
+keywords in the `tags` attribute of any test or `genrule`
+target, or the keys of `execution_requirements` for any Starlark
+action.
+
+- `no-sandbox` keyword results in the action or test never being
+  sandboxed; it can still be cached or run remotely; use `no-cache`
+  or `no-remote` to prevent either or both of those.
+
+- `no-cache` keyword results in the action or test never being
+  cached (locally or remotely). Note: for the purposes of this tag, the disk cache
+  is considered a local cache, whereas the HTTP and gRPC caches are considered
+  remote. Other caches, such as Skyframe or the persistent action cache, are not
+  affected.
+
+- `no-remote-cache` keyword results in the action or test never being
+  cached remotely (but it may be cached locally; it may also be executed remotely).
+  Note: for the purposes of this tag, the disk cache is considered a local cache,
+  whereas the HTTP and gRPC caches are considered remote. Other caches, such as
+  Skyframe or the persistent action cache, are not affected.
+  If a combination of local disk cache and remote cache is used (combined cache),
+  it's treated as a remote cache and disabled entirely unless `--incompatible_remote_results_ignore_disk`
+  is set, in which case the local components will be used.
+
+- `no-remote-exec` keyword results in the action or test never being
+  executed remotely (but it may be cached remotely).
+
+- `no-remote` keyword prevents the action or test from being executed remotely or
+  cached remotely. This is equivalent to using both
+  `no-remote-cache` and `no-remote-exec`.
+
+- `no-remote-cache-upload` keyword disables the upload part of remote caching of a spawn.
+  It does not disable remote execution.
+
+- `local` keyword precludes the action or test from being remotely cached,
+  remotely executed, or run inside the sandbox.
+  For genrules and tests, marking the rule with the `local = True`
+  attribute has the same effect.
+
+- `requires-network` keyword allows access to the external
+  network from inside the sandbox. This tag only has an effect if sandboxing
+  is enabled.
+
+- `block-network` keyword blocks access to the external
+  network from inside the sandbox. In this case, only communication
+  with localhost is allowed.
This tag only has an effect if sandboxing is
+  enabled.
+
+- `requires-fakeroot` runs the test or action as uid and gid 0 (i.e., the root
+  user). This is only supported on Linux. This tag takes precedence over the
+  `--sandbox_fake_username` command-line option.
+
+_Tags_ on tests are generally used to annotate a test's role in your
+debug and release process. Typically, tags are most useful for C++ and Python
+tests, which lack any runtime annotation ability. The use of tags and size
+elements gives flexibility in assembling suites of tests based around codebase
+check-in policy.
+
+Bazel modifies test running behavior if it finds the following keywords in the
+`tags` attribute of the test rule:
+
+- `exclusive` will force the test to be run in the
+  "exclusive" mode, ensuring that no other tests are running at the
+  same time. Such tests will be executed in serial fashion after all build
+  activity and non-exclusive tests have been completed. Remote execution is
+  disabled for such tests because Bazel doesn't have control over what's
+  running on a remote machine.
+
+- `exclusive-if-local` will force the test to be run in the
+  "exclusive" mode if it is executed locally, but will run the test in parallel if it's
+  executed remotely.
+
+- `manual` keyword will exclude the target from expansion of target pattern wildcards
+  (`...`, `:*`, `:all`, etc.) and `test_suite` rules
+  which do not list the test explicitly when computing the set of top-level targets to build/run
+  for the `build`, `test`, and `coverage` commands. It does not
+  affect target wildcard or test suite expansion in other contexts, including the
+  `query` command. Note that `manual` does not imply that a target should
+  not be built/run automatically by continuous build/test systems. For example, it may be
+  desirable to exclude a target from `bazel test ...` because it requires specific
+  Bazel flags, but still have it included in properly-configured presubmit or continuous test
+  runs.
+
+- `external` keyword will force the test to be unconditionally
+  executed (regardless of `--cache_test_results`
+  value).
+
+See
+[Tag Conventions](/reference/test-encyclopedia#tag-conventions)
+in the Test Encyclopedia for more conventions on tags attached to test targets.
+
+`target_compatible_with`
+
+List of [labels](/concepts/labels); default is `[]`
+
+A list of
+`constraint_value`s
+that must be present in the target platform for this target to be considered
+_compatible_. This is in addition to any constraints already set by the
+rule type. If the target platform does not satisfy all listed constraints then
+the target is considered _incompatible_. Incompatible targets are
+skipped for building and testing when the target pattern is expanded
+(e.g. `//...`, `:all`). When explicitly specified on the
+command line, incompatible targets cause Bazel to print an error and cause a
+build or test failure.
+
+Targets that transitively depend on incompatible targets are themselves
+considered incompatible. They are also skipped for building and testing.
+
+An empty list (which is the default) signifies that the target is compatible
+with all platforms.
+
+All rules other than [Workspace Rules](workspace.html) support this
+attribute.
+For some rules this attribute has no effect. For example, specifying
+`target_compatible_with` for a
+`cc_toolchain` is not useful.
+
+See the
+[Platforms](/docs/platforms#skipping-incompatible-targets)
+page for more information about incompatible target skipping.
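+
+For example, a target that can only be built for Linux could be declared like this (an
+illustrative sketch; the target name is invented, while `@platforms//os:linux` is the
+standard constraint label):
+
+```lang-starlark
+
+cc_library(
+    name = "epoll_util",
+    srcs = ["epoll_util.cc"],
+    # Skipped (or reported incompatible) on non-Linux target platforms.
+    target_compatible_with = ["@platforms//os:linux"],
+)
+
+```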
+
+`testonly`
+
+Boolean; [nonconfigurable](#configurable-attributes); default is `False`
+except for test and test suite targets
+
+If `True`, only testonly targets (such as tests) can depend on this target.
+
+Equivalently, a rule that is not `testonly` is not allowed to
+depend on any rule that is `testonly`.
+
+Tests (`*_test` rules)
+and test suites ([test\_suite](/reference/be/general.html#test_suite) rules)
+are `testonly` by default.
+
+This attribute is intended to mean that the target should not be
+contained in binaries that are released to production.
+
+Because testonly is enforced at build time, not run time, and propagates
+virally through the dependency tree, it should be applied judiciously. For
+example, stubs and fakes that
+are useful for unit tests may also be useful for integration tests
+involving the same binaries that will be released to production, and
+therefore should probably not be marked testonly. Conversely, rules that
+are dangerous to even link in, perhaps because they unconditionally
+override normal behavior, should definitely be marked testonly.
+
+`toolchains`
+
+List of [labels](/concepts/labels);
+[nonconfigurable](#configurable-attributes); default is `[]`
+
+The set of targets whose [Make variables](/reference/be/make-variables) this target is
+allowed to access. These targets are either instances of rules that provide
+`TemplateVariableInfo` or special targets for toolchain types built into Bazel. These
+include:
+
+- `@bazel_tools//tools/cpp:toolchain_type`
+- `@rules_java//toolchains:current_java_runtime`
+
+Note that this is distinct from the concept of
+[toolchain resolution](/docs/toolchains#toolchain-resolution)
+that is used by rule implementations for platform-dependent configuration. You cannot use this
+attribute to determine which specific `cc_toolchain` or `java_toolchain` a
+target will use.
+
+`visibility`
+
+List of [labels](/concepts/labels);
+[nonconfigurable](#configurable-attributes);
+default varies
+
+The `visibility` attribute controls whether the target can be
+depended on by targets in other locations. See the documentation for
+[visibility](/concepts/visibility).
+
+For targets declared directly in a BUILD file or in legacy macros called from
+a BUILD file, the default value is the package's
+`default_visibility`
+if specified, or else `["//visibility:private"]`. For targets
+declared in one or more symbolic macros, the default value is always just
+`["//visibility:private"]` (which makes it usable only within the
+package containing the macro's code).
+
+## Attributes common to all test rules (\*\_test)
+
+This section describes attributes that are common to all test rules.
+
+AttributeDescription`args`
+
+List of strings; subject to
+[$(location)](/reference/be/make-variables#predefined_label_variables) and
+["Make variable"](/reference/be/make-variables) substitution, and
+[Bourne shell tokenization](#sh-tokenization); default is `[]`
+
+Command line arguments that Bazel passes to the target when it is
+executed with `bazel test`.
+
+These arguments are passed before any `--test_arg` values
+specified on the `bazel test` command line.
+
+`env`
+
+Dictionary of strings; values are subject to
+[$(location)](/reference/be/make-variables#predefined_label_variables) and
+["Make variable"](/reference/be/make-variables) substitution; default is `{}`
+
+Specifies additional environment variables to set when the test is executed by
+`bazel test`.
+
+This attribute only applies to native rules, like `cc_test`,
+`py_test`, and `sh_test`. It does not apply to
+Starlark-defined test rules. For your own Starlark rules, you can add an "env"
+attribute and use it to populate a
+[RunEnvironmentInfo](/rules/lib/providers/RunEnvironmentInfo.html) provider
+([TestEnvironment](/rules/lib/toplevel/testing#TestEnvironment) is a deprecated alias).
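+
+For example, a hypothetical test that needs an extra environment variable (all names
+here are invented):
+
+```lang-starlark
+
+cc_test(
+    name = "config_test",
+    srcs = ["config_test.cc"],
+    # Set when the test is run via `bazel test`.
+    env = {"LOG_LEVEL": "debug"},
+)
+
+```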
+
+`env_inherit`
+
+List of strings; default is `[]`
+
+Specifies additional environment variables to inherit from the
+external environment when the test is executed by `bazel test`.
+
+This attribute only applies to native rules, like `cc_test`, `py_test`,
+and `sh_test`. It does not apply to Starlark-defined test rules.
+
+`size`
+
+String `"enormous"`, `"large"`, `"medium"`, or
+`"small"`; [nonconfigurable](#configurable-attributes);
+default is `"medium"`
+
+Specifies a test target's "heaviness": how much time/resources it needs to run.
+
+Unit tests are considered "small", integration tests "medium", and end-to-end tests "large" or
+"enormous". Bazel uses the size to determine a default timeout, which can be overridden using the
+`timeout` attribute. The timeout is for all tests in the BUILD target, not for each
+individual test. When the test is run locally, the `size` is additionally used for
+scheduling purposes: Bazel tries to respect `--local_{ram,cpu}_resources` and not
+overwhelm the local machine by running lots of heavy tests at the same time.
+
+Test sizes correspond to the following default timeouts and assumed peak local resource
+usages:
+
+| Size | RAM (in MB) | CPU (in CPU cores) | Default timeout |
+| --- | --- | --- | --- |
+| small | 20 | 1 | short (1 minute) |
+| medium | 100 | 1 | moderate (5 minutes) |
+| large | 300 | 1 | long (15 minutes) |
+| enormous | 800 | 1 | eternal (60 minutes) |
+
+The environment variable
+`TEST_SIZE` will be set to
+the value of this attribute when spawning the test.
+
+`timeout`
+
+String `"short"`, `"moderate"`, `"long"`, or
+`"eternal"`; [nonconfigurable](#configurable-attributes); default is derived
+from the test's `size` attribute
+
+How long the test is expected to run before returning.
+
+While a test's size attribute controls resource estimation, a test's
+timeout may be set independently. If not explicitly specified, the
+timeout is based on the [test's size](#test.size). The test
+timeout can be overridden with the `--test_timeout` flag, e.g. for
+running under certain conditions which are known to be slow. Test timeout values
+correspond to the following time periods:
+
+| Timeout Value | Time Period |
+| --- | --- |
+| short | 1 minute |
+| moderate | 5 minutes |
+| long | 15 minutes |
+| eternal | 60 minutes |
+
+For times other than the above, the test timeout can be overridden with the
+`--test_timeout` bazel flag, e.g. for manually running under
+conditions which are known to be slow. The `--test_timeout` values
+are in seconds. For example `--test_timeout=120` will set the test
+timeout to two minutes.
+
+The environment variable
+`TEST_TIMEOUT` will be set
+to the test timeout (in seconds) when spawning the test.
+
+`flaky`
+
+Boolean; [nonconfigurable](#configurable-attributes);
+default is `False`
+
+Marks test as flaky.
+
+If set, executes the test up to three times, marking it as failed only if it
+fails each time. By default, this attribute is set to False and the test is
+executed only once. Note that use of this attribute is generally discouraged;
+tests should pass reliably when their assertions are upheld.
+
+`shard_count`
+
+Non-negative integer less than or equal to 50; default is `-1`
+
+Specifies the number of parallel shards
+to use to run the test.
+
+If set, this value will override any heuristics used to determine the number of
+parallel shards with which to run the test. Note that for some test
+rules, this parameter may be required to enable sharding
+in the first place. Also see `--test_sharding_strategy`.
+
+If test sharding is enabled, the environment variable
+`TEST_TOTAL_SHARDS` will be set to this value when spawning the test.
+
+Sharding requires the test runner to support the test sharding protocol.
+If it does not, then it will most likely run every test in every shard, which
+is not what you want.
+
+See
+[Test Sharding](/reference/test-encyclopedia#test-sharding)
+in the Test Encyclopedia for details on sharding.
+
+`local`
+
+Boolean; [nonconfigurable](#configurable-attributes);
+default is `False`
+
+Forces the test to be run locally, without sandboxing.
+
+Setting this to True is equivalent to providing "local" as a tag
+(`tags=["local"]`).
+
+## Attributes common to all binary rules (\*\_binary)
+
+This section describes attributes that are common to all binary rules.
+
+AttributeDescription`args`
+
+List of strings; subject to
+[$(location)](/reference/be/make-variables#predefined_label_variables) and
+["Make variable"](/reference/be/make-variables) substitution, and
+[Bourne shell tokenization](#sh-tokenization);
+[nonconfigurable](#configurable-attributes);
+default is `[]`
+
+Command line arguments that Bazel will pass to the target when it is executed
+either by the `run` command or as a test. These arguments are
+passed before the ones that are specified on the `bazel run` or
+`bazel test` command line.
+
+_NOTE: The arguments are not passed when you run the target_
+_outside of Bazel (for example, by manually executing the binary in_
+_`bazel-bin/`)._
+
+`env`
+
+Dictionary of strings; values are subject to
+[$(location)](/reference/be/make-variables#predefined_label_variables) and
+["Make variable"](/reference/be/make-variables) substitution; default is `{}`
+
+Specifies additional environment variables to set when the target is
+executed by `bazel run`.
+
+This attribute only applies to native rules, like `cc_binary`, `py_binary`,
+and `sh_binary`. It does not apply to Starlark-defined executable rules. For your own
+Starlark rules, you can add an "env" attribute and use it to populate a
+[RunEnvironmentInfo](/rules/lib/providers/RunEnvironmentInfo.html) provider.
+
+_NOTE: The environment variables are not set when you run the target_
+_outside of Bazel (for example, by manually executing the binary in_
+_`bazel-bin/`)._
+
+`output_licenses`
+
+List of strings; default is `[]`
+
+The licenses of the output files that this binary generates.
+
+This is part of a deprecated licensing API that Bazel no longer uses. Don't
+use this.
+
+## Configurable attributes
+
+Most attributes are "configurable", meaning that their values may change when
+the target is built in different ways. Specifically, configurable attributes
+may vary based on the flags passed to the Bazel command line, or what
+downstream dependency is requesting the target. This can be used, for
+instance, to customize the target for multiple platforms or compilation modes.
+
+The following example declares different sources for different target
+architectures. Running `bazel build :multiplatform_lib --cpu x86`
+will build the target using `x86_impl.cc`, while substituting
+`--cpu arm` will instead cause it to use `arm_impl.cc`.
+
+```
+cc_library(
+    name = "multiplatform_lib",
+    srcs = select({
+        ":x86_mode": ["x86_impl.cc"],
+        ":arm_mode": ["arm_impl.cc"]
+    })
+)
+config_setting(
+    name = "x86_mode",
+    values = { "cpu": "x86" }
+)
+config_setting(
+    name = "arm_mode",
+    values = { "cpu": "arm" }
+)
+
+```
+
+The [`select()`](/reference/be/functions.html#select) function
+chooses among different alternative values for a configurable attribute based
+on which [`config_setting`](/reference/be/general.html#config_setting)
+or [`constraint_value`](/reference/be/platforms-and-toolchains.html#constraint_value)
+criteria the target's configuration satisfies.
+
+Bazel evaluates configurable attributes after processing macros and before
+processing rules (technically, between the
+[loading and analysis phases](https://bazel.build/rules/concepts#evaluation-model)).
+Any processing before `select()` evaluation doesn't know which
+branch the `select()` chooses. Macros, for example, can't change
+their behavior based on the chosen branch, and `bazel query` can
+only make conservative guesses about a target's configurable dependencies. See
+[this FAQ](https://bazel.build/docs/configurable-attributes#faq)
+for more on using `select()` with rules and macros.
+
+Attributes marked `nonconfigurable` in their documentation cannot
+use this feature. Usually an attribute is nonconfigurable because Bazel
+internally needs to know its value before it can determine how to resolve a
+`select()`.
+
+See [Configurable Build Attributes](https://bazel.build/docs/configurable-attributes) for a detailed overview.
+
+## Implicit output targets
+
+_Implicit outputs in C++ are deprecated. Please refrain from using them_
+_in other languages where possible. We don't have a deprecation path yet,_
+_but they will eventually be deprecated too._
+
+When you define a build rule in a BUILD file, you are explicitly
+declaring a new, named rule target in a package. Many build rule
+functions also _implicitly_ entail one or more output file
+targets, whose contents and meaning are rule-specific.
+
+For example, when you explicitly declare a
+`java_binary(name='foo', ...)` rule, you are also
+_implicitly_ declaring an output file
+target `foo_deploy.jar` as a member of the same package.
+(This particular target is a self-contained Java archive suitable
+for deployment.)
+
+Implicit output targets are first-class members of the global
+target graph. Just like other targets, they are built on demand,
+either when specified in the top-level build command, or when they
+are necessary prerequisites for other build targets. They can be
+referenced as dependencies in BUILD files, and can be observed in
+the output of analysis tools such as `bazel query`.
+
+For each kind of build rule, the rule's documentation contains a
+special section detailing the names and contents of any implicit
+outputs entailed by a declaration of that kind of rule.
+
+There is an important but somewhat subtle distinction between the
+two namespaces used by the build system:
+[labels](/concepts/labels) identify _targets_,
+which may be rules or files, and file targets may be divided into
+source (or input) file targets and derived (or output) file
+targets. These are the things you can mention in BUILD files,
+build from the command-line, or examine using `bazel query`;
+this is the _target namespace_. Each file target corresponds
+to one actual file on disk (the "file system namespace"); each rule
+target may correspond to zero, one or more actual files on disk.
+There may be files on disk that have no corresponding target; for
+example, `.o` object files produced during C++ compilation
+cannot be referenced from within BUILD files or from the command line.
+In this way, the build tool may hide certain implementation details of
+how it does its job. This is explained more fully in
+the [BUILD Concept Reference](/concepts/build-ref).
diff --git a/versions/5.4.1/reference/be/extra-actions.mdx b/versions/5.4.1/reference/be/extra-actions.mdx
new file mode 100644
index 0000000..0c16bd9
--- /dev/null
+++ b/versions/5.4.1/reference/be/extra-actions.mdx
@@ -0,0 +1,210 @@
+---
+title: 'Extra Actions Rules'
+---
+
+
+
+## Rules
+
+- [action\_listener](#action_listener)
+- [extra\_action](#extra_action)
+
+## action\_listener
+
+[View rule source](https://github.com/bazelbuild/bazel/blob/master/src/main/java/com/google/devtools/build/lib/rules/extra/ActionListenerRule.java)
+
+```
+action_listener(name, aspect_hints, compatible_with, deprecation, exec_compatible_with, exec_group_compatible_with, exec_properties, extra_actions, features, licenses, mnemonics, package_metadata, restricted_to, tags, target_compatible_with, testonly, visibility)
+```
+
+**WARNING:** Extra actions are deprecated. Use
+[aspects](https://bazel.build/rules/aspects)
+instead.
+
+An `action_listener` rule doesn't produce any output itself.
+Instead, it allows tool developers to insert
+[`extra_action`](/reference/be/extra-actions.html#extra_action)s into the build system,
+by providing a mapping from action to [`extra_action`](/reference/be/extra-actions.html#extra_action).
+
+This rule's arguments map action mnemonics to
+[`extra_action`](/reference/be/extra-actions.html#extra_action) rules.
+
+By specifying the option [`--experimental_action_listener=