# The XLA commit is determined by third_party/xla/workspace.bzl.
load("//third_party/xla:workspace.bzl", jax_xla_workspace = "repo")
jax_xla_workspace()
# Initialize hermetic Python
load("@xla//third_party/py:python_init_rules.bzl", "python_init_rules")
python_init_rules()
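
# Hermetic Python repositories: a requirements lock file per supported Python
# version, plus settings for picking up locally built jaxlib / jax-cuda wheels
# from the ../dist folder.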
load("@xla//third_party/py:python_init_repositories.bzl", "python_init_repositories")
python_init_repositories(
    requirements = {
        "3.10": "//build:requirements_lock_3_10.txt",
        "3.11": "//build:requirements_lock_3_11.txt",
        "3.12": "//build:requirements_lock_3_12.txt",
        "3.13": "//build:requirements_lock_3_13.txt",
        "3.13-ft": "//build:requirements_lock_3_13_ft.txt",
    },
    local_wheel_inclusion_list = [
        "jaxlib*",
        "jax_cuda*",
        "jax-cuda*",
    ],
    local_wheel_workspaces = ["//jaxlib:jax.bzl"],
    local_wheel_dist_folder = "../dist",
    default_python_version = "system",
)
load("@xla//third_party/py:python_init_toolchains.bzl", "python_init_toolchains")
python_init_toolchains()
load("@xla//third_party/py:python_init_pip.bzl", "python_init_pip")
python_init_pip()
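
# Install the pip dependencies pinned in the lock files above.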
load("@pypi//:requirements.bzl", "install_deps")
install_deps()
# Optional, to facilitate testing against newest versions of Python
load("@xla//third_party/py:python_repo.bzl", "custom_python_interpreter")
custom_python_interpreter(
    name = "python_dev",
    urls = ["https://www.python.org/ftp/python/{version}/Python-{version_variant}.tgz"],
    strip_prefix = "Python-{version_variant}",
    version = "3.13.0",
    version_variant = "3.13.0rc2",
)
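
# Initialize XLA and its transitive dependencies (workspace4 through workspace0).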
load("@xla//:workspace4.bzl", "xla_workspace4")
xla_workspace4()
load("@xla//:workspace3.bzl", "xla_workspace3")
xla_workspace3()
load("@xla//:workspace2.bzl", "xla_workspace2")
xla_workspace2()
load("@xla//:workspace1.bzl", "xla_workspace1")
xla_workspace1()
load("@xla//:workspace0.bzl", "xla_workspace0")
xla_workspace0()
load("//third_party/flatbuffers:workspace.bzl", flatbuffers = "repo")
flatbuffers()
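
# Repository that exposes the JAX wheel version, read from //jax:version.py.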
load("//jaxlib:jax_python_wheel.bzl", "jax_python_wheel_repository")
jax_python_wheel_repository(
    name = "jax_wheel",
    version_key = "_version",
    version_source = "//jax:version.py",
)
load(
    "@xla//third_party/py:python_wheel.bzl",
    "python_wheel_version_suffix_repository",
)
python_wheel_version_suffix_repository(
    name = "jax_wheel_version_suffix",
)
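
# Hermetic CUDA, CUDNN and NCCL. With --config=cuda, Bazel downloads the CUDA,
# CUDNN and NCCL redistributions into its cache and uses them during the build
# and test phases, so GPU wheels can be built on machines without GPUs and
# Bazel GPU tests can run on machines with only GPUs and the NVIDIA driver
# installed.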
load(
    "@xla//third_party/gpus/cuda/hermetic:cuda_json_init_repository.bzl",
    "cuda_json_init_repository",
)
cuda_json_init_repository()
load(
    "@cuda_redist_json//:distributions.bzl",
    "CUDA_REDISTRIBUTIONS",
    "CUDNN_REDISTRIBUTIONS",
)
load(
    "@xla//third_party/gpus/cuda/hermetic:cuda_redist_init_repositories.bzl",
    "cuda_redist_init_repositories",
    "cudnn_redist_init_repository",
)
cuda_redist_init_repositories(
    cuda_redistributions = CUDA_REDISTRIBUTIONS,
)
cudnn_redist_init_repository(
    cudnn_redistributions = CUDNN_REDISTRIBUTIONS,
)
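
# Set up the @local_config_cuda configuration repository.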
load(
    "@xla//third_party/gpus/cuda/hermetic:cuda_configure.bzl",
    "cuda_configure",
)
cuda_configure(name = "local_config_cuda")
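
# Hermetic NCCL redistribution and the @local_config_nccl configuration repository.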
load(
    "@xla//third_party/nccl/hermetic:nccl_redist_init_repository.bzl",
    "nccl_redist_init_repository",
)
nccl_redist_init_repository()
load(
    "@xla//third_party/nccl/hermetic:nccl_configure.bzl",
    "nccl_configure",
)
nccl_configure(name = "local_config_nccl")