Remove references to Python 3.8.

Remove the old build scripts and Dockerfile, since they are unused and broken.

PiperOrigin-RevId: 542870354
Peter Hawkins 2023-06-23 08:48:21 -07:00 committed by jax authors
parent bbc6f30693
commit bfa113ba60
11 changed files with 2 additions and 254 deletions


@@ -194,8 +194,6 @@ build:rbe_linux_cuda11.8_nvcc_base --platforms="@ubuntu20.04-gcc9_manylinux2014-
build:rbe_linux_cuda11.8_nvcc_base --repo_env=TF_CUDA_CONFIG_REPO="@ubuntu20.04-gcc9_manylinux2014-cuda11.8-cudnn8.6-tensorrt8.4_config_cuda"
build:rbe_linux_cuda11.8_nvcc_base --repo_env=TF_TENSORRT_CONFIG_REPO="@ubuntu20.04-gcc9_manylinux2014-cuda11.8-cudnn8.6-tensorrt8.4_config_tensorrt"
build:rbe_linux_cuda11.8_nvcc_base --repo_env=TF_NCCL_CONFIG_REPO="@ubuntu20.04-gcc9_manylinux2014-cuda11.8-cudnn8.6-tensorrt8.4_config_nccl"
build:rbe_linux_cuda11.8_nvcc_py3.8 --config=rbe_linux_cuda11.8_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu20.04-gcc9_manylinux2014-cuda11.8-cudnn8.6-tensorrt8.4_config_python3.8"
build:rbe_linux_cuda11.8_nvcc_py3.8 --python_path="/usr/local/bin/python3.8"
build:rbe_linux_cuda11.8_nvcc_py3.9 --config=rbe_linux_cuda11.8_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu20.04-gcc9_manylinux2014-cuda11.8-cudnn8.6-tensorrt8.4_config_python3.9"
build:rbe_linux_cuda11.8_nvcc_py3.9 --python_path="/usr/local/bin/python3.9"
build:rbe_linux_cuda11.8_nvcc_py3.10 --config=rbe_linux_cuda11.8_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu20.04-gcc9_manylinux2014-cuda11.8-cudnn8.6-tensorrt8.4_config_python3.10"
@@ -217,8 +215,6 @@ build:rbe_linux_cuda12.0.1_nvcc_base --host_platform="@ubuntu20.04-gcc9_manylinu
build:rbe_linux_cuda12.0.1_nvcc_base --platforms="@ubuntu20.04-gcc9_manylinux2014-cuda12.0.1-cudnn8.8_config_platform//:platform"
build:rbe_linux_cuda12.0.1_nvcc_base --repo_env=TF_CUDA_CONFIG_REPO="@ubuntu20.04-gcc9_manylinux2014-cuda12.0.1-cudnn8.8_config_cuda"
build:rbe_linux_cuda12.0.1_nvcc_base --repo_env=TF_NCCL_CONFIG_REPO="@ubuntu20.04-gcc9_manylinux2014-cuda12.0.1-cudnn8.8_config_nccl"
build:rbe_linux_cuda12.0.1_nvcc_py3.8 --config=rbe_linux_cuda12.0.1_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu20.04-gcc9_manylinux2014-cuda12.0.1-cudnn8.8_config_python3.8"
build:rbe_linux_cuda12.0.1_nvcc_py3.8 --python_path="/usr/local/bin/python3.8"
build:rbe_linux_cuda12.0.1_nvcc_py3.9 --config=rbe_linux_cuda12.0.1_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu20.04-gcc9_manylinux2014-cuda12.0.1-cudnn8.8_config_python3.9"
build:rbe_linux_cuda12.0.1_nvcc_py3.9 --python_path="/usr/local/bin/python3.9"
build:rbe_linux_cuda12.0.1_nvcc_py3.10 --config=rbe_linux_cuda12.0.1_nvcc_base --repo_env=TF_PYTHON_CONFIG_REPO="@ubuntu20.04-gcc9_manylinux2014-cuda12.0.1-cudnn8.8_config_python3.10"
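
For context, these named configs are selected with Bazel's --config flag; a minimal sketch of an invocation using one of the remaining Python configs (the target label here is illustrative, not part of this change):

    bazel build --config=rbe_linux_cuda12.0.1_nvcc_py3.9 //jaxlib/tools:build_wheel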


@@ -1,35 +0,0 @@
FROM gcr.io/tensorflow-testing/nosla-cuda10.0-cudnn7-ubuntu16.04-manylinux2010
LABEL maintainer "Matt Johnson <mattjj@google.com>"
WORKDIR /
# TODO(skyewm): delete the following line when no longer necessary.
RUN rm -f /etc/apt/sources.list.d/jonathonf-ubuntu-python-3_6-xenial.list
RUN apt-get update
RUN apt-get install libffi-dev
RUN git clone --branch v1.2.21 https://github.com/pyenv/pyenv.git /pyenv
ENV PYENV_ROOT /pyenv
RUN /pyenv/bin/pyenv install 3.7.2
RUN /pyenv/bin/pyenv install 3.8.0
RUN /pyenv/bin/pyenv install 3.9.0
# We pin numpy to the minimum permitted version to avoid compatibility issues.
RUN eval "$(/pyenv/bin/pyenv init -)" && /pyenv/bin/pyenv local 3.7.2 && pip install numpy==1.19.5 setuptools wheel six auditwheel
RUN eval "$(/pyenv/bin/pyenv init -)" && /pyenv/bin/pyenv local 3.8.0 && pip install numpy==1.19.5 setuptools wheel six auditwheel
RUN eval "$(/pyenv/bin/pyenv init -)" && /pyenv/bin/pyenv local 3.9.0 && pip install numpy==1.19.5 setuptools wheel six auditwheel
# Change the CUDA version if it doesn't match the installed version.
ARG JAX_CUDA_VERSION=10.0
COPY install_cuda.sh /install_cuda.sh
RUN chmod +x /install_cuda.sh
RUN /bin/bash -c 'if [[ ! "$CUDA_VERSION" =~ ^$JAX_CUDA_VERSION.*$ ]]; then \
/install_cuda.sh $JAX_CUDA_VERSION; \
fi'
WORKDIR /
COPY build_wheel_docker_entrypoint.sh /build_wheel_docker_entrypoint.sh
RUN chmod +x /build_wheel_docker_entrypoint.sh
WORKDIR /build
ENV TEST_TMPDIR /build
ENTRYPOINT ["/build_wheel_docker_entrypoint.sh"]


@@ -1,11 +0,0 @@
#!/bin/bash
set -xev
source "$(dirname $(realpath $0))/build_jaxlib_wheels_helpers.sh"
PYTHON_VERSIONS="3.7.2 3.8.0 3.9.0"
CUDA_VERSIONS="10.1 10.2 11.0 11.1"
CUDA_VARIANTS="cuda" # "cuda-included"
build_cuda_wheels "$PYTHON_VERSIONS" "$CUDA_VERSIONS" "$CUDA_VARIANTS"
build_nocuda_wheels "$PYTHON_VERSIONS"


@@ -1,33 +0,0 @@
#!/bin/bash
build_cuda_wheels() {
local PYTHON_VERSIONS=$1
local CUDA_VERSIONS=$2
local CUDA_VARIANTS=$3
mkdir -p dist
for CUDA_VERSION in $CUDA_VERSIONS
do
docker build -t jaxbuild jax/build/ --build-arg JAX_CUDA_VERSION=$CUDA_VERSION
for PYTHON_VERSION in $PYTHON_VERSIONS
do
for CUDA_VARIANT in $CUDA_VARIANTS
do
mkdir -p dist/${CUDA_VARIANT}${CUDA_VERSION//.}
docker run --tmpfs /build:exec --rm -v $(pwd)/dist:/dist jaxbuild $PYTHON_VERSION $CUDA_VARIANT $CUDA_VERSION
mv -f dist/*.whl dist/${CUDA_VARIANT}${CUDA_VERSION//.}/
done
done
done
}
build_nocuda_wheels() {
local PYTHON_VERSIONS=$1
mkdir -p dist
docker build -t jaxbuild jax/build/
for PYTHON_VERSION in $PYTHON_VERSIONS
do
mkdir -p dist/nocuda/
docker run --tmpfs /build:exec --rm -v $(pwd)/dist:/dist jaxbuild $PYTHON_VERSION nocuda
mv -f dist/*.whl dist/nocuda/
done
}


@@ -1,49 +0,0 @@
#!/bin/bash
set -e
# Script that builds wheels for a JAX release on Mac OS X.
# Builds wheels for multiple Python versions, using pyenv instead of Docker.
# Usage: run from root of JAX source tree as:
# build/build_jaxlib_wheels_macos.sh
# The wheels will end up in dist/
#
# Requires pyenv, pyenv-virtualenv (e.g., from Homebrew). If you have Homebrew
# installed, you can install these with:
# brew install pyenv pyenv-virtualenv
#
# May also need to install XCode command line tools to fix zlib build problem:
# https://github.com/pyenv/pyenv/issues/1219
if ! pyenv --version 2>/dev/null ;then
echo "Error: You need to install pyenv and pyenv-virtualenv"
exit 1
fi
eval "$(pyenv init -)"
build_jax () {
PY_VERSION="$1"
NUMPY_VERSION="$2"
echo -e "\nBuilding JAX for Python ${PY_VERSION}"
echo "NumPy version ${NUMPY_VERSION}"
pyenv install -s "${PY_VERSION}"
VENV="jax-build-${PY_VERSION}"
if pyenv virtualenvs | grep "${VENV}" ;then
pyenv virtualenv-delete -f "${VENV}"
fi
pyenv virtualenv "${PY_VERSION}" "${VENV}"
pyenv activate "${VENV}"
# We pin the Numpy wheel to a version < 1.16.0 for Python releases prior to
# 3.8, because Numpy extensions built at 1.16.0 are not backward compatible to
# earlier Numpy versions.
pip install numpy==$NUMPY_VERSION wheel future six
rm -fr build/build
python build/build.py --output_path=dist/
pyenv deactivate
pyenv virtualenv-delete -f "${VENV}"
}
rm -fr dist
build_jax 3.7.2 1.19.5
build_jax 3.8.0 1.19.5
build_jax 3.9.0 1.19.5


@@ -1,63 +0,0 @@
#!/bin/bash
set -xev
if [ ! -d "/dist" ]
then
echo "/dist must be mounted to produce output"
exit 1
fi
export CC=/dt7/usr/bin/gcc
export GCC_HOST_COMPILER_PATH=/dt7/usr/bin/gcc
export PYENV_ROOT="/pyenv"
export PATH="$PYENV_ROOT/bin:$PATH"
eval "$(pyenv init -)"
PY_VERSION="$1"
echo "Python version $PY_VERSION"
git clone https://github.com/google/jax /build/jax
cd /build/jax/build
mkdir /build/tmp
mkdir /build/root
export TMPDIR=/build/tmp
usage() {
echo "usage: ${0##*/} [py2|py3] [cuda-included|cuda|nocuda]"
exit 1
}
if [[ $# -lt 2 ]]
then
usage
fi
# Builds and activates a specific Python version.
pyenv local "$PY_VERSION"
# Workaround for https://github.com/bazelbuild/bazel/issues/9254
export BAZEL_LINKLIBS="-lstdc++"
export JAX_CUDA_VERSION=$3
case $2 in
cuda-included)
python build.py --enable_cuda --bazel_startup_options="--output_user_root=/build/root"
python include_cuda.py
;;
cuda)
python build.py --enable_cuda --bazel_startup_options="--output_user_root=/build/root"
;;
nocuda)
python build.py --enable_tpu --bazel_startup_options="--output_user_root=/build/root"
;;
*)
usage
esac
if ! python -m auditwheel show dist/jaxlib-*.whl | egrep 'platform tag: "(manylinux2010_x86_64|manylinux_2_12_x86_64)"' > /dev/null; then
# Print output for debugging
python -m auditwheel show dist/jaxlib-*.whl
echo "jaxlib wheel is not manylinux2010 compliant"
exit 1
fi
cp -r dist/* /dist


@@ -1,54 +0,0 @@
#!/bin/bash
set -xe
CUDA_VERSION=$1
LIBCUDNN=libcudnn7
if [ $CUDA_VERSION = "10.0" ]; then
CUBLAS=libcublas10
CUBLAS_DEV=libcublas-dev
elif [ $CUDA_VERSION = "10.1" ]; then
# Have to pin to libcublas10=10.2.1.243-1 due to bug in TF, see
# https://github.com/tensorflow/tensorflow/issues/9489#issuecomment-562394257
CUBLAS=libcublas10=10.2.1.243-1
CUBLAS_DEV=libcublas-dev=10.2.1.243-1
elif [ $CUDA_VERSION = "10.2" ]; then
CUBLAS=libcublas10
CUBLAS_DEV=libcublas-dev
CUDNN_VERSION=7.6.5.32
elif [ $CUDA_VERSION = "11.0" ]; then
CUBLAS=libcublas-11-0
CUBLAS_DEV=libcublas-dev-11-0
CUDNN_VERSION=8.0.5.39
LIBCUDNN=libcudnn8
elif [ $CUDA_VERSION = "11.1" ]; then
CUBLAS=libcublas-11-1
CUBLAS_DEV=libcublas-dev-11-1
CUDNN_VERSION=8.0.5.39
LIBCUDNN=libcudnn8
elif [ $CUDA_VERSION = "11.2" ]; then
CUBLAS=libcublas-11-2
CUBLAS_DEV=libcublas-dev-11-2
CUDNN_VERSION=8.1.0.77
LIBCUDNN=libcudnn8
else
echo "Unsupported CUDA version: $CUDA_VERSION"
exit 1
fi
echo "Installing cuda version: $CUDA_VERSION"
echo "cudnn version: $CUDNN_VERSION"
apt-get update
apt-get remove -y --allow-change-held-packages cuda-license-10-0 libcudnn7 libcudnn8 libnccl2
apt-get install -y --no-install-recommends --allow-downgrades \
$CUBLAS \
$CUBLAS_DEV \
cuda-nvml-dev-$CUDA_VERSION \
cuda-command-line-tools-$CUDA_VERSION \
cuda-libraries-dev-$CUDA_VERSION \
cuda-minimal-build-$CUDA_VERSION \
$LIBCUDNN=$CUDNN_VERSION-1+cuda$CUDA_VERSION \
$LIBCUDNN-dev=$CUDNN_VERSION-1+cuda$CUDA_VERSION
rm -f /usr/local/cuda
ln -s /usr/local/cuda-$CUDA_VERSION /usr/local/cuda


@@ -34,7 +34,7 @@ Follow these steps to contribute code:
[repository page](http://www.github.com/google/jax). This creates
a copy of the JAX repository in your own account.
3. Install Python >= 3.8 locally in order to run tests.
3. Install Python >= 3.9 locally in order to run tests.
4. `pip` installing your fork from source. This allows you to modify the code
and immediately test it out:


@@ -76,7 +76,6 @@ setup(
url='https://github.com/google/jax',
license='Apache-2.0',
classifiers=[
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",


@@ -100,8 +100,7 @@ PyObject* SafeMap(PyObject* self, PyObject* const* args, Py_ssize_t nargs) {
return list.release().ptr();
}
// TODO(phawkins): use PyObject_Vectorcall after dropping Python 3.8 support
py::object out = py::reinterpret_steal<py::object>(_PyObject_Vectorcall(
py::object out = py::reinterpret_steal<py::object>(PyObject_Vectorcall(
fn, &values[1], (nargs - 1) | PY_VECTORCALL_ARGUMENTS_OFFSET,
/*kwnames=*/nullptr));
if (PyErr_Occurred()) {
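
For context, _PyObject_Vectorcall was the provisional CPython 3.8 spelling of the vectorcall API, and PyObject_Vectorcall is the public name available from CPython 3.9 onward, so the rename becomes possible once 3.8 support is dropped. A minimal standalone sketch of the call pattern (not jaxlib code; assumes an existing callable fn and a single positional argument arg):

    // Call fn(arg) through the vectorcall protocol (CPython >= 3.9).
    PyObject* args[] = {arg};
    PyObject* result = PyObject_Vectorcall(fn, args, /*nargsf=*/1, /*kwnames=*/nullptr);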


@@ -138,7 +138,6 @@ setup(
url='https://github.com/google/jax',
license='Apache-2.0',
classifiers=[
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",