commit ece541992ca87226ac9ee69465b7efa75ba8f00d
Author: ben0i0d <ben0i0d@foxmail.com>
Date:   Sat Nov 9 06:33:02 2024 +0800

    add cuda

diff --git a/.gitea/workflows/Flow.yaml b/.gitea/workflows/Flow.yaml
new file mode 100644
index 0000000..89a3a24
--- /dev/null
+++ b/.gitea/workflows/Flow.yaml
@@ -0,0 +1,52 @@
+name: Build
+
+on: [push]
+
+jobs:
+  CUDA_Base:
+    runs-on: runner
+    steps:
+      - name: checkout code
+        uses: https://eoelab.org:1027/actions/checkout@v4
+      - name: CUDA_Base build
+        uses: https://eoelab.org:1027/actions/build-push-action@v6
+        with:
+          context: cuda/base
+          tags: eoelab.org:1027/${{ gitea.repository }}:cuda_base
+  
+  CUDA_Runtime:
+    runs-on: runner
+    needs: CUDA_Base
+    steps:
+      - name: checkout code
+        uses: https://eoelab.org:1027/actions/checkout@v4
+      - name: CUDA_Runtime build
+        uses: https://eoelab.org:1027/actions/build-push-action@v6
+        with:
+          context: cuda/runtime
+          tags: eoelab.org:1027/${{ gitea.repository }}:cuda_runtime
+
+  CUDA_Devel:
+    runs-on: runner
+    needs: CUDA_Runtime
+    steps:
+      - name: checkout code
+        uses: https://eoelab.org:1027/actions/checkout@v4
+      - name: CUDA_Devel build
+        uses: https://eoelab.org:1027/actions/build-push-action@v6
+        with:
+          context: cuda/devel
+          tags: eoelab.org:1027/${{ gitea.repository }}:cuda_devel
+
+  Push:
+    runs-on: runner
+    needs: [CUDA_Devel]
+    steps:
+      - name: login gitea_registry
+        uses: https://eoelab.org:1027/actions/login-action@v3
+        with:
+          registry: eoelab.org:1027
+          username: ${{ secrets.USERNAME }}
+          password: ${{ secrets.PASSWD }}
+      - name: push images to gitea_registry
+        run: docker image push "eoelab.org:1027/${{ gitea.repository }}" --all-tags
\ No newline at end of file
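For local testing, the job graph above (base → runtime → devel, then a push of all tags) corresponds roughly to the manual sequence below. This is a sketch under the assumptions that you are already logged in to the eoelab.org:1027 registry and that ${{ gitea.repository }} resolves to ben0i0d/gpu:

    # Build the three images in dependency order (runtime builds FROM base, devel FROM runtime)
    docker build -t eoelab.org:1027/ben0i0d/gpu:cuda_base    cuda/base
    docker build -t eoelab.org:1027/ben0i0d/gpu:cuda_runtime cuda/runtime
    docker build -t eoelab.org:1027/ben0i0d/gpu:cuda_devel   cuda/devel
    # Push every tag, mirroring the final Push job
    docker image push "eoelab.org:1027/ben0i0d/gpu" --all-tags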
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..abcc2ed
--- /dev/null
+++ b/README.md
@@ -0,0 +1,23 @@
+# Denv
+English | [中文](README_CN.md)
+
+**This doc is for non-CN users**
+
+**dockerhub: https://hub.docker.com/r/ben0i0d/gpu**
+
+**Our Gitea: https://eoelab.org:1027/ben0i0d/gpu**
+
+## Upstream
+* NVIDIA: https://gitlab.com/nvidia/container-images/cuda
+* ROCM: https://github.com/ROCm/ROCm-docker
+
+## Platform
+* OS: Debian 12 (bookworm)
+* ARCH: x86_64
+
+## Version
+* cuda: 12.4
+* rocm: 6.2
+
+## Mirror source
+* apt (ustc): https://mirrors.ustc.edu.cn/help/debian.html
\ No newline at end of file
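A minimal usage sketch, assuming the Docker Hub repository carries the same cuda_base / cuda_runtime / cuda_devel tags as the Gitea registry and that the NVIDIA Container Toolkit is installed on the host:

    # Pull the runtime image and check that the host driver is visible inside the container
    docker pull ben0i0d/gpu:cuda_runtime
    docker run --rm --gpus all ben0i0d/gpu:cuda_runtime nvidia-smi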
diff --git a/README_CN.md b/README_CN.md
new file mode 100644
index 0000000..3a7554d
--- /dev/null
+++ b/README_CN.md
@@ -0,0 +1,25 @@
+# Denv
+[English](README.md) | 中文
+
+**为中国用户的文档**
+
+**dockerhub: https://hub.docker.com/r/ben0i0d/gpu**
+
+**Gitea: https://eoelab.org:1027/ben0i0d/gpu**
+
+## 上游
+* NVIDIA: https://gitlab.com/nvidia/container-images/cuda
+* ROCM: https://github.com/ROCm/ROCm-docker
+
+## 目标平台
+* OS: Debian 12 (bookworm)
+* ARCH: x86_64
+
+## 版本
+* cuda: 12.4
+* rocm: 6.2
+
+## 镜像源
+* apt (ustc): https://mirrors.ustc.edu.cn/help/debian.html
+
+
diff --git a/cuda/base/Dockerfile b/cuda/base/Dockerfile
new file mode 100644
index 0000000..cf848bb
--- /dev/null
+++ b/cuda/base/Dockerfile
@@ -0,0 +1,25 @@
+FROM debian:bookworm-slim
+
+ARG NV_CUDA_CUDART_VERSION=12.4.127-1 \
+    NV_CUDA_COMPAT_PACKAGE=cuda-compat-12-4 \
+    NVARCH=x86_64 
+
+# nvidia-container-runtime
+ENV NVIDIA_VISIBLE_DEVICES=all \
+    NVIDIA_DRIVER_CAPABILITIES=compute,utility \
+    PATH=/usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH} \
+    LD_LIBRARY_PATH=/usr/local/nvidia/lib:/usr/local/nvidia/lib64:${LD_LIBRARY_PATH}
+
+RUN sed -i 's/deb.debian.org/mirrors.ustc.edu.cn/g' /etc/apt/sources.list.d/debian.sources && \
+    apt-get update && apt-get install -y --no-install-recommends gnupg2 curl ca-certificates && \
+    curl -fsSLO https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/${NVARCH}/cuda-keyring_1.1-1_all.deb && \
+    dpkg -i cuda-keyring_1.1-1_all.deb && rm cuda-keyring_1.1-1_all.deb && \
+# For libraries in the cuda-compat-* package: https://docs.nvidia.com/cuda/eula/index.html#attachment-a
+    apt-get update && apt-get install -y --no-install-recommends \
+        cuda-cudart-12-4=${NV_CUDA_CUDART_VERSION} \
+        ${NV_CUDA_COMPAT_PACKAGE} && \
+    apt-get clean && rm -rf /var/lib/apt/lists/* && \
+# Required for nvidia-docker v1
+    echo "/usr/local/nvidia/lib" >> /etc/ld.so.conf.d/nvidia.conf && \
+    echo "/usr/local/nvidia/lib64" >> /etc/ld.so.conf.d/nvidia.conf
+    
diff --git a/cuda/devel/Dockerfile b/cuda/devel/Dockerfile
new file mode 100644
index 0000000..fe18e74
--- /dev/null
+++ b/cuda/devel/Dockerfile
@@ -0,0 +1,31 @@
+FROM eoelab.org:1027/ben0i0d/gpu:cuda_runtime
+
+# NV_CUDA_LIB_VERSION pins the cuda-*-12-4 meta-packages below (12.4.1 release)
+ARG NV_CUDA_LIB_VERSION=12.4.1-1 \
+    NV_CUDA_CUDART_DEV_VERSION=12.4.127-1 \
+    NV_NVML_DEV_VERSION=12.4.127-1 \
+    NV_LIBCUSPARSE_DEV_VERSION=12.3.1.170-1 \
+    NV_LIBNPP_DEV_PACKAGE="libnpp-dev-12-4=12.2.5.30-1" \
+    NV_LIBCUBLAS_DEV_PACKAGE_NAME=libcublas-dev-12-4 \
+    NV_LIBCUBLAS_DEV_PACKAGE="libcublas-dev-12-4=12.4.5.8-1" \
+    NV_CUDA_NSIGHT_COMPUTE_DEV_PACKAGE="cuda-nsight-compute-12-4=12.4.1-1" \
+    NV_NVPROF_DEV_PACKAGE="cuda-nvprof-12-4=12.4.127-1" \
+    NV_LIBNCCL_DEV_PACKAGE_NAME=libnccl-dev \
+    NV_LIBNCCL_DEV_PACKAGE="libnccl-dev=2.21.5-1+cuda12.4"
+
+ENV LIBRARY_PATH=/usr/local/cuda/lib64/stubs:$LIBRARY_PATH
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+        cuda-cudart-dev-12-4=${NV_CUDA_CUDART_DEV_VERSION} \
+        cuda-command-line-tools-12-4=${NV_CUDA_LIB_VERSION} \
+        cuda-minimal-build-12-4=${NV_CUDA_LIB_VERSION} \
+        cuda-libraries-dev-12-4=${NV_CUDA_LIB_VERSION} \
+        cuda-nvml-dev-12-4=${NV_NVML_DEV_VERSION} \
+        ${NV_NVPROF_DEV_PACKAGE} \
+        ${NV_LIBNPP_DEV_PACKAGE} \
+        libcusparse-dev-12-4=${NV_LIBCUSPARSE_DEV_VERSION} \
+        ${NV_LIBCUBLAS_DEV_PACKAGE} \
+        ${NV_LIBNCCL_DEV_PACKAGE} \
+        ${NV_CUDA_NSIGHT_COMPUTE_DEV_PACKAGE} && \
+    apt-get clean && rm -rf /var/lib/apt/lists/* && \
+# Keep apt from auto upgrading the cublas and nccl packages. See https://gitlab.com/nvidia/container-images/cuda/-/issues/88
+    apt-mark hold ${NV_LIBCUBLAS_DEV_PACKAGE_NAME} ${NV_LIBNCCL_DEV_PACKAGE_NAME}
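A quick sanity check for the devel image, as a sketch: cuda-minimal-build-12-4 pulls in the nvcc compiler, so a successful build should report a 12.4 release, and the held cublas/nccl dev packages should show up in apt-mark.

    docker run --rm eoelab.org:1027/ben0i0d/gpu:cuda_devel nvcc --version
    # Confirm the pinned cublas/nccl dev packages are held against upgrades
    docker run --rm eoelab.org:1027/ben0i0d/gpu:cuda_devel apt-mark showhold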
diff --git a/cuda/runtime/Dockerfile b/cuda/runtime/Dockerfile
new file mode 100644
index 0000000..3c7fe0b
--- /dev/null
+++ b/cuda/runtime/Dockerfile
@@ -0,0 +1,27 @@
+FROM eoelab.org:1027/ben0i0d/gpu:cuda_base
+
+# NV_CUDA_LIB_VERSION pins the cuda-libraries-12-4 meta-package below (12.4.1 release)
+ARG NV_CUDA_LIB_VERSION=12.4.1-1 \
+    NV_NVTX_VERSION=12.4.127-1 \
+    NV_LIBNPP_PACKAGE="libnpp-12-4=12.2.5.30-1" \
+    NV_LIBCUSPARSE_VERSION=12.3.1.170-1 \
+    NV_LIBCUBLAS_PACKAGE_NAME=libcublas-12-4 \
+    NV_LIBCUBLAS_PACKAGE="libcublas-12-4=12.4.5.8-1" \
+    NV_LIBNCCL_PACKAGE_NAME=libnccl2 \
+    NV_LIBNCCL_PACKAGE="libnccl2=2.21.5-1+cuda12.4"
+
+# Add entrypoint items
+COPY entrypoint.d/ /opt/nvidia/entrypoint.d/
+COPY nvidia_entrypoint.sh /opt/nvidia/
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+        cuda-libraries-12-4=${NV_CUDA_LIB_VERSION} \
+        ${NV_LIBNPP_PACKAGE} \
+        cuda-nvtx-12-4=${NV_NVTX_VERSION} \
+        libcusparse-12-4=${NV_LIBCUSPARSE_VERSION} \
+        ${NV_LIBCUBLAS_PACKAGE} \
+        ${NV_LIBNCCL_PACKAGE} && \
+    apt-get clean && rm -rf /var/lib/apt/lists/* && \
+# Keep apt from auto upgrading the cublas and nccl packages. See https://gitlab.com/nvidia/container-images/cuda/-/issues/88
+    apt-mark hold ${NV_LIBCUBLAS_PACKAGE_NAME} ${NV_LIBNCCL_PACKAGE_NAME}
+
+ENTRYPOINT ["/opt/nvidia/nvidia_entrypoint.sh"]
\ No newline at end of file
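Because the ENTRYPOINT runs the banner and driver-check parts before the requested command, even a plain run without GPU access prints the container copyright/license text followed by the driver warning. A sketch:

    # Without --gpus, 50-gpu-driver-check.sh emits its warning and sets NVIDIA_CPU_ONLY=1
    docker run --rm eoelab.org:1027/ben0i0d/gpu:cuda_runtime true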
diff --git a/cuda/runtime/entrypoint.d/10-banner.sh b/cuda/runtime/entrypoint.d/10-banner.sh
new file mode 100644
index 0000000..704c477
--- /dev/null
+++ b/cuda/runtime/entrypoint.d/10-banner.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+_banner_file="${BASH_SOURCE[0]/%.sh/.txt}"
+
+# 10-banner.sh allows itself to be skipped if there exists a
+# 10-banner.txt, which will be cat'd next alphabetically
+if [[ ! -f "${_banner_file}" && -n "${NVIDIA_PRODUCT_NAME}" ]]; then
+  echo
+  print_banner_text "=" "${NVIDIA_PRODUCT_NAME}"
+fi
diff --git a/cuda/runtime/entrypoint.d/12-banner.sh b/cuda/runtime/entrypoint.d/12-banner.sh
new file mode 100644
index 0000000..878be43
--- /dev/null
+++ b/cuda/runtime/entrypoint.d/12-banner.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+_prodname_uc=$(echo "${NVIDIA_PRODUCT_NAME}" | tr '[:lower:]' '[:upper:]' | sed 's/ /_/g' | sed 's/^NVIDIA_//')  # Product name
+_compver="${_prodname_uc}_VERSION"        # Upstream component version variable name
+
+echo
+[ -n "${!_compver}" ] && echo "${NVIDIA_PRODUCT_NAME} Version ${!_compver}"
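The version line relies on bash indirect expansion: the product name is upper-cased into a variable name and then dereferenced with ${!_compver}. A standalone sketch of the same lookup (the variable values here are only examples):

    NVIDIA_PRODUCT_NAME="NVIDIA CUDA"   # hypothetical values for illustration
    CUDA_VERSION="12.4.1"
    _prodname_uc=$(echo "${NVIDIA_PRODUCT_NAME}" | tr '[:lower:]' '[:upper:]' | sed 's/ /_/g; s/^NVIDIA_//')
    _compver="${_prodname_uc}_VERSION"  # -> CUDA_VERSION
    echo "${NVIDIA_PRODUCT_NAME} Version ${!_compver}"   # -> NVIDIA CUDA Version 12.4.1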
diff --git a/cuda/runtime/entrypoint.d/15-container-copyright.txt b/cuda/runtime/entrypoint.d/15-container-copyright.txt
new file mode 100644
index 0000000..33a5751
--- /dev/null
+++ b/cuda/runtime/entrypoint.d/15-container-copyright.txt
@@ -0,0 +1,2 @@
+
+Container image Copyright (c) 2016-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
diff --git a/cuda/runtime/entrypoint.d/30-container-license.txt b/cuda/runtime/entrypoint.d/30-container-license.txt
new file mode 100644
index 0000000..0c61b70
--- /dev/null
+++ b/cuda/runtime/entrypoint.d/30-container-license.txt
@@ -0,0 +1,6 @@
+
+This container image and its contents are governed by the NVIDIA Deep Learning Container License.
+By pulling and using the container, you accept the terms and conditions of this license:
+https://developer.nvidia.com/ngc/nvidia-deep-learning-container-license
+
+A copy of this license is made available in this container at /NGC-DL-CONTAINER-LICENSE for your convenience.
diff --git a/cuda/runtime/entrypoint.d/50-gpu-driver-check.sh b/cuda/runtime/entrypoint.d/50-gpu-driver-check.sh
new file mode 100644
index 0000000..86ba39e
--- /dev/null
+++ b/cuda/runtime/entrypoint.d/50-gpu-driver-check.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright (c) 2017-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+# Check if libcuda.so.1 -- the CUDA driver -- is present in the ld.so cache or in LD_LIBRARY_PATH
+_LIBCUDA_FROM_LD_CACHE=$(ldconfig -p | grep libcuda.so.1)
+_LIBCUDA_FROM_LD_LIBRARY_PATH=$( ( IFS=: ; for i in ${LD_LIBRARY_PATH}; do ls $i/libcuda.so.1 2>/dev/null | grep -v compat; done) )
+_LIBCUDA_FOUND="${_LIBCUDA_FROM_LD_CACHE}${_LIBCUDA_FROM_LD_LIBRARY_PATH}"
+
+# Check if /dev/nvidiactl (like on Linux) or /dev/dxg (like on WSL2) or /dev/nvgpu (like on Tegra) is present
+_DRIVER_FOUND=$(ls /dev/nvidiactl /dev/dxg /dev/nvgpu 2>/dev/null)
+
+# If either is not true, then GPU functionality won't be usable.
+if [[ -z "${_LIBCUDA_FOUND}" || -z "${_DRIVER_FOUND}" ]]; then
+  echo
+  echo "WARNING: The NVIDIA Driver was not detected.  GPU functionality will not be available."
+  echo "   Use the NVIDIA Container Toolkit to start this container with GPU support; see"
+  echo "   https://docs.nvidia.com/datacenter/cloud-native/ ."
+  export NVIDIA_CPU_ONLY=1
+fi
diff --git a/cuda/runtime/entrypoint.d/80-internal-image.sh b/cuda/runtime/entrypoint.d/80-internal-image.sh
new file mode 100644
index 0000000..d6c6906
--- /dev/null
+++ b/cuda/runtime/entrypoint.d/80-internal-image.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+internal_only_deprecation_notice() {
+    echo
+    print_banner_text "*" "PRIVATE IMAGE"
+    echo "This image is PRIVATE."
+    echo "FOR INTERNAL USE ONLY"
+    echo "DO NOT ship to external customers!"
+}
+
+[ -n "${NVIDIA_CUDA_INTERNAL}" ] && internal_only_deprecation_notice
diff --git a/cuda/runtime/entrypoint.d/90-deprecated-image.sh b/cuda/runtime/entrypoint.d/90-deprecated-image.sh
new file mode 100644
index 0000000..3bd1c52
--- /dev/null
+++ b/cuda/runtime/entrypoint.d/90-deprecated-image.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+# Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+show_deprecation_notice() {
+    echo
+    print_banner_text "*" "DEPRECATION NOTICE!"
+    echo "THIS IMAGE IS DEPRECATED and is scheduled for DELETION."
+    echo "    https://gitlab.com/nvidia/container-images/cuda/blob/master/doc/support-policy.md"
+}
+
+[ -n "${NVIDIA_CUDA_END_OF_LIFE}" ] && show_deprecation_notice
diff --git a/cuda/runtime/nvidia_entrypoint.sh b/cuda/runtime/nvidia_entrypoint.sh
new file mode 100755
index 0000000..a177841
--- /dev/null
+++ b/cuda/runtime/nvidia_entrypoint.sh
@@ -0,0 +1,68 @@
+#!/bin/bash
+# Copyright (c) 2016-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#  * Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+#  * Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+#  * Neither the name of NVIDIA CORPORATION nor the names of its
+#    contributors may be used to endorse or promote products derived
+#    from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Gather parts in alpha order
+shopt -s nullglob extglob
+_SCRIPT_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")"
+declare -a _PARTS=( "${_SCRIPT_DIR}/entrypoint.d"/*@(.txt|.sh) )
+shopt -u nullglob extglob
+
+print_repeats() {
+  local -r char="$1" count="$2"
+  local i
+  for ((i=1; i<=$count; i++)); do echo -n "$char"; done
+  echo
+}
+
+print_banner_text() {
+  # $1: Banner char
+  # $2: Text
+  local banner_char=$1
+  local -r text="$2"
+  local pad="${banner_char}${banner_char}"
+  print_repeats "${banner_char}" $((${#text} + 6))
+  echo "${pad} ${text} ${pad}"
+  print_repeats "${banner_char}" $((${#text} + 6))
+}
+
+# Execute the entrypoint parts
+for _file in "${_PARTS[@]}"; do
+  case "${_file}" in
+    *.txt) cat "${_file}";;
+    *.sh)  source "${_file}";;
+  esac
+done
+
+echo
+
+# This script can either be a wrapper around arbitrary command lines,
+# or it will simply exec bash if no arguments were given
+if [[ $# -eq 0 ]]; then
+  exec "/bin/bash"
+else
+  exec "$@"
+fi
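Since the entrypoint sources every *.sh and cats every *.txt under /opt/nvidia/entrypoint.d/ in alphabetical order, extra startup output can be added without rebuilding the image. A sketch using a bind-mounted part (the 99-custom.sh name and banner text are just examples):

    # The part is sourced by nvidia_entrypoint.sh, so it can call print_banner_text
    printf '%s\n' '#!/bin/bash' 'print_banner_text "-" "custom build info"' > 99-custom.sh
    docker run --rm \
      -v "$PWD/99-custom.sh:/opt/nvidia/entrypoint.d/99-custom.sh:ro" \
      eoelab.org:1027/ben0i0d/gpu:cuda_runtime true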