From 45726333651961576a1ac9ebe32d9bd3fdc920b0 Mon Sep 17 00:00:00 2001
From: pdch <3318195572@qq.com>
Date: Mon, 23 Dec 2024 15:57:27 +0800
Subject: [PATCH] add llama-server
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Update the Dockerfile: fix the working directory path and add a health check and entrypoint.
---
 .gitea/workflows/Flow.yaml | 14 ++++++++++++++
 llama/cpu/Dockerfile       | 30 ++++++++++++++++++++++++++++++
 2 files changed, 44 insertions(+)
 create mode 100644 llama/cpu/Dockerfile
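
Reviewer note: a quick local smoke test for the new image. The image name
llama-cpu, the container name llama, the model path ./models/model.gguf,
and the mount point /models are assumptions for illustration; 8080 is
llama-server's default port:

    # build the image from the new context
    docker build -t llama-cpu llama/cpu
    # run the server; --host 0.0.0.0 makes the published port reachable
    docker run --rm --name llama -p 8080:8080 -v "$PWD/models:/models" \
        llama-cpu -m /models/model.gguf --host 0.0.0.0 --port 8080
    # probe the same endpoint the HEALTHCHECK uses
    curl -f http://localhost:8080/health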

diff --git a/.gitea/workflows/Flow.yaml b/.gitea/workflows/Flow.yaml
index 5e47086..0003bde 100644
--- a/.gitea/workflows/Flow.yaml
+++ b/.gitea/workflows/Flow.yaml
@@ -274,6 +274,20 @@ jobs:
           context: GS/steam/steam_wine
           tags: eoelab.org:1027/${{ gitea.repository }}:steam_wine
 
+  Llama_CPU:
+    runs-on: runner
+    steps:
+      - name: checkout code
+        uses: https://eoelab.org:1027/actions/checkout@v4
+      - name: llama_cpu build
+        uses: https://eoelab.org:1027/actions/build-push-action@v6
+        with:
+          context: llama/cpu
+          build-args: |
+            http_proxy=http://192.168.2.2:7890
+            https_proxy=http://192.168.2.2:7890
+          tags: eoelab.org:1027/${{ gitea.repository }}:llama_cpu
+
   Push:
     runs-on: runner
     needs: [Steam_Wine,CUDA_Devel]
diff --git a/llama/cpu/Dockerfile b/llama/cpu/Dockerfile
new file mode 100644
index 0000000..7e6e0f8
--- /dev/null
+++ b/llama/cpu/Dockerfile
@@ -0,0 +1,30 @@
+FROM eoelab.org:1027/ben0i0d/cenv:base
+
+USER root
+
+# curl is needed at runtime by the HEALTHCHECK below
+RUN apt-get update --yes && apt-get install --yes gcc gdb cmake build-essential git curl libcurl4-openssl-dev && \
+    apt-get clean && rm -rf /var/lib/apt/lists/*
+
+WORKDIR "${HOME}"
+
+# clone to a fixed path so the later WORKDIR, mv, and cleanup steps match
+RUN git clone https://github.com/ggerganov/llama.cpp.git /llama.cpp
+
+WORKDIR /llama.cpp
+
+# portable CPU build: GGML_BACKEND_DL + GGML_CPU_ALL_VARIANTS select the best CPU backend at runtime
+RUN cmake -S . -B build -DGGML_BACKEND_DL=ON -DGGML_NATIVE=OFF -DGGML_CPU_ALL_VARIANTS=ON -DLLAMA_CURL=ON -DCMAKE_BUILD_TYPE=Release -DBUILD_SHARED_LIBS=OFF && \
+    cmake --build build -j $(nproc)
+
+WORKDIR "${HOME}"
+
+# GGML_BACKEND_DL=ON emits the backends as shared libraries that are loaded
+# from the binary's directory at runtime, so keep them next to llama-server
+RUN mv /llama.cpp/build/bin/llama-server /usr/local/bin/llama-server && \
+    find /llama.cpp/build -name "*.so" -exec mv {} /usr/local/bin/ \; && \
+    rm -rf /llama.cpp
+
+HEALTHCHECK CMD [ "curl", "-f", "http://localhost:8080/health" ]
+
+ENTRYPOINT [ "llama-server" ]
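
Reviewer note: once a container is running (named llama in the example
above), Docker reports the HEALTHCHECK result; it flips from "starting"
to "healthy" after /health returns 200:

    docker inspect --format '{{.State.Health.Status}}' llama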