Mirror of https://github.com/dogkeeper886/ollama37.git
Changes:
- Update tesla-k80-ci.yml to upload build/lib/ollama/ containing the CUDA backend
- Remove all LD_LIBRARY_PATH environment variables (no longer needed with RPATH)
- Test workflows now receive libggml-cuda.so, enabling GPU offload

This fixes the issue where test workflows couldn't offload to the GPU because the CUDA backend library wasn't included in the artifact.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
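The RPATH claim can be checked against the build output before uploading. A minimal verification step, as a sketch (the step name is illustrative and not part of this workflow; the exact RPATH/RUNPATH value depends on how the CMake build embeds it):

- name: Verify RPATH and CUDA backend
  run: |
    # The binary should carry an embedded RPATH/RUNPATH entry, which is
    # what makes the LD_LIBRARY_PATH exports unnecessary at run time.
    readelf -d ollama | grep -E 'RPATH|RUNPATH'
    # The CUDA backend must sit in build/lib/ollama/ so it lands in the
    # uploaded artifact.
    ls -lh build/lib/ollama/libggml-cuda.so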
54 lines · 1.3 KiB · YAML
name: Tesla K80 Build

on:
  workflow_dispatch: # Manual trigger only

jobs:
  build:
    runs-on: self-hosted

    # Use specific labels if you want to target a particular self-hosted runner
    # runs-on: [self-hosted, linux, cuda, tesla-k80]

    timeout-minutes: 60 # Prevent hung jobs

    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0 # Full history for accurate versioning

      - name: Clean previous build
        run: |
          rm -rf build
          rm -f ollama

      - name: Configure CMake
        run: |
          CC=/usr/local/bin/gcc CXX=/usr/local/bin/g++ cmake -B build
        env:
          CMAKE_BUILD_TYPE: Release

      - name: Build C++/CUDA components
        run: |
          CC=/usr/local/bin/gcc CXX=/usr/local/bin/g++ cmake --build build -j$(nproc)
        timeout-minutes: 30

      - name: Build Go binary
        run: |
          go build -v -o ollama .

      - name: Verify binary was created
        run: |
          ls -lh ollama
          ./ollama --version

      - name: Upload ollama binary and libraries as artifact
        uses: actions/upload-artifact@v4
        with:
          name: ollama-binary
          path: |
            ollama
            build/lib/ollama/
          retention-days: 7
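On the consuming side, a test workflow would download the same artifact and run the binary directly. A sketch of such steps (the test workflows themselves are not shown here), assuming download-artifact restores the uploaded paths (ollama and build/lib/ollama/) relative to the workspace root and that the binary's RPATH resolves the libraries from that layout:

- name: Download ollama binary and libraries
  uses: actions/download-artifact@v4
  with:
    name: ollama-binary

- name: Smoke-test the binary
  run: |
    # upload-artifact v4 does not preserve permission bits, so restore the
    # executable bit first.
    chmod +x ollama
    # libggml-cuda.so is resolved via the embedded RPATH; no LD_LIBRARY_PATH
    # export is needed.
    ./ollama --version

The chmod is there because artifacts round-tripped through upload-artifact/download-artifact lose their executable bit; everything else follows from the RPATH change described in the commit message.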