Use build tags to generate accelerated binaries for CUDA and ROCm on Linux.
The build tag rocm or cuda must be specified for both go generate and go build. ROCm builds need ROCM_PATH set (and the ROCm SDK present), CLBlast installed (for GGML), and CLBlast_DIR set in the environment to the CLBlast cmake directory (likely /usr/lib/cmake/CLBlast). Build tags are also used to switch VRAM detection between the cuda and rocm implementations, using added "accelerator_foo.go" files that contain architecture-specific functions and variables; accelerator_none is used when no tag is set, and the addRunner helper will ignore it if it is the chosen accelerator. Also fix the go generate commands; thanks @deadmeu for testing.
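For context, a minimal sketch of how the per-accelerator files are meant to be consumed. The addRunner helper referenced in the message is not part of this diff, so the chooseRunners function, the CPU runner path, and the fallback logic below are illustrative assumptions rather than code from this commit; only ModelRunner and acceleratedRunner come from the files shown underneath.

// accelerator_usage_sketch.go — hypothetical illustration, not part of this commit.
// Assumes the ModelRunner type and the acceleratedRunner function from package llm below.
package llm

import (
	"log"
	"path"
)

// chooseRunners stands in for the addRunner helper mentioned in the commit message:
// prefer the accelerated runner when the binary was built with -tags cuda or -tags rocm,
// and fall back to the CPU-only runner when acceleratedRunner returns an empty slice
// (the accelerator_none.go case).
func chooseRunners(buildPath string) []ModelRunner {
	runners := acceleratedRunner(buildPath)
	if len(runners) == 0 {
		log.Printf("no accelerator support compiled into this binary, using the CPU runner only")
	}
	// The CPU runner is always appended as a fallback; this path is an assumption.
	runners = append(runners, ModelRunner{Path: path.Join(buildPath, "cpu", "bin", "ollama-runner")})
	return runners
}

Building with the tags would then look like go generate -tags cuda ./... followed by go build -tags cuda . (swap cuda for rocm as needed); with no tag, the stub in accelerator_none.go is compiled instead.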
67  llm/accelerator_cuda.go  Normal file
@@ -0,0 +1,67 @@
//go:build cuda

package llm

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"log"
	"os/exec"
	"path"
	"strconv"
	"strings"

	"github.com/jmorganca/ollama/format"
)

var (
	errNvidiaSMI     = errors.New("warning: gpu support may not be enabled, check that you have installed GPU drivers: nvidia-smi command failed")
	errAvailableVRAM = errors.New("not enough VRAM available, falling back to CPU only")
)

// acceleratedRunner returns the runner for this accelerator given the provided buildPath string.
func acceleratedRunner(buildPath string) []ModelRunner {
	return []ModelRunner{
		ModelRunner{
			Path:        path.Join(buildPath, "cuda", "bin", "ollama-runner"),
			Accelerated: true,
		},
	}
}

// CheckVRAM returns the free VRAM in bytes on Linux machines with NVIDIA GPUs
func CheckVRAM() (int64, error) {
	cmd := exec.Command("nvidia-smi", "--query-gpu=memory.free", "--format=csv,noheader,nounits")
	var stdout bytes.Buffer
	cmd.Stdout = &stdout
	err := cmd.Run()
	if err != nil {
		return 0, errNvidiaSMI
	}

	var freeMiB int64
	scanner := bufio.NewScanner(&stdout)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.Contains(line, "[Insufficient Permissions]") {
			return 0, fmt.Errorf("GPU support may not be enabled, check that you have installed GPU drivers and have the necessary permissions to run nvidia-smi")
		}

		vram, err := strconv.ParseInt(strings.TrimSpace(line), 10, 64)
		if err != nil {
			return 0, fmt.Errorf("failed to parse available VRAM: %v", err)
		}

		freeMiB += vram
	}

	freeBytes := freeMiB * 1024 * 1024
	if freeBytes < 2*format.GigaByte {
		log.Printf("less than 2 GB VRAM available")
		return 0, errAvailableVRAM
	}

	return freeBytes, nil
}
21  llm/accelerator_none.go  Normal file
@@ -0,0 +1,21 @@
//go:build !rocm && !cuda

package llm

import (
	"errors"
)

var (
	errNoAccel = errors.New("no accelerator support in this binary")
)

// acceleratedRunner returns the runner for this accelerator given the provided buildPath string.
func acceleratedRunner(buildPath string) []ModelRunner {
	return make([]ModelRunner, 0, 1)
}

// CheckVRAM is a stub with no accelerator.
func CheckVRAM() (int64, error) {
	return 0, errNoAccel
}
85  llm/accelerator_rocm.go  Normal file
@@ -0,0 +1,85 @@
//go:build rocm

package llm

import (
	"bytes"
	"encoding/csv"
	"errors"
	"fmt"
	"io"
	"log"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"strconv"
	"strings"
)

var errNoAccel = errors.New("rocm-smi command failed")

// acceleratedRunner returns the runner for this accelerator given the provided buildPath string.
func acceleratedRunner(buildPath string) []ModelRunner {
	return []ModelRunner{
		ModelRunner{
			Path:        path.Join(buildPath, "rocm", "bin", "ollama-runner"),
			Accelerated: true,
		},
	}
}

// CheckVRAM returns the available VRAM in bytes on Linux machines with AMD GPUs
func CheckVRAM() (int64, error) {
	rocmHome := os.Getenv("ROCM_PATH")
	if rocmHome == "" {
		rocmHome = os.Getenv("ROCM_HOME")
	}
	if rocmHome == "" {
		log.Println("warning: ROCM_PATH is not set. Trying a likely fallback path, but it is recommended to set this variable in the environment.")
		rocmHome = "/opt/rocm"
	}
	cmd := exec.Command(filepath.Join(rocmHome, "bin/rocm-smi"), "--showmeminfo", "VRAM", "--csv")
	var stdout bytes.Buffer
	cmd.Stdout = &stdout
	err := cmd.Run()
	if err != nil {
		return 0, errNoAccel
	}
	csvData := csv.NewReader(&stdout)
	// llama.cpp and ROCm don't seem to handle splitting the VRAM allocation across cards properly, so find the biggest card instead :(. FIXME.
	totalBiggestCard := int64(0)
	bigCardName := ""
	for {
		record, err := csvData.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			return 0, fmt.Errorf("failed to parse available VRAM: %v", err)
		}
		if !strings.HasPrefix(record[0], "card") {
			continue
		}
		cardTotal, err := strconv.ParseInt(record[1], 10, 64)
		if err != nil {
			return 0, err
		}
		cardUsed, err := strconv.ParseInt(record[2], 10, 64)
		if err != nil {
			return 0, err
		}
		possible := (cardTotal - cardUsed)
		log.Printf("ROCm found %d MiB of available VRAM on device %q", possible/1024/1024, record[0])
		if possible > totalBiggestCard {
			totalBiggestCard = possible
			bigCardName = record[0]
		}
	}
	if totalBiggestCard == 0 {
		log.Printf("found ROCm GPU but failed to parse free VRAM!")
		return 0, errNoAccel
	}
	log.Printf("ROCm selecting device %q", bigCardName)
	return totalBiggestCard, nil
}
24  llm/llama.cpp/generate_linux_cuda.go  Normal file
@@ -0,0 +1,24 @@
//go:build cuda

package llm

//go:generate git submodule init

//go:generate git submodule update --force ggml
//go:generate git -C ggml apply ../patches/0001-add-detokenize-endpoint.patch
//go:generate git -C ggml apply ../patches/0002-34B-model-support.patch
//go:generate git -C ggml apply ../patches/0005-ggml-support-CUDA-s-half-type-for-aarch64-1455-2670.patch
//go:generate git -C ggml apply ../patches/0001-copy-cuda-runtime-libraries.patch

//go:generate rm -rf ggml/build/cuda
//go:generate cmake -S ggml -B ggml/build/cuda -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on
//go:generate cmake --build ggml/build/cuda --target server --config Release
//go:generate mv ggml/build/cuda/bin/server ggml/build/cuda/bin/ollama-runner

//go:generate git -C gguf apply ../patches/0001-copy-cuda-runtime-libraries.patch
//go:generate git -C gguf apply ../patches/0001-update-default-log-target.patch

//go:generate rm -rf gguf/build/cuda
//go:generate cmake -S gguf -B gguf/build/cuda -DLLAMA_CUBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off
//go:generate cmake --build gguf/build/cuda --target server --config Release
//go:generate mv gguf/build/cuda/bin/server gguf/build/cuda/bin/ollama-runner
25  llm/llama.cpp/generate_linux_rocm.go  Normal file
@@ -0,0 +1,25 @@
//go:build rocm

package llm

//go:generate git submodule init

//go:generate git submodule update --force ggml
//go:generate git -C ggml apply ../patches/0001-add-detokenize-endpoint.patch
//go:generate git -C ggml apply ../patches/0002-34B-model-support.patch
//go:generate git -C ggml apply ../patches/0005-ggml-support-CUDA-s-half-type-for-aarch64-1455-2670.patch
//go:generate git -C ggml apply ../patches/0001-copy-cuda-runtime-libraries.patch

//go:generate git submodule update --force gguf
//go:generate git -C gguf apply ../patches/0001-copy-cuda-runtime-libraries.patch
//go:generate git -C gguf apply ../patches/0001-update-default-log-target.patch

//go:generate rm -rf ggml/build/rocm
//go:generate cmake -S ggml -B ggml/build/rocm -DLLAMA_CLBLAST=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on
//go:generate cmake --build ggml/build/rocm --target server --config Release
//go:generate mv ggml/build/rocm/bin/server ggml/build/rocm/bin/ollama-runner

//go:generate rm -rf gguf/build/rocm
//go:generate cmake -S gguf -B gguf/build/rocm -DLLAMA_HIPBLAS=on -DLLAMA_ACCELERATE=on -DLLAMA_K_QUANTS=on -DLLAMA_NATIVE=off -DLLAMA_AVX=on -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_FMA=off -DLLAMA_F16C=off -DCMAKE_C_COMPILER=$ROCM_PATH/llvm/bin/clang -DCMAKE_CXX_COMPILER=$ROCM_PATH/llvm/bin/clang++ -DAMDGPU_TARGETS='gfx803;gfx900;gfx906:xnack-;gfx908:xnack-;gfx90a:xnack+;gfx90a:xnack-;gfx1010;gfx1012;gfx1030;gfx1100;gfx1101;gfx1102' -DGPU_TARGETS='gfx803;gfx900;gfx906:xnack-;gfx908:xnack-;gfx90a:xnack+;gfx90a:xnack-;gfx1010;gfx1012;gfx1030;gfx1100;gfx1101;gfx1102'
//go:generate cmake --build gguf/build/rocm --target server --config Release
//go:generate mv gguf/build/rocm/bin/server gguf/build/rocm/bin/ollama-runner