Mirror of https://github.com/dogkeeper886/ollama37.git
Synced 2025-12-11 00:07:07 +00:00
Always dynamically load the llm server library
This switches darwin to dynamic loading, and refactors the code now that no static linking of the library is used on any platform
This commit is contained in:
@@ -32,8 +32,15 @@ func CheckVRAM() (int64, error) {
|
||||
|
||||
func GetGPUInfo() GpuInfo {
|
||||
mem, _ := getCPUMem()
|
||||
if runtime.GOARCH == "amd64" {
|
||||
return GpuInfo{
|
||||
Library: "default",
|
||||
Variant: GetCPUVariant(),
|
||||
memInfo: mem,
|
||||
}
|
||||
}
|
||||
 	return GpuInfo{
-		Library: "default",
+		Library: "metal",
 		memInfo: mem,
 	}
 }
|
||||
@@ -45,12 +52,3 @@ func getCPUMem() (memInfo, error) {
|
||||
DeviceCount: 0,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func nativeInit() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func GetCPUVariant() string {
|
||||
// We don't yet have CPU based builds for Darwin...
|
||||
return ""
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user