Mirror of https://github.com/dogkeeper886/ollama37.git
Revive windows build
The Windows native setup still needs more work, but this gets it building again, and if you set PATH properly you can run the resulting exe on a CUDA system.
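As a rough sketch of what "set the PATH properly" means (hypothetical paths, not part of this change), a small Go wrapper could prepend the CUDA bin directory to PATH before launching the exe:

package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	// Hypothetical location of the CUDA runtime DLLs; adjust to the installed toolkit.
	cudaBin := `C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v12.3\bin`

	// Prepend it to PATH so Windows DLL resolution can find the CUDA libraries at runtime.
	os.Setenv("PATH", cudaBin+string(os.PathListSeparator)+os.Getenv("PATH"))

	// Launch the freshly built binary with the adjusted environment (exe path is a placeholder).
	cmd := exec.Command(`.\ollama.exe`, "serve")
	cmd.Env = os.Environ()
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		log.Fatal(err)
	}
}

Setting PATH in the shell before running the exe works just as well; the wrapper only illustrates the idea.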
@@ -22,8 +22,8 @@ package llm
#cgo linux LDFLAGS: ${SRCDIR}/llama.cpp/gguf/build/cpu/libllama.a
#cgo linux LDFLAGS: ${SRCDIR}/llama.cpp/gguf/build/cpu/libggml_static.a
#cgo linux LDFLAGS: -lrt -lpthread -ldl -lstdc++ -lm
#cgo windows LDFLAGS: -L${SRCDIR}/llama.cpp/gguf/build/wincuda/dist/bin
#cgo windows LDFLAGS: -lext_server_shared -lpthread
#cgo windows LDFLAGS: -L${SRCDIR}/llama.cpp/gguf/build/wincpu/dist/lib
#cgo windows LDFLAGS: -lcpu_server -lpthread

#include <stdlib.h>
#include "server.h"
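For context, per-OS #cgo directives like the ones in this hunk gate the flags on GOOS, so one Go file can carry the Linux static-archive link lines and the Windows library search path side by side. A minimal, self-contained sketch of that mechanism follows; generic system libraries stand in for the build outputs referenced above, so it is illustrative, not the repository's actual linking setup.

package main

/*
// Flags prefixed with an OS name apply only when GOOS matches, so the Linux
// and Windows link settings below never collide in a single build.
#cgo linux LDFLAGS: -lm -lpthread -ldl
#cgo windows LDFLAGS: -lpthread

#include <math.h>

static double half(double x) { return sqrt(x * x) / 2.0; }
*/
import "C"

import "fmt"

func main() {
	// The C call links against whichever LDFLAGS set matched the target OS.
	fmt.Println(float64(C.half(3.0)))
}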